[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [Xen-devel] [PATCH v2 4/4] dom_cow is needed for mem-sharing only



Hi Jan,

On 04/06/2019 13:44, Jan Beulich wrote:
A couple of adjustments are needed to code checking for dom_cow, but
since there are pretty few it is probably better to adjust those than
to set up and keep around a never used domain.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>

Acked-by: Julien Grall <julien.grall@xxxxxxx>

Cheers,

---
v2: Use #if/#else. Split out emul-priv-op.c change.
---
While for now this avoids creating the domain on Arm only, Tamas'es
patch switching to CONFIG_MEM_SHARING will make x86 leverage this too.

--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -967,8 +967,8 @@ get_page_from_l1e(
          return flip;
      }
- if ( unlikely( (real_pg_owner != pg_owner) &&
-                   (real_pg_owner != dom_cow) ) )
+    if ( unlikely((real_pg_owner != pg_owner) &&
+                  (!dom_cow || (real_pg_owner != dom_cow))) )
      {
          /*
           * Let privileged domains transfer the right to map their target
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -568,7 +568,8 @@ struct page_info *p2m_get_page_from_gfn(
              }
              else if ( !get_page(page, p2m->domain) &&
                        /* Page could be shared */
-                      (!p2m_is_shared(*t) || !get_page(page, dom_cow)) )
+                      (!dom_cow || !p2m_is_shared(*t) ||
+                       !get_page(page, dom_cow)) )
                  page = NULL;
          }
          p2m_read_unlock(p2m);
@@ -941,7 +942,8 @@ guest_physmap_add_entry(struct domain *d
      /* Then, look for m->p mappings for this range and deal with them */
      for ( i = 0; i < (1UL << page_order); i++ )
      {
-        if ( page_get_owner(mfn_to_page(mfn_add(mfn, i))) == dom_cow )
+        if ( dom_cow &&
+             page_get_owner(mfn_to_page(mfn_add(mfn, i))) == dom_cow )
          {
              /* This is no way to add a shared page to your physmap! */
            gdprintk(XENLOG_ERR, "Adding shared mfn %lx directly to dom%d physmap not allowed.\n",
--- a/xen/common/domain.c
+++ b/xen/common/domain.c
@@ -74,7 +74,9 @@ integer_param("hardware_dom", hardware_d
  /* Private domain structs for DOMID_XEN, DOMID_IO, etc. */
  struct domain *__read_mostly dom_xen;
  struct domain *__read_mostly dom_io;
+#ifdef CONFIG_HAS_MEM_SHARING
  struct domain *__read_mostly dom_cow;
+#endif
  struct vcpu *idle_vcpu[NR_CPUS] __read_mostly;

@@ -547,6 +549,7 @@ void __init setup_special_domains(void)
      if ( IS_ERR(dom_io) )
          panic("Failed to create d[IO]: %ld\n", PTR_ERR(dom_io));
+#ifdef CONFIG_HAS_MEM_SHARING
      /*
       * Initialise our COW domain.
       * This domain owns sharable pages.
@@ -554,6 +557,7 @@ void __init setup_special_domains(void)
      dom_cow = domain_create(DOMID_COW, NULL, false);
      if ( IS_ERR(dom_cow) )
          panic("Failed to create d[COW]: %ld\n", PTR_ERR(dom_cow));
+#endif
  }
void domain_update_node_affinity(struct domain *d)
--- a/xen/common/grant_table.c
+++ b/xen/common/grant_table.c
@@ -1095,7 +1095,7 @@ map_grant_ref(
              host_map_created = true;
          }
      }
-    else if ( owner == rd || owner == dom_cow )
+    else if ( owner == rd || (dom_cow && owner == dom_cow) )
      {
          if ( (op->flags & GNTMAP_device_map) && !(op->flags & GNTMAP_readonly) )
          {
--- a/xen/include/xen/mm.h
+++ b/xen/include/xen/mm.h
@@ -643,7 +643,12 @@ static inline void filtered_flush_tlb_ma
  }
/* Private domain structs for DOMID_XEN, DOMID_IO, etc. */
-extern struct domain *dom_xen, *dom_io, *dom_cow;
+extern struct domain *dom_xen, *dom_io;
+#ifdef CONFIG_HAS_MEM_SHARING
+extern struct domain *dom_cow;
+#else
+# define dom_cow NULL
+#endif
enum XENSHARE_flags {
      SHARE_rw,





--
Julien Grall

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel

 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.