[Xen-devel] [PATCH v2] x86/MM: Introduce a p2m class.



Use the class to differentiate between host and nested p2m's, and
potentially other classes in the future.

Fix p2m class checks that implicitly assume nested and host are
the only two classes that will ever exist.

Signed-off-by: Ed White <edmund.h.white@xxxxxxxxx>
Acked-by: Tim Deegan <tim@xxxxxxx>

---

Changes in v2:
    - prepend missing area prefix to description
    - don't give enum members specific numeric values
    - constify p2m passed into class checkers
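
A standalone sketch (not part of the patch, and compiled outside the Xen
tree) of why the explicit class checks matter: once a third class exists,
the old "!p2m_is_nestedp2m()" pattern still accepts a non-host p2m on a
host-only path, while "p2m_is_hostp2m()" rejects it.  The p2m_alternate
value and the mock types below are hypothetical, invented only for this
illustration (Xen's bool_t is replaced by C99 bool here).

#include <assert.h>
#include <stdbool.h>

/* Mock-up of the patched declarations; p2m_alternate is a
 * hypothetical future class, not something this patch introduces. */
typedef enum {
    p2m_host,
    p2m_nested,
    p2m_alternate,
} p2m_class_t;

struct p2m_domain {
    p2m_class_t p2m_class;
};

static inline bool p2m_is_hostp2m(const struct p2m_domain *p2m)
{
    return p2m->p2m_class == p2m_host;
}

static inline bool p2m_is_nestedp2m(const struct p2m_domain *p2m)
{
    return p2m->p2m_class == p2m_nested;
}

int main(void)
{
    struct p2m_domain alt = { .p2m_class = p2m_alternate };

    /* Old-style check: "not nested" holds even though this is not
     * the host p2m, so a host-only code path would be entered. */
    assert(!p2m_is_nestedp2m(&alt));

    /* New-style check: the non-host p2m is correctly identified. */
    assert(!p2m_is_hostp2m(&alt));

    return 0;
}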
---
 xen/arch/x86/hvm/hvm.c           |  2 +-
 xen/arch/x86/mm/guest_walk.c     |  2 +-
 xen/arch/x86/mm/hap/guest_walk.c |  4 ++--
 xen/arch/x86/mm/p2m-ept.c        |  4 ++--
 xen/arch/x86/mm/p2m.c            | 10 ++++++----
 xen/include/asm-x86/p2m.h        | 17 ++++++++++++++++-
 6 files changed, 28 insertions(+), 11 deletions(-)

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index c7984d1..4c38a40 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -2859,7 +2859,7 @@ int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla,
     /* Mem sharing: unshare the page and try again */
     if ( npfec.write_access && (p2mt == p2m_ram_shared) )
     {
-        ASSERT(!p2m_is_nestedp2m(p2m));
+        ASSERT(p2m_is_hostp2m(p2m));
         sharing_enomem = 
             (mem_sharing_unshare_page(p2m->domain, gfn, 0) < 0);
         rc = 1;
diff --git a/xen/arch/x86/mm/guest_walk.c b/xen/arch/x86/mm/guest_walk.c
index 1b26175..d8f5a35 100644
--- a/xen/arch/x86/mm/guest_walk.c
+++ b/xen/arch/x86/mm/guest_walk.c
@@ -99,7 +99,7 @@ void *map_domain_gfn(struct p2m_domain *p2m, gfn_t gfn, mfn_t *mfn,
                                  q);
     if ( p2m_is_paging(*p2mt) )
     {
-        ASSERT(!p2m_is_nestedp2m(p2m));
+        ASSERT(p2m_is_hostp2m(p2m));
         if ( page )
             put_page(page);
         p2m_mem_paging_populate(p2m->domain, gfn_x(gfn));
diff --git a/xen/arch/x86/mm/hap/guest_walk.c b/xen/arch/x86/mm/hap/guest_walk.c
index 25d9792..381a196 100644
--- a/xen/arch/x86/mm/hap/guest_walk.c
+++ b/xen/arch/x86/mm/hap/guest_walk.c
@@ -64,7 +64,7 @@ unsigned long hap_p2m_ga_to_gfn(GUEST_PAGING_LEVELS)(
                                      &p2mt, NULL, P2M_ALLOC | P2M_UNSHARE);
     if ( p2m_is_paging(p2mt) )
     {
-        ASSERT(!p2m_is_nestedp2m(p2m));
+        ASSERT(p2m_is_hostp2m(p2m));
         pfec[0] = PFEC_page_paged;
         if ( top_page )
             put_page(top_page);
@@ -106,7 +106,7 @@ unsigned long hap_p2m_ga_to_gfn(GUEST_PAGING_LEVELS)(
             put_page(page);
         if ( p2m_is_paging(p2mt) )
         {
-            ASSERT(!p2m_is_nestedp2m(p2m));
+            ASSERT(p2m_is_hostp2m(p2m));
             pfec[0] = PFEC_page_paged;
             p2m_mem_paging_populate(p2m->domain, gfn_x(gfn));
             return INVALID_GFN;
diff --git a/xen/arch/x86/mm/p2m-ept.c b/xen/arch/x86/mm/p2m-ept.c
index e21a92d..c2d7720 100644
--- a/xen/arch/x86/mm/p2m-ept.c
+++ b/xen/arch/x86/mm/p2m-ept.c
@@ -769,8 +769,8 @@ out:
     if ( needs_sync != sync_off )
         ept_sync_domain(p2m);
 
-    /* For non-nested p2m, may need to change VT-d page table.*/
-    if ( rc == 0 && !p2m_is_nestedp2m(p2m) && need_iommu(d) &&
+    /* For host p2m, may need to change VT-d page table.*/
+    if ( rc == 0 && p2m_is_hostp2m(p2m) && need_iommu(d) &&
          need_modify_vtd_table )
     {
         if ( iommu_hap_pt_share )
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index efa49dd..4434b9a 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -71,6 +71,7 @@ static int p2m_initialise(struct domain *d, struct p2m_domain *p2m)
 
     p2m->domain = d;
     p2m->default_access = p2m_access_rwx;
+    p2m->p2m_class = p2m_host;
 
     p2m->np2m_base = P2M_BASE_EADDR;
 
@@ -158,6 +159,7 @@ static int p2m_init_nestedp2m(struct domain *d)
             p2m_teardown_nestedp2m(d);
             return -ENOMEM;
         }
+        p2m->p2m_class = p2m_nested;
         p2m->write_p2m_entry = nestedp2m_write_p2m_entry;
         list_add(&p2m->np2m_list, &p2m_get_hostp2m(d)->np2m_list);
     }
@@ -202,7 +204,7 @@ int p2m_init(struct domain *d)
 int p2m_is_logdirty_range(struct p2m_domain *p2m, unsigned long start,
                           unsigned long end)
 {
-    ASSERT(!p2m_is_nestedp2m(p2m));
+    ASSERT(p2m_is_hostp2m(p2m));
     if ( p2m->global_logdirty ||
          rangeset_contains_range(p2m->logdirty_ranges, start, end) )
         return 1;
@@ -263,7 +265,7 @@ mfn_t __get_gfn_type_access(struct p2m_domain *p2m, unsigned long gfn,
 
     if ( (q & P2M_UNSHARE) && p2m_is_shared(*t) )
     {
-        ASSERT(!p2m_is_nestedp2m(p2m));
+        ASSERT(p2m_is_hostp2m(p2m));
         /* Try to unshare. If we fail, communicate ENOMEM without
          * sleeping. */
         if ( mem_sharing_unshare_page(p2m->domain, gfn, 0) < 0 )
@@ -431,7 +433,7 @@ int p2m_alloc_table(struct p2m_domain *p2m)
 
     p2m_lock(p2m);
 
-    if ( !p2m_is_nestedp2m(p2m)
+    if ( p2m_is_hostp2m(p2m)
          && !page_list_empty(&d->page_list) )
     {
         P2M_ERROR("dom %d already has memory allocated\n", d->domain_id);
@@ -1708,7 +1710,7 @@ p2m_flush_table(struct p2m_domain *p2m)
 
     /* "Host" p2m tables can have shared entries &c that need a bit more 
      * care when discarding them */
-    ASSERT(p2m_is_nestedp2m(p2m));
+    ASSERT(!p2m_is_hostp2m(p2m));
     /* Nested p2m's do not do pod, hence the asserts (and no pod lock)*/
     ASSERT(page_list_empty(&p2m->pod.super));
     ASSERT(page_list_empty(&p2m->pod.single));
diff --git a/xen/include/asm-x86/p2m.h b/xen/include/asm-x86/p2m.h
index 2cf73ca..8742725 100644
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -172,6 +172,11 @@ typedef unsigned int p2m_query_t;
                              (P2M_RAM_TYPES | P2M_GRANT_TYPES |  \
                               p2m_to_mask(p2m_map_foreign)))
 
+typedef enum {
+    p2m_host,
+    p2m_nested,
+} p2m_class_t;
+
 /* Per-p2m-table state */
 struct p2m_domain {
     /* Lock that protects updates to the p2m */
@@ -188,6 +193,8 @@ struct p2m_domain {
 
     struct domain     *domain;   /* back pointer to domain */
 
+    p2m_class_t       p2m_class; /* host/nested/? */
+
     /* Nested p2ms only: nested p2m base value that this p2m shadows.
      * This can be cleared to P2M_BASE_EADDR under the per-p2m lock but
      * needs both the per-p2m lock and the per-domain nestedp2m lock
@@ -297,7 +304,15 @@ struct p2m_domain *p2m_get_nestedp2m(struct vcpu *v, uint64_t np2m_base);
  */
 struct p2m_domain *p2m_get_p2m(struct vcpu *v);
 
-#define p2m_is_nestedp2m(p2m)   ((p2m) != p2m_get_hostp2m((p2m->domain)))
+static inline bool_t p2m_is_hostp2m(const struct p2m_domain *p2m)
+{
+    return p2m->p2m_class == p2m_host;
+}
+
+static inline bool_t p2m_is_nestedp2m(const struct p2m_domain *p2m)
+{
+    return p2m->p2m_class == p2m_nested;
+}
 
 #define p2m_get_pagetable(p2m)  ((p2m)->phys_table)
 
-- 
1.9.1

