
[Xen-devel] [PATCH v3 10/16] x86/mm: put nested p2m code under CONFIG_HVM



The nested p2m functions are only useful for nested HVM, which can't be
enabled when CONFIG_HVM is false.

Enclose the relevant code and fields in #ifdef CONFIG_HVM. Guard the
calls to np2m_schedule() with nestedhvm_enabled().

Signed-off-by: Wei Liu <wei.liu2@xxxxxxxxxx>
---
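(Illustration for reviewers, not part of the commit.)  A minimal,
self-contained C sketch of the pattern this patch applies: nested-p2m
state and helpers compiled out under CONFIG_HVM, with the remaining
call sites guarded at runtime by nestedhvm_enabled().  The struct
domain, nestedhvm_enabled() and np2m_schedule() definitions below are
simplified stand-ins rather than the real Xen ones, and CONFIG_HVM is
hard-coded so the sketch builds on its own:

#include <stdbool.h>
#include <stdio.h>

#define CONFIG_HVM 1                 /* stand-in for the Kconfig option */

struct domain {                      /* simplified stand-in, not Xen's struct domain */
    bool is_hvm;
    bool nested_virt;
};

static bool nestedhvm_enabled(const struct domain *d)
{
    return d->is_hvm && d->nested_virt;
}

#ifdef CONFIG_HVM
/* Nested-p2m bits only exist in HVM-capable builds, as in p2m.c/p2m.h. */
enum { NP2M_SCHEDLE_IN, NP2M_SCHEDLE_OUT };

static void np2m_schedule(int dir)   /* stub for the real scheduling hook */
{
    printf("np2m_schedule(%s)\n", dir == NP2M_SCHEDLE_IN ? "in" : "out");
}
#endif

static void schedule_out(struct domain *prevd)
{
    /* Runtime guard, as in the domain.c hunks: skip nested-p2m work
     * unless the domain actually uses nested HVM. */
    if ( nestedhvm_enabled(prevd) )
        np2m_schedule(NP2M_SCHEDLE_OUT);
}

int main(void)
{
    struct domain hvm_l1 = { .is_hvm = true,  .nested_virt = true  };
    struct domain pv     = { .is_hvm = false, .nested_virt = false };

    schedule_out(&hvm_l1);   /* nested HVM: np2m state is scheduled out */
    schedule_out(&pv);       /* non-nested domain: nothing to do */
    return 0;
}

The combination is meant to ensure that a !CONFIG_HVM build carries
neither the nested-p2m fields nor the context-switch work.
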
 xen/arch/x86/domain.c        |  6 ++++--
 xen/arch/x86/mm/p2m.c        | 18 ++++++++++++++----
 xen/include/asm-x86/domain.h |  2 ++
 xen/include/asm-x86/p2m.h    |  2 ++
 4 files changed, 22 insertions(+), 6 deletions(-)

diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index 313ebb3221..7c945a2428 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -1691,7 +1691,8 @@ void context_switch(struct vcpu *prev, struct vcpu *next)
     {
         _update_runstate_area(prev);
         vpmu_switch_from(prev);
-        np2m_schedule(NP2M_SCHEDLE_OUT);
+        if ( nestedhvm_enabled(prevd) )
+            np2m_schedule(NP2M_SCHEDLE_OUT);
     }
 
     if ( is_hvm_domain(prevd) && !list_empty(&prev->arch.hvm.tm_list) )
@@ -1758,7 +1759,8 @@ void context_switch(struct vcpu *prev, struct vcpu *next)
 
         /* Must be done with interrupts enabled */
         vpmu_switch_to(next);
-        np2m_schedule(NP2M_SCHEDLE_IN);
+        if ( nestedhvm_enabled(nextd) )
+            np2m_schedule(NP2M_SCHEDLE_IN);
     }
 
     /* Ensure that the vcpu has an up-to-date time base. */
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index 79d0e7203a..7a12cd37e8 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -58,22 +58,22 @@ static int p2m_initialise(struct domain *d, struct p2m_domain *p2m)
 #endif
 
     mm_rwlock_init(&p2m->lock);
-    INIT_LIST_HEAD(&p2m->np2m_list);
     INIT_PAGE_LIST_HEAD(&p2m->pages);
 
 #ifdef CONFIG_HVM
     mm_lock_init(&p2m->pod.lock);
     INIT_PAGE_LIST_HEAD(&p2m->pod.super);
     INIT_PAGE_LIST_HEAD(&p2m->pod.single);
+    INIT_LIST_HEAD(&p2m->np2m_list);
+
+    p2m->np2m_base = P2M_BASE_EADDR;
+    p2m->np2m_generation = 0;
 #endif
 
     p2m->domain = d;
     p2m->default_access = p2m_access_rwx;
     p2m->p2m_class = p2m_host;
 
-    p2m->np2m_base = P2M_BASE_EADDR;
-    p2m->np2m_generation = 0;
-
 #ifdef CONFIG_HVM
     for ( i = 0; i < ARRAY_SIZE(p2m->pod.mrp.list); ++i )
         p2m->pod.mrp.list[i] = gfn_x(INVALID_GFN);
@@ -149,6 +149,7 @@ static void p2m_teardown_hostp2m(struct domain *d)
     }
 }
 
+#ifdef CONFIG_HVM
 static void p2m_teardown_nestedp2m(struct domain *d)
 {
     unsigned int i;
@@ -186,6 +187,7 @@ static int p2m_init_nestedp2m(struct domain *d)
 
     return 0;
 }
+#endif
 
 static void p2m_teardown_altp2m(struct domain *d)
 {
@@ -233,6 +235,7 @@ int p2m_init(struct domain *d)
     if ( rc )
         return rc;
 
+#ifdef CONFIG_HVM
     /* Must initialise nestedp2m unconditionally
      * since nestedhvm_enabled(d) returns false here.
      * (p2m_init runs too early for HVM_PARAM_* options) */
@@ -242,12 +245,15 @@ int p2m_init(struct domain *d)
         p2m_teardown_hostp2m(d);
         return rc;
     }
+#endif
 
     rc = p2m_init_altp2m(d);
     if ( rc )
     {
         p2m_teardown_hostp2m(d);
+#ifdef CONFIG_HVM
         p2m_teardown_nestedp2m(d);
+#endif
     }
 
     return rc;
@@ -699,7 +705,9 @@ void p2m_final_teardown(struct domain *d)
      * we initialise them unconditionally.
      */
     p2m_teardown_altp2m(d);
+#ifdef CONFIG_HVM
     p2m_teardown_nestedp2m(d);
+#endif
 
     /* Iterate over all p2m tables per domain */
     p2m_teardown_hostp2m(d);
@@ -1725,6 +1733,7 @@ void p2m_altp2m_check(struct vcpu *v, uint16_t idx)
         p2m_switch_vcpu_altp2m_by_id(v, idx);
 }
 
+#ifdef CONFIG_HVM
 static struct p2m_domain *
 p2m_getlru_nestedp2m(struct domain *d, struct p2m_domain *p2m)
 {
@@ -1982,6 +1991,7 @@ void np2m_schedule(int dir)
         p2m_unlock(p2m);
     }
 }
+#endif
 
 unsigned long paging_gva_to_gfn(struct vcpu *v,
                                 unsigned long va,
diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
index 4da4353de7..b46cfb0ce4 100644
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -333,9 +333,11 @@ struct arch_domain
         void (*tail)(struct vcpu *);
     } *ctxt_switch;
 
+#ifdef CONFIG_HVM
     /* nestedhvm: translate l2 guest physical to host physical */
     struct p2m_domain *nested_p2m[MAX_NESTEDP2M];
     mm_lock_t nested_p2m_lock;
+#endif
 
     /* altp2m: allow multiple copies of host p2m */
     bool_t altp2m_active;
diff --git a/xen/include/asm-x86/p2m.h b/xen/include/asm-x86/p2m.h
index 3785598f54..20cf3f1a25 100644
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -204,6 +204,7 @@ struct p2m_domain {
 
     p2m_class_t       p2m_class; /* host/nested/alternate */
 
+#ifdef CONFIG_HVM
     /* Nested p2ms only: nested p2m base value that this p2m shadows.
      * This can be cleared to P2M_BASE_EADDR under the per-p2m lock but
      * needs both the per-p2m lock and the per-domain nestedp2m lock
@@ -216,6 +217,7 @@ struct p2m_domain {
      * The host p2m holds the head of the list and the np2ms are
      * threaded on in LRU order. */
     struct list_head   np2m_list;
+#endif
 
     /* Host p2m: Log-dirty ranges registered for the domain. */
     struct rangeset   *logdirty_ranges;
-- 
2.11.0

