
[Xen-devel] [PATCH for-4.12 V2] x86/altp2m: fix HVMOP_altp2m_set_domain_state race



HVMOP_altp2m_set_domain_state does not domain_pause() the target
domain, presumably on purpose (it was originally meant to cater to
an in-guest agent, and a domain pausing itself is not a good idea).

This can lead to domain crashes in the vmx_vmexit_handler() code
that checks whether the guest has the ability to switch EPTP without
an exit. That code can __vmread() the host p2m's EPT_POINTER (after
d->arch.altp2m_active has been set, but before the hypercall's
for_each_vcpu() loop has had a chance to run altp2m_vcpu_initialise()
for the vcpu), fail to find that EPTP in the altp2m list, and crash
the domain.
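
For reference, the vmexit-side check that trips over this is roughly
of the following shape (a heavily simplified sketch, not the exact
4.12 code):

    /* vmx_vmexit_handler(), condensed: */
    if ( altp2m_active(v->domain) )
    {
        unsigned long eptp;
        unsigned int idx;

        /*
         * If altp2m_vcpu_initialise() has not yet run for this vcpu,
         * the VMCS still holds the host p2m's EPT_POINTER ...
         */
        __vmread(EPT_POINTER, &eptp);

        /* ... which is not in d->arch.altp2m_eptp[], so this fails ... */
        idx = p2m_find_altp2m_by_eptp(v->domain, eptp);

        /* ... and the domain gets crashed. */
        if ( idx == INVALID_ALTP2M )
            domain_crash(v->domain);
    }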

This patch reorganizes the code so that d->arch.altp2m_active is set
to true only after all the init work has been done, and set to false
before the uninit work begins. This required adding a new bool
parameter to altp2m_vcpu_update_p2m(), which previously relied on
d->arch.altp2m_active already being set by the time it is called.
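
In condensed form, the enable path becomes the following (the disable
path is the mirror image, with the flag cleared before the
altp2m_vcpu_destroy() calls):

    /* HVMOP_altp2m_set_domain_state, enabling altp2m (condensed): */
    if ( !ostate && !(rc = p2m_init_altp2m_by_id(d, 0)) )
    {
        for_each_vcpu( d, v )
            altp2m_vcpu_initialise(v);

        /* Only set once every vcpu has a valid altp2m EPTP in its VMCS. */
        d->arch.altp2m_active = true;
    }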

While at it, I've changed a couple of bool_t's to bool's.

Signed-off-by: Razvan Cojocaru <rcojocaru@xxxxxxxxxxxxxxx>
---
 xen/arch/x86/hvm/hvm.c        | 17 ++++++++++++++---
 xen/arch/x86/hvm/vmx/vmx.c    |  4 ++--
 xen/arch/x86/mm/altp2m.c      |  4 ++--
 xen/arch/x86/mm/p2m.c         |  4 ++--
 xen/include/asm-x86/altp2m.h  |  2 +-
 xen/include/asm-x86/domain.h  |  2 +-
 xen/include/asm-x86/hvm/hvm.h |  6 +++---
 7 files changed, 25 insertions(+), 14 deletions(-)

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 21944e9..4d1d13d 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -4525,7 +4525,7 @@ static int do_altp2m_op(
     case HVMOP_altp2m_set_domain_state:
     {
         struct vcpu *v;
-        bool_t ostate;
+        bool ostate, nstate;
 
         if ( nestedhvm_enabled(d) )
         {
@@ -4534,12 +4534,16 @@ static int do_altp2m_op(
         }
 
         ostate = d->arch.altp2m_active;
-        d->arch.altp2m_active = !!a.u.domain_state.state;
+        nstate = !!a.u.domain_state.state;
 
         /* If the alternate p2m state has changed, handle appropriately */
-        if ( d->arch.altp2m_active != ostate &&
+        if ( nstate != ostate &&
              (ostate || !(rc = p2m_init_altp2m_by_id(d, 0))) )
         {
+            /* First mark altp2m as disabled, then altp2m_vcpu_destroy(). */
+            if ( ostate )
+                d->arch.altp2m_active = false;
+
             for_each_vcpu( d, v )
             {
                 if ( !ostate )
@@ -4550,7 +4554,14 @@ static int do_altp2m_op(
 
             if ( ostate )
                 p2m_flush_altp2m(d);
+            else
+                /*
+                 * Wait until altp2m_vcpu_initialise() is done before marking
+                 * altp2m as being enabled for the domain.
+                 */
+                d->arch.altp2m_active = true;
         }
+
         break;
     }
 
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 64af8bf..e542568 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -2150,13 +2150,13 @@ static bool_t vmx_is_singlestep_supported(void)
     return !!cpu_has_monitor_trap_flag;
 }
 
-static void vmx_vcpu_update_eptp(struct vcpu *v)
+static void vmx_vcpu_update_eptp(struct vcpu *v, bool altp2m_enabled)
 {
     struct domain *d = v->domain;
     struct p2m_domain *p2m = NULL;
     struct ept_data *ept;
 
-    if ( altp2m_active(d) )
+    if ( altp2m_enabled )
         p2m = p2m_get_altp2m(v);
     if ( !p2m )
         p2m = p2m_get_hostp2m(d);
diff --git a/xen/arch/x86/mm/altp2m.c b/xen/arch/x86/mm/altp2m.c
index 930bdc2..c51303b 100644
--- a/xen/arch/x86/mm/altp2m.c
+++ b/xen/arch/x86/mm/altp2m.c
@@ -39,7 +39,7 @@ altp2m_vcpu_initialise(struct vcpu *v)
     vcpu_altp2m(v).p2midx = 0;
     atomic_inc(&p2m_get_altp2m(v)->active_vcpus);
 
-    altp2m_vcpu_update_p2m(v);
+    altp2m_vcpu_update_p2m(v, true);
 
     if ( v != current )
         vcpu_unpause(v);
@@ -58,7 +58,7 @@ altp2m_vcpu_destroy(struct vcpu *v)
 
     altp2m_vcpu_reset(v);
 
-    altp2m_vcpu_update_p2m(v);
+    altp2m_vcpu_update_p2m(v, false);
     altp2m_vcpu_update_vmfunc_ve(v);
 
     if ( v != current )
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index d14ce57..f9e96fc 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -2332,7 +2332,7 @@ bool_t p2m_switch_vcpu_altp2m_by_id(struct vcpu *v, unsigned int idx)
             atomic_dec(&p2m_get_altp2m(v)->active_vcpus);
             vcpu_altp2m(v).p2midx = idx;
             atomic_inc(&p2m_get_altp2m(v)->active_vcpus);
-            altp2m_vcpu_update_p2m(v);
+            altp2m_vcpu_update_p2m(v, altp2m_active(d));
         }
         rc = 1;
     }
@@ -2573,7 +2573,7 @@ int p2m_switch_domain_altp2m_by_id(struct domain *d, unsigned int idx)
                 atomic_dec(&p2m_get_altp2m(v)->active_vcpus);
                 vcpu_altp2m(v).p2midx = idx;
                 atomic_inc(&p2m_get_altp2m(v)->active_vcpus);
-                altp2m_vcpu_update_p2m(v);
+                altp2m_vcpu_update_p2m(v, altp2m_active(d));
             }
 
         rc = 0;
diff --git a/xen/include/asm-x86/altp2m.h b/xen/include/asm-x86/altp2m.h
index 3befcf6..e3df27f 100644
--- a/xen/include/asm-x86/altp2m.h
+++ b/xen/include/asm-x86/altp2m.h
@@ -33,7 +33,7 @@ static inline bool altp2m_active(const struct domain *d)
 /* Alternate p2m VCPU */
 void altp2m_vcpu_initialise(struct vcpu *v);
 void altp2m_vcpu_destroy(struct vcpu *v);
-void altp2m_vcpu_reset(struct vcpu *v);
+void altp2m_vcpu_disable_ve(struct vcpu *v);
 
 static inline uint16_t altp2m_vcpu_idx(const struct vcpu *v)
 {
diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
index 277f99f..a4e8f5a 100644
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -337,7 +337,7 @@ struct arch_domain
     mm_lock_t nested_p2m_lock;
 
     /* altp2m: allow multiple copies of host p2m */
-    bool_t altp2m_active;
+    bool altp2m_active;
     struct p2m_domain *altp2m_p2m[MAX_ALTP2M];
     mm_lock_t altp2m_list_lock;
     uint64_t *altp2m_eptp;
diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
index 0a10b51..de0969b 100644
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -214,7 +214,7 @@ struct hvm_function_table {
     bool_t (*is_singlestep_supported)(void);
 
     /* Alternate p2m */
-    void (*altp2m_vcpu_update_p2m)(struct vcpu *v);
+    void (*altp2m_vcpu_update_p2m)(struct vcpu *v, bool altp2m_enabled);
     void (*altp2m_vcpu_update_vmfunc_ve)(struct vcpu *v);
     bool_t (*altp2m_vcpu_emulate_ve)(struct vcpu *v);
     int (*altp2m_vcpu_emulate_vmfunc)(const struct cpu_user_regs *regs);
@@ -639,10 +639,10 @@ static inline bool hvm_altp2m_supported(void)
 }
 
 /* updates the current hardware p2m */
-static inline void altp2m_vcpu_update_p2m(struct vcpu *v)
+static inline void altp2m_vcpu_update_p2m(struct vcpu *v, bool altp2m_enabled)
 {
     if ( hvm_funcs.altp2m_vcpu_update_p2m )
-        hvm_funcs.altp2m_vcpu_update_p2m(v);
+        hvm_funcs.altp2m_vcpu_update_p2m(v, altp2m_enabled);
 }
 
 /* updates VMCS fields related to VMFUNC and #VE */
-- 
2.7.4

