[Xen-devel] [PATCH] x86/hypercall: Make the HVM hcall_preempted boolean common



HVM guests currently use arch.hvm_vcpu.hcall_preempted in struct vcpu to
track hypercall preemption.  Move this boolean to the top level of struct
vcpu, making it common, which will allow it to be reused elsewhere.
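
For context, a long-running hypercall latches preemption via
hypercall_create_continuation().  A minimal sketch of that pattern follows
(illustrative only; do_example_op and __HYPERVISOR_example_op are
hypothetical):

    static long do_example_op(unsigned long start, unsigned long nr)
    {
        unsigned long i;

        for ( i = start; i < nr; ++i )
        {
            /* ... process element i ... */

            if ( hypercall_preempt_check() )
                /*
                 * Record progress in the continuation.  After this patch,
                 * this sets curr->hcall_preempted rather than adjusting
                 * guest state directly.
                 */
                return hypercall_create_continuation(
                    __HYPERVISOR_example_op, "ll", i, nr);
        }

        return 0;
    }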

Alter the PV preemption logic to use this boolean.  This simplifies the code
by removing guest-type-specific knowledge, and removes the risk of
accidentally skipping backwards or forwards multiple times and corrupting
%rip.
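
As a standalone illustration of that risk (plain C, not Xen code), compare
direct %rip arithmetic against the latched boolean when preemption is
requested twice before the next return to guest context:

    #include <stdbool.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned long rip_direct = 0x1000, rip_latched = 0x1000;
        bool hcall_preempted = false;
        int i;

        /* Two preemption requests before the next exit to guest context. */
        for ( i = 0; i < 2; ++i )
        {
            rip_direct -= 2;        /* old model: every call moves %rip */
            hcall_preempted = true; /* new model: idempotent latch */
        }

        if ( hcall_preempted )
            rip_latched -= 2;       /* single, bounded adjustment on exit */

        /* Prints "direct: 0xffc, latched: 0xffe"; only the latter is the
         * address of the 2-byte SYSCALL / INT $0x82 instruction. */
        printf("direct: %#lx, latched: %#lx\n", rip_direct, rip_latched);

        return 0;
    }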

In pv_hypercall() the old_rip bodge can be removed, and the debug-build
parameter clobbering can be keyed off a more obvious condition.
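
Concretely, the debug-build clobbering becomes guarded along these lines
(condensed from the native path in the patch below; the fall-through switch
and poison value follow the existing code):

    #ifndef NDEBUG
        if ( !curr->hcall_preempted )
        {
            /* Deliberately corrupt parameter regs used by this hypercall. */
            switch ( hypercall_args_table[eax].native )
            {
            case 6: regs->r9  = 0xdeadbeefdeadf00dUL;
            case 5: regs->r8  = 0xdeadbeefdeadf00dUL;
            case 4: regs->r10 = 0xdeadbeefdeadf00dUL;
            case 3: regs->rdx = 0xdeadbeefdeadf00dUL;
            case 2: regs->rsi = 0xdeadbeefdeadf00dUL;
            case 1: regs->rdi = 0xdeadbeefdeadf00dUL;
            }
        }
    #endif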

Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
CC: Jan Beulich <JBeulich@xxxxxxxx>
---
 xen/arch/x86/domain.c          | 16 ++--------------
 xen/arch/x86/hvm/hypercall.c   |  8 ++++----
 xen/arch/x86/hypercall.c       | 17 ++++++++++++-----
 xen/include/asm-x86/hvm/vcpu.h |  1 -
 xen/include/xen/sched.h        |  2 ++
 5 files changed, 20 insertions(+), 24 deletions(-)

diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index 71c0e3c..b199c70 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -2198,20 +2198,12 @@ void sync_vcpu_execstate(struct vcpu *v)
 
 void hypercall_cancel_continuation(void)
 {
-    struct cpu_user_regs *regs = guest_cpu_user_regs();
     struct mc_state *mcs = &current->mc_state;
 
     if ( mcs->flags & MCSF_in_multicall )
-    {
         __clear_bit(_MCSF_call_preempted, &mcs->flags);
-    }
     else
-    {
-        if ( is_pv_vcpu(current) )
-            regs->rip += 2; /* skip re-execute 'syscall' / 'int $xx' */
-        else
-            current->arch.hvm_vcpu.hcall_preempted = 0;
-    }
+        current->hcall_preempted = false;
 }
 
 unsigned long hypercall_create_continuation(
@@ -2239,11 +2231,7 @@ unsigned long hypercall_create_continuation(
 
         regs->rax = op;
 
-        /* Ensure the hypercall trap instruction is re-executed. */
-        if ( is_pv_vcpu(curr) )
-            regs->rip -= 2;  /* re-execute 'syscall' / 'int $xx' */
-        else
-            curr->arch.hvm_vcpu.hcall_preempted = 1;
+        curr->hcall_preempted = true;
 
         if ( is_pv_vcpu(curr) ?
              !is_pv_32bit_vcpu(curr) :
diff --git a/xen/arch/x86/hvm/hypercall.c b/xen/arch/x86/hvm/hypercall.c
index 5dbd54a..fe7802b 100644
--- a/xen/arch/x86/hvm/hypercall.c
+++ b/xen/arch/x86/hvm/hypercall.c
@@ -176,7 +176,7 @@ int hvm_hypercall(struct cpu_user_regs *regs)
         return HVM_HCALL_completed;
     }
 
-    curr->arch.hvm_vcpu.hcall_preempted = 0;
+    curr->hcall_preempted = false;
 
     if ( mode == 8 )
     {
@@ -210,7 +210,7 @@ int hvm_hypercall(struct cpu_user_regs *regs)
         curr->arch.hvm_vcpu.hcall_64bit = 0;
 
 #ifndef NDEBUG
-        if ( !curr->arch.hvm_vcpu.hcall_preempted )
+        if ( !curr->hcall_preempted )
         {
             /* Deliberately corrupt parameter regs used by this hypercall. */
             switch ( hypercall_args_table[eax].native )
@@ -254,7 +254,7 @@ int hvm_hypercall(struct cpu_user_regs *regs)
                                                     ebp);
 
 #ifndef NDEBUG
-        if ( !curr->arch.hvm_vcpu.hcall_preempted )
+        if ( !curr->hcall_preempted )
         {
             /* Deliberately corrupt parameter regs used by this hypercall. */
             switch ( hypercall_args_table[eax].compat )
@@ -272,7 +272,7 @@ int hvm_hypercall(struct cpu_user_regs *regs)
 
     HVM_DBG_LOG(DBG_LEVEL_HCALL, "hcall%lu -> %lx", eax, regs->rax);
 
-    if ( curr->arch.hvm_vcpu.hcall_preempted )
+    if ( curr->hcall_preempted )
         return HVM_HCALL_preempted;
 
     if ( unlikely(currd->arch.hvm_domain.qemu_mapcache_invalidate) &&
diff --git a/xen/arch/x86/hypercall.c b/xen/arch/x86/hypercall.c
index 8dd19de..945afa0 100644
--- a/xen/arch/x86/hypercall.c
+++ b/xen/arch/x86/hypercall.c
@@ -141,9 +141,6 @@ static const hypercall_table_t pv_hypercall_table[] = {
 void pv_hypercall(struct cpu_user_regs *regs)
 {
     struct vcpu *curr = current;
-#ifndef NDEBUG
-    unsigned long old_rip = regs->rip;
-#endif
     unsigned long eax;
 
     ASSERT(guest_kernel_mode(curr, regs));
@@ -160,6 +157,8 @@ void pv_hypercall(struct cpu_user_regs *regs)
         return;
     }
 
+    curr->hcall_preempted = false;
+
     if ( !is_pv_32bit_vcpu(curr) )
     {
         unsigned long rdi = regs->rdi;
@@ -191,7 +190,7 @@ void pv_hypercall(struct cpu_user_regs *regs)
         regs->rax = pv_hypercall_table[eax].native(rdi, rsi, rdx, r10, r8, r9);
 
 #ifndef NDEBUG
-        if ( regs->rip == old_rip )
+        if ( !curr->hcall_preempted )
         {
             /* Deliberately corrupt parameter regs used by this hypercall. */
             switch ( hypercall_args_table[eax].native )
@@ -238,7 +237,7 @@ void pv_hypercall(struct cpu_user_regs *regs)
         regs->_eax = pv_hypercall_table[eax].compat(ebx, ecx, edx, esi, edi, ebp);
 
 #ifndef NDEBUG
-        if ( regs->rip == old_rip )
+        if ( !curr->hcall_preempted )
         {
             /* Deliberately corrupt parameter regs used by this hypercall. */
             switch ( hypercall_args_table[eax].compat )
@@ -254,6 +253,14 @@ void pv_hypercall(struct cpu_user_regs *regs)
 #endif
     }
 
+    /*
+     * PV guests use SYSCALL or INT $0x82 to make a hypercall, both of which
+     * have trap semantics.  If the hypercall has been preempted, rewind the
+     * instruction pointer to re-execute the instruction.
+     */
+    if ( curr->hcall_preempted )
+        regs->rip -= 2;
+
     perfc_incr(hypercalls);
 }
 
diff --git a/xen/include/asm-x86/hvm/vcpu.h b/xen/include/asm-x86/hvm/vcpu.h
index e5eeb5f..6d5553d 100644
--- a/xen/include/asm-x86/hvm/vcpu.h
+++ b/xen/include/asm-x86/hvm/vcpu.h
@@ -166,7 +166,6 @@ struct hvm_vcpu {
     bool                debug_state_latch;
     bool                single_step;
 
-    bool                hcall_preempted;
     bool                hcall_64bit;
 
     struct hvm_vcpu_asid n1asid;
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index 32893de..5b62238 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -202,6 +202,8 @@ struct vcpu
     bool             paused_for_shutdown;
     /* VCPU need affinity restored */
     bool             affinity_broken;
+    /* A hypercall has been preempted. */
+    bool             hcall_preempted;
 
 
     /*
-- 
2.1.4

