[Xen-changelog] [xen-unstable] x86 hvm: Make a couple of hypercall state flags per-vcpu

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] x86 hvm: Make a couple of hypercall state flags per-vcpu
From: Xen patchbot-unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Thu, 18 Nov 2010 11:00:38 -0800
Delivery-date: Thu, 18 Nov 2010 11:01:38 -0800
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User Keir Fraser <keir@xxxxxxx>
# Date 1289911355 0
# Node ID 07bbfe6dd27d51f2ace6dc1e127f899eee672ba6
# Parent  379123d8dbab42351f9ebf41bcb0a21ef8440c24
x86 hvm: Make a couple of hypercall state flags per-vcpu

This is a prerequisite for allowing guest descheduling within a
hypercall.
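
Background for the change: a per-CPU flag set when a hypercall is
preempted stays behind on that physical CPU, so a vcpu that is
descheduled mid-hypercall and resumed on a different pCPU would find
the flag unset there (and leave a stale flag behind for whatever runs
next on the old CPU). A per-vcpu flag travels with the guest. A
minimal standalone C toy illustrating the hazard (an illustrative
sketch, not Xen code; it only mirrors the two storage choices):

    #include <stdio.h>

    struct vcpu { int hcall_preempted; };  /* per-vcpu: follows the guest */
    static int hc_preempted[2];            /* per-pCPU: stays on the CPU */

    int main(void)
    {
        struct vcpu v = { 0 };

        /* The hypercall is preempted while the vcpu runs on pCPU 0. */
        hc_preempted[0] = 1;       /* old scheme: flag lands on pCPU 0 */
        v.hcall_preempted = 1;     /* new scheme: flag lands in the vcpu */

        /* The vcpu is descheduled, then resumed on pCPU 1. */
        int resumed_on = 1;
        printf("per-pCPU flag on resume: %d (lost)\n",
               hc_preempted[resumed_on]);
        printf("per-vcpu flag on resume: %d (kept)\n", v.hcall_preempted);
        return 0;
    }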

Signed-off-by: Keir Fraser <keir@xxxxxxx>
---
 xen/arch/x86/domain.c                  |   12 ++++--------
 xen/arch/x86/hvm/hvm.c                 |   26 ++++++++++++--------------
 xen/include/asm-x86/hvm/guest_access.h |    3 ---
 xen/include/asm-x86/hvm/vcpu.h         |    3 +++
 xen/include/asm-x86/hypercall.h        |    9 ---------
 5 files changed, 19 insertions(+), 34 deletions(-)

diff -r 379123d8dbab -r 07bbfe6dd27d xen/arch/x86/domain.c
--- a/xen/arch/x86/domain.c     Tue Nov 16 11:44:09 2010 +0000
+++ b/xen/arch/x86/domain.c     Tue Nov 16 12:42:35 2010 +0000
@@ -1553,8 +1553,6 @@ void sync_vcpu_execstate(struct vcpu *v)
     __arg;                                                                  \
 })
 
-DEFINE_PER_CPU(char, hc_preempted);
-
 unsigned long hypercall_create_continuation(
     unsigned int op, const char *format, ...)
 {
@@ -1583,12 +1581,12 @@ unsigned long hypercall_create_continuat
     {
         regs       = guest_cpu_user_regs();
         regs->eax  = op;
-        /*
-         * For PV guest, we update EIP to re-execute 'syscall' / 'int 0x82';
-         * HVM does not need this since 'vmcall' / 'vmmcall' is fault-like.
-         */
+
+        /* Ensure the hypercall trap instruction is re-executed. */
         if ( !is_hvm_vcpu(current) )
             regs->eip -= 2;  /* re-execute 'syscall' / 'int 0x82' */
+        else
+            current->arch.hvm_vcpu.hcall_preempted = 1;
 
 #ifdef __x86_64__
         if ( !is_hvm_vcpu(current) ?
@@ -1629,8 +1627,6 @@ unsigned long hypercall_create_continuat
                 }
             }
         }
-
-        this_cpu(hc_preempted) = 1;
     }
 
     va_end(args);
diff -r 379123d8dbab -r 07bbfe6dd27d xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c    Tue Nov 16 11:44:09 2010 +0000
+++ b/xen/arch/x86/hvm/hvm.c    Tue Nov 16 12:42:35 2010 +0000
@@ -2028,16 +2028,13 @@ enum hvm_copy_result hvm_fetch_from_gues
                       PFEC_page_present | pfec);
 }
 
+unsigned long copy_to_user_hvm(void *to, const void *from, unsigned int len)
+{
+    int rc;
+
 #ifdef __x86_64__
-DEFINE_PER_CPU(bool_t, hvm_64bit_hcall);
-#endif
-
-unsigned long copy_to_user_hvm(void *to, const void *from, unsigned int len)
-{
-    int rc;
-
-#ifdef __x86_64__
-    if ( !this_cpu(hvm_64bit_hcall) && is_compat_arg_xlat_range(to, len) )
+    if ( !current->arch.hvm_vcpu.hcall_64bit &&
+         is_compat_arg_xlat_range(to, len) )
     {
         memcpy(to, from, len);
         return 0;
@@ -2054,7 +2051,8 @@ unsigned long copy_from_user_hvm(void *t
     int rc;
 
 #ifdef __x86_64__
-    if ( !this_cpu(hvm_64bit_hcall) && is_compat_arg_xlat_range(from, len) )
+    if ( !current->arch.hvm_vcpu.hcall_64bit &&
+         is_compat_arg_xlat_range(from, len) )
     {
         memcpy(to, from, len);
         return 0;
@@ -2567,7 +2565,7 @@ int hvm_do_hypercall(struct cpu_user_reg
         return HVM_HCALL_completed;
     }
 
-    this_cpu(hc_preempted) = 0;
+    curr->arch.hvm_vcpu.hcall_preempted = 0;
 
 #ifdef __x86_64__
     if ( mode == 8 )
@@ -2575,13 +2573,13 @@ int hvm_do_hypercall(struct cpu_user_reg
         HVM_DBG_LOG(DBG_LEVEL_HCALL, "hcall%u(%lx, %lx, %lx, %lx, %lx)", eax,
                     regs->rdi, regs->rsi, regs->rdx, regs->r10, regs->r8);
 
-        this_cpu(hvm_64bit_hcall) = 1;
+        curr->arch.hvm_vcpu.hcall_64bit = 1;
         regs->rax = hvm_hypercall64_table[eax](regs->rdi,
                                                regs->rsi,
                                                regs->rdx,
                                                regs->r10,
                                                regs->r8); 
-        this_cpu(hvm_64bit_hcall) = 0;
+        curr->arch.hvm_vcpu.hcall_64bit = 0;
     }
     else
 #endif
@@ -2601,7 +2599,7 @@ int hvm_do_hypercall(struct cpu_user_reg
     HVM_DBG_LOG(DBG_LEVEL_HCALL, "hcall%u -> %lx",
                 eax, (unsigned long)regs->eax);
 
-    if ( this_cpu(hc_preempted) )
+    if ( curr->arch.hvm_vcpu.hcall_preempted )
         return HVM_HCALL_preempted;
 
     if ( unlikely(curr->domain->arch.hvm_domain.qemu_mapcache_invalidate) &&
diff -r 379123d8dbab -r 07bbfe6dd27d xen/include/asm-x86/hvm/guest_access.h
--- a/xen/include/asm-x86/hvm/guest_access.h    Tue Nov 16 11:44:09 2010 +0000
+++ b/xen/include/asm-x86/hvm/guest_access.h    Tue Nov 16 12:42:35 2010 +0000
@@ -1,8 +1,5 @@
 #ifndef __ASM_X86_HVM_GUEST_ACCESS_H__
 #define __ASM_X86_HVM_GUEST_ACCESS_H__
-
-#include <xen/percpu.h>
-DECLARE_PER_CPU(bool_t, hvm_64bit_hcall);
 
 unsigned long copy_to_user_hvm(void *to, const void *from, unsigned len);
 unsigned long copy_from_user_hvm(void *to, const void *from, unsigned len);
diff -r 379123d8dbab -r 07bbfe6dd27d xen/include/asm-x86/hvm/vcpu.h
--- a/xen/include/asm-x86/hvm/vcpu.h    Tue Nov 16 11:44:09 2010 +0000
+++ b/xen/include/asm-x86/hvm/vcpu.h    Tue Nov 16 12:42:35 2010 +0000
@@ -63,6 +63,9 @@ struct hvm_vcpu {
     bool_t              debug_state_latch;
     bool_t              single_step;
 
+    bool_t              hcall_preempted;
+    bool_t              hcall_64bit;
+
     u64                 asid_generation;
     u32                 asid;
 
diff -r 379123d8dbab -r 07bbfe6dd27d xen/include/asm-x86/hypercall.h
--- a/xen/include/asm-x86/hypercall.h   Tue Nov 16 11:44:09 2010 +0000
+++ b/xen/include/asm-x86/hypercall.h   Tue Nov 16 12:42:35 2010 +0000
@@ -16,15 +16,6 @@
  * invocation of do_mmu_update() is resuming a previously preempted call.
  */
 #define MMU_UPDATE_PREEMPTED          (~(~0U>>1))
-
-/*
- * This gets set to a non-zero value whenever hypercall_create_continuation()
- * is used (outside of multicall context; in multicall context the second call
- * from do_multicall() itself will have this effect). Internal callers of
- * hypercall handlers interested in this condition must clear the flag prior
- * to invoking the respective handler(s).
- */
-DECLARE_PER_CPU(char, hc_preempted);
 
 extern long
 do_event_channel_op_compat(
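
Condensed from the hvm.c hunks above, the dispatch path in
hvm_do_hypercall() now brackets the call with per-vcpu state (64-bit
guest path shown; mode checks, table bounds checks and the 32-bit
path are omitted):

    curr->arch.hvm_vcpu.hcall_preempted = 0;

    curr->arch.hvm_vcpu.hcall_64bit = 1;
    regs->rax = hvm_hypercall64_table[eax](regs->rdi, regs->rsi, regs->rdx,
                                           regs->r10, regs->r8);
    curr->arch.hvm_vcpu.hcall_64bit = 0;

    if ( curr->arch.hvm_vcpu.hcall_preempted )
        return HVM_HCALL_preempted;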

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
