[Xen-devel] [PATCH] x86: extend runstate area updates

To: <xen-devel@xxxxxxxxxxxxxxxxxxx>
Subject: [Xen-devel] [PATCH] x86: extend runstate area updates
From: "Jan Beulich" <JBeulich@xxxxxxxxxx>
Date: Tue, 18 Aug 2009 13:48:43 +0100
Delivery-date: Tue, 18 Aug 2009 05:49:10 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-devel-request@lists.xensource.com?subject=help>
List-id: Xen developer discussion <xen-devel.lists.xensource.com>
List-post: <mailto:xen-devel@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=unsubscribe>
Sender: xen-devel-bounces@xxxxxxxxxxxxxxxxxxx
In order to give guests a hint as to whether their vCPUs are currently
scheduled (so they can, e.g., adapt their behavior in spin loops), also
update the runstate area (if registered) when de-scheduling a vCPU.

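As an illustration of the intended use (not part of this patch), a guest-side
spin loop might consult the runstate area it registered via
VCPUOP_register_runstate_memory_area roughly as sketched below; the
vcpu_runstate[] array, hypercall_yield(), and cpu_relax() helpers are
hypothetical, while struct vcpu_runstate_info and RUNSTATE_running come from
the public interface (xen/include/public/vcpu.h).

/*
 * Illustration only -- hypothetical guest-side code, not part of this patch.
 */
extern volatile struct vcpu_runstate_info vcpu_runstate[]; /* one per vCPU */

static void spin_wait(volatile int *lock, unsigned int holder)
{
    while ( *lock )
    {
        /*
         * With this patch the area is also brought up to date when a vCPU
         * is de-scheduled, so seeing anything other than RUNSTATE_running
         * means the lock holder is currently off-CPU and spinning is wasted.
         */
        if ( vcpu_runstate[holder].state != RUNSTATE_running )
            hypercall_yield();   /* e.g. a SCHEDOP_yield wrapper */
        else
            cpu_relax();         /* pause/backoff before re-checking */
    }
}
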
Also fix an oversight in the compat mode implementation of
VCPUOP_register_runstate_memory_area: the translation was done from
v->runstate directly instead of from the snapshot obtained via
vcpu_runstate_get().

Please also consider this for the 3.4 and 3.3 branches.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxxxx>

--- 2009-08-18.orig/xen/arch/x86/domain.c       2009-08-17 11:37:44.000000000 +0200
+++ 2009-08-18/xen/arch/x86/domain.c    2009-08-18 14:18:08.000000000 +0200
@@ -1265,6 +1265,26 @@ static void paravirt_ctxt_switch_to(stru
     }
 }
 
+/* Update per-VCPU guest runstate shared memory area (if registered). */
+static void update_runstate_area(struct vcpu *v)
+{
+    if ( guest_handle_is_null(runstate_guest(v)) )
+        return;
+
+#ifdef CONFIG_COMPAT
+    if ( is_pv_32on64_domain(v->domain) )
+    {
+        struct compat_vcpu_runstate_info info;
+
+        XLAT_vcpu_runstate_info(&info, &v->runstate);
+        __copy_to_guest(v->runstate_guest.compat, &info, 1);
+        return;
+    }
+#endif
+
+    __copy_to_guest(runstate_guest(v), &v->runstate, 1);
+}
+
 static inline int need_full_gdt(struct vcpu *v)
 {
     return (!is_hvm_vcpu(v) && !is_idle_vcpu(v));
@@ -1356,6 +1376,9 @@ void context_switch(struct vcpu *prev, s
         flush_tlb_mask(&dirty_mask);
     }
 
+    if ( prev != next )
+        update_runstate_area(prev);
+
     if ( is_hvm_vcpu(prev) && !list_empty(&prev->arch.hvm_vcpu.tm_list) )
         pt_save_timer(prev);
 
@@ -1395,21 +1418,8 @@ void context_switch(struct vcpu *prev, s
 
     context_saved(prev);
 
-    /* Update per-VCPU guest runstate shared memory area (if registered). */
-    if ( !guest_handle_is_null(runstate_guest(next)) )
-    {
-        if ( !is_pv_32on64_domain(next->domain) )
-            __copy_to_guest(runstate_guest(next), &next->runstate, 1);
-#ifdef CONFIG_COMPAT
-        else
-        {
-            struct compat_vcpu_runstate_info info;
-
-            XLAT_vcpu_runstate_info(&info, &next->runstate);
-            __copy_to_guest(next->runstate_guest.compat, &info, 1);
-        }
-#endif
-    }
+    if ( prev != next )
+        update_runstate_area(next);
 
     schedule_tail(next);
     BUG();
--- 2009-08-18.orig/xen/arch/x86/x86_64/domain.c        2008-05-13 11:02:22.000000000 +0200
+++ 2009-08-18/xen/arch/x86/x86_64/domain.c     2009-08-18 14:18:08.000000000 +0200
@@ -56,7 +56,7 @@ arch_compat_vcpu_op(
             struct vcpu_runstate_info runstate;
 
             vcpu_runstate_get(v, &runstate);
-            XLAT_vcpu_runstate_info(&info, &v->runstate);
+            XLAT_vcpu_runstate_info(&info, &runstate);
         }
         __copy_to_guest(v->runstate_guest.compat, &info, 1);
 

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel
