
[Xen-devel] [PATCH 1/8] x86/vm-event: proper vCPU-paused checks at resume

The VM_EVENT_FLAG_VCPU_PAUSED flag in a vm-event response should only be
treated as an indication that the toolstack user wants the vm-event subsystem
to unpause the target vCPU; it must not be relied upon to decide whether the
target vCPU is actually paused.
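
For illustration, a well-behaved monitor echoes the flag back in its response
to request the unpause. A minimal consumer-side sketch follows; put_response()
and ring are hypothetical stand-ins for the consumer's ring handling (done for
real in e.g. tools/tests/xen-access), and req is the vm_event_request_t just
taken off the ring:

    #include <xen/vm_event.h>         /* public vm-event interface */

    vm_event_response_t rsp = {
        .version = VM_EVENT_INTERFACE_VERSION,
        .reason  = req.reason,
        .vcpu_id = req.vcpu_id,
        /*
         * Only a *request* to unpause: nothing stops a buggy or malicious
         * consumer from setting this for a vCPU it never paused, hence the
         * hypervisor-side pause-count checks introduced below.
         */
        .flags   = VM_EVENT_FLAG_VCPU_PAUSED,
    };
    put_response(&ring, &rsp);        /* hypothetical ring helper */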

Accordingly, this patch does the following:

* Fixes (replaces) the old behavior in vm_event_resume, which relied on
  VM_EVENT_FLAG_VCPU_PAUSED to determine whether the target vCPU is paused,
  by actually checking the vCPU's vm-event pause count.

* ASSERTs that the vCPU is paused in vm_event_set_registers and
  vm_event_toggle_singlestep.

* Ignores VM_EVENT_FLAG_DENY in vm_event_register_write_resume if the target
  vCPU is not paused, and adjusts the comment in public/vm_event.h to reflect
  that (see the sketch after this list).
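
Note that VM_EVENT_FLAG_DENY only makes sense for synchronous events, where
the vCPU is still paused awaiting the response, so a consumer rejecting e.g.
an MSR write would pair it with the unpause request. Another minimal sketch,
under the same assumptions as the one above:

    if ( req.reason == VM_EVENT_REASON_MOV_TO_MSR )
        rsp.flags |= VM_EVENT_FLAG_DENY            /* discard the write */
                  |  VM_EVENT_FLAG_VCPU_PAUSED;    /* ... and unpause   */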

Signed-off-by: Corneliu ZUZU <czuzu@xxxxxxxxxxxxxxx>
---
 xen/arch/x86/vm_event.c       | 10 +++++++++-
 xen/common/vm_event.c         |  6 ++++--
 xen/include/public/vm_event.h |  1 +
 3 files changed, 14 insertions(+), 3 deletions(-)

diff --git a/xen/arch/x86/vm_event.c b/xen/arch/x86/vm_event.c
index a9d3861..80f84d6 100644
--- a/xen/arch/x86/vm_event.c
+++ b/xen/arch/x86/vm_event.c
@@ -61,9 +61,11 @@ void vm_event_cleanup_domain(struct domain *d)
 
 void vm_event_toggle_singlestep(struct domain *d, struct vcpu *v)
 {
-    if ( !is_hvm_domain(d) || !atomic_read(&v->vm_event_pause_count) )
+    if ( !is_hvm_domain(d) )
         return;
 
+    ASSERT(atomic_read(&v->vm_event_pause_count));
+
     hvm_toggle_singlestep(v);
 }
 
@@ -75,6 +77,10 @@ void vm_event_register_write_resume(struct vcpu *v, vm_event_response_t *rsp)
 
         ASSERT(w);
 
+        /* The DENY flag requires the vCPU to be paused. */
+        if ( !atomic_read(&v->vm_event_pause_count) )
+            return;
+
         switch ( rsp->reason )
         {
         case VM_EVENT_REASON_MOV_TO_MSR:
@@ -100,6 +106,8 @@ void vm_event_register_write_resume(struct vcpu *v, vm_event_response_t *rsp)
 
 void vm_event_set_registers(struct vcpu *v, vm_event_response_t *rsp)
 {
+    ASSERT(atomic_read(&v->vm_event_pause_count));
+
     v->arch.user_regs.eax = rsp->data.regs.x86.rax;
     v->arch.user_regs.ebx = rsp->data.regs.x86.rbx;
     v->arch.user_regs.ecx = rsp->data.regs.x86.rcx;
diff --git a/xen/common/vm_event.c b/xen/common/vm_event.c
index b303180..17d2716 100644
--- a/xen/common/vm_event.c
+++ b/xen/common/vm_event.c
@@ -417,7 +417,8 @@ void vm_event_resume(struct domain *d, struct vm_event_domain *ved)
         if ( rsp.flags & VM_EVENT_FLAG_ALTERNATE_P2M )
             p2m_altp2m_check(v, rsp.altp2m_idx);
 
-        if ( rsp.flags & VM_EVENT_FLAG_VCPU_PAUSED )
+        /* Check flags which apply only when the vCPU is paused */
+        if ( atomic_read(&v->vm_event_pause_count) )
         {
             if ( rsp.flags & VM_EVENT_FLAG_SET_REGISTERS )
                 vm_event_set_registers(v, &rsp);
@@ -425,7 +426,8 @@ void vm_event_resume(struct domain *d, struct vm_event_domain *ved)
             if ( rsp.flags & VM_EVENT_FLAG_TOGGLE_SINGLESTEP )
                 vm_event_toggle_singlestep(d, v);
 
-            vm_event_vcpu_unpause(v);
+            if ( rsp.flags & VM_EVENT_FLAG_VCPU_PAUSED )
+                vm_event_vcpu_unpause(v);
         }
     }
 }
diff --git a/xen/include/public/vm_event.h b/xen/include/public/vm_event.h
index 9270d52..f888fdd 100644
--- a/xen/include/public/vm_event.h
+++ b/xen/include/public/vm_event.h
@@ -77,6 +77,7 @@
  /*
   * Deny completion of the operation that triggered the event.
   * Currently only useful for MSR, CR0, CR3 and CR4 write events.
+  * Requires the vCPU to be paused already (synchronous events only).
   */
 #define VM_EVENT_FLAG_DENY               (1 << 6)
 /*
-- 
2.5.0

