
[Xen-devel] [PATCH] mem_event: Allow memory access listener to perform single step execution


  • To: xen-devel@xxxxxxxxxxxxxxxxxxx
  • From: Aravindh Puthiyaparambil <aravindh@xxxxxxxxxxxx>
  • Date: Thu, 26 May 2011 20:39:05 -0700
  • Delivery-date: Thu, 26 May 2011 22:29:41 -0700
  • List-id: Xen developer discussion <xen-devel.lists.xensource.com>

# HG changeset patch
# User Aravindh Puthiyaparambil <aravindh@xxxxxxxxxxxx>
# Date 1306467255 25200
# Node ID 193b5d38ae4b5f4e6c18c68328d67d5cf0d76bef
# Parent  37c77bacb52aa7795978b994f9d371b979b2cb07
mem_event: Allow memory access listener to perform single step execution.
Add a new memory event that handles single stepping. This allows the memory
access listener to handle instructions that modify data within the page they
execute from. It can be enabled in the listener with:

xc_set_hvm_param(xch, domain_id, HVM_PARAM_MEMORY_EVENT_SINGLE_STEP,
                 HVMPME_mode_sync)

The listener can then start single stepping with:

xc_domain_debug_control(xch, domain_id, XEN_DOMCTL_DEBUG_OP_SINGLE_STEP_ON,
                        vcpu_id)

and stop single stepping with:

xc_domain_debug_control(xch, domain_id, XEN_DOMCTL_DEBUG_OP_SINGLE_STEP_OFF,
                        vcpu_id)
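
For illustration, here is a minimal listener-side sketch stitching the above
calls together. This is a sketch only: it assumes an open libxc handle from
xc_interface_open(), elides the mem_event ring setup and the loop that
actually drains events, and reduces error handling to early returns.

#include <stdint.h>
#include <xenctrl.h>

/* Single-step one vcpu of a domain, delivering each step to the memory
 * access listener. The ring consumer that responds to the events is the
 * listener's own and is elided here. */
static int single_step_vcpu(xc_interface *xch, uint32_t domain_id,
                            uint32_t vcpu_id)
{
    /* Ask for synchronous MEM_EVENT_REASON_SINGLESTEP delivery: the vcpu
     * stays paused until the listener responds to each event. */
    if ( xc_set_hvm_param(xch, domain_id,
                          HVM_PARAM_MEMORY_EVENT_SINGLE_STEP,
                          HVMPME_mode_sync) )
        return -1;

    /* Start single stepping on the chosen vcpu... */
    if ( xc_domain_debug_control(xch, domain_id,
                                 XEN_DOMCTL_DEBUG_OP_SINGLE_STEP_ON,
                                 vcpu_id) )
        return -1;

    /* ... drain MEM_EVENT_REASON_SINGLESTEP events from the ring here ... */

    /* ... and stop when done. */
    return xc_domain_debug_control(xch, domain_id,
                                   XEN_DOMCTL_DEBUG_OP_SINGLE_STEP_OFF,
                                   vcpu_id);
}

On VMX this is built on the monitor trap flag: the debug op fails with
-ENOSYS when cpu_has_monitor_trap_flag is clear, and each
EXIT_REASON_MONITOR_TRAP_FLAG exit is reported to the listener as a
MEM_EVENT_REASON_SINGLESTEP event via hvm_memory_event_single_step().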

Signed-off-by: Aravindh Puthiyaparambil <aravindh@xxxxxxxxxxxx>

diff -r 37c77bacb52a -r 193b5d38ae4b xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c    Mon May 23 17:38:28 2011 +0100
+++ b/xen/arch/x86/hvm/hvm.c    Thu May 26 20:34:15 2011 -0700
@@ -3489,7 +3489,8 @@
                     rc = -EPERM;
                 break;
             case HVM_PARAM_MEMORY_EVENT_INT3:
-                if ( d == current->domain ) 
+            case HVM_PARAM_MEMORY_EVENT_SINGLE_STEP:
+                if ( d == current->domain )
                 {
                     rc = -EPERM;
                     break;
@@ -3522,6 +3523,7 @@
                 switch( a.index )
                 {
                 case HVM_PARAM_MEMORY_EVENT_INT3:
+                case HVM_PARAM_MEMORY_EVENT_SINGLE_STEP:
                 {
                     domain_pause(d);
                     domain_unpause(d); /* Causes guest to latch new status */
@@ -4040,11 +4042,21 @@
             rc = -ENOSYS;
             if ( !cpu_has_monitor_trap_flag )
                 break;
-            rc = 0;
-            vcpu_pause(v);
+
+            rc = mem_event_check_ring(v->domain);
+            /* rc == 0: p2m_mem_access_check() has already paused the vcpu */
+            if ( rc < 0 )
+                vcpu_pause(v);
+
             v->arch.hvm_vcpu.single_step =
                 (op == XEN_DOMCTL_DEBUG_OP_SINGLE_STEP_ON);
-            vcpu_unpause(v); /* guest will latch new state */
+
+            /* rc == 0: p2m_mem_access_resume() will unpause the vcpu */
+            if ( rc < 0 )
+            {
+                vcpu_unpause(v); /* guest will latch new state */
+                rc = 0;
+            }
             break;
         default:
             rc = -ENOSYS;
@@ -4133,6 +4145,18 @@
                                   MEM_EVENT_REASON_INT3,
                                   gfn, 0, 1, gla);
 }
+
+int hvm_memory_event_single_step(unsigned long gla)
+{
+    uint32_t pfec = PFEC_page_present;
+    unsigned long gfn;
+    gfn = paging_gva_to_gfn(current, gla, &pfec);
+
+    return hvm_memory_event_traps(current->domain->arch.hvm_domain
+            .params[HVM_PARAM_MEMORY_EVENT_SINGLE_STEP],
+            MEM_EVENT_REASON_SINGLESTEP,
+            gfn, 0, 1, gla);
+}
 #endif /* __x86_64__ */
 
 int nhvm_vcpu_hostrestore(struct vcpu *v, struct cpu_user_regs *regs)
diff -r 37c77bacb52a -r 193b5d38ae4b xen/arch/x86/hvm/vmx/vmcs.c
--- a/xen/arch/x86/hvm/vmx/vmcs.c       Mon May 23 17:38:28 2011 +0100
+++ b/xen/arch/x86/hvm/vmx/vmcs.c       Thu May 26 20:34:15 2011 -0700
@@ -1081,8 +1081,9 @@
         hvm_asid_flush_vcpu(v);
     }
 
-    debug_state = v->domain->debugger_attached
-                  || v->domain->arch.hvm_domain.params[HVM_PARAM_MEMORY_EVENT_INT3];
+    debug_state = v->domain->debugger_attached
+                  || v->domain->arch.hvm_domain.params[HVM_PARAM_MEMORY_EVENT_INT3]
+                  || v->domain->arch.hvm_domain.params[HVM_PARAM_MEMORY_EVENT_SINGLE_STEP];
 
     if ( unlikely(v->arch.hvm_vcpu.debug_state_latch != debug_state) )
     {
diff -r 37c77bacb52a -r 193b5d38ae4b xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c        Mon May 23 17:38:28 2011 +0100
+++ b/xen/arch/x86/hvm/vmx/vmx.c        Thu May 26 20:34:15 2011 -0700
@@ -2494,8 +2494,12 @@
     case EXIT_REASON_MONITOR_TRAP_FLAG:
         v->arch.hvm_vmx.exec_control &= ~CPU_BASED_MONITOR_TRAP_FLAG;
         vmx_update_cpu_exec_control(v);
-        if ( v->domain->debugger_attached && v->arch.hvm_vcpu.single_step )
-            domain_pause_for_debugger();
+        if ( v->arch.hvm_vcpu.single_step )
+        {
+            hvm_memory_event_single_step(regs->eip);
+            if ( v->domain->debugger_attached )
+                domain_pause_for_debugger();
+        }
         break;
 
     case EXIT_REASON_PAUSE_INSTRUCTION:
diff -r 37c77bacb52a -r 193b5d38ae4b xen/include/asm-x86/hvm/hvm.h
--- a/xen/include/asm-x86/hvm/hvm.h     Mon May 23 17:38:28 2011 +0100
+++ b/xen/include/asm-x86/hvm/hvm.h     Thu May 26 20:34:15 2011 -0700
@@ -405,6 +405,10 @@
 void hvm_memory_event_cr4(unsigned long value, unsigned long old);
 /* Called for current VCPU on int3: returns -1 if no listener */
 int hvm_memory_event_int3(unsigned long gla);
+
+/* Called for current VCPU on single step: returns -1 if no listener */
+int hvm_memory_event_single_step(unsigned long gla);
+
 #else
 static inline void hvm_memory_event_cr0(unsigned long value, unsigned long old)
 { }
@@ -414,6 +418,8 @@
 { }
 static inline int hvm_memory_event_int3(unsigned long gla)
 { return 0; }
+static inline int hvm_memory_event_single_step(unsigned long gla)
+{ return 0; }
 #endif
 
 /*
diff -r 37c77bacb52a -r 193b5d38ae4b xen/include/public/hvm/params.h
--- a/xen/include/public/hvm/params.h   Mon May 23 17:38:28 2011 +0100
+++ b/xen/include/public/hvm/params.h   Thu May 26 20:34:15 2011 -0700
@@ -126,10 +126,11 @@
 
 /* Enable blocking memory events, async or sync (pause vcpu until response) 
  * onchangeonly indicates messages only on a change of value */
-#define HVM_PARAM_MEMORY_EVENT_CR0   20
-#define HVM_PARAM_MEMORY_EVENT_CR3   21
-#define HVM_PARAM_MEMORY_EVENT_CR4   22
-#define HVM_PARAM_MEMORY_EVENT_INT3  23
+#define HVM_PARAM_MEMORY_EVENT_CR0          20
+#define HVM_PARAM_MEMORY_EVENT_CR3          21
+#define HVM_PARAM_MEMORY_EVENT_CR4          22
+#define HVM_PARAM_MEMORY_EVENT_INT3         23
+#define HVM_PARAM_MEMORY_EVENT_SINGLE_STEP  25
 
 #define HVMPME_MODE_MASK       (3 << 0)
 #define HVMPME_mode_disabled   0
@@ -140,6 +141,6 @@
 /* Boolean: Enable nestedhvm (hvm only) */
 #define HVM_PARAM_NESTEDHVM    24
 
-#define HVM_NR_PARAMS          25
+#define HVM_NR_PARAMS          26
 
 #endif /* __XEN_PUBLIC_HVM_PARAMS_H__ */
diff -r 37c77bacb52a -r 193b5d38ae4b xen/include/public/mem_event.h
--- a/xen/include/public/mem_event.h    Mon May 23 17:38:28 2011 +0100
+++ b/xen/include/public/mem_event.h    Thu May 26 20:34:15 2011 -0700
@@ -46,6 +46,7 @@
 #define MEM_EVENT_REASON_CR3         3    /* CR3 was hit: gfn is CR3 value */
 #define MEM_EVENT_REASON_CR4         4    /* CR4 was hit: gfn is CR4 value */
 #define MEM_EVENT_REASON_INT3        5    /* int3 was hit: gla/gfn are RIP */
+#define MEM_EVENT_REASON_SINGLESTEP  6    /* single step was invoked: gla/gfn are RIP */
 
 typedef struct mem_event_shared_page {
     uint32_t port;

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel
