
[Xen-devel] [PATCH 2 of 2] x86/mm: New mem access type to log access


  • To: xen-devel@xxxxxxxxxxxxxxxxxxx
  • From: Andres Lagar-Cavilla <andres@xxxxxxxxxxxxxxxx>
  • Date: Tue, 29 Nov 2011 16:58:25 -0500
  • Cc: andres@xxxxxxxxxxxxxx, keir.xen@xxxxxxxxx, tim@xxxxxxx, JBeulich@xxxxxxxx, adin@xxxxxxxxxxxxxx
  • Delivery-date: Tue, 29 Nov 2011 21:59:11 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xensource.com>

 xen/arch/x86/hvm/hvm.c          |   1 +
 xen/arch/x86/mm/p2m-ept.c       |   1 +
 xen/arch/x86/mm/p2m.c           |  30 +++++++++++++++++++++---------
 xen/include/asm-x86/p2m.h       |   3 +++
 xen/include/public/hvm/hvm_op.h |   3 +++
 5 files changed, 29 insertions(+), 9 deletions(-)


This patch adds a new p2m access type, n2rwx. It allows implementing a "log
access" mode in the hypervisor, akin to log-dirty but for all types of
accesses. Faults caused by this access mode automatically promote the
access rights of the offending p2m entry, place an event in the ring, and
let the vcpu keep executing.

Signed-off-by: Andres Lagar-Cavilla <andres@xxxxxxxxxxxxxxxx>
Signed-off-by: Adin Scannell <adin@xxxxxxxxxxx>

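As an aside, here is a minimal sketch of how a toolstack could put a range of
guest frames into the new mode. It assumes the libxc wrapper
xc_hvm_set_mem_access() with its usual (xch, domid, access, first_pfn, nr)
signature; the domain id and gfn range are placeholders and the snippet is
illustrative only, not part of this patch:

#include <stdint.h>
#include <xenctrl.h>
#include <xen/hvm/hvm_op.h>

/* Illustrative only: mark gfns [first_gfn, first_gfn + nr) as "log access".
 * Any r/w/x fault on these pages promotes the p2m entry to rwx, queues a
 * mem_event, and lets the vcpu keep running. */
static int enable_log_access(domid_t domid, uint64_t first_gfn, uint64_t nr)
{
    xc_interface *xch = xc_interface_open(NULL, NULL, 0);
    int rc;

    if ( xch == NULL )
        return -1;

    rc = xc_hvm_set_mem_access(xch, domid, HVMMEM_access_n2rwx,
                               first_gfn, nr);

    xc_interface_close(xch);
    return rc;
}
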
diff -r d6354df726a0 -r 52d6aede6206 xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -1250,6 +1250,7 @@ int hvm_hap_nested_page_fault(unsigned l
         switch (p2ma) 
         {
         case p2m_access_n:
+        case p2m_access_n2rwx:
         default:
             violation = access_r || access_w || access_x;
             break;
diff -r d6354df726a0 -r 52d6aede6206 xen/arch/x86/mm/p2m-ept.c
--- a/xen/arch/x86/mm/p2m-ept.c
+++ b/xen/arch/x86/mm/p2m-ept.c
@@ -111,6 +111,7 @@ static void ept_p2m_type_to_flags(ept_en
     switch (access) 
     {
         case p2m_access_n:
+        case p2m_access_n2rwx:
             entry->r = entry->w = entry->x = 0;
             break;
         case p2m_access_r:
diff -r d6354df726a0 -r 52d6aede6206 xen/arch/x86/mm/p2m.c
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -1148,6 +1148,11 @@ int p2m_mem_access_check(unsigned long g
         p2m_unlock(p2m);
         return 1;
     }
+    else if ( p2ma == p2m_access_n2rwx )
+    {
+        ASSERT(access_w || access_r || access_x);
+        p2m->set_entry(p2m, gfn, mfn, PAGE_ORDER_4K, p2mt, p2m_access_rwx);
+    }
     p2m_unlock(p2m);
 
     /* Otherwise, check if there is a memory event listener, and send the message along */
@@ -1162,10 +1167,13 @@ int p2m_mem_access_check(unsigned long g
         }
         else
         {
-            /* A listener is not required, so clear the access restrictions */
-            p2m_lock(p2m);
-            p2m->set_entry(p2m, gfn, mfn, PAGE_ORDER_4K, p2mt, p2m_access_rwx);
-            p2m_unlock(p2m);
+            if ( p2ma != p2m_access_n2rwx )
+            {
+                /* A listener is not required, so clear the access restrictions */
+                p2m_lock(p2m);
+                p2m->set_entry(p2m, gfn, mfn, PAGE_ORDER_4K, p2mt, p2m_access_rwx);
+                p2m_unlock(p2m);
+            }
             return 1;
         }
 
@@ -1176,9 +1184,12 @@ int p2m_mem_access_check(unsigned long g
     req.type = MEM_EVENT_TYPE_ACCESS;
     req.reason = MEM_EVENT_REASON_VIOLATION;
 
-    /* Pause the current VCPU unconditionally */
-    vcpu_pause_nosync(v);
-    req.flags |= MEM_EVENT_FLAG_VCPU_PAUSED;    
+    /* Pause the current VCPU */
+    if ( p2ma != p2m_access_n2rwx )
+    {
+        vcpu_pause_nosync(v);
+        req.flags |= MEM_EVENT_FLAG_VCPU_PAUSED;
+    } 
 
     /* Send request to mem event */
     req.gfn = gfn;
@@ -1192,8 +1203,8 @@ int p2m_mem_access_check(unsigned long g
     req.vcpu_id = v->vcpu_id;
 
     (void)mem_event_put_request(d, &d->mem_access, &req);
-    /* VCPU paused */
-    return 0;
+    /* VCPU may be paused, return whether we promoted automatically */
+    return (p2ma == p2m_access_n2rwx);
 }
 
 void p2m_mem_access_resume(struct domain *d)
@@ -1237,6 +1248,7 @@ int p2m_set_mem_access(struct domain *d,
         p2m_access_wx,
         p2m_access_rwx,
         p2m_access_rx2rw,
+        p2m_access_n2rwx,
         p2m->default_access,
     };
 
diff -r d6354df726a0 -r 52d6aede6206 xen/include/asm-x86/p2m.h
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -108,6 +108,9 @@ typedef enum {
     p2m_access_wx    = 6, 
     p2m_access_rwx   = 7,
     p2m_access_rx2rw = 8, /* Special: page goes from RX to RW on write */
+    p2m_access_n2rwx = 9, /* Special: page goes from N to RWX on access, *
+                           * generates an event but does not pause the
+                           * vcpu */
 
     /* NOTE: Assumed to be only 4 bits right now */
 } p2m_access_t;
diff -r d6354df726a0 -r 52d6aede6206 xen/include/public/hvm/hvm_op.h
--- a/xen/include/public/hvm/hvm_op.h
+++ b/xen/include/public/hvm/hvm_op.h
@@ -174,6 +174,9 @@ typedef enum {
     HVMMEM_access_rwx,
     HVMMEM_access_rx2rw,       /* Page starts off as r-x, but automatically
                                 * change to r-w on a write */
+    HVMMEM_access_n2rwx,       /* Log access: starts off as n, automatically 
+                                * goes to rwx, generating an event without
+                                * pausing the vcpu */
     HVMMEM_access_default      /* Take the domain default */
 } hvmmem_access_t;
 /* Notify that a region of memory is to have specific access types */

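A note on the listener side: with n2rwx the request is put on the ring but the
vcpu is left running, so a consumer should key its resume behaviour off
MEM_EVENT_FLAG_VCPU_PAUSED rather than assuming every access event paused a
vcpu. A rough sketch, assuming the public mem_event.h definitions and using
hypothetical get_request()/put_response() helpers standing in for whatever
ring code the listener already has:

#include <string.h>
#include <xen/mem_event.h>

/* Hypothetical helpers supplied by the listener's own ring code. */
extern void get_request(mem_event_request_t *req);
extern void put_response(mem_event_response_t *rsp);

static void handle_access_event(void)
{
    mem_event_request_t req;
    mem_event_response_t rsp;

    get_request(&req);

    /* Log or otherwise act on the access to req.gfn here. */

    memset(&rsp, 0, sizeof(rsp));
    rsp.gfn     = req.gfn;
    rsp.vcpu_id = req.vcpu_id;

    /* For p2m_access_n2rwx faults the hypervisor has already promoted the
     * entry and did not pause the vcpu; only echo the pause flag back (and
     * so ask for an unpause on resume) when the request carried it. */
    if ( req.flags & MEM_EVENT_FLAG_VCPU_PAUSED )
        rsp.flags |= MEM_EVENT_FLAG_VCPU_PAUSED;

    put_response(&rsp);
}
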
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel