WARNING - OLD ARCHIVES

This is an archived copy of the Xen.org mailing list, which we have preserved to ensure that existing links to archives are not broken. The live archive, which contains the latest emails, can be found at http://lists.xen.org/
   
 
 
Xen 
 
Home Products Support Community News
 
   
 

xen-changelog

[Xen-changelog] [xen-unstable] mem_access: mem event additions for access

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] mem_access: mem event additions for access
From: Xen patchbot-unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Mon, 17 Jan 2011 07:58:50 -0800
Delivery-date: Mon, 17 Jan 2011 08:05:35 -0800
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User Joe Epstein <jepstein98@xxxxxxxxx>
# Date 1294401280 0
# Node ID 02efc054da7bf67540db5ae4f9e7b0369685d72c
# Parent  f14b296d263f95f101589bc12844e035139dbfa3
mem_access: mem event additions for access

* Adds an ACCESS memory event type, with RESUME as the action.

* Refactors the bits in the memory event to store whether the memory event
  was a read, write, or execute (for access memory events only).  I used
  bits sparingly to keep the structure somewhat the same size.

* Modified VMX to report the needed information in its nested page fault.
  SVM is not implemented in this patch series.

Signed-off-by: Joe Epstein <jepstein98@xxxxxxxxx>
Acked-by: Keir Fraser <keir@xxxxxxx>
Acked-by: Tim Deegan <Tim.Deegan@xxxxxxxxxx>
---
 tools/xenpaging/xenpaging.c      |    2 
 xen/arch/x86/hvm/hvm.c           |   58 +++++++++++++++++++++++-
 xen/arch/x86/hvm/svm/svm.c       |    2 
 xen/arch/x86/hvm/vmx/vmx.c       |    9 +++
 xen/arch/x86/mm/Makefile         |    1 
 xen/arch/x86/mm/mem_access.c     |   59 +++++++++++++++++++++++++
 xen/arch/x86/mm/mem_event.c      |   40 +++++++++++++----
 xen/arch/x86/mm/p2m.c            |   91 +++++++++++++++++++++++++++++++++++++++
 xen/include/asm-x86/hvm/hvm.h    |    7 ++-
 xen/include/asm-x86/mem_access.h |   35 +++++++++++++++
 xen/include/asm-x86/mem_event.h  |    2 
 xen/include/asm-x86/p2m.h        |    7 +++
 xen/include/public/domctl.h      |   15 ++++++
 xen/include/public/mem_event.h   |   26 ++++++++++-
 14 files changed, 335 insertions(+), 19 deletions(-)

diff -r f14b296d263f -r 02efc054da7b tools/xenpaging/xenpaging.c
--- a/tools/xenpaging/xenpaging.c       Fri Jan 07 11:54:36 2011 +0000
+++ b/tools/xenpaging/xenpaging.c       Fri Jan 07 11:54:40 2011 +0000
@@ -658,7 +658,7 @@ int main(int argc, char *argv[])
             {
                 DPRINTF("page already populated (domain = %d; vcpu = %d;"
                         " p2mt = %x;"
-                        " gfn = %"PRIx64"; paused = %"PRId64")\n",
+                        " gfn = %"PRIx64"; paused = %d)\n",
                         paging->mem_event.domain_id, req.vcpu_id,
                         req.p2mt,
                         req.gfn, req.flags & MEM_EVENT_FLAG_VCPU_PAUSED);
diff -r f14b296d263f -r 02efc054da7b xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c    Fri Jan 07 11:54:36 2011 +0000
+++ b/xen/arch/x86/hvm/hvm.c    Fri Jan 07 11:54:40 2011 +0000
@@ -61,6 +61,8 @@
 #include <public/hvm/ioreq.h>
 #include <public/version.h>
 #include <public/memory.h>
+#include <asm/mem_event.h>
+#include <public/mem_event.h>
 
 bool_t __read_mostly hvm_enabled;
 
@@ -1086,14 +1088,64 @@ void hvm_triple_fault(void)
     domain_shutdown(v->domain, SHUTDOWN_reboot);
 }
 
-bool_t hvm_hap_nested_page_fault(unsigned long gfn)
-{
+bool_t hvm_hap_nested_page_fault(unsigned long gpa,
+                                 bool_t gla_valid,
+                                 unsigned long gla,
+                                 bool_t access_valid,
+                                 bool_t access_r,
+                                 bool_t access_w,
+                                 bool_t access_x)
+{
+    unsigned long gfn = gpa >> PAGE_SHIFT;
     p2m_type_t p2mt;
+    p2m_access_t p2ma;
     mfn_t mfn;
     struct vcpu *v = current;
     struct p2m_domain *p2m = p2m_get_hostp2m(v->domain);
 
-    mfn = gfn_to_mfn_guest(p2m, gfn, &p2mt);
+    mfn = gfn_to_mfn_type_current(p2m, gfn, &p2mt, &p2ma, p2m_guest);
+
+    /* Check access permissions first, then handle faults */
+    if ( access_valid && (mfn_x(mfn) != INVALID_MFN) )
+    {
+        int violation = 0;
+        /* If the access is against the permissions, then send to mem_event */
+        switch (p2ma) 
+        {
+        case p2m_access_n:
+        default:
+            violation = access_r || access_w || access_x;
+            break;
+        case p2m_access_r:
+            violation = access_w || access_x;
+            break;
+        case p2m_access_w:
+            violation = access_r || access_x;
+            break;
+        case p2m_access_x:
+            violation = access_r || access_w;
+            break;
+        case p2m_access_rx:
+        case p2m_access_rx2rw:
+            violation = access_w;
+            break;
+        case p2m_access_wx:
+            violation = access_r;
+            break;
+        case p2m_access_rw:
+            violation = access_x;
+            break;
+        case p2m_access_rwx:
+            break;
+        }
+
+        if ( violation )
+        {
+            p2m_mem_access_check(gpa, gla_valid, gla, access_r, access_w, access_x);
+
+            return 1;
+        }
+    }
 
     /*
      * If this GFN is emulated MMIO or marked as read-only, pass the fault
diff -r f14b296d263f -r 02efc054da7b xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c        Fri Jan 07 11:54:36 2011 +0000
+++ b/xen/arch/x86/hvm/svm/svm.c        Fri Jan 07 11:54:40 2011 +0000
@@ -979,7 +979,7 @@ static void svm_do_nested_pgfault(paddr_
         __trace_var(TRC_HVM_NPF, 0, sizeof(_d), &_d);
     }
 
-    if ( hvm_hap_nested_page_fault(gfn) )
+    if ( hvm_hap_nested_page_fault(gpa, 0, ~0ull, 0, 0, 0, 0) )
         return;
 
     /* Everything else is an error. */
diff -r f14b296d263f -r 02efc054da7b xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c        Fri Jan 07 11:54:36 2011 +0000
+++ b/xen/arch/x86/hvm/vmx/vmx.c        Fri Jan 07 11:54:40 2011 +0000
@@ -2079,7 +2079,14 @@ static void ept_handle_violation(unsigne
         __trace_var(TRC_HVM_NPF, 0, sizeof(_d), &_d);
     }
 
-    if ( hvm_hap_nested_page_fault(gfn) )
+    if ( hvm_hap_nested_page_fault(gpa,
+                                   qualification & EPT_GLA_VALID       ? 1 : 0,
+                                   qualification & EPT_GLA_VALID
+                                     ? __vmread(GUEST_LINEAR_ADDRESS) : ~0ull,
+                                   1, /* access types are as follows */
+                                   qualification & EPT_READ_VIOLATION  ? 1 : 0,
+                                   qualification & EPT_WRITE_VIOLATION ? 1 : 0,
+                                   qualification & EPT_EXEC_VIOLATION  ? 1 : 0) )
         return;
 
     /* Everything else is an error. */
diff -r f14b296d263f -r 02efc054da7b xen/arch/x86/mm/Makefile
--- a/xen/arch/x86/mm/Makefile  Fri Jan 07 11:54:36 2011 +0000
+++ b/xen/arch/x86/mm/Makefile  Fri Jan 07 11:54:40 2011 +0000
@@ -9,6 +9,7 @@ obj-$(x86_64) += mem_event.o
 obj-$(x86_64) += mem_event.o
 obj-$(x86_64) += mem_paging.o
 obj-$(x86_64) += mem_sharing.o
+obj-$(x86_64) += mem_access.o
 
 guest_walk_%.o: guest_walk.c Makefile
        $(CC) $(CFLAGS) -DGUEST_PAGING_LEVELS=$* -c $< -o $@
diff -r f14b296d263f -r 02efc054da7b xen/arch/x86/mm/mem_access.c
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/x86/mm/mem_access.c      Fri Jan 07 11:54:40 2011 +0000
@@ -0,0 +1,59 @@
+/******************************************************************************
+ * arch/x86/mm/mem_access.c
+ *
+ * Memory access support.
+ *
+ * Copyright (c) 2011 Virtuata, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+
+#include <asm/p2m.h>
+#include <asm/mem_event.h>
+
+
+int mem_access_domctl(struct domain *d, xen_domctl_mem_event_op_t *mec,
+                      XEN_GUEST_HANDLE(void) u_domctl)
+{
+    int rc;
+    struct p2m_domain *p2m = p2m_get_hostp2m(d);
+
+    switch( mec->op )
+    {
+    case XEN_DOMCTL_MEM_EVENT_OP_ACCESS_RESUME:
+    {
+        p2m_mem_access_resume(p2m);
+        rc = 0;
+    }
+    break;
+
+    default:
+        rc = -ENOSYS;
+        break;
+    }
+
+    return rc;
+}
+
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff -r f14b296d263f -r 02efc054da7b xen/arch/x86/mm/mem_event.c
--- a/xen/arch/x86/mm/mem_event.c       Fri Jan 07 11:54:36 2011 +0000
+++ b/xen/arch/x86/mm/mem_event.c       Fri Jan 07 11:54:40 2011 +0000
@@ -26,6 +26,7 @@
 #include <asm/p2m.h>
 #include <asm/mem_event.h>
 #include <asm/mem_paging.h>
+#include <asm/mem_access.h>
 
 /* for public/io/ring.h macros */
 #define xen_mb()   mb()
@@ -66,6 +67,9 @@ static int mem_event_enable(struct domai
                     PAGE_SIZE);
 
     mem_event_ring_lock_init(d);
+
+    /* Wake any VCPUs paused for memory events */
+    mem_event_unpause_vcpus(d);
 
     return 0;
 
@@ -143,12 +147,21 @@ void mem_event_unpause_vcpus(struct doma
             vcpu_wake(v);
 }
 
+void mem_event_mark_and_pause(struct vcpu *v)
+{
+    set_bit(_VPF_mem_event, &v->pause_flags);
+    vcpu_sleep_nosync(v);
+}
+
 int mem_event_check_ring(struct domain *d)
 {
     struct vcpu *curr = current;
     int free_requests;
     int ring_full;
 
+    if ( !d->mem_event.ring_page )
+        return -1;
+
     mem_event_ring_lock(d);
 
     free_requests = RING_FREE_REQUESTS(&d->mem_event.front_ring);
@@ -157,7 +170,7 @@ int mem_event_check_ring(struct domain *
         gdprintk(XENLOG_INFO, "free request slots: %d\n", free_requests);
         WARN_ON(free_requests == 0);
     }
-    ring_full = free_requests < MEM_EVENT_RING_THRESHOLD;
+    ring_full = free_requests < MEM_EVENT_RING_THRESHOLD ? 1 : 0;
 
     if ( (curr->domain->domain_id == d->domain_id) && ring_full )
     {
@@ -203,7 +216,11 @@ int mem_event_domctl(struct domain *d, x
         return rc;
 #endif
 
-    if ( mec->mode == 0 )
+    rc = -ENOSYS;
+
+    switch ( mec-> mode ) 
+    {
+    case 0:
     {
         switch( mec->op )
         {
@@ -268,13 +285,18 @@ int mem_event_domctl(struct domain *d, x
             rc = -ENOSYS;
             break;
         }
-    }
-    else
-    {
-        rc = -ENOSYS;
-
-        if ( mec->mode & XEN_DOMCTL_MEM_EVENT_OP_PAGING )
-            rc = mem_paging_domctl(d, mec, u_domctl);
+        break;
+    }
+    case XEN_DOMCTL_MEM_EVENT_OP_PAGING:
+    {
+        rc = mem_paging_domctl(d, mec, u_domctl);
+        break;
+    }
+    case XEN_DOMCTL_MEM_EVENT_OP_ACCESS: 
+    {
+        rc = mem_access_domctl(d, mec, u_domctl);
+        break;
+    }
     }
 
     return rc;
diff -r f14b296d263f -r 02efc054da7b xen/arch/x86/mm/p2m.c
--- a/xen/arch/x86/mm/p2m.c     Fri Jan 07 11:54:36 2011 +0000
+++ b/xen/arch/x86/mm/p2m.c     Fri Jan 07 11:54:40 2011 +0000
@@ -2858,6 +2858,97 @@ void p2m_mem_paging_resume(struct p2m_do
 }
 #endif /* __x86_64__ */
 
+void p2m_mem_access_check(unsigned long gpa, bool_t gla_valid, unsigned long gla,
+                          bool_t access_r, bool_t access_w, bool_t access_x)
+{
+    struct vcpu *v = current;
+    mem_event_request_t req;
+    unsigned long gfn = gpa >> PAGE_SHIFT;
+    struct domain *d = v->domain;    
+    struct p2m_domain* p2m = p2m_get_hostp2m(d);
+    int res;
+    mfn_t mfn;
+    p2m_type_t p2mt;
+    p2m_access_t p2ma;
+    
+    /* First, handle rx2rw conversion automatically */
+    p2m_lock(p2m);
+    mfn = p2m->get_entry(p2m, gfn, &p2mt, &p2ma, p2m_query);
+
+    if ( access_w && p2ma == p2m_access_rx2rw ) 
+    {
+        p2m->set_entry(p2m, gfn, mfn, 0, p2mt, p2m_access_rw);
+        p2m_unlock(p2m);
+        return;
+    }
+    p2m_unlock(p2m);
+
+    /* Otherwise, check if there is a memory event listener, and send the message along */
+    res = mem_event_check_ring(d);
+    if ( res < 0 ) 
+    {
+        /* No listener */
+        if ( p2m->access_required ) 
+        {
+            printk(XENLOG_INFO 
+                   "Memory access permissions failure, no mem_event listener: pausing VCPU %d, dom %d\n",
+                   v->vcpu_id, d->domain_id);
+
+            mem_event_mark_and_pause(v);
+        }
+        else
+        {
+            /* A listener is not required, so clear the access restrictions */
+            p2m_lock(p2m);
+            p2m->set_entry(p2m, gfn, mfn, 0, p2mt, p2m_access_rwx);
+            p2m_unlock(p2m);
+        }
+
+        return;
+    }
+    else if ( res > 0 )
+        return;  /* No space in buffer; VCPU paused */
+
+    memset(&req, 0, sizeof(req));
+    req.type = MEM_EVENT_TYPE_ACCESS;
+    req.reason = MEM_EVENT_REASON_VIOLATION;
+
+    /* Pause the current VCPU unconditionally */
+    vcpu_pause_nosync(v);
+    req.flags |= MEM_EVENT_FLAG_VCPU_PAUSED;    
+
+    /* Send request to mem event */
+    req.gfn = gfn;
+    req.offset = gpa & ((1 << PAGE_SHIFT) - 1);
+    req.gla_valid = gla_valid;
+    req.gla = gla;
+    req.access_r = access_r;
+    req.access_w = access_w;
+    req.access_x = access_x;
+    
+    req.vcpu_id = v->vcpu_id;
+
+    mem_event_put_request(d, &req);   
+
+    /* VCPU paused, mem event request sent */
+}
+
+void p2m_mem_access_resume(struct p2m_domain *p2m)
+{
+    struct domain *d = p2m->domain;
+    mem_event_response_t rsp;
+
+    mem_event_get_response(d, &rsp);
+
+    /* Unpause domain */
+    if ( rsp.flags & MEM_EVENT_FLAG_VCPU_PAUSED )
+        vcpu_unpause(d->vcpu[rsp.vcpu_id]);
+
+    /* Unpause any domains that were paused because the ring was full or no listener
+     * was available */
+    mem_event_unpause_vcpus(d);
+}
+
 /*
  * Local variables:
  * mode: C
diff -r f14b296d263f -r 02efc054da7b xen/include/asm-x86/hvm/hvm.h
--- a/xen/include/asm-x86/hvm/hvm.h     Fri Jan 07 11:54:36 2011 +0000
+++ b/xen/include/asm-x86/hvm/hvm.h     Fri Jan 07 11:54:40 2011 +0000
@@ -356,7 +356,12 @@ static inline void hvm_set_info_guest(st
 
 int hvm_debug_op(struct vcpu *v, int32_t op);
 
-bool_t hvm_hap_nested_page_fault(unsigned long gfn);
+bool_t hvm_hap_nested_page_fault(unsigned long gpa,
+                                 bool_t gla_valid, unsigned long gla,
+                                 bool_t access_valid, 
+                                 bool_t access_r,
+                                 bool_t access_w,
+                                 bool_t access_x);
 
 #define hvm_msr_tsc_aux(v) ({                                               \
     struct domain *__d = (v)->domain;                                       \
diff -r f14b296d263f -r 02efc054da7b xen/include/asm-x86/mem_access.h
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-x86/mem_access.h  Fri Jan 07 11:54:40 2011 +0000
@@ -0,0 +1,35 @@
+/******************************************************************************
+ * include/asm-x86/mem_access.h
+ *
+ * Memory access support.
+ *
+ * Copyright (c) 2011 Virtuata, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+
+int mem_access_domctl(struct domain *d, xen_domctl_mem_event_op_t *mec,
+                      XEN_GUEST_HANDLE(void) u_domctl);
+
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff -r f14b296d263f -r 02efc054da7b xen/include/asm-x86/mem_event.h
--- a/xen/include/asm-x86/mem_event.h   Fri Jan 07 11:54:36 2011 +0000
+++ b/xen/include/asm-x86/mem_event.h   Fri Jan 07 11:54:40 2011 +0000
@@ -24,6 +24,8 @@
 #ifndef __MEM_EVENT_H__
 #define __MEM_EVENT_H__
 
+/* Pauses VCPU while marking pause flag for mem event */
+void mem_event_mark_and_pause(struct vcpu *v);
 int mem_event_check_ring(struct domain *d);
 void mem_event_put_request(struct domain *d, mem_event_request_t *req);
 void mem_event_get_response(struct domain *d, mem_event_response_t *rsp);
diff -r f14b296d263f -r 02efc054da7b xen/include/asm-x86/p2m.h
--- a/xen/include/asm-x86/p2m.h Fri Jan 07 11:54:36 2011 +0000
+++ b/xen/include/asm-x86/p2m.h Fri Jan 07 11:54:40 2011 +0000
@@ -522,6 +522,13 @@ static inline void p2m_mem_paging_popula
 { }
 #endif
 
+/* Send mem event based on the access (gla is -1ull if not available).  Handles
+ * the rx2rw conversion */
+void p2m_mem_access_check(unsigned long gpa, bool_t gla_valid, unsigned long gla,
+                          bool_t access_r, bool_t access_w, bool_t access_x);
+/* Resumes the running of the VCPU, restarting the last instruction */
+void p2m_mem_access_resume(struct p2m_domain *p2m);
+
 struct page_info *p2m_alloc_ptp(struct p2m_domain *p2m, unsigned long type);
 
 #endif /* _XEN_P2M_H */
diff -r f14b296d263f -r 02efc054da7b xen/include/public/domctl.h
--- a/xen/include/public/domctl.h       Fri Jan 07 11:54:36 2011 +0000
+++ b/xen/include/public/domctl.h       Fri Jan 07 11:54:40 2011 +0000
@@ -714,13 +714,26 @@ struct xen_domctl_gdbsx_domstatus {
 /*
  * Page memory in and out. 
  */
-#define XEN_DOMCTL_MEM_EVENT_OP_PAGING (1 << 0)
+#define XEN_DOMCTL_MEM_EVENT_OP_PAGING            1
 
 /* Domain memory paging */
 #define XEN_DOMCTL_MEM_EVENT_OP_PAGING_NOMINATE   0
 #define XEN_DOMCTL_MEM_EVENT_OP_PAGING_EVICT      1
 #define XEN_DOMCTL_MEM_EVENT_OP_PAGING_PREP       2
 #define XEN_DOMCTL_MEM_EVENT_OP_PAGING_RESUME     3
+
+/*
+ * Access permissions.
+ *
+ * There are HVM hypercalls to set the per-page access permissions of every
+ * page in a domain.  When one of these permissions--independent, read, 
+ * write, and execute--is violated, the VCPU is paused and a memory event 
+ * is sent with what happened.  (See public/mem_event.h)  The memory event 
+ * handler can then resume the VCPU and redo the access with an 
+ * ACCESS_RESUME mode for the following domctl.
+ */
+#define XEN_DOMCTL_MEM_EVENT_OP_ACCESS            2
+#define XEN_DOMCTL_MEM_EVENT_OP_ACCESS_RESUME     0 
 
 struct xen_domctl_mem_event_op {
     uint32_t       op;           /* XEN_DOMCTL_MEM_EVENT_OP_* */
diff -r f14b296d263f -r 02efc054da7b xen/include/public/mem_event.h
--- a/xen/include/public/mem_event.h    Fri Jan 07 11:54:36 2011 +0000
+++ b/xen/include/public/mem_event.h    Fri Jan 07 11:54:40 2011 +0000
@@ -26,18 +26,40 @@
 #include "xen.h"
 #include "io/ring.h"
 
+/* Memory event type */
+#define MEM_EVENT_TYPE_SHARED   0
+#define MEM_EVENT_TYPE_PAGING   1
+#define MEM_EVENT_TYPE_ACCESS   2
+
 /* Memory event flags */
 #define MEM_EVENT_FLAG_VCPU_PAUSED  (1 << 0)
+
+/* Reasons for the memory event request */
+#define MEM_EVENT_REASON_UNKNOWN     0    /* typical reason */
+#define MEM_EVENT_REASON_VIOLATION   1    /* access violation, GFN is address */
 
 typedef struct mem_event_shared_page {
     uint32_t port;
 } mem_event_shared_page_t;
 
 typedef struct mem_event_st {
+    uint16_t type;
+    uint16_t flags;
+    uint32_t vcpu_id;
+
     uint64_t gfn;
+    uint64_t offset;
+    uint64_t gla; /* if gla_valid */
+
     uint32_t p2mt;
-    uint32_t vcpu_id;
-    uint64_t flags;
+
+    uint16_t access_r:1;
+    uint16_t access_w:1;
+    uint16_t access_x:1;
+    uint16_t gla_valid:1;
+    uint16_t available:12;
+
+    uint16_t reason;
 } mem_event_request_t, mem_event_response_t;
 
 DEFINE_RING_TYPES(mem_event, mem_event_request_t, mem_event_response_t);

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog

<Prev in Thread] Current Thread [Next in Thread>
  • [Xen-changelog] [xen-unstable] mem_access: mem event additions for access, Xen patchbot-unstable <=