
[Xen-devel] [PATCH 1/3] x86/mem_access: Make the mem_access ops generic



This patch does the following:
1. Deprecate the HVMOP_[sg]et_mem_access HVM ops.
2. Move the ops under XENMEM_access_op.
3. Rename enums and structs to be generic rather than HVM-specific.
4. Remove the enums and structs associated with the HVM ops.

Signed-off-by: Aravindh Puthiyaparambil <aravindp@xxxxxxxxx>
Cc: Keir Fraser <keir@xxxxxxx>
Cc: Jan Beulich <jbeulich@xxxxxxxx>
Cc: Tim Deegan <tim@xxxxxxx>

---
Changes from the RFC version of the patch:
1. Removed pointless braces.
2. Changed preemption handling to use the upper "cmd" bits from
   do_memory_op() (a small encoding sketch follows this list).
3. Deleted the old interface enum and structs.
4. Removed the xenmem_ prefix.
5. Made access a uint8_t and placed it above domid.
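
(Not part of the patch.) For reviewers following the continuation path, here is
a minimal standalone sketch of how the restart point rides in the upper "cmd"
bits, assuming MEMOP_EXTENT_SHIFT is 6 as defined in xen/common/memory.c; the
constants are copied here only to make the example self-contained:

/* Sketch only: mirrors the encoding used by do_memory_op() and the
 * continuation created in mem_access_memop() in this patch. */
#include <assert.h>
#include <stdio.h>

#define MEMOP_EXTENT_SHIFT 6    /* cmd[6:] == start_extent */
#define MEMOP_CMD_MASK     ((1UL << MEMOP_EXTENT_SHIFT) - 1)
#define XENMEM_access_op   21

int main(void)
{
    /* p2m_set_mem_access() preempted after this many pages; the ASSERT in
     * mem_access_memop() guarantees the low 8 bits are clear. */
    unsigned long pages_done = 0x300;

    /* Continuation created in mem_access_memop(): op and offset share cmd. */
    unsigned long cmd = XENMEM_access_op | pages_done;

    /* do_memory_op() splits them apart again ... */
    unsigned long op = cmd & MEMOP_CMD_MASK;
    unsigned long start_extent = cmd >> MEMOP_EXTENT_SHIFT;

    /* ... and mem_access_memop() undoes the shift to recover the pfn offset. */
    unsigned long start_iter = start_extent << MEMOP_EXTENT_SHIFT;

    assert(op == XENMEM_access_op && start_iter == pages_done);
    printf("op=%lu start_iter=%#lx\n", op, start_iter);
    return 0;
}

This is also why the ASSERT requires the low bits of rc to be clear: ORing the
restart offset into cmd must not clobber the op number.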

Thanks,
Aravindh
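
(Not part of the patch.) For reference, a minimal caller-side sketch of the new
interface described in the commit message: fill a xen_mem_access_op structure
and hand it to the memory_op hypercall with cmd XENMEM_access_op. The struct,
enum and constants are the ones added to public/memory.h below;
issue_memory_op() is a hypothetical stand-in for whatever memory_op wrapper the
caller's environment provides (libxc, privcmd, ...), not an existing API.

#define __XEN_TOOLS__ 1          /* the new op sits in the tools-only section */
#include <stdint.h>
#include <xen/xen.h>
#include <xen/memory.h>

extern long issue_memory_op(unsigned int cmd, void *arg);   /* hypothetical */

static long set_access_rx(domid_t domid, uint64_t first_pfn, uint32_t nr)
{
    xen_mem_access_op_t mao = {
        .op     = XENMEM_access_op_set_access,
        .access = XENMEM_access_rx,   /* xenmem_access_t value, fits uint8_t */
        .domid  = domid,
        .nr     = nr,                 /* ignored when setting the default */
        .pfn    = first_pfn,          /* ~0ull selects the default access */
    };

    return issue_memory_op(XENMEM_access_op, &mao);
}

static long get_access(domid_t domid, uint64_t pfn, uint8_t *access)
{
    xen_mem_access_op_t mao = {
        .op    = XENMEM_access_op_get_access,
        .domid = domid,
        .pfn   = pfn,
    };
    long rc = issue_memory_op(XENMEM_access_op, &mao);

    if ( rc == 0 )
        *access = mao.access;         /* filled in by the hypervisor */
    return rc;
}

The set path can be preempted; the hypervisor hides the restart point in the
continuation, so the caller simply sees the hypercall return once the whole
range has been processed.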

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 38c491e..eeaa72e 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -4589,79 +4589,10 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg)
     }
 
     case HVMOP_set_mem_access:
-    {
-        struct xen_hvm_set_mem_access a;
-        struct domain *d;
-
-        if ( copy_from_guest(&a, arg, 1) )
-            return -EFAULT;
-
-        rc = rcu_lock_remote_domain_by_id(a.domid, &d);
-        if ( rc != 0 )
-            return rc;
-
-        rc = -EINVAL;
-        if ( !is_hvm_domain(d) )
-            goto param_fail5;
-
-        rc = xsm_hvm_param(XSM_TARGET, d, op);
-        if ( rc )
-            goto param_fail5;
-
-        rc = -EINVAL;
-        if ( (a.first_pfn != ~0ull) &&
-             (a.nr < start_iter ||
-              ((a.first_pfn + a.nr - 1) < a.first_pfn) ||
-              ((a.first_pfn + a.nr - 1) > domain_get_maximum_gpfn(d))) )
-            goto param_fail5;
-            
-        rc = p2m_set_mem_access(d, a.first_pfn, a.nr, start_iter,
-                                HVMOP_op_mask, a.hvmmem_access);
-        if ( rc > 0 )
-        {
-            start_iter = rc;
-            rc = -EAGAIN;
-        }
-
-    param_fail5:
-        rcu_unlock_domain(d);
-        break;
-    }
-
     case HVMOP_get_mem_access:
     {
-        struct xen_hvm_get_mem_access a;
-        struct domain *d;
-        hvmmem_access_t access;
-
-        if ( copy_from_guest(&a, arg, 1) )
-            return -EFAULT;
-
-        rc = rcu_lock_remote_domain_by_id(a.domid, &d);
-        if ( rc != 0 )
-            return rc;
-
-        rc = -EINVAL;
-        if ( !is_hvm_domain(d) )
-            goto param_fail6;
-
-        rc = xsm_hvm_param(XSM_TARGET, d, op);
-        if ( rc )
-            goto param_fail6;
-
-        rc = -EINVAL;
-        if ( (a.pfn > domain_get_maximum_gpfn(d)) && a.pfn != ~0ull )
-            goto param_fail6;
-
-        rc = p2m_get_mem_access(d, a.pfn, &access);
-        if ( rc != 0 )
-            goto param_fail6;
-
-        a.hvmmem_access = access;
-        rc = __copy_to_guest(arg, &a, 1) ? -EFAULT : 0;
-
-    param_fail6:
-        rcu_unlock_domain(d);
+        gdprintk(XENLOG_DEBUG, "Deprecated HVM op %ld.\n", op);
+        rc = -ENOSYS;
         break;
     }
 
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index fdc5ed3..6f93dc1 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -4631,7 +4631,8 @@ int xenmem_add_to_physmap_one(
     return rc;
 }
 
-long arch_memory_op(int op, XEN_GUEST_HANDLE_PARAM(void) arg)
+long arch_memory_op(int op, XEN_GUEST_HANDLE_PARAM(void) arg,
+                    unsigned long start_extent)
 {
     int rc;
 
@@ -4853,7 +4854,7 @@ long arch_memory_op(int op, XEN_GUEST_HANDLE_PARAM(void) arg)
     }
 
     default:
-        return subarch_memory_op(op, arg);
+        return subarch_memory_op(op, arg, start_extent);
     }
 
     return 0;
diff --git a/xen/arch/x86/mm/mem_access.c b/xen/arch/x86/mm/mem_access.c
index 50aaf27..8eac2bf 100644
--- a/xen/arch/x86/mm/mem_access.c
+++ b/xen/arch/x86/mm/mem_access.c
@@ -21,31 +21,98 @@
  */
 
 
+#include <xen/sched.h>
+#include <xen/guest_access.h>
+#include <xen/hypercall.h>
 #include <asm/p2m.h>
 #include <asm/mem_event.h>
+#include <xsm/xsm.h>
 
 
-int mem_access_memop(struct domain *d, xen_mem_event_op_t *meo)
+#define ACCESS_op_mask  0xff
+
+int mem_access_memop(XEN_GUEST_HANDLE_PARAM(void) arg, unsigned long start_iter)
 {
-    int rc;
+    long rc;
+    xen_mem_access_op_t mao;
+    struct domain *d;
+
+    if ( copy_from_guest(&mao, arg, 1) )
+        return -EFAULT;
+
+    rc = rcu_lock_live_remote_domain_by_id(mao.domid, &d);
+    if ( rc )
+        return rc;
+
+    if ( !is_hvm_domain(d) )
+        return -EINVAL;
+
+    rc = xsm_mem_event_op(XSM_TARGET, d, XENMEM_access_op);
+    if ( rc )
+        goto out;
 
     if ( unlikely(!d->mem_event->access.ring_page) )
         return -ENODEV;
 
-    switch( meo->op )
+    switch ( mao.op )
     {
     case XENMEM_access_op_resume:
     {
         p2m_mem_access_resume(d);
         rc = 0;
+        break;
+    }
+
+    case XENMEM_access_op_set_access:
+        rc = -EINVAL;
+
+        /*
+         * Undo adjustment done in do_memory_op() so that it matches
+         * p2m_set_mem_access()
+         */
+        start_iter <<= MEMOP_EXTENT_SHIFT;
+        if ( (mao.pfn != ~0ull) &&
+             (mao.nr < start_iter ||
+              ((mao.pfn + mao.nr - 1) < mao.pfn) ||
+              ((mao.pfn + mao.nr - 1) > domain_get_maximum_gpfn(d))) )
+            break;
+
+        rc = p2m_set_mem_access(d, mao.pfn, mao.nr, start_iter,
+                                ACCESS_op_mask, mao.access);
+        if ( rc > 0 )
+        {
+            ASSERT(!(rc & ACCESS_op_mask));
+            rc = hypercall_create_continuation(__HYPERVISOR_memory_op, "lh",
+                                               XENMEM_access_op | rc,
+                                               arg);
+        }
+        break;
+
+    case XENMEM_access_op_get_access:
+    {
+        xenmem_access_t access;
+
+        rc = -EINVAL;
+        if ( (mao.pfn > domain_get_maximum_gpfn(d)) && mao.pfn != ~0ull )
+            break;
+
+        rc = p2m_get_mem_access(d, mao.pfn, &access);
+        if ( rc != 0 )
+            break;
+
+        mao.access = access;
+        rc = __copy_to_guest(arg, &mao, 1) ? -EFAULT : 0;
+
+        break;
     }
-    break;
 
     default:
         rc = -ENOSYS;
         break;
     }
 
+ out:
+    rcu_unlock_domain(d);
     return rc;
 }
 
diff --git a/xen/arch/x86/mm/mem_event.c b/xen/arch/x86/mm/mem_event.c
index d00e404..36b9dba 100644
--- a/xen/arch/x86/mm/mem_event.c
+++ b/xen/arch/x86/mm/mem_event.c
@@ -458,9 +458,6 @@ int do_mem_event_op(int op, uint32_t domain, void *arg)
         case XENMEM_paging_op:
             ret = mem_paging_memop(d, (xen_mem_event_op_t *) arg);
             break;
-        case XENMEM_access_op:
-            ret = mem_access_memop(d, (xen_mem_event_op_t *) arg);
-            break;
         case XENMEM_sharing_op:
             ret = mem_sharing_memop(d, (xen_mem_sharing_op_t *) arg);
             break;
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index c38f334..3d7c969 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -1350,7 +1350,7 @@ void p2m_mem_access_resume(struct domain *d)
 /* Set access type for a region of pfns.
  * If start_pfn == -1ul, sets the default access type */
 long p2m_set_mem_access(struct domain *d, unsigned long pfn, uint32_t nr,
-                        uint32_t start, uint32_t mask, hvmmem_access_t access)
+                        uint32_t start, uint32_t mask, xenmem_access_t access)
 {
     struct p2m_domain *p2m = p2m_get_hostp2m(d);
     p2m_access_t a, _a;
@@ -1359,7 +1359,7 @@ long p2m_set_mem_access(struct domain *d, unsigned long pfn, uint32_t nr,
     long rc = 0;
 
     static const p2m_access_t memaccess[] = {
-#define ACCESS(ac) [HVMMEM_access_##ac] = p2m_access_##ac
+#define ACCESS(ac) [XENMEM_access_##ac] = p2m_access_##ac
         ACCESS(n),
         ACCESS(r),
         ACCESS(w),
@@ -1378,7 +1378,7 @@ long p2m_set_mem_access(struct domain *d, unsigned long pfn, uint32_t nr,
     case 0 ... ARRAY_SIZE(memaccess) - 1:
         a = memaccess[access];
         break;
-    case HVMMEM_access_default:
+    case XENMEM_access_default:
         a = p2m->default_access;
         break;
     default:
@@ -1416,23 +1416,23 @@ long p2m_set_mem_access(struct domain *d, unsigned long pfn, uint32_t nr,
 /* Get access type for a pfn
  * If pfn == -1ul, gets the default access type */
 int p2m_get_mem_access(struct domain *d, unsigned long pfn, 
-                       hvmmem_access_t *access)
+                       xenmem_access_t *access)
 {
     struct p2m_domain *p2m = p2m_get_hostp2m(d);
     p2m_type_t t;
     p2m_access_t a;
     mfn_t mfn;
 
-    static const hvmmem_access_t memaccess[] = {
-        HVMMEM_access_n,
-        HVMMEM_access_r,
-        HVMMEM_access_w,
-        HVMMEM_access_rw,
-        HVMMEM_access_x,
-        HVMMEM_access_rx,
-        HVMMEM_access_wx,
-        HVMMEM_access_rwx,
-        HVMMEM_access_rx2rw
+    static const xenmem_access_t memaccess[] = {
+        XENMEM_access_n,
+        XENMEM_access_r,
+        XENMEM_access_w,
+        XENMEM_access_rw,
+        XENMEM_access_x,
+        XENMEM_access_rx,
+        XENMEM_access_wx,
+        XENMEM_access_rwx,
+        XENMEM_access_rx2rw
     };
 
     /* If request to get default access */
diff --git a/xen/arch/x86/x86_64/compat/mm.c b/xen/arch/x86/x86_64/compat/mm.c
index 0a8408b..40feedd 100644
--- a/xen/arch/x86/x86_64/compat/mm.c
+++ b/xen/arch/x86/x86_64/compat/mm.c
@@ -4,6 +4,7 @@
 #include <compat/xen.h>
 #include <asm/mem_event.h>
 #include <asm/mem_sharing.h>
+#include <asm/mem_access.h>
 
 int compat_set_gdt(XEN_GUEST_HANDLE_PARAM(uint) frame_list, unsigned int entries)
 {
@@ -44,7 +45,8 @@ int compat_update_descriptor(u32 pa_lo, u32 pa_hi, u32 desc_lo, u32 desc_hi)
                                 desc_lo | ((u64)desc_hi << 32));
 }
 
-int compat_arch_memory_op(int op, XEN_GUEST_HANDLE_PARAM(void) arg)
+int compat_arch_memory_op(int op, XEN_GUEST_HANDLE_PARAM(void) arg,
+                          unsigned long start_extent)
 {
     struct compat_machphys_mfn_list xmml;
     l2_pgentry_t l2e;
@@ -68,7 +70,7 @@ int compat_arch_memory_op(int op, XEN_GUEST_HANDLE_PARAM(void) arg)
         XLAT_foreign_memory_map(nat, &cmp);
 #undef XLAT_memory_map_HNDL_buffer
 
-        rc = arch_memory_op(op, guest_handle_from_ptr(nat, void));
+        rc = arch_memory_op(op, guest_handle_from_ptr(nat, void), start_extent);
 
         break;
     }
@@ -87,7 +89,7 @@ int compat_arch_memory_op(int op, XEN_GUEST_HANDLE_PARAM(void) arg)
         XLAT_memory_map(nat, &cmp);
 #undef XLAT_memory_map_HNDL_buffer
 
-        rc = arch_memory_op(op, guest_handle_from_ptr(nat, void));
+        rc = arch_memory_op(op, guest_handle_from_ptr(nat, void), start_extent);
         if ( rc < 0 )
             break;
 
@@ -111,7 +113,7 @@ int compat_arch_memory_op(int op, XEN_GUEST_HANDLE_PARAM(void) arg)
 
         XLAT_pod_target(nat, &cmp);
 
-        rc = arch_memory_op(op, guest_handle_from_ptr(nat, void));
+        rc = arch_memory_op(op, guest_handle_from_ptr(nat, void), start_extent);
         if ( rc < 0 )
             break;
 
@@ -185,7 +187,6 @@ int compat_arch_memory_op(int op, XEN_GUEST_HANDLE_PARAM(void) arg)
         return mem_sharing_get_nr_shared_mfns();
 
     case XENMEM_paging_op:
-    case XENMEM_access_op:
     {
         xen_mem_event_op_t meo;
         if ( copy_from_guest(&meo, arg, 1) )
@@ -195,6 +196,10 @@ int compat_arch_memory_op(int op, XEN_GUEST_HANDLE_PARAM(void) arg)
             return -EFAULT;
         break;
     }
+    case XENMEM_access_op:
+        rc = mem_access_memop(arg, start_extent);
+        break;
+
     case XENMEM_sharing_op:
     {
         xen_mem_sharing_op_t mso;
diff --git a/xen/arch/x86/x86_64/mm.c b/xen/arch/x86/x86_64/mm.c
index f6ea012..783a722 100644
--- a/xen/arch/x86/x86_64/mm.c
+++ b/xen/arch/x86/x86_64/mm.c
@@ -36,6 +36,7 @@
 #include <asm/numa.h>
 #include <asm/mem_event.h>
 #include <asm/mem_sharing.h>
+#include <asm/mem_access.h>
 #include <public/memory.h>
 
 /* Parameters for PFN/MADDR compression. */
@@ -948,7 +949,8 @@ void __init subarch_init_memory(void)
     }
 }
 
-long subarch_memory_op(int op, XEN_GUEST_HANDLE_PARAM(void) arg)
+long subarch_memory_op(int op, XEN_GUEST_HANDLE_PARAM(void) arg,
+                       unsigned long start_extent)
 {
     struct xen_machphys_mfn_list xmml;
     l3_pgentry_t l3e;
@@ -1007,7 +1009,6 @@ long subarch_memory_op(int op, XEN_GUEST_HANDLE_PARAM(void) arg)
         return mem_sharing_get_nr_shared_mfns();
 
     case XENMEM_paging_op:
-    case XENMEM_access_op:
     {
         xen_mem_event_op_t meo;
         if ( copy_from_guest(&meo, arg, 1) )
@@ -1017,6 +1018,10 @@ long subarch_memory_op(int op, XEN_GUEST_HANDLE_PARAM(void) arg)
             return -EFAULT;
         break;
     }
+    case XENMEM_access_op:
+        rc = mem_access_memop(arg, start_extent);
+        break;
+
     case XENMEM_sharing_op:
     {
         xen_mem_sharing_op_t mso;
diff --git a/xen/common/compat/memory.c b/xen/common/compat/memory.c
index daa2e04..a38ecf6 100644
--- a/xen/common/compat/memory.c
+++ b/xen/common/compat/memory.c
@@ -271,7 +271,7 @@ int compat_memory_op(unsigned int cmd, XEN_GUEST_HANDLE_PARAM(void) compat)
         }
 
         default:
-            return compat_arch_memory_op(cmd, compat);
+            return compat_arch_memory_op(cmd, compat, start_extent);
         }
 
         rc = do_memory_op(cmd, nat.hnd);
diff --git a/xen/common/memory.c b/xen/common/memory.c
index 4d6ffee..fd20ecd 100644
--- a/xen/common/memory.c
+++ b/xen/common/memory.c
@@ -964,7 +964,7 @@ long do_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
         break;
 
     default:
-        rc = arch_memory_op(op, arg);
+        rc = arch_memory_op(op, arg, start_extent);
         break;
     }
 
diff --git a/xen/include/asm-x86/mem_access.h b/xen/include/asm-x86/mem_access.h
index 60c2834..f99fd1b 100644
--- a/xen/include/asm-x86/mem_access.h
+++ b/xen/include/asm-x86/mem_access.h
@@ -23,7 +23,8 @@
 #ifndef _XEN_ASM_MEM_ACCESS_H
 #define _XEN_ASM_MEM_ACCESS_H
 
-int mem_access_memop(struct domain *d, xen_mem_event_op_t *meo);
+int mem_access_memop(XEN_GUEST_HANDLE_PARAM(void) arg,
+                     unsigned long start_iter);
 int mem_access_send_req(struct domain *d, mem_event_request_t *req);
 
 #endif /* _XEN_ASM_MEM_ACCESS_H */
diff --git a/xen/include/asm-x86/mm.h b/xen/include/asm-x86/mm.h
index c835f76..d10f3bd 100644
--- a/xen/include/asm-x86/mm.h
+++ b/xen/include/asm-x86/mm.h
@@ -561,9 +561,12 @@ void *do_page_walk(struct vcpu *v, unsigned long addr);
 int __sync_local_execstate(void);
 
 /* Arch-specific portion of memory_op hypercall. */
-long arch_memory_op(int op, XEN_GUEST_HANDLE_PARAM(void) arg);
-long subarch_memory_op(int op, XEN_GUEST_HANDLE_PARAM(void) arg);
-int compat_arch_memory_op(int op, XEN_GUEST_HANDLE_PARAM(void));
+long arch_memory_op(int op, XEN_GUEST_HANDLE_PARAM(void) arg,
+                    unsigned long start_extent);
+long subarch_memory_op(int op, XEN_GUEST_HANDLE_PARAM(void) arg,
+                       unsigned long start_extent);
+int compat_arch_memory_op(int op, XEN_GUEST_HANDLE_PARAM(void),
+                          unsigned long start_extent);
 int compat_subarch_memory_op(int op, XEN_GUEST_HANDLE_PARAM(void));
 
 int steal_page(
diff --git a/xen/include/asm-x86/p2m.h b/xen/include/asm-x86/p2m.h
index d644f82..743bb59 100644
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -577,12 +577,12 @@ void p2m_mem_access_resume(struct domain *d);
 /* Set access type for a region of pfns.
  * If start_pfn == -1ul, sets the default access type */
 long p2m_set_mem_access(struct domain *d, unsigned long start_pfn, uint32_t nr,
-                        uint32_t start, uint32_t mask, hvmmem_access_t access);
+                        uint32_t start, uint32_t mask, xenmem_access_t access);
 
 /* Get access type for a pfn
  * If pfn == -1ul, gets the default access type */
-int p2m_get_mem_access(struct domain *d, unsigned long pfn, 
-                       hvmmem_access_t *access);
+int p2m_get_mem_access(struct domain *d, unsigned long pfn,
+                       xenmem_access_t *access);
 
 /* 
  * Internal functions, only called by other p2m code
diff --git a/xen/include/public/hvm/hvm_op.h b/xen/include/public/hvm/hvm_op.h
index 3204ec4..f00f6d2 100644
--- a/xen/include/public/hvm/hvm_op.h
+++ b/xen/include/public/hvm/hvm_op.h
@@ -162,49 +162,11 @@ DEFINE_XEN_GUEST_HANDLE(xen_hvm_xentrace_t);
 /* Following tools-only interfaces may change in future. */
 #if defined(__XEN__) || defined(__XEN_TOOLS__)
 
+/* Deprecated by XENMEM_access_op_set_access */
 #define HVMOP_set_mem_access        12
-typedef enum {
-    HVMMEM_access_n,
-    HVMMEM_access_r,
-    HVMMEM_access_w,
-    HVMMEM_access_rw,
-    HVMMEM_access_x,
-    HVMMEM_access_rx,
-    HVMMEM_access_wx,
-    HVMMEM_access_rwx,
-    HVMMEM_access_rx2rw,       /* Page starts off as r-x, but automatically
-                                * change to r-w on a write */
-    HVMMEM_access_n2rwx,       /* Log access: starts off as n, automatically 
-                                * goes to rwx, generating an event without
-                                * pausing the vcpu */
-    HVMMEM_access_default      /* Take the domain default */
-} hvmmem_access_t;
-/* Notify that a region of memory is to have specific access types */
-struct xen_hvm_set_mem_access {
-    /* Domain to be updated. */
-    domid_t domid;
-    /* Memory type */
-    uint16_t hvmmem_access; /* hvm_access_t */
-    /* Number of pages, ignored on setting default access */
-    uint32_t nr;
-    /* First pfn, or ~0ull to set the default access for new pages */
-    uint64_aligned_t first_pfn;
-};
-typedef struct xen_hvm_set_mem_access xen_hvm_set_mem_access_t;
-DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_mem_access_t);
 
+/* Deprecated by XENMEM_access_op_get_access */
 #define HVMOP_get_mem_access        13
-/* Get the specific access type for that region of memory */
-struct xen_hvm_get_mem_access {
-    /* Domain to be queried. */
-    domid_t domid;
-    /* Memory type: OUT */
-    uint16_t hvmmem_access; /* hvm_access_t */
-    /* pfn, or ~0ull for default access for new pages.  IN */
-    uint64_aligned_t pfn;
-};
-typedef struct xen_hvm_get_mem_access xen_hvm_get_mem_access_t;
-DEFINE_XEN_GUEST_HANDLE(xen_hvm_get_mem_access_t);
 
 #define HVMOP_inject_trap            14
 /* Inject a trap into a VCPU, which will get taken up on the next
diff --git a/xen/include/public/memory.h b/xen/include/public/memory.h
index f19ac14..5bcd475 100644
--- a/xen/include/public/memory.h
+++ b/xen/include/public/memory.h
@@ -363,9 +363,6 @@ typedef struct xen_pod_target xen_pod_target_t;
 #define XENMEM_paging_op_evict              1
 #define XENMEM_paging_op_prep               2
 
-#define XENMEM_access_op                    21
-#define XENMEM_access_op_resume             0
-
 struct xen_mem_event_op {
     uint8_t     op;         /* XENMEM_*_op_* */
     domid_t     domain;
@@ -379,6 +376,56 @@ struct xen_mem_event_op {
 typedef struct xen_mem_event_op xen_mem_event_op_t;
 DEFINE_XEN_GUEST_HANDLE(xen_mem_event_op_t);
 
+#define XENMEM_access_op                    21
+#define XENMEM_access_op_resume             0
+#define XENMEM_access_op_set_access         1
+#define XENMEM_access_op_get_access         2
+
+typedef enum {
+    XENMEM_access_n,
+    XENMEM_access_r,
+    XENMEM_access_w,
+    XENMEM_access_rw,
+    XENMEM_access_x,
+    XENMEM_access_rx,
+    XENMEM_access_wx,
+    XENMEM_access_rwx,
+    /*
+     * Page starts off as r-x, but automatically
+     * change to r-w on a write
+     */
+    XENMEM_access_rx2rw,
+    /*
+     * Log access: starts off as n, automatically
+     * goes to rwx, generating an event without
+     * pausing the vcpu
+     */
+    XENMEM_access_n2rwx,
+    /* Take the domain default */
+    XENMEM_access_default
+} xenmem_access_t;
+
+struct xen_mem_access_op {
+    /* XENMEM_access_op_* */
+    uint8_t op;
+    /* xenmem_access_t */
+    uint8_t access;
+    domid_t domid;
+    /*
+     * Number of pages for set op
+     * Ignored on setting default access and other ops
+     */
+    uint32_t nr;
+    /*
+     * First pfn for set op
+     * pfn for get op
+     * ~0ull is used to set and get the default access for pages
+     */
+    uint64_aligned_t pfn;
+};
+typedef struct xen_mem_access_op xen_mem_access_op_t;
+DEFINE_XEN_GUEST_HANDLE(xen_mem_access_op_t);
+
 #define XENMEM_sharing_op                   22
 #define XENMEM_sharing_op_nominate_gfn      0
 #define XENMEM_sharing_op_nominate_gref     1
-- 
1.8.3.2

