
[Xen-devel] [PATCH v2 01/10] vm_event: Define VM_EVENT type



Define a type for each of the supported vm_event rings (paging,
monitor and sharing) and replace the HVM ring param argument and the
domctl "mode" field with this type.

Replace XEN_DOMCTL_VM_EVENT_OP_* occurrences with their corresponding
XEN_VM_EVENT_TYPE_* counterparts.

Signed-off-by: Petre Pircalabu <ppircalabu@xxxxxxxxxxxxxxx>
---
 tools/libxc/include/xenctrl.h |  1 +
 tools/libxc/xc_mem_paging.c   |  6 ++--
 tools/libxc/xc_memshr.c       |  6 ++--
 tools/libxc/xc_monitor.c      |  6 ++--
 tools/libxc/xc_private.h      |  8 ++---
 tools/libxc/xc_vm_event.c     | 58 +++++++++++++++-------------------
 xen/common/vm_event.c         | 12 ++++----
 xen/include/public/domctl.h   | 72 +++----------------------------------------
 xen/include/public/vm_event.h | 31 +++++++++++++++++++
 9 files changed, 81 insertions(+), 119 deletions(-)
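
For illustration only, not part of the patch: a minimal consumer sketch of
the reworked plumbing, using the public monitor wrappers. The domain id and
error handling are placeholder assumptions.

    #include <stdio.h>
    #include <sys/mman.h>
    #include <xenctrl.h>

    int main(void)
    {
        xc_interface *xch = xc_interface_open(NULL, NULL, 0);
        uint32_t domid = 1;               /* hypothetical guest */
        uint32_t port;
        void *ring_page;

        if ( !xch )
            return 1;

        /* Internally this now selects the ring via XEN_VM_EVENT_TYPE_MONITOR
         * instead of the old XEN_DOMCTL_VM_EVENT_OP_MONITOR "mode". */
        ring_page = xc_monitor_enable(xch, domid, &port);
        if ( !ring_page )
            perror("xc_monitor_enable");
        else
        {
            /* ... bind the port, consume events, then tear down ... */
            munmap(ring_page, XC_PAGE_SIZE);
            xc_monitor_disable(xch, domid);
        }

        xc_interface_close(xch);
        return 0;
    }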

diff --git a/tools/libxc/include/xenctrl.h b/tools/libxc/include/xenctrl.h
index 538007a..f3af710 100644
--- a/tools/libxc/include/xenctrl.h
+++ b/tools/libxc/include/xenctrl.h
@@ -46,6 +46,7 @@
 #include <xen/xsm/flask_op.h>
 #include <xen/kexec.h>
 #include <xen/platform.h>
+#include <xen/vm_event.h>
 
 #include "xentoollog.h"
 
diff --git a/tools/libxc/xc_mem_paging.c b/tools/libxc/xc_mem_paging.c
index a067706..a88c0cc 100644
--- a/tools/libxc/xc_mem_paging.c
+++ b/tools/libxc/xc_mem_paging.c
@@ -48,7 +48,7 @@ int xc_mem_paging_enable(xc_interface *xch, uint32_t domain_id,
 
     return xc_vm_event_control(xch, domain_id,
                                XEN_VM_EVENT_ENABLE,
-                               XEN_DOMCTL_VM_EVENT_OP_PAGING,
+                               XEN_VM_EVENT_TYPE_PAGING,
                                port);
 }
 
@@ -56,7 +56,7 @@ int xc_mem_paging_disable(xc_interface *xch, uint32_t domain_id)
 {
     return xc_vm_event_control(xch, domain_id,
                                XEN_VM_EVENT_DISABLE,
-                               XEN_DOMCTL_VM_EVENT_OP_PAGING,
+                               XEN_VM_EVENT_TYPE_PAGING,
                                NULL);
 }
 
@@ -64,7 +64,7 @@ int xc_mem_paging_resume(xc_interface *xch, uint32_t domain_id)
 {
     return xc_vm_event_control(xch, domain_id,
                                XEN_VM_EVENT_RESUME,
-                               XEN_DOMCTL_VM_EVENT_OP_PAGING,
+                               XEN_VM_EVENT_TYPE_PAGING,
                                NULL);
 }
 
diff --git a/tools/libxc/xc_memshr.c b/tools/libxc/xc_memshr.c
index d5e135e..1c4a706 100644
--- a/tools/libxc/xc_memshr.c
+++ b/tools/libxc/xc_memshr.c
@@ -53,7 +53,7 @@ int xc_memshr_ring_enable(xc_interface *xch,
 
     return xc_vm_event_control(xch, domid,
                                XEN_VM_EVENT_ENABLE,
-                               XEN_DOMCTL_VM_EVENT_OP_SHARING,
+                               XEN_VM_EVENT_TYPE_SHARING,
                                port);
 }
 
@@ -62,7 +62,7 @@ int xc_memshr_ring_disable(xc_interface *xch,
 {
     return xc_vm_event_control(xch, domid,
                                XEN_VM_EVENT_DISABLE,
-                               XEN_DOMCTL_VM_EVENT_OP_SHARING,
+                               XEN_VM_EVENT_TYPE_SHARING,
                                NULL);
 }
 
@@ -205,7 +205,7 @@ int xc_memshr_domain_resume(xc_interface *xch,
 {
     return xc_vm_event_control(xch, domid,
                                XEN_VM_EVENT_RESUME,
-                               XEN_DOMCTL_VM_EVENT_OP_SHARING,
+                               XEN_VM_EVENT_TYPE_SHARING,
                                NULL);
 }
 
diff --git a/tools/libxc/xc_monitor.c b/tools/libxc/xc_monitor.c
index 4ac823e..f05b53d 100644
--- a/tools/libxc/xc_monitor.c
+++ b/tools/libxc/xc_monitor.c
@@ -24,7 +24,7 @@
 
 void *xc_monitor_enable(xc_interface *xch, uint32_t domain_id, uint32_t *port)
 {
-    return xc_vm_event_enable(xch, domain_id, HVM_PARAM_MONITOR_RING_PFN,
+    return xc_vm_event_enable(xch, domain_id, XEN_VM_EVENT_TYPE_MONITOR,
                               port);
 }
 
@@ -32,7 +32,7 @@ int xc_monitor_disable(xc_interface *xch, uint32_t domain_id)
 {
     return xc_vm_event_control(xch, domain_id,
                                XEN_VM_EVENT_DISABLE,
-                               XEN_DOMCTL_VM_EVENT_OP_MONITOR,
+                               XEN_VM_EVENT_TYPE_MONITOR,
                                NULL);
 }
 
@@ -40,7 +40,7 @@ int xc_monitor_resume(xc_interface *xch, uint32_t domain_id)
 {
     return xc_vm_event_control(xch, domain_id,
                                XEN_VM_EVENT_RESUME,
-                               XEN_DOMCTL_VM_EVENT_OP_MONITOR,
+                               XEN_VM_EVENT_TYPE_MONITOR,
                                NULL);
 }
 
diff --git a/tools/libxc/xc_private.h b/tools/libxc/xc_private.h
index adc3b6a..e4f7c3a 100644
--- a/tools/libxc/xc_private.h
+++ b/tools/libxc/xc_private.h
@@ -412,12 +412,12 @@ int xc_ffs64(uint64_t x);
  * vm_event operations. Internal use only.
  */
 int xc_vm_event_control(xc_interface *xch, uint32_t domain_id, unsigned int op,
-                        unsigned int mode, uint32_t *port);
+                        unsigned int type, uint32_t *port);
 /*
- * Enables vm_event and returns the mapped ring page indicated by param.
- * param can be HVM_PARAM_PAGING/ACCESS/SHARING_RING_PFN
+ * Enables vm_event and returns the mapped ring page indicated by type.
+ * type can be XEN_VM_EVENT_TYPE_(PAGING/MONITOR/SHARING)
  */
-void *xc_vm_event_enable(xc_interface *xch, uint32_t domain_id, int param,
+void *xc_vm_event_enable(xc_interface *xch, uint32_t domain_id, int type,
                          uint32_t *port);
 
 int do_dm_op(xc_interface *xch, uint32_t domid, unsigned int nr_bufs, ...);
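
A note for readers: the page returned by xc_vm_event_enable() is the shared
vm_event ring, so a consumer initialises its (back) end of the ring right
after enabling, along the lines of the existing xen-access tool. A sketch,
assuming the standard macros from xen/io/ring.h:

    #include <xenctrl.h>
    #include <xen/vm_event.h>

    /* ring_page is the mapping returned by xc_monitor_enable() /
     * xc_vm_event_enable(). */
    static void init_back_ring(void *ring_page, vm_event_back_ring_t *back_ring)
    {
        SHARED_RING_INIT((vm_event_sring_t *)ring_page);
        BACK_RING_INIT(back_ring, (vm_event_sring_t *)ring_page, XC_PAGE_SIZE);
    }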
diff --git a/tools/libxc/xc_vm_event.c b/tools/libxc/xc_vm_event.c
index a97c615..044bf71 100644
--- a/tools/libxc/xc_vm_event.c
+++ b/tools/libxc/xc_vm_event.c
@@ -23,7 +23,7 @@
 #include "xc_private.h"
 
 int xc_vm_event_control(xc_interface *xch, uint32_t domain_id, unsigned int op,
-                        unsigned int mode, uint32_t *port)
+                        unsigned int type, uint32_t *port)
 {
     DECLARE_DOMCTL;
     int rc;
@@ -31,7 +31,7 @@ int xc_vm_event_control(xc_interface *xch, uint32_t domain_id, unsigned int op,
     domctl.cmd = XEN_DOMCTL_vm_event_op;
     domctl.domain = domain_id;
     domctl.u.vm_event_op.op = op;
-    domctl.u.vm_event_op.mode = mode;
+    domctl.u.vm_event_op.type = type;
 
     rc = do_domctl(xch, &domctl);
     if ( !rc && port )
@@ -39,13 +39,13 @@ int xc_vm_event_control(xc_interface *xch, uint32_t domain_id, unsigned int op,
     return rc;
 }
 
-void *xc_vm_event_enable(xc_interface *xch, uint32_t domain_id, int param,
+void *xc_vm_event_enable(xc_interface *xch, uint32_t domain_id, int type,
                          uint32_t *port)
 {
     void *ring_page = NULL;
     uint64_t pfn;
     xen_pfn_t ring_pfn, mmap_pfn;
-    unsigned int op, mode;
+    unsigned int param;
     int rc1, rc2, saved_errno;
 
     if ( !port )
@@ -54,6 +54,25 @@ void *xc_vm_event_enable(xc_interface *xch, uint32_t domain_id, int param,
         return NULL;
     }
 
+    switch ( type )
+    {
+    case XEN_VM_EVENT_TYPE_PAGING:
+        param = HVM_PARAM_PAGING_RING_PFN;
+        break;
+
+    case XEN_VM_EVENT_TYPE_MONITOR:
+        param = HVM_PARAM_MONITOR_RING_PFN;
+        break;
+
+    case XEN_VM_EVENT_TYPE_SHARING:
+        param = HVM_PARAM_SHARING_RING_PFN;
+        break;
+
+    default:
+        errno = EINVAL;
+        return NULL;
+    }
+
     /* Pause the domain for ring page setup */
     rc1 = xc_domain_pause(xch, domain_id);
     if ( rc1 != 0 )
@@ -94,34 +113,7 @@ void *xc_vm_event_enable(xc_interface *xch, uint32_t domain_id, int param,
         goto out;
     }
 
-    switch ( param )
-    {
-    case HVM_PARAM_PAGING_RING_PFN:
-        op = XEN_VM_EVENT_ENABLE;
-        mode = XEN_DOMCTL_VM_EVENT_OP_PAGING;
-        break;
-
-    case HVM_PARAM_MONITOR_RING_PFN:
-        op = XEN_VM_EVENT_ENABLE;
-        mode = XEN_DOMCTL_VM_EVENT_OP_MONITOR;
-        break;
-
-    case HVM_PARAM_SHARING_RING_PFN:
-        op = XEN_VM_EVENT_ENABLE;
-        mode = XEN_DOMCTL_VM_EVENT_OP_SHARING;
-        break;
-
-    /*
-     * This is for the outside chance that the HVM_PARAM is valid but is invalid
-     * as far as vm_event goes.
-     */
-    default:
-        errno = EINVAL;
-        rc1 = -1;
-        goto out;
-    }
-
-    rc1 = xc_vm_event_control(xch, domain_id, op, mode, port);
+    rc1 = xc_vm_event_control(xch, domain_id, XEN_VM_EVENT_ENABLE, type, port);
     if ( rc1 != 0 )
     {
         PERROR("Failed to enable vm_event\n");
@@ -164,7 +156,7 @@ int xc_vm_event_get_version(xc_interface *xch)
     domctl.cmd = XEN_DOMCTL_vm_event_op;
     domctl.domain = DOMID_INVALID;
     domctl.u.vm_event_op.op = XEN_VM_EVENT_GET_VERSION;
-    domctl.u.vm_event_op.mode = XEN_DOMCTL_VM_EVENT_OP_MONITOR;
+    domctl.u.vm_event_op.type = XEN_VM_EVENT_TYPE_MONITOR;
 
     rc = do_domctl(xch, &domctl);
     if ( !rc )
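
For context, not part of the patch: with xc_vm_event_get_version() now taking
the type constant internally, a consumer can still guard against interface
drift before enabling any ring. A sketch, assuming VM_EVENT_INTERFACE_VERSION
from xen/vm_event.h:

    #include <stdio.h>
    #include <xenctrl.h>
    #include <xen/vm_event.h>

    static int check_vm_event_version(xc_interface *xch)
    {
        int version = xc_vm_event_get_version(xch);

        if ( version < 0 )
            return -1;

        if ( version != VM_EVENT_INTERFACE_VERSION )
        {
            fprintf(stderr, "vm_event version mismatch: hypervisor %d, "
                    "consumer %d\n", version, VM_EVENT_INTERFACE_VERSION);
            return -1;
        }

        return 0;
    }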
diff --git a/xen/common/vm_event.c b/xen/common/vm_event.c
index e872680..56b506a 100644
--- a/xen/common/vm_event.c
+++ b/xen/common/vm_event.c
@@ -353,7 +353,7 @@ static int vm_event_resume(struct domain *d, struct vm_event_domain *ved)
     vm_event_response_t rsp;
 
     /*
-     * vm_event_resume() runs in either XEN_DOMCTL_VM_EVENT_OP_*, or
+     * vm_event_resume() runs in either XEN_VM_EVENT_* domctl context, or
      * EVTCHN_send context from the introspection consumer. Both contexts
      * are guaranteed not to be the subject of vm_event responses.
      * While we could ASSERT(v != current) for each VCPU in d in the loop
@@ -580,7 +580,7 @@ int vm_event_domctl(struct domain *d, struct xen_domctl_vm_event_op *vec)
         return 0;
     }
 
-    rc = xsm_vm_event_control(XSM_PRIV, d, vec->mode, vec->op);
+    rc = xsm_vm_event_control(XSM_PRIV, d, vec->type, vec->op);
     if ( rc )
         return rc;
 
@@ -607,10 +607,10 @@ int vm_event_domctl(struct domain *d, struct xen_domctl_vm_event_op *vec)
 
     rc = -ENOSYS;
 
-    switch ( vec->mode )
+    switch ( vec->type )
     {
 #ifdef CONFIG_HAS_MEM_PAGING
-    case XEN_DOMCTL_VM_EVENT_OP_PAGING:
+    case XEN_VM_EVENT_TYPE_PAGING:
     {
         rc = -EINVAL;
 
@@ -666,7 +666,7 @@ int vm_event_domctl(struct domain *d, struct xen_domctl_vm_event_op *vec)
     break;
 #endif
 
-    case XEN_DOMCTL_VM_EVENT_OP_MONITOR:
+    case XEN_VM_EVENT_TYPE_MONITOR:
     {
         rc = -EINVAL;
 
@@ -704,7 +704,7 @@ int vm_event_domctl(struct domain *d, struct xen_domctl_vm_event_op *vec)
     break;
 
 #ifdef CONFIG_HAS_MEM_SHARING
-    case XEN_DOMCTL_VM_EVENT_OP_SHARING:
+    case XEN_VM_EVENT_TYPE_SHARING:
     {
         rc = -EINVAL;
 
diff --git a/xen/include/public/domctl.h b/xen/include/public/domctl.h
index 19486d5..234d8c5 100644
--- a/xen/include/public/domctl.h
+++ b/xen/include/public/domctl.h
@@ -769,80 +769,18 @@ struct xen_domctl_gdbsx_domstatus {
  * VM event operations
  */
 
-/* XEN_DOMCTL_vm_event_op */
-
-/*
- * There are currently three rings available for VM events:
- * sharing, monitor and paging. This hypercall allows one to
- * control these rings (enable/disable), as well as to signal
- * to the hypervisor to pull responses (resume) from the given
- * ring.
+/* XEN_DOMCTL_vm_event_op.
+ * Use for teardown/setup of the helper<->hypervisor interface for paging,
+ * access and sharing.
  */
 #define XEN_VM_EVENT_ENABLE               0
 #define XEN_VM_EVENT_DISABLE              1
 #define XEN_VM_EVENT_RESUME               2
 #define XEN_VM_EVENT_GET_VERSION          3
 
-/*
- * Domain memory paging
- * Page memory in and out.
- * Domctl interface to set up and tear down the
- * pager<->hypervisor interface. Use XENMEM_paging_op*
- * to perform per-page operations.
- *
- * The XEN_VM_EVENT_PAGING_ENABLE domctl returns several
- * non-standard error codes to indicate why paging could not be enabled:
- * ENODEV - host lacks HAP support (EPT/NPT) or HAP is disabled in guest
- * EMLINK - guest has iommu passthrough enabled
- * EXDEV  - guest has PoD enabled
- * EBUSY  - guest has or had paging enabled, ring buffer still active
- */
-#define XEN_DOMCTL_VM_EVENT_OP_PAGING            1
-
-/*
- * Monitor helper.
- *
- * As with paging, use the domctl for teardown/setup of the
- * helper<->hypervisor interface.
- *
- * The monitor interface can be used to register for various VM events. For
- * example, there are HVM hypercalls to set the per-page access permissions
- * of every page in a domain.  When one of these permissions--independent,
- * read, write, and execute--is violated, the VCPU is paused and a memory event
- * is sent with what happened. The memory event handler can then resume the
- * VCPU and redo the access with a XEN_VM_EVENT_RESUME option.
- *
- * See public/vm_event.h for the list of available events that can be
- * subscribed to via the monitor interface.
- *
- * The XEN_VM_EVENT_MONITOR_* domctls returns
- * non-standard error codes to indicate why access could not be enabled:
- * ENODEV - host lacks HAP support (EPT/NPT) or HAP is disabled in guest
- * EBUSY  - guest has or had access enabled, ring buffer still active
- *
- */
-#define XEN_DOMCTL_VM_EVENT_OP_MONITOR           2
-
-/*
- * Sharing ENOMEM helper.
- *
- * As with paging, use the domctl for teardown/setup of the
- * helper<->hypervisor interface.
- *
- * If setup, this ring is used to communicate failed allocations
- * in the unshare path. XENMEM_sharing_op_resume is used to wake up
- * vcpus that could not unshare.
- *
- * Note that shring can be turned on (as per the domctl below)
- * *without* this ring being setup.
- */
-#define XEN_DOMCTL_VM_EVENT_OP_SHARING           3
-
-/* Use for teardown/setup of helper<->hypervisor interface for paging,
- * access and sharing.*/
 struct xen_domctl_vm_event_op {
     uint32_t       op;           /* XEN_VM_EVENT_* */
-    uint32_t       mode;         /* XEN_DOMCTL_VM_EVENT_OP_* */
+    uint32_t       type;         /* XEN_VM_EVENT_TYPE_* */
 
     union {
         struct {
@@ -1004,7 +942,7 @@ struct xen_domctl_psr_cmt_op {
  * Enable/disable monitoring various VM events.
  * This domctl configures what events will be reported to helper apps
  * via the ring buffer "MONITOR". The ring has to be first enabled
- * with the domctl XEN_DOMCTL_VM_EVENT_OP_MONITOR.
+ * with XEN_VM_EVENT_ENABLE.
  *
  * GET_CAPABILITIES can be used to determine which of these features is
  * available on a given platform.
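
For illustration, a sketch of the payload a caller now fills in; this mirrors
the patched xc_vm_event_control(), with "op" selecting the action and "type"
selecting the ring:

    #include <xen/domctl.h>
    #include <xen/vm_event.h>

    static struct xen_domctl_vm_event_op example_vm_event_op(void)
    {
        struct xen_domctl_vm_event_op vec = {
            .op   = XEN_VM_EVENT_ENABLE,        /* XEN_VM_EVENT_*      */
            .type = XEN_VM_EVENT_TYPE_MONITOR,  /* XEN_VM_EVENT_TYPE_* */
        };

        return vec;
    }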
diff --git a/xen/include/public/vm_event.h b/xen/include/public/vm_event.h
index 959083d..c48bc21 100644
--- a/xen/include/public/vm_event.h
+++ b/xen/include/public/vm_event.h
@@ -36,6 +36,37 @@
 #include "io/ring.h"
 
 /*
+ * There are currently three types of VM events.
+ */
+
+/*
+ * Domain memory paging
+ *
+ * Page memory in and out.
+ */
+#define XEN_VM_EVENT_TYPE_PAGING         1
+
+/*
+ * Monitor.
+ *
+ * The monitor interface can be used to register for various VM events. For
+ * example, there are HVM hypercalls to set the per-page access permissions
+ * of every page in a domain.  When one of these permissions--independent,
+ * read, write, and execute--is violated, the VCPU is paused and a memory event
+ * is sent with what happened. The memory event handler can then resume the
+ * VCPU and redo the access with a XEN_VM_EVENT_RESUME option.
+ */
+#define XEN_VM_EVENT_TYPE_MONITOR        2
+
+/*
+ * Sharing ENOMEM.
+ *
+ * Used to communicate failed allocations in the unshare path.
+ * XENMEM_sharing_op_resume is used to wake up vcpus that could not unshare.
+ */
+#define XEN_VM_EVENT_TYPE_SHARING        3
+
+/*
  * Memory event flags
  */
 
-- 
2.7.4
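
Usage note, sketched rather than prescribed: once a consumer has handled a
request and queued its response on the ring, XEN_VM_EVENT_RESUME asks Xen to
pull the responses and unpause the vcpus (monitor shown; paging and sharing
are analogous, and an event channel kick works as well). back_ring is assumed
to have been initialised by the consumer at enable time:

    #include <string.h>
    #include <xenctrl.h>
    #include <xen/vm_event.h>

    static int put_response_and_resume(xc_interface *xch, uint32_t domid,
                                       vm_event_back_ring_t *back_ring,
                                       vm_event_response_t *rsp)
    {
        /* Copy the response into the next free slot and publish it. */
        memcpy(RING_GET_RESPONSE(back_ring, back_ring->rsp_prod_pvt),
               rsp, sizeof(*rsp));
        back_ring->rsp_prod_pvt++;
        RING_PUSH_RESPONSES(back_ring);

        /* XEN_VM_EVENT_RESUME via the monitor wrapper. */
        return xc_monitor_resume(xch, domid);
    }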

