
[Xen-devel] [PATCH RFC v2 3/4] tools/libxc: Add APIs for PV mem_access



Add three APIs: xc_set_mem_access_default(), xc_mem_access_create_ring_page()
and xc_mem_access_get_ring_mfn(). xc_mem_event_enable() will call
xc_mem_access_create_ring_page() before enabling mem_access for PV
domains. This is not needed for HVM domains, where the page is created
at domain creation time. It can then call xc_mem_access_get_ring_mfn()
to get the mfn of the created page so that it can be mapped in. This is
equivalent to xc_hvm_param_get(HVM_PARAM_ACCESS_RING_PFN) for HVM
domains.
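
For illustration, the PV setup path in the listener-facing code then
looks roughly like this (a minimal sketch with error paths trimmed; the
xc_map_foreign_batch() mapping step is the pre-existing libxc call that
xc_mem_event_enable() already uses, not something added by this patch):

    uint64_t mfn;
    xen_pfn_t ring_pfn;
    void *ring_page;

    /* Have Xen allocate a ring page for the PV domain. */
    if ( xc_mem_access_create_ring_page(xch, domain_id) != 0 )
        return NULL;

    /* Look up the mfn of the page that was just created... */
    if ( xc_mem_access_get_ring_mfn(xch, domain_id, &mfn) != 0 )
        return NULL;

    /* ...and map it in, as is done with the pfn returned by
     * xc_hvm_param_get(HVM_PARAM_ACCESS_RING_PFN) for HVM domains. */
    ring_pfn = mfn;
    ring_page = xc_map_foreign_batch(xch, domain_id, PROT_READ | PROT_WRITE,
                                     &ring_pfn, 1);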

xc_set_mem_access_default() sets the default permission for a PV domain.
It should not be called for HVM domains. A mem_access listener for an
HVM domain does this in two steps:
    xc_set_mem_access(xch, domid, default_access, ~0ull, 0);
    xc_set_mem_access(xch, domid, default_access, 0, max_pages);
For a PV domain this is not possible, as the address translations are
done by the guest and the listener does not know all the mfns that
belong to the domain. This function performs the operation on behalf of
the mem_access listener. It was also not folded into the first step
above because of the way hypercall continuations work in the hypervisor.
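
In code, a PV listener replaces the two-step sequence above with a
single call (a sketch; domid and default_access are as in the HVM
example above):

    /* PV: one call; Xen iterates over the domain's pages on the
     * listener's behalf instead of taking a pfn range. */
    if ( xc_set_mem_access_default(xch, domid, default_access) != 0 )
        PERROR("Failed to set default access for PV domain");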

Signed-off-by: Aravindh Puthiyaparambil <aravindp@xxxxxxxxx>
Cc: Ian Jackson <ian.jackson@xxxxxxxxxxxxx>
Cc: Stefano Stabellini <stefano.stabellini@xxxxxxxxxxxxx>
Cc: Ian Campbell <ian.campbell@xxxxxxxxxx>

---
Changes from RFC v1:
Added xc_set_mem_access_default() API.
The ring page setup has been moved to xc_mem_event_enable() because of
the XSA-99 changes.
 
 tools/libxc/xc_mem_access.c | 42 ++++++++++++++++++++++++++++++++++++++++++
 tools/libxc/xc_mem_event.c  | 23 ++++++++++++++++++++++-
 tools/libxc/xc_private.h    |  9 +++++++++
 tools/libxc/xenctrl.h       | 28 +++++++++++++++++++++++++++-
 4 files changed, 100 insertions(+), 2 deletions(-)

diff --git a/tools/libxc/xc_mem_access.c b/tools/libxc/xc_mem_access.c
index 461f0e9..f7699fa 100644
--- a/tools/libxc/xc_mem_access.c
+++ b/tools/libxc/xc_mem_access.c
@@ -87,6 +87,48 @@ int xc_get_mem_access(xc_interface *xch,
     return rc;
 }
 
+int xc_set_mem_access_default(xc_interface *xch, domid_t domain_id,
+                              xenmem_access_t default_access)
+{
+    xen_mem_access_op_t mao =
+    {
+        .op     = XENMEM_access_op_set_default,
+        .domid  = domain_id,
+        .access = default_access
+    };
+
+    return do_memory_op(xch, XENMEM_access_op, &mao, sizeof(mao));
+}
+
+int xc_mem_access_create_ring_page(xc_interface *xch, domid_t domain_id)
+{
+    xen_mem_access_op_t mao =
+    {
+        .op    = XENMEM_access_op_create_ring_page,
+        .domid = domain_id
+    };
+
+    return do_memory_op(xch, XENMEM_access_op, &mao, sizeof(mao));
+}
+
+int xc_mem_access_get_ring_mfn(xc_interface *xch, domid_t domain_id,
+                               uint64_t *mfn)
+{
+    int rc;
+    xen_mem_access_op_t mao =
+    {
+        .op    = XENMEM_access_op_get_ring_mfn,
+        .domid = domain_id
+    };
+
+    rc = do_memory_op(xch, XENMEM_access_op, &mao, sizeof(mao));
+
+    if ( !rc )
+        *mfn = mao.pfn;
+
+    return rc;
+}
+
 /*
  * Local variables:
  * mode: C
diff --git a/tools/libxc/xc_mem_event.c b/tools/libxc/xc_mem_event.c
index faf1cc6..41be04f 100644
--- a/tools/libxc/xc_mem_event.c
+++ b/tools/libxc/xc_mem_event.c
@@ -64,6 +64,7 @@ void *xc_mem_event_enable(xc_interface *xch, domid_t domain_id, int param,
     xen_pfn_t ring_pfn, mmap_pfn;
     unsigned int op, mode;
     int rc1, rc2, saved_errno;
+    xc_domaininfo_t dom_info;
 
     if ( !port )
     {
@@ -71,6 +72,13 @@ void *xc_mem_event_enable(xc_interface *xch, domid_t domain_id, int param,
         return NULL;
     }
 
+    rc1 = xc_domain_getinfolist(xch, domain_id, 1, &dom_info);
+    if ( rc1 != 1 || dom_info.domain != domain_id )
+    {
+        PERROR("Error getting domain info\n");
+        return NULL;
+    }
+
     /* Pause the domain for ring page setup */
     rc1 = xc_domain_pause(xch, domain_id);
     if ( rc1 != 0 )
@@ -80,7 +88,20 @@ void *xc_mem_event_enable(xc_interface *xch, domid_t domain_id, int param,
     }
 
     /* Get the pfn of the ring page */
-    rc1 = xc_hvm_param_get(xch, domain_id, param, &pfn);
+    if ( dom_info.flags & XEN_DOMINF_hvm_guest )
+        rc1 = xc_hvm_param_get(xch, domain_id, param, &pfn);
+    else if ( param == HVM_PARAM_ACCESS_RING_PFN )
+    {
+        rc1 = xc_mem_access_create_ring_page(xch, domain_id);
+        if ( rc1 != 0 )
+        {
+            PERROR("Failed to create ring page\n");
+            goto out;
+        }
+
+        rc1 = xc_mem_access_get_ring_mfn(xch, domain_id, &pfn);
+    }
+
     if ( rc1 != 0 )
     {
         PERROR("Failed to get pfn of ring page\n");
diff --git a/tools/libxc/xc_private.h b/tools/libxc/xc_private.h
index 6cc0f2b..c583c26 100644
--- a/tools/libxc/xc_private.h
+++ b/tools/libxc/xc_private.h
@@ -367,4 +367,13 @@ int xc_mem_event_memop(xc_interface *xch, domid_t domain_id,
 void *xc_mem_event_enable(xc_interface *xch, domid_t domain_id, int param,
                           uint32_t *port);
 
+/*
+ * Create the ring page for PV domains. This need not be called for HVM domains.
+ */
+int xc_mem_access_create_ring_page(xc_interface *xch, domid_t domain_id);
+
+/* Get the mfn of the ring page for PV domains. */
+int xc_mem_access_get_ring_mfn(xc_interface *xch, domid_t domain_id,
+                               uint64_t *mfn);
+
 #endif /* __XC_PRIVATE_H__ */
diff --git a/tools/libxc/xenctrl.h b/tools/libxc/xenctrl.h
index 3578b09..2d25043 100644
--- a/tools/libxc/xenctrl.h
+++ b/tools/libxc/xenctrl.h
@@ -2260,15 +2260,25 @@ int xc_mem_paging_load(xc_interface *xch, domid_t domain_id,
  * Enables mem_access and returns the mapped ring page.
  * Will return NULL on error.
  * Caller has to unmap this page when done.
+ * Calling this for PV domains will enable shadow paging.
  */
 void *xc_mem_access_enable(xc_interface *xch, domid_t domain_id, uint32_t *port);
+
+/*
+ * For PV domains, this function has to be called even if xc_mem_access_enable()
+ * returns an error. This is to disable shadow paging and destroy the mem_access
+ * ring page.
+ */
 int xc_mem_access_disable(xc_interface *xch, domid_t domain_id);
 int xc_mem_access_resume(xc_interface *xch, domid_t domain_id);
 
 /*
  * Set a range of memory to a specific access.
  * Allowed types are XENMEM_access_default, XENMEM_access_n, any combination of
- * XENMEM_access_ + (rwx), and XENMEM_access_rx2rw
+ * XENMEM_access_ + (rwx), XENMEM_access_rx2rw and XENMEM_access_n2rwx for HVM
+ * domains.
+ * Allowed types are XENMEM_access_default, XENMEM_access_r, XENMEM_access_rw,
+ * XENMEM_access_rwx and XENMEM_access_rx2rw for PV domains.
  */
 int xc_set_mem_access(xc_interface *xch, domid_t domain_id,
                       xenmem_access_t access, uint64_t first_pfn,
@@ -2280,6 +2290,22 @@ int xc_set_mem_access(xc_interface *xch, domid_t domain_id,
 int xc_get_mem_access(xc_interface *xch, domid_t domain_id,
                       uint64_t pfn, xenmem_access_t *access);
 
+/*
+ * Set the default permission for a PV domain. This should not be called for HVM
+ * domains. A mem_access listener for an HVM domain does this in two steps:
+ * xc_set_mem_access(xch, domid, default_access, ~0ull, 0);
+ * xc_set_mem_access(xch, domid, default_access, 0, max_pages);
+ * However for a PV domain, this is not possible as the address translations are
+ * done by the guest and the listener does not know all the mfns that belong to
+ * the PV domain. This function performs the operation for the mem_access
+ * listener. Additionally this was not done as part of step 1 due to the way
+ * hypercall continuations work in the hypervisor.
+ * Allowed access types are XENMEM_access_r, XENMEM_access_rw, XENMEM_access_rwx
+ * and XENMEM_access_rx2rw.
+ */
+int xc_set_mem_access_default(xc_interface *xch, domid_t domain_id,
+                              xenmem_access_t default_access);
+
 /***
  * Memory sharing operations.
  *
-- 
1.9.1

