[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [Xen-devel] [PATCH v2 3/3] xenpaging: updated code to use safer mem_event API's for setup and teardown.



On Sun, Aug 24, 2014 at 5:26 PM, Dushyant Behl <myselfdushyantbehl@xxxxxxxxx> wrote:
tools/libxc/xc_mem_paging.c: updated mem_paging enable and disable API's to use
the mem event enable and disable routines. The mem event API's take care of
security issues mentioned in XSA-99 and also provide more coarse grained behaviour.

tools/xenpaging/xenpaging.c: added calls to the new API's and removed the code
which duplicated the new API behaviour.

Signed-off-by: Dushyant Behl <myselfdushyantbehl@xxxxxxxxx>
---
 tools/libxc/xc_mem_paging.c | 34 ++++++++++++++---------------
 tools/libxc/xenctrl.h       | 14 ++++++++++--
 tools/xenpaging/xenpaging.c | 52 ++++++---------------------------------------
 3 files changed, 36 insertions(+), 64 deletions(-)
Reviewed-by: Andres Lagar-Cavilla <andres@xxxxxxxxxxxxxxxx>

Worth repeating: this is meant to serve multiple future in-tree users of mem paging (e.g. lazy restore).

Andres

diff --git a/tools/libxc/xc_mem_paging.c b/tools/libxc/xc_mem_paging.c
index 8aa7d4d..173df1e 100644
--- a/tools/libxc/xc_mem_paging.c
+++ b/tools/libxc/xc_mem_paging.c
@@ -23,28 +23,28 @@

Â#include "xc_private.h"

-
+/*
+ * Enables mem_paging and sets arg ring page equal to mapped page.
+ * Will return 0 on success and -1 on error.
+ */
Âint xc_mem_paging_enable(xc_interface *xch, domid_t domain_id,
-Â Â Â Â Â Â Â Â Â Â Â Â Âuint32_t *port)
+            Âuint32_t *port, void *ring_page,
+Â Â Â Â Â Â Â Â Â Â Â Â Âmem_event_back_ring_t *back_ring)
Â{
-Â Â if ( !port )
-Â Â {
-Â Â Â Â errno = EINVAL;
-Â Â Â Â return -1;
-Â Â }
-
-Â Â return xc_mem_event_control(xch, domain_id,
-Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â XEN_DOMCTL_MEM_EVENT_OP_PAGING_ENABLE,
-Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â XEN_DOMCTL_MEM_EVENT_OP_PAGING,
-Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â port);
+Â Â return xc_mem_event_enable(xch, domain_id,
+Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â ÂHVM_PARAM_PAGING_RING_PFN,
+Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Âport, ring_page, back_ring);
Â}

-int xc_mem_paging_disable(xc_interface *xch, domid_t domain_id)
+/*
+ * Disable mem_paging and unmap ring page.
+ * Will return 0 on success and -1 on error.
+ */
+int xc_mem_paging_disable(xc_interface *xch, domid_t domain_id, void *ring_page)
Â{
-Â Â return xc_mem_event_control(xch, domain_id,
-Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â XEN_DOMCTL_MEM_EVENT_OP_PAGING_DISABLE,
-Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â XEN_DOMCTL_MEM_EVENT_OP_PAGING,
-Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â NULL);
+Â Â return xc_mem_event_disable(xch, domain_id,
+Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â HVM_PARAM_PAGING_RING_PFN,
+Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â ring_page);
Â}

Âint xc_mem_paging_nominate(xc_interface *xch, domid_t domain_id, unsigned long gfn)
diff --git a/tools/libxc/xenctrl.h b/tools/libxc/xenctrl.h
index 546e6f8..23ef496 100644
--- a/tools/libxc/xenctrl.h
+++ b/tools/libxc/xenctrl.h
@@ -2244,8 +2244,18 @@ int xc_tmem_restore_extra(xc_interface *xch, int dom, int fd);
 * Hardware-Assisted Paging (i.e. Intel EPT, AMD NPT). Moreover, AMD NPT
 * support is considered experimental.
 */
-int xc_mem_paging_enable(xc_interface *xch, domid_t domain_id, uint32_t *port);
-int xc_mem_paging_disable(xc_interface *xch, domid_t domain_id);
+/*
+ * Enables mem_paging and sets arg ring page equal to mapped page.
+ * Returns 0 on success and -1 on error.
+ */
+int xc_mem_paging_enable(xc_interface *xch, domid_t domain_id,
+            Âuint32_t *port, void *ring_page,
+Â Â Â Â Â Â Â Â Â Â Â Â Âmem_event_back_ring_t *back_ring);
+/*
+ * Disables mem_paging and unmaps ring page.
+ * Returns 0 on success and -1 on error.
+ */
+int xc_mem_paging_disable(xc_interface *xch, domid_t domain_id, void *ring_page);
Âint xc_mem_paging_nominate(xc_interface *xch, domid_t domain_id,
              unsigned long gfn);
Âint xc_mem_paging_evict(xc_interface *xch, domid_t domain_id, unsigned long gfn);
diff --git a/tools/xenpaging/xenpaging.c b/tools/xenpaging/xenpaging.c
index 82c1ee4..4a841bf 100644
--- a/tools/xenpaging/xenpaging.c
+++ b/tools/xenpaging/xenpaging.c
@@ -337,40 +337,12 @@ static struct xenpaging *xenpaging_init(int argc, char *argv[])
    ÂPERROR("Could not bind to xenpaging watch\n");
    Âgoto err;
  Â}
-
-Â Â /* Map the ring page */
-Â Â xc_get_hvm_param(xch, paging->mem_event.domain_id,
-Â Â Â Â Â Â Â Â Â Â Â Â HVM_PARAM_PAGING_RING_PFN, &ring_pfn);
-Â Â mmap_pfn = ring_pfn;
-Â Â paging->mem_event.ring_page =
-Â Â Â Â xc_map_foreign_batch(xch, paging->mem_event.domain_id,
-Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â PROT_READ | PROT_WRITE, &mmap_pfn, 1);
-Â Â if ( mmap_pfn & XEN_DOMCTL_PFINFO_XTAB )
-Â Â {
-Â Â Â Â /* Map failed, populate ring page */
-Â Â Â Â rc = xc_domain_populate_physmap_exact(paging->xc_handle,
-Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â paging->mem_event.domain_id,
-Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â 1, 0, 0, &ring_pfn);
-Â Â Â Â if ( rc != 0 )
-Â Â Â Â {
-Â Â Â Â Â Â PERROR("Failed to populate ring gfn\n");
-Â Â Â Â Â Â goto err;
-Â Â Â Â }
-
-Â Â Â Â mmap_pfn = ring_pfn;
-Â Â Â Â paging->mem_event.ring_page =
-Â Â Â Â Â Â xc_map_foreign_batch(xch, paging->mem_event.domain_id,
-Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â PROT_READ | PROT_WRITE, &mmap_pfn, 1);
-Â Â Â Â if ( mmap_pfn & XEN_DOMCTL_PFINFO_XTAB )
-Â Â Â Â {
-Â Â Â Â Â Â PERROR("Could not map the ring page\n");
-Â Â Â Â Â Â goto err;
-Â Â Â Â }
-Â Â }

-Â Â /* Initialise Xen */
+Â Â /* Enable mem paging and initialize shared ring to communicate with xen. */
  Ârc = xc_mem_paging_enable(xch, paging->mem_event.domain_id,
-Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â&paging->mem_event.evtchn_port);
+Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â &paging->mem_event.evtchn_port,
+Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â paging->mem_event.ring_page,
+Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â &paging->mem_event.back_ring);
  Âif ( rc != 0 )
  Â{
    Âswitch ( errno ) {
@@ -413,17 +385,6 @@ static struct xenpaging *xenpaging_init(int argc, char *argv[])

  Âpaging->mem_event.port = rc;

-Â Â /* Initialise ring */
-Â Â SHARED_RING_INIT((mem_event_sring_t *)paging->mem_event.ring_page);
-Â Â BACK_RING_INIT(&paging->mem_event.back_ring,
-Â Â Â Â Â Â Â Â Â Â(mem_event_sring_t *)paging->mem_event.ring_page,
-Â Â Â Â Â Â Â Â Â ÂPAGE_SIZE);
-
-Â Â /* Now that the ring is set, remove it from the guest's physmap */
-Â Â if ( xc_domain_decrease_reservation_exact(xch,
-Â Â Â Â Â Â Â Â Â Â paging->mem_event.domain_id, 1, 0, &ring_pfn) )
-Â Â Â Â PERROR("Failed to remove ring from guest physmap");
-
  Â/* Get max_pages from guest if not provided via cmdline */
  Âif ( !paging->max_pages )
  Â{
@@ -523,9 +484,10 @@ static void xenpaging_teardown(struct xenpaging *paging)
  Âxs_unwatch(paging->xs_handle, "@releaseDomain", watch_token);

  Âpaging->xc_handle = NULL;
+
  Â/* Tear down domain paging in Xen */
-Â Â munmap(paging->mem_event.ring_page, PAGE_SIZE);
-Â Â rc = xc_mem_paging_disable(xch, paging->mem_event.domain_id);
+Â Â rc = xc_mem_paging_disable(xch, paging->mem_event.domain_id,
+Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â Âpaging->mem_event.ring_page);
  Âif ( rc != 0 )
  Â{
    ÂPERROR("Error tearing down domain paging in xen");
--
1.9.1


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel

 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.