WARNING - OLD ARCHIVES

This is an archived copy of the Xen.org mailing list, which we have preserved to ensure that existing links to archives are not broken. The live archive, which contains the latest emails, can be found at http://lists.xen.org/
   
 
 
Xen 
 
Home Products Support Community News
 
   
 

xen-devel

[Xen-devel] [PATCH 16 of 25] libxc: convert memory op interface over to

To: xen-devel@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-devel] [PATCH 16 of 25] libxc: convert memory op interface over to hypercall buffers
From: Ian Campbell <ian.campbell@xxxxxxxxxx>
Date: Fri, 22 Oct 2010 15:15:58 +0100
Cc: Ian Campbell <ian.campbell@xxxxxxxxxx>
Delivery-date: Fri, 22 Oct 2010 07:47:14 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
In-reply-to: <patchbomb.1287756942@xxxxxxxxxxxxxxxxxxxxx>
List-help: <mailto:xen-devel-request@lists.xensource.com?subject=help>
List-id: Xen developer discussion <xen-devel.lists.xensource.com>
List-post: <mailto:xen-devel@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=unsubscribe>
Sender: xen-devel-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User Ian Campbell <ian.campbell@xxxxxxxxxx>
# Date 1287756891 -3600
# Node ID 63c5a929ae7ca0c82406e0cd33f95c82f219d59f
# Parent  ba4bc1c93fee7072c32d8a0a1aef61d6fb50e757
libxc: convert memory op interface over to hypercall buffers

Signed-off-by: Ian Campbell <ian.campbell@xxxxxxxxxx>

diff -r ba4bc1c93fee -r 63c5a929ae7c tools/libxc/xc_domain.c
--- a/tools/libxc/xc_domain.c   Fri Oct 22 15:14:51 2010 +0100
+++ b/tools/libxc/xc_domain.c   Fri Oct 22 15:14:51 2010 +0100
@@ -468,31 +468,30 @@ int xc_domain_set_memmap_limit(xc_interf
                                unsigned long map_limitkb)
 {
     int rc;
-
     struct xen_foreign_memory_map fmap = {
         .domid = domid,
         .map = { .nr_entries = 1 }
     };
+    DECLARE_HYPERCALL_BUFFER(struct e820entry, e820);
 
-    struct e820entry e820 = {
-        .addr = 0,
-        .size = (uint64_t)map_limitkb << 10,
-        .type = E820_RAM
-    };
+    e820 = xc_hypercall_buffer_alloc(xch, e820, sizeof(*e820));
 
-    set_xen_guest_handle(fmap.map.buffer, &e820);
+    if ( e820 == NULL )
+    {
+        PERROR("Could not allocate memory for xc_domain_set_memmap_limit hypercall");
+        return -1;
+    }
 
-    if ( lock_pages(xch, &e820, sizeof(e820)) )
-    {
-        PERROR("Could not lock memory for Xen hypercall");
-        rc = -1;
-        goto out;
-    }
+    e820->addr = 0;
+    e820->size = (uint64_t)map_limitkb << 10;
+    e820->type = E820_RAM;
+
+    xc_set_xen_guest_handle(fmap.map.buffer, e820);
 
     rc = do_memory_op(xch, XENMEM_set_memory_map, &fmap, sizeof(fmap));
 
- out:
-    unlock_pages(xch, &e820, sizeof(e820));
+    xc_hypercall_buffer_free(xch, e820);
+
     return rc;
 }
 #else
@@ -587,6 +586,7 @@ int xc_domain_increase_reservation(xc_in
                                    xen_pfn_t *extent_start)
 {
     int err;
+    DECLARE_HYPERCALL_BOUNCE(extent_start, nr_extents * sizeof(*extent_start), XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
     struct xen_memory_reservation reservation = {
         .nr_extents   = nr_extents,
         .extent_order = extent_order,
@@ -595,18 +595,17 @@ int xc_domain_increase_reservation(xc_in
     };
 
     /* may be NULL */
-    if ( extent_start && lock_pages(xch, extent_start, nr_extents * sizeof(xen_pfn_t)) != 0 )
+    if ( xc_hypercall_bounce_pre(xch, extent_start) )
     {
-        PERROR("Could not lock memory for XENMEM_increase_reservation hypercall");
+        PERROR("Could not bounce memory for XENMEM_increase_reservation hypercall");
         return -1;
     }
 
-    set_xen_guest_handle(reservation.extent_start, extent_start);
+    xc_set_xen_guest_handle(reservation.extent_start, extent_start);
 
     err = do_memory_op(xch, XENMEM_increase_reservation, &reservation, sizeof(reservation));
 
-    if ( extent_start )
-        unlock_pages(xch, extent_start, nr_extents * sizeof(xen_pfn_t));
+    xc_hypercall_bounce_post(xch, extent_start);
 
     return err;
 }
@@ -645,18 +644,13 @@ int xc_domain_decrease_reservation(xc_in
                                    xen_pfn_t *extent_start)
 {
     int err;
+    DECLARE_HYPERCALL_BOUNCE(extent_start, nr_extents * sizeof(*extent_start), XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
     struct xen_memory_reservation reservation = {
         .nr_extents   = nr_extents,
         .extent_order = extent_order,
         .mem_flags    = 0,
         .domid        = domid
     };
-
-    if ( lock_pages(xch, extent_start, nr_extents * sizeof(xen_pfn_t)) != 0 )
-    {
-        PERROR("Could not lock memory for XENMEM_decrease_reservation hypercall");
-        return -1;
-    }
 
     if ( extent_start == NULL )
     {
@@ -665,11 +659,16 @@ int xc_domain_decrease_reservation(xc_in
         return -1;
     }
 
-    set_xen_guest_handle(reservation.extent_start, extent_start);
+    if ( xc_hypercall_bounce_pre(xch, extent_start) )
+    {
+        PERROR("Could not bounce memory for XENMEM_decrease_reservation hypercall");
+        return -1;
+    }
+    xc_set_xen_guest_handle(reservation.extent_start, extent_start);
 
     err = do_memory_op(xch, XENMEM_decrease_reservation, &reservation, sizeof(reservation));
 
-    unlock_pages(xch, extent_start, nr_extents * sizeof(xen_pfn_t));
+    xc_hypercall_bounce_post(xch, extent_start);
 
     return err;
 }
@@ -722,6 +721,7 @@ int xc_domain_populate_physmap(xc_interf
                                xen_pfn_t *extent_start)
 {
     int err;
+    DECLARE_HYPERCALL_BOUNCE(extent_start, nr_extents * sizeof(*extent_start), XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
     struct xen_memory_reservation reservation = {
         .nr_extents   = nr_extents,
         .extent_order = extent_order,
@@ -729,18 +729,16 @@ int xc_domain_populate_physmap(xc_interf
         .domid        = domid
     };
 
-    if ( lock_pages(xch, extent_start, nr_extents * sizeof(xen_pfn_t)) != 0 )
+    if ( xc_hypercall_bounce_pre(xch, extent_start) )
     {
-        PERROR("Could not lock memory for XENMEM_populate_physmap hypercall");
+        PERROR("Could not bounce memory for XENMEM_populate_physmap hypercall");
         return -1;
     }
-
-    set_xen_guest_handle(reservation.extent_start, extent_start);
+    xc_set_xen_guest_handle(reservation.extent_start, extent_start);
 
     err = do_memory_op(xch, XENMEM_populate_physmap, &reservation, sizeof(reservation));
 
-    unlock_pages(xch, extent_start, nr_extents * sizeof(xen_pfn_t));
-
+    xc_hypercall_bounce_post(xch, extent_start);
     return err;
 }
 
@@ -778,8 +776,9 @@ int xc_domain_memory_exchange_pages(xc_i
                                     unsigned int out_order,
                                     xen_pfn_t *out_extents)
 {
-    int rc;
-
+    int rc = -1;
+    DECLARE_HYPERCALL_BOUNCE(in_extents, nr_in_extents*sizeof(*in_extents), XC_HYPERCALL_BUFFER_BOUNCE_IN);
+    DECLARE_HYPERCALL_BOUNCE(out_extents, nr_out_extents*sizeof(*out_extents), XC_HYPERCALL_BUFFER_BOUNCE_OUT);
     struct xen_memory_exchange exchange = {
         .in = {
             .nr_extents   = nr_in_extents,
@@ -792,10 +791,19 @@ int xc_domain_memory_exchange_pages(xc_i
             .domid        = domid
         }
     };
-    set_xen_guest_handle(exchange.in.extent_start, in_extents);
-    set_xen_guest_handle(exchange.out.extent_start, out_extents);
+
+    if ( xc_hypercall_bounce_pre(xch, in_extents) ||
+         xc_hypercall_bounce_pre(xch, out_extents))
+        goto out;
+
+    xc_set_xen_guest_handle(exchange.in.extent_start, in_extents);
+    xc_set_xen_guest_handle(exchange.out.extent_start, out_extents);
 
     rc = do_memory_op(xch, XENMEM_exchange, &exchange, sizeof(exchange));
+
+out:
+    xc_hypercall_bounce_post(xch, in_extents);
+    xc_hypercall_bounce_post(xch, out_extents);
 
     return rc;
 }
diff -r ba4bc1c93fee -r 63c5a929ae7c tools/libxc/xc_private.c
--- a/tools/libxc/xc_private.c  Fri Oct 22 15:14:51 2010 +0100
+++ b/tools/libxc/xc_private.c  Fri Oct 22 15:14:51 2010 +0100
@@ -430,23 +430,22 @@ int do_memory_op(xc_interface *xch, int 
 int do_memory_op(xc_interface *xch, int cmd, void *arg, size_t len)
 {
     DECLARE_HYPERCALL;
+    DECLARE_HYPERCALL_BOUNCE(arg, len, XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
     long ret = -EINVAL;
 
-    hypercall.op     = __HYPERVISOR_memory_op;
-    hypercall.arg[0] = (unsigned long)cmd;
-    hypercall.arg[1] = (unsigned long)arg;
-
-    if ( len && lock_pages(xch, arg, len) != 0 )
+    if ( xc_hypercall_bounce_pre(xch, arg) )
     {
-        PERROR("Could not lock memory for XENMEM hypercall");
+        PERROR("Could not bounce memory for XENMEM hypercall");
         goto out1;
     }
 
+    hypercall.op     = __HYPERVISOR_memory_op;
+    hypercall.arg[0] = (unsigned long) cmd;
+    hypercall.arg[1] = HYPERCALL_BUFFER_AS_ARG(arg);
+
     ret = do_xen_hypercall(xch, &hypercall);
 
-    if ( len )
-        unlock_pages(xch, arg, len);
-
+    xc_hypercall_bounce_post(xch, arg);
  out1:
     return ret;
 }
@@ -476,24 +475,25 @@ int xc_machphys_mfn_list(xc_interface *x
                         xen_pfn_t *extent_start)
 {
     int rc;
+    DECLARE_HYPERCALL_BOUNCE(extent_start, max_extents * sizeof(xen_pfn_t), XC_HYPERCALL_BUFFER_BOUNCE_OUT);
     struct xen_machphys_mfn_list xmml = {
         .max_extents = max_extents,
     };
 
-    if ( lock_pages(xch, extent_start, max_extents * sizeof(xen_pfn_t)) != 0 )
+    if ( xc_hypercall_bounce_pre(xch, extent_start) )
     {
-        PERROR("Could not lock memory for XENMEM_machphys_mfn_list hypercall");
+        PERROR("Could not bounce memory for XENMEM_machphys_mfn_list hypercall");
         return -1;
     }
 
-    set_xen_guest_handle(xmml.extent_start, extent_start);
+    xc_set_xen_guest_handle(xmml.extent_start, extent_start);
     rc = do_memory_op(xch, XENMEM_machphys_mfn_list, &xmml, sizeof(xmml));
     if (rc || xmml.nr_extents != max_extents)
         rc = -1;
     else
         rc = 0;
 
-    unlock_pages(xch, extent_start, max_extents * sizeof(xen_pfn_t));
+    xc_hypercall_bounce_post(xch, extent_start);
 
     return rc;
 }

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel

<Prev in Thread] Current Thread [Next in Thread>