[Xen-devel] [PATCH 13 of 18] libxc: make do_memory_op's callers responsible for locking indirect buffers

To: xen-devel@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-devel] [PATCH 13 of 18] libxc: make do_memory_op's callers responsible for locking indirect buffers
From: Ian Campbell <ian.campbell@xxxxxxxxxx>
Date: Tue, 12 Oct 2010 15:16:31 +0100
Cc: Ian Campbell <ian.campbell@xxxxxxxxxx>
Delivery-date: Tue, 12 Oct 2010 07:40:32 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
In-reply-to: <patchbomb.1286892978@xxxxxxxxxxxxxxxxxxxxx>
List-help: <mailto:xen-devel-request@lists.xensource.com?subject=help>
List-id: Xen developer discussion <xen-devel.lists.xensource.com>
List-post: <mailto:xen-devel@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=unsubscribe>
Sender: xen-devel-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User Ian Campbell <ian.campbell@xxxxxxxxxx>
# Date 1286892402 -3600
# Node ID 91597ec2218db759eef6916dec73ea42560c1504
# Parent  400adff91720efab6413ad73bba8329c715f58ba
libxc: make do_memory_op's callers responsible for locking indirect buffers

Push responsibility for locking buffers referred to by the memory_op
argument up into the callers (which are now all internal to libxc).

This removes the last of the introspection from do_memory_op and
generally makes the transition to hypercall buffers smoother.

Signed-off-by: Ian Campbell <ian.campbell@xxxxxxxxxx>
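
For reference, the caller-side pattern this change moves to looks roughly
like the sketch below. This is illustrative only: example_memory_op_caller
is a made-up wrapper, and the reservation fields and error handling are
simplified versions of the real callers in xc_domain.c.

/* Sketch of the caller-side locking pattern (assumes the libxc-internal
 * helpers lock_pages(), unlock_pages() and PERROR() from xc_private.h).
 * The caller, not do_memory_op(), locks any indirect buffer it passes
 * via a guest handle, issues the memory_op, then unlocks. */
static int example_memory_op_caller(xc_interface *xch, uint32_t domid,
                                    unsigned long nr_extents,
                                    xen_pfn_t *extent_start)
{
    int err;
    struct xen_memory_reservation reservation = {
        .nr_extents = nr_extents,
        .domid      = domid,
    };

    /* Lock the indirect buffer referenced by the hypercall argument. */
    if ( lock_pages(xch, extent_start, nr_extents * sizeof(xen_pfn_t)) != 0 )
    {
        PERROR("Could not lock memory for example hypercall");
        return -1;
    }

    set_xen_guest_handle(reservation.extent_start, extent_start);

    err = do_memory_op(xch, XENMEM_populate_physmap, &reservation, sizeof(reservation));

    unlock_pages(xch, extent_start, nr_extents * sizeof(xen_pfn_t));

    return err;
}

do_memory_op() itself continues to lock only the top-level argument buffer
(arg/len); everything reachable through guest handles is now the caller's
problem, as the hunks below show.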

diff -r 400adff91720 -r 91597ec2218d tools/libxc/xc_domain.c
--- a/tools/libxc/xc_domain.c   Tue Oct 12 15:06:42 2010 +0100
+++ b/tools/libxc/xc_domain.c   Tue Oct 12 15:06:42 2010 +0100
@@ -599,9 +599,18 @@ int xc_domain_increase_reservation(xc_in
     };
 
     /* may be NULL */
+    if ( extent_start && lock_pages(xch, extent_start, nr_extents * sizeof(xen_pfn_t)) != 0 )
+    {
+        PERROR("Could not lock memory for XENMEM_increase_reservation 
hypercall");
+        return -1;
+    }
+
     set_xen_guest_handle(reservation.extent_start, extent_start);
 
     err = do_memory_op(xch, XENMEM_increase_reservation, &reservation, sizeof(reservation));
+
+    if ( extent_start )
+        unlock_pages(xch, extent_start, nr_extents * sizeof(xen_pfn_t));
 
     return err;
 }
@@ -647,7 +656,11 @@ int xc_domain_decrease_reservation(xc_in
         .domid        = domid
     };
 
-    set_xen_guest_handle(reservation.extent_start, extent_start);
+    if ( lock_pages(xch, extent_start, nr_extents * sizeof(xen_pfn_t)) != 0 )
+    {
+        PERROR("Could not lock memory for XENMEM_decrease_reservation 
hypercall");
+        return -1;
+    }
 
     if ( extent_start == NULL )
     {
@@ -656,7 +669,11 @@ int xc_domain_decrease_reservation(xc_in
         return -1;
     }
 
+    set_xen_guest_handle(reservation.extent_start, extent_start);
+
     err = do_memory_op(xch, XENMEM_decrease_reservation, &reservation, sizeof(reservation));
+
+    unlock_pages(xch, extent_start, nr_extents * sizeof(xen_pfn_t));
 
     return err;
 }
@@ -715,9 +732,18 @@ int xc_domain_populate_physmap(xc_interf
         .mem_flags    = mem_flags,
         .domid        = domid
     };
+
+    if ( lock_pages(xch, extent_start, nr_extents * sizeof(xen_pfn_t)) != 0 )
+    {
+        PERROR("Could not lock memory for XENMEM_populate_physmap hypercall");
+        return -1;
+    }
+
     set_xen_guest_handle(reservation.extent_start, extent_start);
 
     err = do_memory_op(xch, XENMEM_populate_physmap, &reservation, sizeof(reservation));
+
+    unlock_pages(xch, extent_start, nr_extents * sizeof(xen_pfn_t));
 
     return err;
 }
diff -r 400adff91720 -r 91597ec2218d tools/libxc/xc_private.c
--- a/tools/libxc/xc_private.c  Tue Oct 12 15:06:42 2010 +0100
+++ b/tools/libxc/xc_private.c  Tue Oct 12 15:06:42 2010 +0100
@@ -424,9 +424,6 @@ int do_memory_op(xc_interface *xch, int 
 int do_memory_op(xc_interface *xch, int cmd, void *arg, size_t len)
 {
     DECLARE_HYPERCALL;
-    struct xen_memory_reservation *reservation = arg;
-    struct xen_machphys_mfn_list *xmml = arg;
-    xen_pfn_t *extent_start;
     long ret = -EINVAL;
 
     hypercall.op     = __HYPERVISOR_memory_op;
@@ -439,68 +436,10 @@ int do_memory_op(xc_interface *xch, int 
         goto out1;
     }
 
-    switch ( cmd )
-    {
-    case XENMEM_increase_reservation:
-    case XENMEM_decrease_reservation:
-    case XENMEM_populate_physmap:
-        get_xen_guest_handle(extent_start, reservation->extent_start);
-        if ( (extent_start != NULL) &&
-             (lock_pages(xch, extent_start,
-                    reservation->nr_extents * sizeof(xen_pfn_t)) != 0) )
-        {
-            PERROR("Could not lock");
-            unlock_pages(xch, reservation, sizeof(*reservation));
-            goto out1;
-        }
-        break;
-    case XENMEM_machphys_mfn_list:
-        get_xen_guest_handle(extent_start, xmml->extent_start);
-        if ( lock_pages(xch, extent_start,
-                   xmml->max_extents * sizeof(xen_pfn_t)) != 0 )
-        {
-            PERROR("Could not lock");
-            unlock_pages(xch, xmml, sizeof(*xmml));
-            goto out1;
-        }
-        break;
-    case XENMEM_add_to_physmap:
-    case XENMEM_current_reservation:
-    case XENMEM_maximum_reservation:
-    case XENMEM_maximum_gpfn:
-    case XENMEM_set_pod_target:
-    case XENMEM_get_pod_target:
-        break;
-    }
-
     ret = do_xen_hypercall(xch, &hypercall);
 
     if ( len )
         unlock_pages(xch, arg, len);
-
-    switch ( cmd )
-    {
-    case XENMEM_increase_reservation:
-    case XENMEM_decrease_reservation:
-    case XENMEM_populate_physmap:
-        get_xen_guest_handle(extent_start, reservation->extent_start);
-        if ( extent_start != NULL )
-            unlock_pages(xch, extent_start,
-                         reservation->nr_extents * sizeof(xen_pfn_t));
-        break;
-    case XENMEM_machphys_mfn_list:
-        get_xen_guest_handle(extent_start, xmml->extent_start);
-        unlock_pages(xch, extent_start,
-                     xmml->max_extents * sizeof(xen_pfn_t));
-        break;
-    case XENMEM_add_to_physmap:
-    case XENMEM_current_reservation:
-    case XENMEM_maximum_reservation:
-    case XENMEM_maximum_gpfn:
-    case XENMEM_set_pod_target:
-    case XENMEM_get_pod_target:
-        break;
-    }
 
  out1:
     return ret;
@@ -534,11 +473,23 @@ int xc_machphys_mfn_list(xc_interface *x
     struct xen_machphys_mfn_list xmml = {
         .max_extents = max_extents,
     };
+
+    if ( lock_pages(xch, extent_start, max_extents * sizeof(xen_pfn_t)) != 0 )
+    {
+        PERROR("Could not lock memory for XENMEM_machphys_mfn_list hypercall");
+        return -1;
+    }
+
     set_xen_guest_handle(xmml.extent_start, extent_start);
     rc = do_memory_op(xch, XENMEM_machphys_mfn_list, &xmml, sizeof(xmml));
     if (rc || xmml.nr_extents != max_extents)
-        return -1;
-    return 0;
+        rc = -1;
+    else
+        rc = 0;
+
+    unlock_pages(xch, extent_start, max_extents * sizeof(xen_pfn_t));
+
+    return rc;
 }
 
 #ifndef __ia64__

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel
