
[Xen-devel] [PATCH v4 4/4] xen: introduce XENMEM_exchange_and_pin and XENMEM_unpin



Introduce two new hypercalls XENMEM_exchange_and_pin and XENMEM_unpin.

XENMEM_exchange_and_pin behaves like XENMEM_exchange, but it also pins
the new pages: their p2m mappings are guaranteed not to change until
XENMEM_unpin is called.  XENMEM_exchange_and_pin returns the DMA frame
numbers of the new pages to the caller, even for an autotranslated
guest.
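
For illustration only (not part of this patch), a Linux-style guest
could drive the new hypercall roughly as follows.  pin_dma_buffer() and
its arguments are hypothetical; struct xen_memory_exchange,
set_xen_guest_handle(), HYPERVISOR_memory_op() and DOMID_SELF are the
usual guest-side interfaces, and XENMEMF_address_bits() is the existing
way to encode the @out.address_bits restriction noted further down:

    #include <xen/interface/memory.h>   /* struct xen_memory_exchange */
    #include <asm/xen/hypercall.h>      /* HYPERVISOR_memory_op() */

    /* Hypothetical sketch: trade gpfns[] for pinned, DMA-addressable
     * pages.  On success the hypervisor writes the DMA frame numbers
     * of the new pages into dma_mfns[]. */
    static int pin_dma_buffer(xen_pfn_t *gpfns, xen_pfn_t *dma_mfns,
                              xen_ulong_t nr)
    {
        struct xen_memory_exchange exch = {
            .in = {
                .nr_extents   = nr,
                .extent_order = 0,
                .domid        = DOMID_SELF,
            },
            .out = {
                .nr_extents   = nr,
                .extent_order = 0,
                /* e.g. a device limited to 32-bit DMA addresses */
                .mem_flags    = XENMEMF_address_bits(32),
                .domid        = DOMID_SELF,
            },
        };

        set_xen_guest_handle(exch.in.extent_start, gpfns);
        set_xen_guest_handle(exch.out.extent_start, dma_mfns);

        /* Returns 0 on complete success; on partial success a negative
         * error code is returned and exch.nr_exchanged says how far
         * the exchange got. */
        return HYPERVISOR_memory_op(XENMEM_exchange_and_pin, &exch);
    }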

The only effect of XENMEM_unpin is to "unpin" the previously
pinned pages. Afterwards the p2m mappings can be transparently changed by
the hypervisor as normal. The memory remains accessible from the guest.

Signed-off-by: Stefano Stabellini <stefano.stabellini@xxxxxxxxxxxxx>


Changes in v4:
- rename XENMEM_get_dma_buf to XENMEM_exchange_and_pin;
- rename XENMEM_put_dma_buf to XENMEM_unpin;
- move the pinning before we copy back the mfn to the guest;
- propagate errors returned by guest_physmap_pin_range;
- use xen_memory_exchange_t as parameter for XENMEM_exchange_and_pin;
- use an unsigned iterator in unpin;
- improve the documentation of the new hypercalls;
- add a note about out.address_bits for XENMEM_exchange.
---
 xen/common/memory.c         |   67 +++++++++++++++++++++++++++++++++++++++++-
 xen/include/public/memory.h |   38 ++++++++++++++++++++++++
 2 files changed, 103 insertions(+), 2 deletions(-)

diff --git a/xen/common/memory.c b/xen/common/memory.c
index 422b274..5d91d93 100644
--- a/xen/common/memory.c
+++ b/xen/common/memory.c
@@ -328,7 +328,7 @@ static void decrease_reservation(struct memop_args *a)
     a->nr_done = i;
 }
 
-static long memory_exchange(XEN_GUEST_HANDLE_PARAM(xen_memory_exchange_t) arg)
+static long memory_exchange(int op, XEN_GUEST_HANDLE_PARAM(xen_memory_exchange_t) arg)
 {
     struct xen_memory_exchange exch;
     PAGE_LIST_HEAD(in_chunk_list);
@@ -549,6 +549,15 @@ static long memory_exchange(XEN_GUEST_HANDLE_PARAM(xen_memory_exchange_t) arg)
             {
                 for ( k = 0; k < (1UL << exch.out.extent_order); k++ )
                     set_gpfn_from_mfn(mfn + k, gpfn + k);
+            }
+            if ( op == XENMEM_exchange_and_pin )
+            {
+                rc = guest_physmap_pin_range(d, gpfn, exch.out.extent_order);
+                if ( rc )
+                    continue;
+            }
+            if ( op == XENMEM_exchange_and_pin || !paging_mode_translate(d) )
+            {
                 if ( __copy_to_guest_offset(exch.out.extent_start,
                                             (i << out_chunk_order) + j,
                                             &mfn, 1) )
@@ -591,6 +600,55 @@ static long memory_exchange(XEN_GUEST_HANDLE_PARAM(xen_memory_exchange_t) arg)
     return rc;
 }
 
+static long unpin(XEN_GUEST_HANDLE_PARAM(xen_unpin_t) arg)
+{
+    int rc;
+    xen_ulong_t i;
+    struct xen_unpin unpin;
+    xen_pfn_t gpfn;
+    struct domain *d;
+
+    if ( copy_from_guest(&unpin, arg, 1) )
+        return -EFAULT;
+
+    /* Various sanity checks. */
+    if ( /* Extent orders are sensible? */
+         (unpin.in.extent_order > MAX_ORDER) ||
+         /* Sizes of input list do not overflow a long? */
+         ((~0UL >> unpin.in.extent_order) < unpin.in.nr_extents) )
+        return -EFAULT;
+
+    if ( !guest_handle_okay(unpin.in.extent_start, unpin.in.nr_extents) )
+        return -EFAULT;
+
+    d = rcu_lock_domain_by_any_id(unpin.in.domid);
+    if ( d == NULL )
+    {
+        rc = -ESRCH;
+        goto fail;
+    }
+
+    for ( i = 0; i < unpin.in.nr_extents; i++ )
+    {
+        if ( unlikely(__copy_from_guest_offset(
+                      &gpfn, unpin.in.extent_start, i, 1)) )
+        {
+            rc = -EFAULT;
+            goto fail;
+        }
+
+        rc = guest_physmap_unpin_range(d, gpfn, unpin.in.extent_order);
+        if ( rc )
+            goto fail;
+    }
+
+    rc = 0;
+
+ fail:
+    rcu_unlock_domain(d);
+    return rc;
+}
+
 long do_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
 {
     struct domain *d;
@@ -679,8 +737,13 @@ long do_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
 
         break;
 
+    case XENMEM_exchange_and_pin:
     case XENMEM_exchange:
-        rc = memory_exchange(guest_handle_cast(arg, xen_memory_exchange_t));
+        rc = memory_exchange(op, guest_handle_cast(arg, xen_memory_exchange_t));
+        break;
+
+    case XENMEM_unpin:
+        rc = unpin(guest_handle_cast(arg, xen_unpin_t));
         break;
 
     case XENMEM_maximum_ram_page:
diff --git a/xen/include/public/memory.h b/xen/include/public/memory.h
index 7a26dee..eb1c2a4 100644
--- a/xen/include/public/memory.h
+++ b/xen/include/public/memory.h
@@ -105,6 +105,7 @@ struct xen_memory_exchange {
     /*
      * [IN] Details of memory extents to be exchanged (GMFN bases).
      * Note that @in.address_bits is ignored and unused.
+     * @out.address_bits may restrict the maximum address width of the new pages.
      */
     struct xen_memory_reservation in;
 
@@ -459,6 +460,43 @@ DEFINE_XEN_GUEST_HANDLE(xen_mem_sharing_op_t);
  * The zero value is appropiate.
  */
 
+#define XENMEM_exchange_and_pin             26
+/*
+ * This hypercall is similar to XENMEM_exchange: it takes the same
+ * struct as an argument and it exchanges the pages passed in with a new
+ * set of pages. The new pages are going to be "pinned": it's guaranteed
+ * that their p2m mapping won't be changed until explicitly "unpinned".
+ * The content of the exchanged pages is lost.
+ * Only normal guest r/w memory can be pinned: no granted pages or
+ * ballooned pages.
+ * If the return code is zero then @out.extent_start provides the DMA frame
+ * numbers of the newly-allocated memory.
+ * Returns zero on complete success, otherwise a negative error code:
+ *   -ENOSYS if not implemented
+ *   -EINVAL if the page is already pinned
+ *   -EFAULT if the physical to machine walk fails
+ * On complete success, @nr_exchanged == @in.nr_extents.  On partial
+ * success, @nr_exchanged indicates how much work was done and a
+ * negative error code is returned.
+ */
+
+#define XENMEM_unpin             27
+/*
+ * XENMEM_unpin unpins a set of pages, previously pinned by
+ * XENMEM_exchange_and_pin. After this call the p2m mapping of the pages can
+ * be transparently changed by the hypervisor, as usual. The pages are
+ * still accessible from the guest.
+ */
+struct xen_unpin {
+    /*
+     * [IN] Details of memory extents to be unpinned (GMFN bases).
+     * Note that @in.address_bits is ignored and unused.
+     */
+    struct xen_memory_reservation in;
+};
+typedef struct xen_unpin xen_unpin_t;
+DEFINE_XEN_GUEST_HANDLE(xen_unpin_t);
+
 #endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */
 
 #endif /* __XEN_PUBLIC_MEMORY_H__ */
-- 
1.7.2.5

