
[Xen-devel] [PATCH v2 03/12] x86/mm: add HYPERVISOR_memory_op to acquire guest resources



Certain memory resources associated with a guest are not necessarily
present in the guest's P2M and so cannot be foreign-mapped by a tools
domain unless they are first inserted into that P2M, which risks
shattering a super-page mapping.

This patch adds a new memory op to allow such resources to be
priv-mapped directly by either a PV or HVM tools domain.
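
For illustration, a PV tools domain would invoke the new op roughly as
follows (a sketch only, not part of the patch; 'domid' and 'rc' are
assumed to be declared by the caller, and the privcmd/hypercall
plumbing and error handling are elided):

    /*
     * Sketch: acquire the first two grant table frames of domain
     * 'domid' from a PV tools domain.
     */
    xen_pfn_t mfns[2];
    xen_mem_acquire_resource_t xmar = {
        .domid = domid,
        .type = XENMEM_resource_grant_table,
        .id = 0,              /* must be zero for grant_table */
        .nr_frames = 2,
        .frame = 0,           /* start with the first frame */
    };

    set_xen_guest_handle(xmar.gmfn_list, mfns);

    rc = HYPERVISOR_memory_op(XENMEM_acquire_resource, &xmar);
    /* On success, mfns[] now holds the MFNs of the grant table frames. */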

NOTE: Whilst the new op is not intrinsically specific to the x86
      architecture, I have no means to test it on an ARM platform and
      so cannot verify that it functions correctly. Hence it is
      currently only implemented for x86.

Signed-off-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
---
Cc: Jan Beulich <jbeulich@xxxxxxxx>
Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
Cc: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
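
For reviewers, a corresponding sketch of the HVM tools-domain side,
matching the IN/OUT gmfn_list semantics described in the new public
header comment (illustrative only; gfn0/gfn1 are caller-chosen unused
GFNs, declarations and error handling are elided):

    /*
     * Sketch: for an HVM tools domain gmfn_list is IN/OUT, so on entry
     * it must hold the GFNs at which the resource frames should appear
     * in the caller's own P2M.
     */
    xen_pfn_t gfns[2] = { gfn0, gfn1 };
    xen_mem_acquire_resource_t xmar = {
        .domid = domid,
        .type = XENMEM_resource_grant_table,
        .nr_frames = 2,
        .frame = 0,
    };

    set_xen_guest_handle(xmar.gmfn_list, gfns);

    rc = HYPERVISOR_memory_op(XENMEM_acquire_resource, &xmar);
    /* On success the frames are now mapped at gfn0 and gfn1. */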
---
 xen/arch/x86/mm.c           | 111 ++++++++++++++++++++++++++++++++++++++++++++
 xen/arch/x86/mm/p2m.c       |   3 +-
 xen/include/asm-x86/p2m.h   |   3 ++
 xen/include/public/memory.h |  38 ++++++++++++++-
 4 files changed, 152 insertions(+), 3 deletions(-)

diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index d71dc5f16a..c0cd63689a 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -4700,6 +4700,107 @@ int xenmem_add_to_physmap_one(
     return rc;
 }
 
+static int xenmem_acquire_grant_table(struct domain *d,
+                                      unsigned long frame,
+                                      unsigned long nr_frames,
+                                      unsigned long mfn_list[])
+{
+    unsigned int i;
+
+    /*
+     * Iterate through the list backwards so that gnttab_get_frame() is
+     * first called for the highest numbered frame. This means that the
+     * out-of-bounds check will be done on the first iteration and, if
+     * the table needs to grow, it will only grow once.
+     */
+    i = nr_frames;
+    while ( i-- != 0 )
+    {
+        mfn_t mfn = gnttab_get_frame(d, frame + i);
+
+        if ( mfn_eq(mfn, INVALID_MFN) )
+            return -EINVAL;
+
+        mfn_list[i] = mfn_x(mfn);
+    }
+
+    return 0;
+}
+
+static int xenmem_acquire_resource(xen_mem_acquire_resource_t *xmar)
+{
+    struct domain *d, *currd = current->domain;
+    unsigned long *mfn_list;
+    int rc;
+
+    if ( xmar->nr_frames == 0 )
+        return -EINVAL;
+
+    d = rcu_lock_domain_by_any_id(xmar->domid);
+    if ( d == NULL )
+        return -ESRCH;
+
+    rc = xsm_domain_memory_map(XSM_TARGET, d);
+    if ( rc )
+        goto out;
+
+    mfn_list = xmalloc_array(unsigned long, xmar->nr_frames);
+
+    rc = -ENOMEM;
+    if ( !mfn_list )
+        goto out;
+
+    switch ( xmar->type )
+    {
+    case XENMEM_resource_grant_table:
+        rc = -EINVAL;
+        if ( xmar->id ) /* must be zero for grant_table */
+            break;
+
+        rc = xenmem_acquire_grant_table(d, xmar->frame, xmar->nr_frames,
+                                        mfn_list);
+        break;
+
+    default:
+        rc = -EOPNOTSUPP;
+        break;
+    }
+
+    if ( rc )
+        goto free_and_out;
+
+    if ( !paging_mode_translate(currd) )
+    {
+        if ( __copy_to_guest_offset(xmar->gmfn_list, 0, mfn_list,
+                                    xmar->nr_frames) )
+            rc = -EFAULT;
+    }
+    else
+    {
+        unsigned int i;
+
+        for ( i = 0; i < xmar->nr_frames; i++ )
+        {
+            xen_pfn_t gfn;
+
+            rc = -EFAULT;
+            if ( __copy_from_guest_offset(&gfn, xmar->gmfn_list, i, 1) )
+                goto free_and_out;
+
+            rc = set_foreign_p2m_entry(currd, gfn, _mfn(mfn_list[i]));
+            if ( rc )
+                goto free_and_out;
+        }
+    }
+
+ free_and_out:
+    xfree(mfn_list);
+
+ out:
+    rcu_unlock_domain(d);
+    return rc;
+}
+
 long arch_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
 {
     int rc;
@@ -4922,6 +5023,16 @@ long arch_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
         return rc;
     }
 
+    case XENMEM_acquire_resource:
+    {
+        xen_mem_acquire_resource_t xmar;
+
+        if ( copy_from_guest(&xmar, arg, 1) )
+            return -EFAULT;
+
+        return xenmem_acquire_resource(&xmar);
+    }
+
     default:
         return subarch_memory_op(cmd, arg);
     }
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index e8a57d118c..c503a7f1d2 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -1118,8 +1118,7 @@ static int set_typed_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn,
 }
 
 /* Set foreign mfn in the given guest's p2m table. */
-static int set_foreign_p2m_entry(struct domain *d, unsigned long gfn,
-                                 mfn_t mfn)
+int set_foreign_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn)
 {
     return set_typed_p2m_entry(d, gfn, mfn, PAGE_ORDER_4K, p2m_map_foreign,
                                p2m_get_hostp2m(d)->default_access);
diff --git a/xen/include/asm-x86/p2m.h b/xen/include/asm-x86/p2m.h
index 6395e8fd1d..3ccec250d8 100644
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -613,6 +613,9 @@ void p2m_memory_type_changed(struct domain *d);
 int p2m_is_logdirty_range(struct p2m_domain *, unsigned long start,
                           unsigned long end);
 
+/* Set foreign entry in the p2m table (for priv-mapping) */
+int set_foreign_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn);
+
 /* Set mmio addresses in the p2m table (for pass-through) */
 int set_mmio_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn,
                        unsigned int order, p2m_access_t access);
diff --git a/xen/include/public/memory.h b/xen/include/public/memory.h
index 29386df98b..9bf58e7384 100644
--- a/xen/include/public/memory.h
+++ b/xen/include/public/memory.h
@@ -650,7 +650,43 @@ struct xen_vnuma_topology_info {
 typedef struct xen_vnuma_topology_info xen_vnuma_topology_info_t;
 DEFINE_XEN_GUEST_HANDLE(xen_vnuma_topology_info_t);
 
-/* Next available subop number is 28 */
+#if defined(__XEN__) || defined(__XEN_TOOLS__)
+
+/*
+ * Get the pages for a particular guest resource, so that they can be
+ * mapped directly by a tools domain.
+ */
+#define XENMEM_acquire_resource 28
+struct xen_mem_acquire_resource {
+    /* IN - the domain whose resource is to be mapped */
+    domid_t domid;
+    /* IN - the type of resource (defined below) */
+    uint16_t type;
+
+#define XENMEM_resource_grant_table 0
+
+    /*
+     * IN - a type-specific resource identifier, which must be zero
+     *      unless stated otherwise.
+     */
+    uint32_t id;
+    /* IN - number of (4K) frames of the resource to be mapped */
+    uint32_t nr_frames;
+    /* IN - the index of the initial frame to be mapped */
+    uint64_aligned_t frame;
+    /* IN/OUT - If the tools domain is PV then, upon return, gmfn_list
+     *          will be populated with the MFNs of the resource.
+     *          If the tools domain is HVM then it is expected that, on
+     *          entry, gmfn_list will be populated with a list of GFNs
+     *          that will be mapped to the MFNs of the resource.
+     */
+    XEN_GUEST_HANDLE(xen_pfn_t) gmfn_list;
+};
+typedef struct xen_mem_acquire_resource xen_mem_acquire_resource_t;
+
+#endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */
+
+/* Next available subop number is 29 */
 
 #endif /* __XEN_PUBLIC_MEMORY_H__ */
 
-- 
2.11.0

