
[Xen-devel] [PATCH v2 07/11] hvmctl: convert HVMOP_set_mem_type



This allows elimination of the (ab)use of the high operation number
bits for encoding continuations.

Also limiting "nr" at the libxc level to 32 bits (the high 32 bits of
the previous 64-bit parameter got ignore so far).
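
For readers skimming the interdiff, a condensed sketch of what changes
for continuations (pieced together from the hunks below; not literal
code):

    /*
     * Old do_hvm_op(): the resumption index was or-ed into the hypercall's
     * op number, so only the low 8 bits (HVMOP_op_mask) identified the
     * sub-op, and the continuation re-encoded both into a single argument.
     */
    start_iter = op & ~HVMOP_op_mask;
    rc = hypercall_create_continuation(__HYPERVISOR_hvm_op, "lh",
                                       (op & HVMOP_op_mask) | start_iter,
                                       arg);

    /*
     * New scheme: the resumption index travels in xen_hvmctl.opaque, which
     * the do_hvmctl() dispatcher hands to set_mem_type() as *iter, so the
     * op number stays a plain op number.
     */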

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
Reviewed-by: Wei Liu <wei.liu2@xxxxxxxxxx>
Reviewed-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>

--- a/tools/libxc/include/xenctrl.h
+++ b/tools/libxc/include/xenctrl.h
@@ -1627,7 +1627,8 @@ int xc_hvm_modified_memory(
  * Allowed types are HVMMEM_ram_rw, HVMMEM_ram_ro, HVMMEM_mmio_dm
  */
 int xc_hvm_set_mem_type(
-    xc_interface *xch, domid_t dom, hvmmem_type_t memtype, uint64_t first_pfn, uint64_t nr);
+    xc_interface *xch, domid_t dom, hvmmem_type_t memtype,
+    uint64_t first_gfn, uint32_t nr);
 
 /*
  * Injects a hardware/software CPU trap, to take effect the next time the HVM 
--- a/tools/libxc/xc_misc.c
+++ b/tools/libxc/xc_misc.c
@@ -568,30 +568,15 @@ int xc_hvm_modified_memory(
 }
 
 int xc_hvm_set_mem_type(
-    xc_interface *xch, domid_t dom, hvmmem_type_t mem_type, uint64_t first_pfn, uint64_t nr)
+    xc_interface *xch, domid_t dom, hvmmem_type_t mem_type,
+    uint64_t first_gfn, uint32_t nr)
 {
-    DECLARE_HYPERCALL_BUFFER(struct xen_hvm_set_mem_type, arg);
-    int rc;
+    DECLARE_HVMCTL(set_mem_type, dom,
+                   .hvmmem_type = mem_type,
+                   .first_gfn   = first_gfn,
+                   .nr          = nr);
 
-    arg = xc_hypercall_buffer_alloc(xch, arg, sizeof(*arg));
-    if ( arg == NULL )
-    {
-        PERROR("Could not allocate memory for xc_hvm_set_mem_type hypercall");
-        return -1;
-    }
-
-    arg->domid        = dom;
-    arg->hvmmem_type  = mem_type;
-    arg->first_pfn    = first_pfn;
-    arg->nr           = nr;
-
-    rc = xencall2(xch->xcall, __HYPERVISOR_hvm_op,
-                  HVMOP_set_mem_type,
-                  HYPERCALL_BUFFER_AS_ARG(arg));
-
-    xc_hypercall_buffer_free(xch, arg);
-
-    return rc;
+    return do_hvmctl(xch, &hvmctl);
 }
 
 int xc_hvm_inject_trap(
--- a/xen/arch/x86/hvm/control.c
+++ b/xen/arch/x86/hvm/control.c
@@ -136,6 +136,70 @@ static int modified_memory(struct domain
     return 0;
 }
 
+static int set_mem_type(struct domain *d,
+                        const struct xen_hvm_set_mem_type *op, uint64_t *iter)
+{
+    /* Interface types to internal p2m types. */
+    static const p2m_type_t memtype[] = {
+        [HVMMEM_ram_rw]  = p2m_ram_rw,
+        [HVMMEM_ram_ro]  = p2m_ram_ro,
+        [HVMMEM_mmio_dm] = p2m_mmio_dm,
+        [HVMMEM_unused]  = p2m_invalid
+    };
+
+    if ( !is_hvm_domain(d) )
+        return -EINVAL;
+
+    if ( op->rsvd || op->nr < *iter ||
+         ((op->first_gfn + op->nr - 1) < op->first_gfn) ||
+         ((op->first_gfn + op->nr - 1) > domain_get_maximum_gpfn(d)) )
+        return -EINVAL;
+
+    if ( op->hvmmem_type >= ARRAY_SIZE(memtype) ||
+         unlikely(op->hvmmem_type == HVMMEM_unused) )
+        return -EINVAL;
+
+    while ( op->nr > *iter )
+    {
+        unsigned long gfn = op->first_gfn + *iter;
+        p2m_type_t t;
+        int rc;
+
+        get_gfn_unshare(d, gfn, &t);
+
+        if ( p2m_is_paging(t) )
+        {
+            put_gfn(d, gfn);
+            p2m_mem_paging_populate(d, gfn);
+            return -EAGAIN;
+        }
+
+        if ( p2m_is_shared(t) )
+            rc = -EAGAIN;
+        else if ( !p2m_is_ram(t) &&
+                  (!p2m_is_hole(t) || op->hvmmem_type != HVMMEM_mmio_dm) &&
+                  (t != p2m_mmio_write_dm || op->hvmmem_type != HVMMEM_ram_rw) )
+            rc = -EINVAL;
+        else
+            rc = p2m_change_type_one(d, gfn, t, memtype[op->hvmmem_type]);
+
+        put_gfn(d, gfn);
+
+        if ( rc )
+            return rc;
+
+        /*
+         * Check for continuation every once in a while, and if it's not the
+         * last iteration.
+         */
+        if ( op->nr > ++*iter && !(*iter & 0xff) &&
+             hypercall_preempt_check() )
+            return -ERESTART;
+    }
+
+    return 0;
+}
+
 long do_hvmctl(XEN_GUEST_HANDLE_PARAM(xen_hvmctl_t) u_hvmctl)
 {
     xen_hvmctl_t op;
@@ -190,6 +254,10 @@ long do_hvmctl(XEN_GUEST_HANDLE_PARAM(xe
         rc = modified_memory(d, &op.u.modified_memory, &op.opaque);
         break;
 
+    case XEN_HVMCTL_set_mem_type:
+        rc = set_mem_type(d, &op.u.set_mem_type, &op.opaque);
+        break;
+
     default:
         rc = -EOPNOTSUPP;
         break;
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -5215,31 +5215,11 @@ static int do_altp2m_op(
     return rc;
 }
 
-/*
- * Note that this value is effectively part of the ABI, even if we don't need
- * to make it a formal part of it: A guest suspended for migration in the
- * middle of a continuation would fail to work if resumed on a hypervisor
- * using a different value.
- */
-#define HVMOP_op_mask 0xff
-
 long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg)
 {
-    unsigned long start_iter, mask;
     long rc = 0;
 
-    switch ( op & HVMOP_op_mask )
-    {
-    default:
-        mask = ~0UL;
-        break;
-    case HVMOP_set_mem_type:
-        mask = HVMOP_op_mask;
-        break;
-    }
-
-    start_iter = op & ~mask;
-    switch ( op &= mask )
+    switch ( op )
     {
     case HVMOP_create_ioreq_server:
         rc = hvmop_create_ioreq_server(
@@ -5339,92 +5319,6 @@ long do_hvm_op(unsigned long op, XEN_GUE
         break;
     }
 
-    case HVMOP_set_mem_type:
-    {
-        struct xen_hvm_set_mem_type a;
-        struct domain *d;
-        
-        /* Interface types to internal p2m types */
-        static const p2m_type_t memtype[] = {
-            [HVMMEM_ram_rw]  = p2m_ram_rw,
-            [HVMMEM_ram_ro]  = p2m_ram_ro,
-            [HVMMEM_mmio_dm] = p2m_mmio_dm,
-            [HVMMEM_unused] = p2m_invalid
-        };
-
-        if ( copy_from_guest(&a, arg, 1) )
-            return -EFAULT;
-
-        rc = rcu_lock_remote_domain_by_id(a.domid, &d);
-        if ( rc != 0 )
-            return rc;
-
-        rc = -EINVAL;
-        if ( !is_hvm_domain(d) )
-            goto setmemtype_fail;
-
-        rc = xsm_hvm_control(XSM_DM_PRIV, d, op);
-        if ( rc )
-            goto setmemtype_fail;
-
-        rc = -EINVAL;
-        if ( a.nr < start_iter ||
-             ((a.first_pfn + a.nr - 1) < a.first_pfn) ||
-             ((a.first_pfn + a.nr - 1) > domain_get_maximum_gpfn(d)) )
-            goto setmemtype_fail;
-            
-        if ( a.hvmmem_type >= ARRAY_SIZE(memtype) ||
-             unlikely(a.hvmmem_type == HVMMEM_unused) )
-            goto setmemtype_fail;
-
-        while ( a.nr > start_iter )
-        {
-            unsigned long pfn = a.first_pfn + start_iter;
-            p2m_type_t t;
-
-            get_gfn_unshare(d, pfn, &t);
-            if ( p2m_is_paging(t) )
-            {
-                put_gfn(d, pfn);
-                p2m_mem_paging_populate(d, pfn);
-                rc = -EAGAIN;
-                goto setmemtype_fail;
-            }
-            if ( p2m_is_shared(t) )
-            {
-                put_gfn(d, pfn);
-                rc = -EAGAIN;
-                goto setmemtype_fail;
-            }
-            if ( !p2m_is_ram(t) &&
-                 (!p2m_is_hole(t) || a.hvmmem_type != HVMMEM_mmio_dm) &&
-                 (t != p2m_mmio_write_dm || a.hvmmem_type != HVMMEM_ram_rw) )
-            {
-                put_gfn(d, pfn);
-                goto setmemtype_fail;
-            }
-
-            rc = p2m_change_type_one(d, pfn, t, memtype[a.hvmmem_type]);
-            put_gfn(d, pfn);
-            if ( rc )
-                goto setmemtype_fail;
-
-            /* Check for continuation if it's not the last interation */
-            if ( a.nr > ++start_iter && !(start_iter & HVMOP_op_mask) &&
-                 hypercall_preempt_check() )
-            {
-                rc = -ERESTART;
-                goto setmemtype_fail;
-            }
-        }
-
-        rc = 0;
-
-    setmemtype_fail:
-        rcu_unlock_domain(d);
-        break;
-    }
-
     case HVMOP_pagetable_dying:
     {
         struct xen_hvm_pagetable_dying a;
@@ -5533,13 +5427,6 @@ long do_hvm_op(unsigned long op, XEN_GUE
     }
     }
 
-    if ( rc == -ERESTART )
-    {
-        ASSERT(!(start_iter & mask));
-        rc = hypercall_create_continuation(__HYPERVISOR_hvm_op, "lh",
-                                           op | start_iter, arg);
-    }
-
     return rc;
 }
 
--- a/xen/include/public/hvm/control.h
+++ b/xen/include/public/hvm/control.h
@@ -77,6 +77,18 @@ struct xen_hvm_modified_memory {
     uint64_aligned_t first_gfn;
 };
 
+/* XEN_HVMCTL_set_mem_type */
+/* Notify that a region of memory is to be treated in a specific way. */
+struct xen_hvm_set_mem_type {
+    /* Memory type. */
+    uint16_t hvmmem_type;
+    uint16_t rsvd;
+    /* Number of pages. */
+    uint32_t nr;
+    /* First GFN. */
+    uint64_aligned_t first_gfn;
+};
+
 struct xen_hvmctl {
     uint16_t interface_version;    /* XEN_HVMCTL_INTERFACE_VERSION */
     domid_t domain;
@@ -86,6 +98,7 @@ struct xen_hvmctl {
 #define XEN_HVMCTL_set_pci_link_route            3
 #define XEN_HVMCTL_track_dirty_vram              4
 #define XEN_HVMCTL_modified_memory               5
+#define XEN_HVMCTL_set_mem_type                  6
     uint64_t opaque;               /* Must be zero on initial invocation. */
     union {
         struct xen_hvm_set_pci_intx_level set_pci_intx_level;
@@ -93,6 +106,7 @@ struct xen_hvmctl {
         struct xen_hvm_set_pci_link_route set_pci_link_route;
         struct xen_hvm_track_dirty_vram track_dirty_vram;
         struct xen_hvm_modified_memory modified_memory;
+        struct xen_hvm_set_mem_type set_mem_type;
         uint8_t pad[120];
     } u;
 };
--- a/xen/include/public/hvm/hvm_op.h
+++ b/xen/include/public/hvm/hvm_op.h
@@ -95,26 +95,6 @@ typedef enum {
 #endif
 } hvmmem_type_t;
 
-/* Following tools-only interfaces may change in future. */
-#if defined(__XEN__) || defined(__XEN_TOOLS__)
-
-#define HVMOP_set_mem_type    8
-/* Notify that a region of memory is to be treated in a specific way. */
-struct xen_hvm_set_mem_type {
-    /* Domain to be updated. */
-    domid_t domid;
-    /* Memory type */
-    uint16_t hvmmem_type;
-    /* Number of pages. */
-    uint32_t nr;
-    /* First pfn. */
-    uint64_aligned_t first_pfn;
-};
-typedef struct xen_hvm_set_mem_type xen_hvm_set_mem_type_t;
-DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_mem_type_t);
-
-#endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */
-
 /* Hint from PV drivers for pagetable destruction. */
 #define HVMOP_pagetable_dying        9
 struct xen_hvm_pagetable_dying {
--- a/xen/xsm/flask/policy/access_vectors
+++ b/xen/xsm/flask/policy/access_vectors
@@ -272,7 +272,7 @@ class hvm
     cacheattr
 # XEN_HVMCTL_track_dirty_vram
     trackdirtyvram
-# XEN_HVMCTL_modified_memory, HVMOP_get_mem_type, HVMOP_set_mem_type,
+# XEN_HVMCTL_modified_memory, HVMOP_get_mem_type, XEN_HVMCTL_set_mem_type,
 # HVMOP_set_mem_access, HVMOP_get_mem_access, HVMOP_pagetable_dying,
 # HVMOP_inject_trap
     hvmctl
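
For illustration only (not part of the patch), a hypothetical libxc
caller of the updated prototype, marking a single guest page as
device-model handled MMIO:

    #include <xenctrl.h>

    /* Hypothetical helper: mark one guest page as MMIO handled by the
     * device model, using the narrowed 32-bit "nr" parameter. */
    static int mark_mmio_page(xc_interface *xch, domid_t domid, uint64_t gfn)
    {
        /* Requests covering more pages than fit in 32 bits now need to
         * be split up by the caller. */
        return xc_hvm_set_mem_type(xch, domid, HVMMEM_mmio_dm, gfn, 1);
    }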


