
[PATCH for-4.15] dmop: Add XEN_DMOP_nr_vcpus



Curiously absent from the stable API/ABIs is an ability to query the number of
vcpus which a domain has.  Emulators in particular need this information to
know how many struct ioreq objects live in the ioreq server mappings.

In practice, this forces all userspace to link against libxenctrl to use
xc_domain_getinfo(), which rather defeats the purpose of the stable libraries.

Introduce a DMOP to retrieve this information and surface it in
libxendevicemodel to help emulators shed their use of unstable interfaces.

Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
CC: Jan Beulich <JBeulich@xxxxxxxx>
CC: Roger Pau Monné <roger.pau@xxxxxxxxxx>
CC: Wei Liu <wl@xxxxxxx>
CC: Paul Durrant <paul@xxxxxxx>
CC: Stefano Stabellini <sstabellini@xxxxxxxxxx>
CC: Julien Grall <julien@xxxxxxx>
CC: Volodymyr Babchuk <Volodymyr_Babchuk@xxxxxxxx>
CC: Ian Jackson <iwj@xxxxxxxxxxxxxx>

For 4.15.  This was a surprise discovery in the massive ABI untangling effort
I'm currently doing for XenServer's new build system.

This is one new read-only op to obtain information which isn't otherwise
available under a stable API/ABI.  As such, its risk for 4.15 is very low,
with a very real quality-of-life improvement for downstreams.

I realise this is technically a new feature and we're long past feature
freeze, but I'm hoping that "really lets some emulators move off the unstable
libraries" is sufficiently convincing argument.

It's not sufficient to let Qemu move off the unstable libraries yet - at a
minimum, the add_to_physmap hypercalls need stabilising to support PCI
passthrough and BAR remapping.

I'd prefer not to duplicate the op handling between ARM and x86, and if this
weren't a release window, I'd submit a prereq patch to dedup the common dmop
handling.  That can wait until 4.16 at this point.  Also, this op ought to work
against x86 PV guests, but fixing that up will also need this rearrangement
into common code, so it needs to wait.
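
As a reference point, here is a minimal sketch (not part of the patch) of how
an emulator might consume the new call, assuming it is built against
libxendevicemodel only, with the standard xendevicemodel_open()/close()
pairing; error reporting is abbreviated:

    #include <stdio.h>
    #include <stdlib.h>

    #include <xendevicemodel.h>

    int main(int argc, char **argv)
    {
        xendevicemodel_handle *dmod;
        unsigned int vcpus = 0;
        domid_t domid;
        int rc;

        if ( argc != 2 )
        {
            fprintf(stderr, "usage: %s <domid>\n", argv[0]);
            return EXIT_FAILURE;
        }
        domid = atoi(argv[1]);

        dmod = xendevicemodel_open(NULL, 0);
        if ( !dmod )
        {
            perror("xendevicemodel_open");
            return EXIT_FAILURE;
        }

        rc = xendevicemodel_nr_vcpus(dmod, domid, &vcpus);
        if ( rc )
            perror("xendevicemodel_nr_vcpus");
        else
            /* This bounds e.g. the number of struct ioreq slots in the
             * ioreq server resource mapping. */
            printf("dom%u has %u vcpus\n", (unsigned int)domid, vcpus);

        xendevicemodel_close(dmod);
        return rc ? EXIT_FAILURE : EXIT_SUCCESS;
    }

Link with -lxendevicemodel alone - no libxenctrl involved, which is the point
of the exercise.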
---
 tools/include/xendevicemodel.h               | 10 ++++++++++
 tools/libs/devicemodel/core.c                | 15 +++++++++++++++
 tools/libs/devicemodel/libxendevicemodel.map |  1 +
 xen/arch/arm/dm.c                            | 10 ++++++++++
 xen/arch/x86/hvm/dm.c                        | 11 +++++++++++
 xen/include/public/hvm/dm_op.h               | 15 +++++++++++++++
 xen/include/xlat.lst                         |  1 +
 7 files changed, 63 insertions(+)

diff --git a/tools/include/xendevicemodel.h b/tools/include/xendevicemodel.h
index c06b3c84b9..33698d67f3 100644
--- a/tools/include/xendevicemodel.h
+++ b/tools/include/xendevicemodel.h
@@ -358,6 +358,16 @@ int xendevicemodel_pin_memory_cacheattr(
     uint32_t type);
 
 /**
+ * Query for the number of vCPUs that a domain has.
+ * @parm dmod a handle to an open devicemodel interface.
+ * @parm domid the domain id to be serviced.
+ * @parm vcpus pointer to be filled with the number of vcpus.
+ * @return 0 on success and fills @p vcpus, or -1 on failure.
+ */
+int xendevicemodel_nr_vcpus(
+    xendevicemodel_handle *dmod, domid_t domid, unsigned int *vcpus);
+
+/**
  * This function restricts the use of this handle to the specified
  * domain.
  *
diff --git a/tools/libs/devicemodel/core.c b/tools/libs/devicemodel/core.c
index 30bd79f8ba..8e619eeb0a 100644
--- a/tools/libs/devicemodel/core.c
+++ b/tools/libs/devicemodel/core.c
@@ -630,6 +630,21 @@ int xendevicemodel_pin_memory_cacheattr(
     return xendevicemodel_op(dmod, domid, 1, &op, sizeof(op));
 }
 
+int xendevicemodel_nr_vcpus(
+    xendevicemodel_handle *dmod, domid_t domid, unsigned int *vcpus)
+{
+    struct xen_dm_op op = {
+        .op = XEN_DMOP_nr_vcpus,
+    };
+
+    int rc = xendevicemodel_op(dmod, domid, 1, &op, sizeof(op));
+    if ( rc )
+        return rc;
+
+    *vcpus = op.u.nr_vcpus.vcpus;
+    return 0;
+}
+
 int xendevicemodel_restrict(xendevicemodel_handle *dmod, domid_t domid)
 {
     return osdep_xendevicemodel_restrict(dmod, domid);
diff --git a/tools/libs/devicemodel/libxendevicemodel.map b/tools/libs/devicemodel/libxendevicemodel.map
index 733549327b..f7f9e3d932 100644
--- a/tools/libs/devicemodel/libxendevicemodel.map
+++ b/tools/libs/devicemodel/libxendevicemodel.map
@@ -42,4 +42,5 @@ VERS_1.3 {
 VERS_1.4 {
        global:
                xendevicemodel_set_irq_level;
+               xendevicemodel_nr_vcpus;
 } VERS_1.3;
diff --git a/xen/arch/arm/dm.c b/xen/arch/arm/dm.c
index 785413372c..d689e336fd 100644
--- a/xen/arch/arm/dm.c
+++ b/xen/arch/arm/dm.c
@@ -38,6 +38,7 @@ int dm_op(const struct dmop_args *op_args)
         [XEN_DMOP_set_ioreq_server_state]           = sizeof(struct xen_dm_op_set_ioreq_server_state),
         [XEN_DMOP_destroy_ioreq_server]             = sizeof(struct xen_dm_op_destroy_ioreq_server),
         [XEN_DMOP_set_irq_level]                    = sizeof(struct xen_dm_op_set_irq_level),
+        [XEN_DMOP_nr_vcpus]                         = sizeof(struct xen_dm_op_nr_vcpus),
     };
 
     rc = rcu_lock_remote_domain_by_id(op_args->domid, &d);
@@ -122,6 +123,15 @@ int dm_op(const struct dmop_args *op_args)
         break;
     }
 
+    case XEN_DMOP_nr_vcpus:
+    {
+        struct xen_dm_op_nr_vcpus *data = &op.u.nr_vcpus;
+
+        data->vcpus = d->max_vcpus;
+        rc = 0;
+        break;
+    }
+
     default:
         rc = ioreq_server_dm_op(&op, d, &const_op);
         break;
diff --git a/xen/arch/x86/hvm/dm.c b/xen/arch/x86/hvm/dm.c
index 612749442e..f4f0910463 100644
--- a/xen/arch/x86/hvm/dm.c
+++ b/xen/arch/x86/hvm/dm.c
@@ -359,6 +359,7 @@ int dm_op(const struct dmop_args *op_args)
         [XEN_DMOP_remote_shutdown]                  = sizeof(struct xen_dm_op_remote_shutdown),
         [XEN_DMOP_relocate_memory]                  = sizeof(struct xen_dm_op_relocate_memory),
         [XEN_DMOP_pin_memory_cacheattr]             = sizeof(struct xen_dm_op_pin_memory_cacheattr),
+        [XEN_DMOP_nr_vcpus]                         = sizeof(struct xen_dm_op_nr_vcpus),
     };
 
     rc = rcu_lock_remote_domain_by_id(op_args->domid, &d);
@@ -606,6 +607,15 @@ int dm_op(const struct dmop_args *op_args)
         break;
     }
 
+    case XEN_DMOP_nr_vcpus:
+    {
+        struct xen_dm_op_nr_vcpus *data = &op.u.nr_vcpus;
+
+        data->vcpus = d->max_vcpus;
+        rc = 0;
+        break;
+    }
+
     default:
         rc = ioreq_server_dm_op(&op, d, &const_op);
         break;
@@ -641,6 +651,7 @@ CHECK_dm_op_map_mem_type_to_ioreq_server;
 CHECK_dm_op_remote_shutdown;
 CHECK_dm_op_relocate_memory;
 CHECK_dm_op_pin_memory_cacheattr;
+CHECK_dm_op_nr_vcpus;
 
 int compat_dm_op(domid_t domid,
                  unsigned int nr_bufs,
diff --git a/xen/include/public/hvm/dm_op.h b/xen/include/public/hvm/dm_op.h
index 1f70d58caa..ee97997238 100644
--- a/xen/include/public/hvm/dm_op.h
+++ b/xen/include/public/hvm/dm_op.h
@@ -449,6 +449,20 @@ struct xen_dm_op_set_irq_level {
 };
 typedef struct xen_dm_op_set_irq_level xen_dm_op_set_irq_level_t;
 
+/*
+ * XEN_DMOP_nr_vcpus: Query the number of vCPUs a domain has.
+ *
+ * The number of vcpus a domain has is fixed from creation time.  This bound
+ * is applicable to e.g. the vcpuid parameter of XEN_DMOP_inject_event, or
+ * number of struct ioreq objects mapped via XENMEM_acquire_resource.
+ */
+#define XEN_DMOP_nr_vcpus 20
+
+struct xen_dm_op_nr_vcpus {
+    uint32_t vcpus; /* OUT */
+};
+typedef struct xen_dm_op_nr_vcpus xen_dm_op_nr_vcpus_t;
+
 struct xen_dm_op {
     uint32_t op;
     uint32_t pad;
@@ -472,6 +486,7 @@ struct xen_dm_op {
         xen_dm_op_remote_shutdown_t remote_shutdown;
         xen_dm_op_relocate_memory_t relocate_memory;
         xen_dm_op_pin_memory_cacheattr_t pin_memory_cacheattr;
+        xen_dm_op_nr_vcpus_t nr_vcpus;
     } u;
 };
 
diff --git a/xen/include/xlat.lst b/xen/include/xlat.lst
index 398993d5f4..cbbd20c958 100644
--- a/xen/include/xlat.lst
+++ b/xen/include/xlat.lst
@@ -107,6 +107,7 @@
 ?      dm_op_set_pci_intx_level        hvm/dm_op.h
 ?      dm_op_set_pci_link_route        hvm/dm_op.h
 ?      dm_op_track_dirty_vram          hvm/dm_op.h
+?      dm_op_nr_vcpus                  hvm/dm_op.h
 !      hvm_altp2m_set_mem_access_multi hvm/hvm_op.h
 ?      vcpu_hvm_context                hvm/hvm_vcpu.h
 ?      vcpu_hvm_x86_32                 hvm/hvm_vcpu.h
-- 
2.11.0