
[Xen-devel] [PATCH 3/3] libxl: update vcpus bitmap in retrieved guest config

... because the available vcpu bitmap can change during the domain's
lifetime due to vcpu hotplug and unplug.

For QEMU upstream, we interrogate QEMU via QMP for the number of vcpus.
For all other cases, we read the availability information directly from
xenstore.
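
For reference, the xenstore nodes consulted look like this (illustrative
layout; the actual domid and number of vcpu entries vary per domain):

  /local/domain/<domid>/cpu/0/availability = "online"
  /local/domain/<domid>/cpu/1/availability = "offline"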

Reported-by: Jan Beulich <jbeulich@xxxxxxxx>
Signed-off-by: Wei Liu <wei.liu2@xxxxxxxxxx>
---
 tools/libxl/libxl.c | 91 +++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 91 insertions(+)
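
As a usage sketch (not part of this patch): once the bitmap is kept up to
date, a caller of libxl_retrieve_domain_configuration() can inspect it
along these lines. This assumes a libxl_ctx already initialised with
libxl_ctx_alloc(); dump_avail_vcpus is a made-up name for illustration:

  #include <stdio.h>
  #include <libxl.h>
  #include <libxl_utils.h>

  /* Illustrative only: print the current vcpu availability of a domain. */
  static void dump_avail_vcpus(libxl_ctx *ctx, uint32_t domid)
  {
      libxl_domain_config d_config;
      int i;

      libxl_domain_config_init(&d_config);
      if (!libxl_retrieve_domain_configuration(ctx, domid, &d_config)) {
          for (i = 0; i < d_config.b_info.max_vcpus; i++)
              printf("vcpu %d: %s\n", i,
                     libxl_bitmap_test(&d_config.b_info.avail_vcpus, i)
                     ? "available" : "not available");
      }
      libxl_domain_config_dispose(&d_config);
  }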

diff --git a/tools/libxl/libxl.c b/tools/libxl/libxl.c
index 006b83f..4f8b663 100644
--- a/tools/libxl/libxl.c
+++ b/tools/libxl/libxl.c
@@ -7222,6 +7222,57 @@ void libxl_mac_copy(libxl_ctx *ctx, libxl_mac *dst, libxl_mac *src)
         (*dst)[i] = (*src)[i];
 }
 
+static int libxl__update_avail_vcpus_qmp(libxl__gc *gc, uint32_t domid,
+                                         unsigned int max_vcpus,
+                                         libxl_bitmap *map)
+{
+    unsigned int count, i;
+    int rc;
+
+    /* For QEMU upstream we always need to return the number
+     * of cpus present to QEMU whether they are online or not;
+     * otherwise QEMU won't accept the saved state.
+     */
+    rc = libxl__qmp_query_cpus(gc, domid, &count);
+    if (rc) {
+        LOG(ERROR, "fail to get number of cpus for domain %d", domid);
+        goto out;
+    }
+
+    for (i = 0; i < count; i++)
+        libxl_bitmap_set(map, i);
+
+    rc = 0;
+out:
+    return rc;
+}
+
+static int libxl__update_avail_vcpus_xenstore(libxl__gc *gc, uint32_t domid,
+                                              unsigned int max_vcpus,
+                                              libxl_bitmap *map)
+{
+    int rc;
+    unsigned int i;
+    const char *dompath;
+
+    dompath = libxl__xs_get_dompath(gc, domid);
+    if (!dompath) {
+        rc = ERROR_FAIL;
+        goto out;
+    }
+
+    for (i = 0; i < max_vcpus; i++) {
+        const char *path = GCSPRINTF("%s/cpu/%u/availability", dompath, i);
+        const char *content = libxl__xs_read(gc, XBT_NULL, path);
+        if (content && !strcmp(content, "online"))
+            libxl_bitmap_set(map, i);
+    }
+
+    rc = 0;
+out:
+    return rc;
+}
+
 int libxl_retrieve_domain_configuration(libxl_ctx *ctx, uint32_t domid,
                                         libxl_domain_config *d_config)
 {
@@ -7270,6 +7321,46 @@ int libxl_retrieve_domain_configuration(libxl_ctx *ctx, uint32_t domid,
         libxl_dominfo_dispose(&info);
     }
 
+    /* VCPUs */
+    {
+        libxl_bitmap *map = &d_config->b_info.avail_vcpus;
+        unsigned int max_vcpus = d_config->b_info.max_vcpus;
+
+        libxl_bitmap_dispose(map);
+        libxl_bitmap_init(map);
+        libxl_bitmap_alloc(CTX, map, max_vcpus);
+        libxl_bitmap_set_none(map);
+
+        switch (d_config->b_info.type) {
+        case LIBXL_DOMAIN_TYPE_HVM:
+            switch (d_config->b_info.device_model_version) {
+            case LIBXL_DEVICE_MODEL_VERSION_QEMU_XEN:
+                rc = libxl__update_avail_vcpus_qmp(gc, domid,
+                                                   max_vcpus, map);
+                break;
+            case LIBXL_DEVICE_MODEL_VERSION_QEMU_XEN_TRADITIONAL:
+            case LIBXL_DEVICE_MODEL_VERSION_NONE:
+                rc = libxl__update_avail_vcpus_xenstore(gc, domid,
+                                                        max_vcpus, map);
+                break;
+            default:
+                abort();
+            }
+            break;
+        case LIBXL_DOMAIN_TYPE_PV:
+            rc = libxl__update_avail_vcpus_xenstore(gc, domid,
+                                                    max_vcpus, map);
+            break;
+        default:
+            abort();
+        }
+
+        if (rc) {
+            LOG(ERROR, "fail to update available cpu map for domain %d", 
domid);
+            goto out;
+        }
+    }
+
     /* Memory limits:
      *
      * Currently there are three memory limits:
-- 
2.1.4

