[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-devel] [PATCH RFC 5/8] libxl: introduce smt field



Setting it to true causes the vcpus assigned to the guest to be
exposed as an SMT-enabled topology. It then uses
libxl__cpuid_set_topology() to change the cpuid accordingly.
This is done *before* the user-specified cpuid policy is applied,
so that those settings can still override it. The number of SMT
threads exposed is the one supported by the host, and a new
helper routine, libxl__count_threads_per_core, is added to
fetch it.

This feature is useful on HT-enabled hosts so that hard-pinned
domains can see the right topology (i.e. SMT cores exposed as
SMT cores), which in turn lets the guest scheduler make
better decisions.

Signed-off-by: Joao Martins <joao.m.martins@xxxxxxxxxx>
---
CC: Ian Jackson <ian.jackson@xxxxxxxxxxxxx>
CC: Stefano Stabellini <stefano.stabellini@xxxxxxxxxxxxx>
CC: Ian Campbell <ian.campbell@xxxxxxxxxx>
CC: Wei Liu <wei.liu2@xxxxxxxxxx>
---
 tools/libxl/libxl_create.c   |  2 ++
 tools/libxl/libxl_dom.c      | 10 ++++++++++
 tools/libxl/libxl_internal.h |  1 +
 tools/libxl/libxl_types.idl  |  1 +
 tools/libxl/libxl_utils.c    | 17 +++++++++++++++++
 5 files changed, 31 insertions(+)

diff --git a/tools/libxl/libxl_create.c b/tools/libxl/libxl_create.c
index de5d27f..dac15d5 100644
--- a/tools/libxl/libxl_create.c
+++ b/tools/libxl/libxl_create.c
@@ -195,6 +195,8 @@ int libxl__domain_build_info_setdefault(libxl__gc *gc,
         b_info->num_vcpu_hard_affinity = b_info->max_vcpus;
     }
 
+    libxl_defbool_setdefault(&b_info->smt, false);
+
     libxl_defbool_setdefault(&b_info->numa_placement, true);
 
     if (b_info->max_memkb == LIBXL_MEMKB_DEFAULT)
diff --git a/tools/libxl/libxl_dom.c b/tools/libxl/libxl_dom.c
index 2269998..ff9356d 100644
--- a/tools/libxl/libxl_dom.c
+++ b/tools/libxl/libxl_dom.c
@@ -507,6 +507,16 @@ int libxl__build_post(libxl__gc *gc, uint32_t domid,
     }
 
     libxl_cpuid_apply_policy(ctx, domid);
+    if (info->type == LIBXL_DOMAIN_TYPE_HVM
+        && libxl_defbool_val(info->smt)) {
+
+        uint32_t threads = 0;
+
+        if (!libxl__count_threads_per_core(gc, &threads))
+            libxl__cpuid_set_topology(ctx, domid,
+                                      info->max_vcpus / threads, threads);
+    }
+
     if (info->cpuid != NULL)
         libxl_cpuid_set(ctx, domid, info->cpuid);
 
diff --git a/tools/libxl/libxl_internal.h b/tools/libxl/libxl_internal.h
index 903ad7b..7cc4de7 100644
--- a/tools/libxl/libxl_internal.h
+++ b/tools/libxl/libxl_internal.h
@@ -4033,6 +4033,7 @@ void libxl__bitmap_copy_best_effort(libxl__gc *gc, 
libxl_bitmap *dptr,
                                     const libxl_bitmap *sptr);
 
 int libxl__count_physical_sockets(libxl__gc *gc, int *sockets);
+int libxl__count_threads_per_core(libxl__gc *gc, uint32_t *threads);
 
 
 #define LIBXL_QEMU_USER_PREFIX "xen-qemuuser"
diff --git a/tools/libxl/libxl_types.idl b/tools/libxl/libxl_types.idl
index f04279e..fa4725a 100644
--- a/tools/libxl/libxl_types.idl
+++ b/tools/libxl/libxl_types.idl
@@ -421,6 +421,7 @@ libxl_domain_build_info = Struct("domain_build_info",[
     ("nodemap",         libxl_bitmap),
     ("vcpu_hard_affinity", Array(libxl_bitmap, "num_vcpu_hard_affinity")),
     ("vcpu_soft_affinity", Array(libxl_bitmap, "num_vcpu_soft_affinity")),
+    ("smt",             libxl_defbool),
     ("numa_placement",  libxl_defbool),
     ("tsc_mode",        libxl_tsc_mode),
     ("max_memkb",       MemKB),
diff --git a/tools/libxl/libxl_utils.c b/tools/libxl/libxl_utils.c
index e42422a..dd063c8 100644
--- a/tools/libxl/libxl_utils.c
+++ b/tools/libxl/libxl_utils.c
@@ -861,6 +861,23 @@ int libxl__count_physical_sockets(libxl__gc *gc, int 
*sockets)
     return 0;
 }
 
+/*
+ * Fetch the number of hardware threads per core on the host.
+ *
+ * On success returns 0 and stores the count in *threads; on failure
+ * returns the libxl error code from libxl_get_physinfo() and leaves
+ * *threads untouched.
+ */
+int libxl__count_threads_per_core(libxl__gc *gc, uint32_t *threads)
+{
+    int rc;
+    libxl_physinfo info;
+
+    libxl_physinfo_init(&info);
+
+    rc = libxl_get_physinfo(CTX, &info);
+    if (!rc)
+        *threads = info.threads_per_core;
+
+    /*
+     * Dispose unconditionally: the original returned early on error and
+     * skipped libxl_physinfo_dispose(), leaking the physinfo contents.
+     */
+    libxl_physinfo_dispose(&info);
+    return rc;
+}
+
 int libxl_socket_bitmap_alloc(libxl_ctx *ctx, libxl_bitmap *socketmap,
                               int max_sockets)
 {
-- 
2.1.4


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.