
[Xen-devel] [PATCH v11 2/5] tools/libxl: add command to show PSR hardware info



Add a dedicated command to show PSR hardware information. Example:

[root@vmm-psr]xl psr-hwinfo
Cache Monitoring Technology (CMT):
Enabled         : 1
Total RMID      : 63
Supported monitor types:
cache-occupancy
total-mem-bandwidth
local-mem-bandwidth

Signed-off-by: Chao Peng <chao.p.peng@xxxxxxxxxxxxxxx>
Reviewed-by: Dario Faggioli <dario.faggioli@xxxxxxxxxx>
Acked-by: Ian Campbell <ian.campbell@xxxxxxxxxx>
---
Changes in v6:
* Add SWITCH_FOREACH_OPT to make '-h' work.
---
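For reference only (not part of the patch): the new command is a thin wrapper
over the libxl CMT query calls used below. A minimal standalone sketch of the
same queries, assuming a CMT-capable libxl (LIBXL_HAVE_PSR_CMT) and linking
against -lxenlight -lxentoollog, might look roughly like:

/* Illustrative sketch only, not part of this patch: query the same CMT
 * information directly through libxl instead of via xl. */
#include <stdio.h>
#include <libxl.h>

int main(void)
{
    xentoollog_logger_stdiostream *lg;
    libxl_ctx *ctx = NULL;
    uint32_t total_rmid;

    /* Create a logger and a libxl context, as any libxl client must. */
    lg = xtl_createlogger_stdiostream(stderr, XTL_ERROR, 0);
    if (!lg || libxl_ctx_alloc(&ctx, LIBXL_VERSION, 0,
                               (xentoollog_logger *)lg))
        return 1;

    if (!libxl_psr_cmt_enabled(ctx)) {
        printf("CMT not enabled\n");
    } else if (!libxl_psr_cmt_get_total_rmid(ctx, &total_rmid)) {
        printf("CMT enabled, total RMID %u\n", total_rmid);
        if (libxl_psr_cmt_type_supported(ctx,
                                         LIBXL_PSR_CMT_TYPE_CACHE_OCCUPANCY))
            printf("cache-occupancy supported\n");
    }

    libxl_ctx_free(ctx);
    xtl_logger_destroy((xentoollog_logger *)lg);
    return 0;
}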
 docs/man/xl.pod.1         |  4 ++++
 tools/libxl/xl.h          |  1 +
 tools/libxl/xl_cmdimpl.c  | 41 +++++++++++++++++++++++++++++++++++++++++
 tools/libxl/xl_cmdtable.c |  5 +++++
 4 files changed, 51 insertions(+)

diff --git a/docs/man/xl.pod.1 b/docs/man/xl.pod.1
index 4eb929d..cebec46 100644
--- a/docs/man/xl.pod.1
+++ b/docs/man/xl.pod.1
@@ -1502,6 +1502,10 @@ for any of these monitoring types.
 
 =over 4
 
+=item B<psr-hwinfo>
+
+Show CMT hardware information.
+
 =item B<psr-cmt-attach> [I<domain-id>]
 
 attach: Attach the platform shared resource monitoring service to a domain.
diff --git a/tools/libxl/xl.h b/tools/libxl/xl.h
index 5bc138c..7b56449 100644
--- a/tools/libxl/xl.h
+++ b/tools/libxl/xl.h
@@ -113,6 +113,7 @@ int main_remus(int argc, char **argv);
 #endif
 int main_devd(int argc, char **argv);
 #ifdef LIBXL_HAVE_PSR_CMT
+int main_psr_hwinfo(int argc, char **argv);
 int main_psr_cmt_attach(int argc, char **argv);
 int main_psr_cmt_detach(int argc, char **argv);
 int main_psr_cmt_show(int argc, char **argv);
diff --git a/tools/libxl/xl_cmdimpl.c b/tools/libxl/xl_cmdimpl.c
index 5cc9dfd..922e97a 100644
--- a/tools/libxl/xl_cmdimpl.c
+++ b/tools/libxl/xl_cmdimpl.c
@@ -8113,6 +8113,36 @@ out:
 }
 
 #ifdef LIBXL_HAVE_PSR_CMT
+static int psr_cmt_hwinfo(void)
+{
+    int rc;
+    int enabled;
+    uint32_t total_rmid;
+
+    printf("Cache Monitoring Technology (CMT):\n");
+
+    enabled = libxl_psr_cmt_enabled(ctx);
+    printf("%-16s: %s\n", "Enabled", enabled ? "1" : "0");
+    if (!enabled)
+        return 0;
+
+    rc = libxl_psr_cmt_get_total_rmid(ctx, &total_rmid);
+    if (rc) {
+        fprintf(stderr, "Failed to get max RMID value\n");
+        return rc;
+    }
+    printf("%-16s: %u\n", "Total RMID", total_rmid);
+
+    printf("Supported monitor types:\n");
+    if (libxl_psr_cmt_type_supported(ctx, LIBXL_PSR_CMT_TYPE_CACHE_OCCUPANCY))
+        printf("cache-occupancy\n");
+    if (libxl_psr_cmt_type_supported(ctx, LIBXL_PSR_CMT_TYPE_TOTAL_MEM_COUNT))
+        printf("total-mem-bandwidth\n");
+    if (libxl_psr_cmt_type_supported(ctx, LIBXL_PSR_CMT_TYPE_LOCAL_MEM_COUNT))
+        printf("local-mem-bandwidth\n");
+
+    return rc;
+}
 
 #define MBM_SAMPLE_RETRY_MAX 4
 static int psr_cmt_get_mem_bandwidth(uint32_t domid,
@@ -8279,6 +8309,17 @@ static int psr_cmt_show(libxl_psr_cmt_type type, uint32_t domid)
     return 0;
 }
 
+int main_psr_hwinfo(int argc, char **argv)
+{
+    int opt;
+
+    SWITCH_FOREACH_OPT(opt, "", NULL, "psr-hwinfo", 0) {
+        /* No options */
+    }
+
+    return psr_cmt_hwinfo();
+}
+
 int main_psr_cmt_attach(int argc, char **argv)
 {
     uint32_t domid;
diff --git a/tools/libxl/xl_cmdtable.c b/tools/libxl/xl_cmdtable.c
index 12899d1..77a37c5 100644
--- a/tools/libxl/xl_cmdtable.c
+++ b/tools/libxl/xl_cmdtable.c
@@ -525,6 +525,11 @@ struct cmd_spec cmd_table[] = {
       "-F                      Run in the foreground",
     },
 #ifdef LIBXL_HAVE_PSR_CMT
+    { "psr-hwinfo",
+      &main_psr_hwinfo, 0, 1,
+      "Show hardware information for Platform Shared Resource",
+      "",
+    },
     { "psr-cmt-attach",
       &main_psr_cmt_attach, 0, 1,
       "Attach Cache Monitoring Technology service to a domain",
-- 
1.9.1

