[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-devel] [PATCH 5/6] xen, tools: calculate nr_cpus via num_online_cpus



Once Xen calculates nr_nodes properly, all nr_cpus calculations based on
nr_nodes * sockets_per_node * cores_per_socket * threads_per_core are
broken.  The easy fix is to replace those calculations with a new field,
nr_cpus, in physinfo, which is calculated via num_online_cpus().  This
patch does so and attempts to change all users over to the nr_cpus field
in physinfo.  This patch touches arch/ia64/xen/dom0_ops.c, but I've not
done any IA64 testing with this patch applied.

-- 
Ryan Harper
Software Engineer; Linux Technology Center
IBM Corp., Austin, Tx
(512) 838-9253   T/L: 678-9253
ryanh@xxxxxxxxxx


diffstat output:
 tools/python/xen/lowlevel/xc/xc.c      |    3 ++-
 tools/python/xen/xend/XendNode.py      |    4 ----
 tools/xenmon/xenbaked.c                |    5 +----
 tools/xenstat/libxenstat/src/xenstat.c |    4 +---
 tools/xentrace/xentrace.c              |    5 +----
 xen/arch/ia64/xen/dom0_ops.c           |    1 +
 xen/arch/x86/dom0_ops.c                |    2 +-
 xen/include/public/dom0_ops.h          |    1 +
 8 files changed, 8 insertions(+), 17 deletions(-)

Signed-off-by: Ryan Harper <ryanh@xxxxxxxxxx>
---
diff -r 51045f276c90 tools/python/xen/lowlevel/xc/xc.c
--- a/tools/python/xen/lowlevel/xc/xc.c Mon Jul 31 10:48:48 2006 -0500
+++ b/tools/python/xen/lowlevel/xc/xc.c Mon Jul 31 10:49:10 2006 -0500
@@ -631,10 +631,11 @@ static PyObject *pyxc_physinfo(XcObject 
     if(q>cpu_cap)
         *(q-1)=0;
 
-    ret_obj = Py_BuildValue("{s:i,s:i,s:i,s:l,s:l,s:l,s:i,s:s}",
+    ret_obj = Py_BuildValue("{s:i,s:i,s:i,s:i,s:l,s:l,s:l,s:i,s:s}",
                             "threads_per_core", info.threads_per_core,
                             "cores_per_socket", info.cores_per_socket,
                             "sockets_per_node", info.sockets_per_node,
+                            "nr_cpus"         , info.nr_cpus,
                             "total_memory",     pages_to_kib(info.total_pages),
                             "free_memory",      pages_to_kib(info.free_pages),
                             "scrub_memory",     pages_to_kib(info.scrub_pages),
diff -r 51045f276c90 tools/python/xen/xend/XendNode.py
--- a/tools/python/xen/xend/XendNode.py Mon Jul 31 10:48:48 2006 -0500
+++ b/tools/python/xen/xend/XendNode.py Mon Jul 31 10:49:10 2006 -0500
@@ -122,10 +122,6 @@ class XendNode:
     def physinfo(self):
         info = self.xc.physinfo()
 
-        info['nr_cpus'] = (info['nr_nodes'] *
-                           info['sockets_per_node'] *
-                           info['cores_per_socket'] *
-                           info['threads_per_core'])
         info['cpu_mhz'] = info['cpu_khz'] / 1000
         # physinfo is in KiB
         info['total_memory'] = info['total_memory'] / 1024
diff -r 51045f276c90 tools/xenmon/xenbaked.c
--- a/tools/xenmon/xenbaked.c   Mon Jul 31 10:48:48 2006 -0500
+++ b/tools/xenmon/xenbaked.c   Mon Jul 31 10:49:10 2006 -0500
@@ -462,10 +462,7 @@ unsigned int get_num_cpus(void)
     xc_interface_close(xc_handle);
     opts.cpu_freq = (double)op.u.physinfo.cpu_khz/1000.0;
 
-    return (op.u.physinfo.threads_per_core *
-            op.u.physinfo.cores_per_socket *
-            op.u.physinfo.sockets_per_node *
-            op.u.physinfo.nr_nodes);
+    return op.u.physinfo.nr_cpus;
 }
 
 
diff -r 51045f276c90 tools/xenstat/libxenstat/src/xenstat.c
--- a/tools/xenstat/libxenstat/src/xenstat.c    Mon Jul 31 10:48:48 2006 -0500
+++ b/tools/xenstat/libxenstat/src/xenstat.c    Mon Jul 31 10:49:10 2006 -0500
@@ -226,9 +226,7 @@ xenstat_node *xenstat_get_node(xenstat_h
        }
 
        node->cpu_hz = ((unsigned long long)physinfo.cpu_khz) * 1000ULL;
-       node->num_cpus =
-           (physinfo.threads_per_core * physinfo.cores_per_socket *
-            physinfo.sockets_per_node * physinfo.nr_nodes);
+       node->num_cpus = physinfo.nr_cpus;
        node->tot_mem = ((unsigned long long)physinfo.total_pages)
            * handle->page_size;
        node->free_mem = ((unsigned long long)physinfo.free_pages)
diff -r 51045f276c90 tools/xentrace/xentrace.c
--- a/tools/xentrace/xentrace.c Mon Jul 31 10:48:48 2006 -0500
+++ b/tools/xentrace/xentrace.c Mon Jul 31 10:49:10 2006 -0500
@@ -265,10 +265,7 @@ unsigned int get_num_cpus(void)
 
     xc_interface_close(xc_handle);
 
-    return (physinfo.threads_per_core *
-            physinfo.cores_per_socket *
-            physinfo.sockets_per_node *
-            physinfo.nr_nodes);
+    return physinfo.nr_cpus;
 }
 
 
diff -r 51045f276c90 xen/arch/ia64/xen/dom0_ops.c
--- a/xen/arch/ia64/xen/dom0_ops.c      Mon Jul 31 10:48:48 2006 -0500
+++ b/xen/arch/ia64/xen/dom0_ops.c      Mon Jul 31 10:49:10 2006 -0500
@@ -203,6 +203,7 @@ long arch_do_dom0_op(dom0_op_t *op, XEN_
             cpus_weight(cpu_core_map[0]) / pi->threads_per_core;
         pi->sockets_per_node = 
             num_online_cpus() / cpus_weight(cpu_core_map[0]);
+        pi->nr_cpus          = (u32)num_online_cpus();
         pi->nr_nodes         = 1;
         pi->total_pages      = total_pages; 
         pi->free_pages       = avail_domheap_pages();
diff -r 51045f276c90 xen/arch/x86/dom0_ops.c
--- a/xen/arch/x86/dom0_ops.c   Mon Jul 31 10:48:48 2006 -0500
+++ b/xen/arch/x86/dom0_ops.c   Mon Jul 31 10:49:10 2006 -0500
@@ -193,7 +193,7 @@ long arch_do_dom0_op(struct dom0_op *op,
             cpus_weight(cpu_core_map[0]) / pi->threads_per_core;
         pi->sockets_per_node = 
             num_online_cpus() / cpus_weight(cpu_core_map[0]);
-
+        pi->nr_cpus          = (u32)num_online_cpus();
         pi->total_pages      = total_pages;
         pi->free_pages       = avail_domheap_pages();
         pi->scrub_pages      = avail_scrub_pages();
diff -r 51045f276c90 xen/include/public/dom0_ops.h
--- a/xen/include/public/dom0_ops.h     Mon Jul 31 10:48:48 2006 -0500
+++ b/xen/include/public/dom0_ops.h     Mon Jul 31 10:49:10 2006 -0500
@@ -229,6 +229,7 @@ struct dom0_physinfo {
     uint32_t cores_per_socket;
     uint32_t sockets_per_node;
     uint32_t nr_nodes;
+    uint32_t nr_cpus;
     uint32_t cpu_khz;
     uint64_t total_pages;
     uint64_t free_pages;

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.