
[Xen-devel] [PATCH v2 1/4] xen: move XEN_SYSCTL_physinfo, XEN_SYSCTL_numainfo and XEN_SYSCTL_topologyinfo to common code



Move XEN_SYSCTL_physinfo, XEN_SYSCTL_numainfo and
XEN_SYSCTL_topologyinfo from x86/sysctl.c to common/sysctl.c.
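
For context, this sysctl backs tools like "xl info". A minimal sketch of
a dom0 caller, going through libxc's xc_physinfo() wrapper (wrapper and
field names assumed from the libxc of this era, not part of this patch):

    #include <stdio.h>
    #include <xenctrl.h>

    int main(void)
    {
        /* Open a libxc handle and issue XEN_SYSCTL_physinfo. */
        xc_interface *xch = xc_interface_open(NULL, NULL, 0);
        xc_physinfo_t pi = { 0 };

        if ( xch && !xc_physinfo(xch, &pi) )
            printf("%u CPUs @ %u kHz, %lu pages of RAM\n",
                   pi.nr_cpus, pi.cpu_khz, (unsigned long)pi.total_pages);
        if ( xch )
            xc_interface_close(xch);
        return 0;
    }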

The implementation of XEN_SYSCTL_physinfo is mostly generic but needs a
few arch-specific details filled in: introduce arch_do_physinfo to do
that.
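
Condensed from the hunks below, the resulting split looks like this
(a sketch, with most physinfo fields elided):

    /* xen/include/xen/sched.h */
    void arch_do_physinfo(xen_sysctl_physinfo_t *pi);

    /* xen/common/sysctl.c: fill in the generic fields ... */
    case XEN_SYSCTL_physinfo:
    {
        xen_sysctl_physinfo_t *pi = &op->u.physinfo;

        memset(pi, 0, sizeof(*pi));
        pi->nr_cpus = num_online_cpus();
        pi->total_pages = total_pages;
        pi->cpu_khz = cpu_khz;
        /* ... */
        arch_do_physinfo(pi);   /* ... then let the arch add its own */
    }
    break;

    /* xen/arch/arm/sysctl.c: nothing arch-specific to report yet */
    void arch_do_physinfo(xen_sysctl_physinfo_t *pi) { }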

The implementation of XEN_SYSCTL_physinfo relies on two global
variables: total_pages and cpu_khz. Make them available on ARM.
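
Condensed from the mm.c, setup.c and time.c hunks below:

    /* xen/arch/arm/mm.c */
    unsigned long total_pages;

    /* xen/arch/arm/setup.c, setup_mm() */
    total_pages = ram_pages = ram_size >> PAGE_SHIFT;

    /* xen/arch/arm/time.c */
    unsigned long __read_mostly cpu_khz;  /* CPU clock frequency in kHz. */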

Implement node_spanned_pages and __node_distance on ARM, assuming a
single NUMA node for now.
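
With those stubs (see the asm-arm/mm.h hunk below), the common
XEN_SYSCTL_numainfo handler reports everything on node 0. For example,
on a hypothetical board with 1 GiB of RAM and 4 KiB pages:

    max_node_index           = 0
    node_to_memsize[0]       = node_spanned_pages(0) << PAGE_SHIFT
                             = total_pages << 12 = 262144 << 12 = 1 GiB
    node_to_node_distance[0] = __node_distance(0, 0) = 20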


Changes in v2:
- cpu_khz is in kHz while cntfrq is in Hz: take care of the conversion
  (see the sketch after this list);
- rebased on 77d3a1db3196b1b5864469f8d3f41d496800c795.
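
The conversion can be sanity-checked standalone. A minimal sketch, with
muldiv64() re-implemented here via a 128-bit intermediate to match the
semantics of Xen's helper (the 24 MHz frequency is made up):

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for Xen's muldiv64(): (a * b) / c without 64-bit overflow. */
    static uint64_t muldiv64(uint64_t a, uint32_t b, uint32_t c)
    {
        return (uint64_t)(((unsigned __int128)a * b) / c);
    }

    #define SECONDS(s) ((uint64_t)(s) * 1000000000ULL)  /* ns, as in Xen */

    int main(void)
    {
        uint32_t cntfrq = 24000000;            /* timer ticks at 24 MHz */
        unsigned long cpu_khz = cntfrq / 1000; /* stored in kHz: 24000 */

        /* ticks_to_ns() must scale by 1000 * cpu_khz, i.e. back to Hz,
         * so one second's worth of ticks converts to exactly 1e9 ns: */
        printf("%llu ns\n", (unsigned long long)
               muldiv64(cntfrq, SECONDS(1), 1000 * cpu_khz));
        return 0;
    }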

Signed-off-by: Stefano Stabellini <stefano.stabellini@xxxxxxxxxxxxx>
---
 xen/arch/arm/mm.c        |    1 +
 xen/arch/arm/setup.c     |    2 +-
 xen/arch/arm/sysctl.c    |    2 +
 xen/arch/arm/time.c      |   10 ++--
 xen/arch/x86/sysctl.c    |  123 +++------------------------------------------
 xen/common/sysctl.c      |  109 ++++++++++++++++++++++++++++++++++++++++
 xen/include/asm-arm/mm.h |    3 +
 xen/include/xen/sched.h  |    2 +
 8 files changed, 132 insertions(+), 120 deletions(-)

diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c
index acb6771..4d3073b 100644
--- a/xen/arch/arm/mm.c
+++ b/xen/arch/arm/mm.c
@@ -58,6 +58,7 @@ unsigned long frametable_base_mfn __read_mostly;
 unsigned long frametable_virt_end __read_mostly;
 
 unsigned long max_page;
+unsigned long total_pages;
 
 extern char __init_begin[], __init_end[];
 
diff --git a/xen/arch/arm/setup.c b/xen/arch/arm/setup.c
index 1e730a7..24dbe69 100644
--- a/xen/arch/arm/setup.c
+++ b/xen/arch/arm/setup.c
@@ -119,7 +119,7 @@ static void __init setup_mm(unsigned long dtb_paddr, size_t dtb_size)
     ram_start = early_info.mem.bank[0].start;
     ram_size  = early_info.mem.bank[0].size;
     ram_end = ram_start + ram_size;
-    ram_pages = ram_size >> PAGE_SHIFT;
+    total_pages = ram_pages = ram_size >> PAGE_SHIFT;
 
     /*
      * Calculate the sizes for the heaps using these constraints:
diff --git a/xen/arch/arm/sysctl.c b/xen/arch/arm/sysctl.c
index a286abe..a5d9cf0 100644
--- a/xen/arch/arm/sysctl.c
+++ b/xen/arch/arm/sysctl.c
@@ -12,6 +12,8 @@
 #include <xen/errno.h>
 #include <public/sysctl.h>
 
+void arch_do_physinfo(xen_sysctl_physinfo_t *pi) { }
+
 long arch_do_sysctl(struct xen_sysctl *sysctl,
                     XEN_GUEST_HANDLE_PARAM(xen_sysctl_t) u_sysctl)
 {
diff --git a/xen/arch/arm/time.c b/xen/arch/arm/time.c
index 07628e1..3dad9b3 100644
--- a/xen/arch/arm/time.c
+++ b/xen/arch/arm/time.c
@@ -43,16 +43,16 @@ uint64_t __read_mostly boot_count;
 
 /* For fine-grained timekeeping, we use the ARM "Generic Timer", a
  * register-mapped time source in the SoC. */
-static uint32_t __read_mostly cntfrq;      /* Ticks per second */
+unsigned long __read_mostly cpu_khz;  /* CPU clock frequency in kHz. */
 
 /*static inline*/ s_time_t ticks_to_ns(uint64_t ticks)
 {
-    return muldiv64(ticks, SECONDS(1), cntfrq);
+    return muldiv64(ticks, SECONDS(1), 1000 * cpu_khz);
 }
 
 /*static inline*/ uint64_t ns_to_ticks(s_time_t ns)
 {
-    return muldiv64(ns, cntfrq, SECONDS(1));
+    return muldiv64(ns, 1000 * cpu_khz, SECONDS(1));
 }
 
 /* TODO: On a real system the firmware would have set the frequency in
@@ -93,9 +93,9 @@ int __init init_xen_time(void)
     if ( (READ_CP32(ID_PFR1) & ID_PFR1_GT_MASK) != ID_PFR1_GT_v1 )
         panic("CPU does not support the Generic Timer v1 interface.\n");
 
-    cntfrq = READ_CP32(CNTFRQ);
+    cpu_khz = READ_CP32(CNTFRQ) / 1000;
     boot_count = READ_CP64(CNTPCT);
-    printk("Using generic timer at %"PRIu32" Hz\n", cntfrq);
+    printk("Using generic timer at %lu KHz\n", cpu_khz);
 
     return 0;
 }
diff --git a/xen/arch/x86/sysctl.c b/xen/arch/x86/sysctl.c
index d0be4be..b4d3e32 100644
--- a/xen/arch/x86/sysctl.c
+++ b/xen/arch/x86/sysctl.c
@@ -57,6 +57,15 @@ long cpu_down_helper(void *data)
     return ret;
 }
 
+void arch_do_physinfo(xen_sysctl_physinfo_t *pi)
+{
+    memcpy(pi->hw_cap, boot_cpu_data.x86_capability, NCAPINTS*4);
+    if ( hvm_enabled )
+        pi->capabilities |= XEN_SYSCTL_PHYSCAP_hvm;
+    if ( iommu_enabled )
+        pi->capabilities |= XEN_SYSCTL_PHYSCAP_hvm_directio;
+}
+
 long arch_do_sysctl(
     struct xen_sysctl *sysctl, XEN_GUEST_HANDLE_PARAM(xen_sysctl_t) u_sysctl)
 {
@@ -65,120 +74,6 @@ long arch_do_sysctl(
     switch ( sysctl->cmd )
     {
 
-    case XEN_SYSCTL_physinfo:
-    {
-        xen_sysctl_physinfo_t *pi = &sysctl->u.physinfo;
-
-        memset(pi, 0, sizeof(*pi));
-        pi->threads_per_core =
-            cpumask_weight(per_cpu(cpu_sibling_mask, 0));
-        pi->cores_per_socket =
-            cpumask_weight(per_cpu(cpu_core_mask, 0)) / pi->threads_per_core;
-        pi->nr_cpus = num_online_cpus();
-        pi->nr_nodes = num_online_nodes();
-        pi->max_node_id = MAX_NUMNODES-1;
-        pi->max_cpu_id = nr_cpu_ids - 1;
-        pi->total_pages = total_pages;
-        pi->free_pages = avail_domheap_pages();
-        pi->scrub_pages = 0;
-        pi->cpu_khz = cpu_khz;
-        memcpy(pi->hw_cap, boot_cpu_data.x86_capability, NCAPINTS*4);
-        if ( hvm_enabled )
-            pi->capabilities |= XEN_SYSCTL_PHYSCAP_hvm;
-        if ( iommu_enabled )
-            pi->capabilities |= XEN_SYSCTL_PHYSCAP_hvm_directio;
-
-        if ( __copy_field_to_guest(u_sysctl, sysctl, u.physinfo) )
-            ret = -EFAULT;
-    }
-    break;
-        
-    case XEN_SYSCTL_topologyinfo:
-    {
-        uint32_t i, max_cpu_index, last_online_cpu;
-        xen_sysctl_topologyinfo_t *ti = &sysctl->u.topologyinfo;
-
-        last_online_cpu = cpumask_last(&cpu_online_map);
-        max_cpu_index = min_t(uint32_t, ti->max_cpu_index, last_online_cpu);
-        ti->max_cpu_index = last_online_cpu;
-
-        for ( i = 0; i <= max_cpu_index; i++ )
-        {
-            if ( !guest_handle_is_null(ti->cpu_to_core) )
-            {
-                uint32_t core = cpu_online(i) ? cpu_to_core(i) : ~0u;
-                if ( copy_to_guest_offset(ti->cpu_to_core, i, &core, 1) )
-                    break;
-            }
-            if ( !guest_handle_is_null(ti->cpu_to_socket) )
-            {
-                uint32_t socket = cpu_online(i) ? cpu_to_socket(i) : ~0u;
-                if ( copy_to_guest_offset(ti->cpu_to_socket, i, &socket, 1) )
-                    break;
-            }
-            if ( !guest_handle_is_null(ti->cpu_to_node) )
-            {
-                uint32_t node = cpu_online(i) ? cpu_to_node(i) : ~0u;
-                if ( copy_to_guest_offset(ti->cpu_to_node, i, &node, 1) )
-                    break;
-            }
-        }
-
-        ret = ((i <= max_cpu_index) ||
-               __copy_field_to_guest(u_sysctl, sysctl, u.topologyinfo))
-            ? -EFAULT : 0;
-    }
-    break;
-
-    case XEN_SYSCTL_numainfo:
-    {
-        uint32_t i, j, max_node_index, last_online_node;
-        xen_sysctl_numainfo_t *ni = &sysctl->u.numainfo;
-
-        last_online_node = last_node(node_online_map);
-        max_node_index = min_t(uint32_t, ni->max_node_index, last_online_node);
-        ni->max_node_index = last_online_node;
-
-        for ( i = 0; i <= max_node_index; i++ )
-        {
-            if ( !guest_handle_is_null(ni->node_to_memsize) )
-            {
-                uint64_t memsize = node_online(i) ? 
-                                   node_spanned_pages(i) << PAGE_SHIFT : 0ul;
-                if ( copy_to_guest_offset(ni->node_to_memsize, i, &memsize, 1) )
-                    break;
-            }
-            if ( !guest_handle_is_null(ni->node_to_memfree) )
-            {
-                uint64_t memfree = node_online(i) ? 
-                                   avail_node_heap_pages(i) << PAGE_SHIFT : 0ul;
-                if ( copy_to_guest_offset(ni->node_to_memfree, i, &memfree, 1) )
-                    break;
-            }
-
-            if ( !guest_handle_is_null(ni->node_to_node_distance) )
-            {
-                for ( j = 0; j <= max_node_index; j++)
-                {
-                    uint32_t distance = ~0u;
-                    if ( node_online(i) && node_online(j) )
-                        distance = __node_distance(i, j);
-                    if ( copy_to_guest_offset(
-                        ni->node_to_node_distance, 
-                        i*(max_node_index+1) + j, &distance, 1) )
-                        break;
-                }
-                if ( j <= max_node_index )
-                    break;
-            }
-        }
-
-        ret = ((i <= max_node_index) ||
-               __copy_field_to_guest(u_sysctl, sysctl, u.numainfo))
-            ? -EFAULT : 0;
-    }
-    break;
-    
     case XEN_SYSCTL_cpu_hotplug:
     {
         unsigned int cpu = sysctl->u.cpu_hotplug.cpu;
diff --git a/xen/common/sysctl.c b/xen/common/sysctl.c
index d663ed7..20bb864 100644
--- a/xen/common/sysctl.c
+++ b/xen/common/sysctl.c
@@ -249,6 +249,115 @@ long do_sysctl(XEN_GUEST_HANDLE_PARAM(xen_sysctl_t) u_sysctl)
         ret = sched_adjust_global(&op->u.scheduler_op);
         break;
 
+    case XEN_SYSCTL_physinfo:
+    {
+        xen_sysctl_physinfo_t *pi = &op->u.physinfo;
+
+        memset(pi, 0, sizeof(*pi));
+        pi->threads_per_core =
+            cpumask_weight(per_cpu(cpu_sibling_mask, 0));
+        pi->cores_per_socket =
+            cpumask_weight(per_cpu(cpu_core_mask, 0)) / pi->threads_per_core;
+        pi->nr_cpus = num_online_cpus();
+        pi->nr_nodes = num_online_nodes();
+        pi->max_node_id = MAX_NUMNODES-1;
+        pi->max_cpu_id = nr_cpu_ids - 1;
+        pi->total_pages = total_pages;
+        pi->free_pages = avail_domheap_pages();
+        pi->scrub_pages = 0;
+        pi->cpu_khz = cpu_khz;
+        arch_do_physinfo(pi);
+
+        if ( copy_to_guest(u_sysctl, op, 1) )
+            ret = -EFAULT;
+    }
+    break;
+
+    case XEN_SYSCTL_numainfo:
+    {
+        uint32_t i, j, max_node_index, last_online_node;
+        xen_sysctl_numainfo_t *ni = &op->u.numainfo;
+
+        last_online_node = last_node(node_online_map);
+        max_node_index = min_t(uint32_t, ni->max_node_index, last_online_node);
+        ni->max_node_index = last_online_node;
+
+        for ( i = 0; i <= max_node_index; i++ )
+        {
+            if ( !guest_handle_is_null(ni->node_to_memsize) )
+            {
+                uint64_t memsize = node_online(i) ?
+                                   node_spanned_pages(i) << PAGE_SHIFT : 0ul;
+                if ( copy_to_guest_offset(ni->node_to_memsize, i, &memsize, 1) )
+                    break;
+            }
+            if ( !guest_handle_is_null(ni->node_to_memfree) )
+            {
+                uint64_t memfree = node_online(i) ?
+                                   avail_node_heap_pages(i) << PAGE_SHIFT : 0ul;
+                if ( copy_to_guest_offset(ni->node_to_memfree, i, &memfree, 1) )
+                    break;
+            }
+
+            if ( !guest_handle_is_null(ni->node_to_node_distance) )
+            {
+                for ( j = 0; j <= max_node_index; j++)
+                {
+                    uint32_t distance = ~0u;
+                    if ( node_online(i) && node_online(j) )
+                        distance = __node_distance(i, j);
+                    if ( copy_to_guest_offset(
+                        ni->node_to_node_distance,
+                        i*(max_node_index+1) + j, &distance, 1) )
+                        break;
+                }
+                if ( j <= max_node_index )
+                    break;
+            }
+        }
+
+        ret = ((i <= max_node_index) || copy_to_guest(u_sysctl, op, 1))
+            ? -EFAULT : 0;
+    }
+    break;
+
+    case XEN_SYSCTL_topologyinfo:
+    {
+        uint32_t i, max_cpu_index, last_online_cpu;
+        xen_sysctl_topologyinfo_t *ti = &op->u.topologyinfo;
+
+        last_online_cpu = cpumask_last(&cpu_online_map);
+        max_cpu_index = min_t(uint32_t, ti->max_cpu_index, last_online_cpu);
+        ti->max_cpu_index = last_online_cpu;
+
+        for ( i = 0; i <= max_cpu_index; i++ )
+        {
+            if ( !guest_handle_is_null(ti->cpu_to_core) )
+            {
+                uint32_t core = cpu_online(i) ? cpu_to_core(i) : ~0u;
+                if ( copy_to_guest_offset(ti->cpu_to_core, i, &core, 1) )
+                    break;
+            }
+            if ( !guest_handle_is_null(ti->cpu_to_socket) )
+            {
+                uint32_t socket = cpu_online(i) ? cpu_to_socket(i) : ~0u;
+                if ( copy_to_guest_offset(ti->cpu_to_socket, i, &socket, 1) )
+                    break;
+            }
+            if ( !guest_handle_is_null(ti->cpu_to_node) )
+            {
+                uint32_t node = cpu_online(i) ? cpu_to_node(i) : ~0u;
+                if ( copy_to_guest_offset(ti->cpu_to_node, i, &node, 1) )
+                    break;
+            }
+        }
+
+        ret = ((i <= max_cpu_index) || copy_to_guest(u_sysctl, op, 1))
+            ? -EFAULT : 0;
+    }
+    break;
+
+
     default:
         ret = arch_do_sysctl(op, u_sysctl);
         copyback = 0;
diff --git a/xen/include/asm-arm/mm.h b/xen/include/asm-arm/mm.h
index 4ed5df6..96b36c2 100644
--- a/xen/include/asm-arm/mm.h
+++ b/xen/include/asm-arm/mm.h
@@ -136,6 +136,9 @@ extern unsigned long frametable_base_mfn;
 
 extern unsigned long max_page;
 extern unsigned long total_pages;
+/* XXX: implement NUMA support */
+#define node_spanned_pages(nid)        (total_pages)
+#define __node_distance(a, b) (20)
 
 /* Boot-time pagetable setup */
 extern void setup_pagetables(unsigned long boot_phys_offset, paddr_t xen_paddr);
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index 90a6537..ba0f2f8 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -748,6 +748,8 @@ extern void dump_runq(unsigned char key);
 
 #define num_cpupool_cpus(c) cpumask_weight((c)->cpu_valid)
 
+void arch_do_physinfo(xen_sysctl_physinfo_t *pi);
+
 #endif /* __SCHED_H__ */
 
 /*
-- 
1.7.2.5

