Hi,
when booting unstable with numa=on, Xen reports the node_to_cpu relation
incorrectly (this is easy to see in the output of xm info).
The physinfo sysctl has input parameters (described in public/sysctl.h)
which obviously must not be clobbered before they have been read; the
memset() introduced in c/s 17336 in sysctl.c does exactly that. The
attached patch fixes this by saving the input values before clearing the
structure. This affects not only the more or less cosmetic xm info
output, but also, for instance, the automatic NUMA placement code, which
stops with an error.
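
For reference, the IN/OUT contract works like this: the caller fills in
max_cpu_id (the size of its array) and the cpu_to_node guest handle before
issuing the sysctl, and Xen reports its results back through those same
fields. A minimal caller-side sketch via libxc could look like the code
below (the helper name, the array size and the error handling are purely
illustrative and not taken from the real tools code):

#include <stdint.h>
#include <string.h>
#include <xenctrl.h>

#define MAX_NR_CPUS 256   /* illustrative array size, not from the tools */

/* Hypothetical helper: fills cpu_to_node[] and returns the highest valid
 * index, or -1 on error. */
int get_cpu_to_node(int xc_handle, uint32_t cpu_to_node[MAX_NR_CPUS])
{
    xc_physinfo_t info;

    memset(&info, 0, sizeof(info));

    /* IN parameters: how large the array is and where it lives. These are
     * exactly the fields that must survive the memset() inside Xen. */
    info.max_cpu_id = MAX_NR_CPUS - 1;
    set_xen_guest_handle(info.cpu_to_node, cpu_to_node);

    if ( xc_physinfo(xc_handle, &info) != 0 )
        return -1;

    /* OUT: max_cpu_id now tells us how far the array was filled in. */
    return info.max_cpu_id;
}

With the current ordering, max_cpu_id and the cpu_to_node handle are
zeroed before Xen looks at them, so the loop filling the array never runs
and the caller gets no node information back.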
The IA64 part of the patch is not tested (the cross compiler project is
stalled for the moment ;-( ).
Signed-off-by: Andre Przywara <andre.przywara@xxxxxxx>
Regards,
Andre
--
Andre Przywara
AMD-Operating System Research Center (OSRC), Dresden, Germany
Tel: +49 351 277-84917
diff -r 0a8fc1a62796 xen/arch/ia64/xen/dom0_ops.c
--- a/xen/arch/ia64/xen/dom0_ops.c Mon May 12 11:19:09 2008 +0100
+++ b/xen/arch/ia64/xen/dom0_ops.c Tue May 13 22:10:42 2008 +0200
@@ -407,10 +407,15 @@ long arch_do_sysctl(xen_sysctl_t *op, XE
     {
         int i;
         uint32_t max_array_ent;
+        XEN_GUEST_HANDLE_64(uint32) saved_cpu_to_node;
         xen_sysctl_physinfo_t *pi = &op->u.physinfo;
+        max_array_ent = pi->max_cpu_id;
+        saved_cpu_to_node = pi->cpu_to_node;
         memset(pi, 0, sizeof(*pi));
+        pi->cpu_to_node = saved_cpu_to_node;
+
         pi->threads_per_core = cpus_weight(cpu_sibling_map[0]);
         pi->cores_per_socket =
             cpus_weight(cpu_core_map[0]) / pi->threads_per_core;
@@ -421,7 +426,6 @@ long arch_do_sysctl(xen_sysctl_t *op, XE
         pi->scrub_pages = avail_scrub_pages();
         pi->cpu_khz = local_cpu_data->proc_freq / 1000;
-        max_array_ent = pi->max_cpu_id;
         pi->max_cpu_id = last_cpu(cpu_online_map);
         max_array_ent = min_t(uint32_t, max_array_ent, pi->max_cpu_id);
@@ -435,7 +439,8 @@ long arch_do_sysctl(xen_sysctl_t *op, XE
                     break;
                 }
             }
-        }
+            pi->max_cpu_id = max_array_ent;
+        } else pi->max_cpu_id = 0;
         if ( copy_to_guest(u_sysctl, op, 1) )
             ret = -EFAULT;
diff -r 0a8fc1a62796 xen/arch/x86/sysctl.c
--- a/xen/arch/x86/sysctl.c Mon May 12 11:19:09 2008 +0100
+++ b/xen/arch/x86/sysctl.c Tue May 13 22:10:42 2008 +0200
@@ -40,6 +40,7 @@ long arch_do_sysctl(
     case XEN_SYSCTL_physinfo:
     {
         uint32_t i, max_array_ent;
+        XEN_GUEST_HANDLE_64(uint32) saved_cpu_to_node;
         xen_sysctl_physinfo_t *pi = &sysctl->u.physinfo;
@@ -47,7 +48,11 @@ long arch_do_sysctl(
         if ( ret )
             break;
+        max_array_ent = pi->max_cpu_id;
+        saved_cpu_to_node = pi->cpu_to_node;
         memset(pi, 0, sizeof(*pi));
+        pi->cpu_to_node = saved_cpu_to_node;
+
         pi->threads_per_core =
             cpus_weight(cpu_sibling_map[0]);
         pi->cores_per_socket =
@@ -64,7 +69,6 @@ long arch_do_sysctl(
         if ( iommu_enabled )
             pi->capabilities |= XEN_SYSCTL_PHYSCAP_hvm_directio;
-        max_array_ent = pi->max_cpu_id;
         pi->max_cpu_id = last_cpu(cpu_online_map);
         max_array_ent = min_t(uint32_t, max_array_ent, pi->max_cpu_id);
@@ -77,7 +81,8 @@ long arch_do_sysctl(
                 if ( copy_to_guest_offset(pi->cpu_to_node, i, &node, 1) )
                     break;
             }
-        }
+            pi->max_cpu_id = max_array_ent;
+        } else pi->max_cpu_id = 0;
         ret = copy_to_guest(u_sysctl, sysctl, 1) ? -EFAULT : 0;
     }
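
For reviewers who prefer plain code over a diff, the net effect on the x86
handler is roughly the following (a condensed paraphrase of the patched
code above, not a literal excerpt from sysctl.c):

        /* Save the caller-supplied IN values before clearing the struct. */
        max_array_ent = pi->max_cpu_id;
        saved_cpu_to_node = pi->cpu_to_node;
        memset(pi, 0, sizeof(*pi));
        pi->cpu_to_node = saved_cpu_to_node;

        /* ... fill in threads_per_core, nr_cpus, memory counters, ... */

        pi->max_cpu_id = last_cpu(cpu_online_map);
        max_array_ent = min_t(uint32_t, max_array_ent, pi->max_cpu_id);

        if ( !guest_handle_is_null(pi->cpu_to_node) )
        {
            for ( i = 0; i <= max_array_ent; i++ )
            {
                uint32_t node = cpu_online(i) ? cpu_to_node(i) : ~0u;
                if ( copy_to_guest_offset(pi->cpu_to_node, i, &node, 1) )
                    break;
            }
            /* OUT: clip max_cpu_id to what fits in the caller's array. */
            pi->max_cpu_id = max_array_ent;
        }
        else
            pi->max_cpu_id = 0;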