This patch modifies the memory op hypercalls to use the NUMA-aware page
allocator functions. The target domain's VCPU0 placement determines
which node's memory to allocate from. We expect the system administrator
to use the exposed NUMA topology information to craft guest config files
that are NUMA-friendly, i.e. ones that request only processors and memory
amounts that fit within a single node.
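For illustration, here is a minimal sketch of the pattern the hunks below
apply at each allocation site. It is not part of the patch itself; the
helper name alloc_extent_near_vcpu0() is hypothetical, and the fall-back
behaviour described in the comments is an assumption about the NUMA-aware
allocator introduced earlier in this series.

/*
 * Sketch only, not part of the patch: the pattern applied at each
 * allocation site below.  __alloc_domheap_pages() is the NUMA-aware
 * entry point added by the page allocator patch in this series; the
 * fall-back behaviour described here is an assumption, not a statement
 * about the final implementation.
 */
static struct page_info *alloc_extent_near_vcpu0(
    struct domain *d, unsigned int extent_order, unsigned int memflags)
{
    /* Physical CPU the domain's first VCPU is placed on. */
    unsigned int cpu = d->vcpu[0]->processor;

    /*
     * The cpu argument tells the allocator which node to try first;
     * if that node cannot satisfy the request, it is expected to fall
     * back to memory on other nodes rather than fail outright.
     */
    return __alloc_domheap_pages(d, cpu, extent_order, memflags);
}

The hunks below apply this same VCPU0-based locality hint in
increase_reservation(), populate_physmap() and memory_exchange().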
--
Ryan Harper
Software Engineer; Linux Technology Center
IBM Corp., Austin, Tx
(512) 838-9253 T/L: 678-9253
ryanh@xxxxxxxxxx
diffstat output:
memory.c | 21 ++++++++++++++-------
1 files changed, 14 insertions(+), 7 deletions(-)
Signed-off-by: Ryan Harper <ryanh@xxxxxxxxxx>
---
Make memory hypercalls NUMA-aware
diff -r fa87cea10778 xen/common/memory.c
--- a/xen/common/memory.c Tue Aug 15 11:38:13 2006 -0500
+++ b/xen/common/memory.c Tue Aug 15 11:40:17 2006 -0500
@@ -40,6 +40,8 @@ increase_reservation(
struct page_info *page;
unsigned long i;
xen_pfn_t mfn;
+ /* use domain's first processor for locality parameter */
+ unsigned int cpu = d->vcpu[0]->processor;
if ( !guest_handle_is_null(extent_list) &&
!guest_handle_okay(extent_list, nr_extents) )
@@ -57,8 +59,8 @@ increase_reservation(
return i;
}
- if ( unlikely((page = alloc_domheap_pages(
- d, extent_order, memflags)) == NULL) )
+ if ( unlikely((page = __alloc_domheap_pages(d, cpu,
+ extent_order, memflags)) == NULL) )
{
DPRINTK("Could not allocate order=%d extent: "
"id=%d memflags=%x (%ld of %d)\n",
@@ -91,6 +93,8 @@ populate_physmap(
unsigned long i, j;
xen_pfn_t gpfn;
xen_pfn_t mfn;
+ /* use domain's first processor for locality parameter */
+ unsigned int cpu = d->vcpu[0]->processor;
if ( !guest_handle_okay(extent_list, nr_extents) )
return 0;
@@ -110,8 +114,8 @@ populate_physmap(
if ( unlikely(__copy_from_guest_offset(&gpfn, extent_list, i, 1)) )
goto out;
- if ( unlikely((page = alloc_domheap_pages(
- d, extent_order, memflags)) == NULL) )
+ if ( unlikely((page = __alloc_domheap_pages(d, cpu,
+ extent_order, memflags)) == NULL) )
{
DPRINTK("Could not allocate order=%d extent: "
"id=%d memflags=%x (%ld of %d)\n",
@@ -293,7 +297,7 @@ memory_exchange(XEN_GUEST_HANDLE(xen_mem
unsigned long in_chunk_order, out_chunk_order;
xen_pfn_t gpfn, gmfn, mfn;
unsigned long i, j, k;
- unsigned int memflags = 0;
+ unsigned int memflags = 0, cpu;
long rc = 0;
struct domain *d;
struct page_info *page;
@@ -367,6 +371,9 @@ memory_exchange(XEN_GUEST_HANDLE(xen_mem
}
d = current->domain;
+ /* use domain's first processor for locality parameter */
+ cpu = d->vcpu[0]->processor;
+
for ( i = 0; i < (exch.in.nr_extents >> in_chunk_order); i++ )
{
if ( hypercall_preempt_check() )
@@ -412,8 +419,8 @@ memory_exchange(XEN_GUEST_HANDLE(xen_mem
/* Allocate a chunk's worth of anonymous output pages. */
for ( j = 0; j < (1UL << out_chunk_order); j++ )
{
- page = alloc_domheap_pages(
- NULL, exch.out.extent_order, memflags);
+ page = __alloc_domheap_pages(NULL, cpu,
+ exch.out.extent_order, memflags);
if ( unlikely(page == NULL) )
{
rc = -ENOMEM;