[Xen-devel] [PATCH RFC 1/7] xen/vnuma: subop hypercall and vnuma topology structures.
Defines a XENMEM subop hypercall for PV vNUMA-enabled guests and provides
vNUMA topology information from the per-domain vnuma topology build info.

TODO:
The XENMEM subop hypercall is subject to change to a sysctl subop.
Signed-off-by: Elena Ufimtseva <ufimtseva@xxxxxxxxx>
---
xen/common/memory.c | 90 ++++++++++++++++++++++++++++++++++++++++++-
xen/include/public/memory.h | 1 +
xen/include/public/vnuma.h | 12 ++++++
xen/include/xen/domain.h | 9 +++++
xen/include/xen/sched.h | 1 +
xen/include/xen/vnuma.h | 27 +++++++++++++
6 files changed, 139 insertions(+), 1 deletion(-)
create mode 100644 xen/include/public/vnuma.h
create mode 100644 xen/include/xen/vnuma.h
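
For illustration only (not part of the patch): a guest-side sketch of how a PV
kernel could invoke the new subop once the public header is wired into the
guest tree. It assumes a Linux-style HYPERVISOR_memory_op() wrapper and a guest
mirror of struct vnuma_topology_info; the helper name, the includes, and the
way nr_vnodes/nr_vcpus are obtained are placeholders.

#include <linux/slab.h>            /* kcalloc()/kfree() */
#include <asm/xen/hypercall.h>     /* HYPERVISOR_memory_op() */
#include <xen/interface/memory.h>  /* XENMEM_get_vnuma_info, once added */

/* Hypothetical guest mirror of the hypervisor's vnuma_topology_info. */
static int xen_fetch_vnuma_info(unsigned int nr_vnodes, unsigned int nr_vcpus)
{
        struct vnuma_topology_info topo = { .domid = DOMID_SELF };
        struct vnuma_memblk *memblks;
        int *distance, *vcpu_to_vnode, rc = -ENOMEM;

        memblks = kcalloc(nr_vnodes, sizeof(*memblks), GFP_KERNEL);
        distance = kcalloc(nr_vnodes * nr_vnodes, sizeof(*distance), GFP_KERNEL);
        vcpu_to_vnode = kcalloc(nr_vcpus, sizeof(*vcpu_to_vnode), GFP_KERNEL);
        if (!memblks || !distance || !vcpu_to_vnode)
                goto out;

        /* Point the hypercall argument at the guest buffers. */
        set_xen_guest_handle(topo.vnuma_memblks, memblks);
        set_xen_guest_handle(topo.vdistance, distance);
        set_xen_guest_handle(topo.vcpu_to_vnode, vcpu_to_vnode);

        rc = HYPERVISOR_memory_op(XENMEM_get_vnuma_info, &topo);
        if (rc == 0)
                pr_info("vNUMA: hypervisor reports %u virtual nodes\n",
                        topo.nr_vnodes);
out:
        kfree(memblks);
        kfree(distance);
        kfree(vcpu_to_vnode);
        return rc;
}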
diff --git a/xen/common/memory.c b/xen/common/memory.c
index 50b740f..c7fbe11 100644
--- a/xen/common/memory.c
+++ b/xen/common/memory.c
@@ -28,6 +28,7 @@
#include <public/memory.h>
#include <xsm/xsm.h>
#include <xen/trace.h>
+#include <xen/vnuma.h>
struct memop_args {
/* INPUT */
@@ -732,7 +733,94 @@ long do_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
rcu_unlock_domain(d);
break;
-
+    case XENMEM_get_vnuma_info:
+    {
+        unsigned int i;
+        struct vnuma_topology_info mtopology;
+        struct vnuma_topology_info touser_topo;
+        struct domain *d;
+        vnuma_memblk_t *vblks = NULL;
+        XEN_GUEST_HANDLE(vnuma_memblk_t) buf;
+        XEN_GUEST_HANDLE_PARAM(vnuma_memblk_t) buf_param;
+        XEN_GUEST_HANDLE(int) vdistance;
+        XEN_GUEST_HANDLE_PARAM(int) vdist_param;
+        XEN_GUEST_HANDLE(int) vcpu_to_vnode;
+        XEN_GUEST_HANDLE_PARAM(int) vmap_param;
+
+        if ( guest_handle_is_null(arg) )
+            return -EINVAL;
+        if ( copy_from_guest(&mtopology, arg, 1) )
+        {
+            gdprintk(XENLOG_INFO, "vNUMA: copy_from_guest failed\n");
+            return -EFAULT;
+        }
+        gdprintk(XENLOG_INFO, "vNUMA: request for domain %d\n",
+                 mtopology.domid);
+        if ( (d = rcu_lock_domain_by_any_id(mtopology.domid)) == NULL )
+        {
+            gdprintk(XENLOG_INFO, "vNUMA: could not find domain %d\n",
+                     mtopology.domid);
+            return -ESRCH;
+        }
+
+        /* Keep the domain reference until all topology data is copied out. */
+        touser_topo = mtopology;
+        touser_topo.nr_vnodes = d->vnuma.nr_vnodes;
+        if ( copy_to_guest(arg, &touser_topo, 1) )
+        {
+            gdprintk(XENLOG_INFO, "vNUMA: could not copy topology to guest\n");
+            rc = -EFAULT;
+            goto out;
+        }
+        if ( touser_topo.nr_vnodes == 0 ||
+             touser_topo.nr_vnodes > d->max_vcpus )
+        {
+            gdprintk(XENLOG_INFO, "vNUMA: bad topology - vnodes %u, vcpus %u\n",
+                     touser_topo.nr_vnodes, d->max_vcpus);
+            rc = -EINVAL;
+            goto out;
+        }
+
+        /* Not consumed yet; reserved for staging the memblock copy. */
+        vblks = xmalloc_array(struct vnuma_memblk, touser_topo.nr_vnodes);
+        if ( vblks == NULL )
+        {
+            gdprintk(XENLOG_INFO, "vNUMA: could not allocate memblocks\n");
+            rc = -ENOMEM;
+            goto out;
+        }
+
+        buf_param = guest_handle_cast(mtopology.vnuma_memblks, vnuma_memblk_t);
+        buf = guest_handle_from_param(buf_param, vnuma_memblk_t);
+        for ( i = 0; i < touser_topo.nr_vnodes; i++ )
+        {
+            gdprintk(XENLOG_INFO, "vmemblk[%u] start %#"PRIx64" end %#"PRIx64"\n",
+                     i, d->vnuma.vnuma_memblks[i].start,
+                     d->vnuma.vnuma_memblks[i].end);
+            if ( copy_to_guest_offset(buf, i, &d->vnuma.vnuma_memblks[i], 1) )
+            {
+                gdprintk(XENLOG_INFO, "vNUMA: failed to copy vmemblk[%u]\n", i);
+                rc = -EFAULT;
+                goto out;
+            }
+        }
+
+        vdist_param = guest_handle_cast(mtopology.vdistance, int);
+        vdistance = guest_handle_from_param(vdist_param, int);
+        for ( i = 0; i < touser_topo.nr_vnodes * touser_topo.nr_vnodes; i++ )
+        {
+            if ( copy_to_guest_offset(vdistance, i, &d->vnuma.vdistance[i], 1) )
+            {
+                gdprintk(XENLOG_INFO, "vNUMA: failed to copy vdistance[%u]\n", i);
+                rc = -EFAULT;
+                goto out;
+            }
+        }
+
+        vmap_param = guest_handle_cast(mtopology.vcpu_to_vnode, int);
+        vcpu_to_vnode = guest_handle_from_param(vmap_param, int);
+        for ( i = 0; i < d->max_vcpus; i++ )
+        {
+            if ( copy_to_guest_offset(vcpu_to_vnode, i,
+                                      &d->vnuma.vcpu_to_vnode[i], 1) )
+            {
+                gdprintk(XENLOG_INFO, "vNUMA: failed to copy vcpu_to_vnode[%u]\n",
+                         i);
+                rc = -EFAULT;
+                goto out;
+            }
+            gdprintk(XENLOG_INFO, "vNUMA: vcpu_to_vnode[%u] = %d\n",
+                     i, d->vnuma.vcpu_to_vnode[i]);
+        }
+
+        rc = 0;
+    out:
+        xfree(vblks);
+        rcu_unlock_domain(d);
+        break;
+    }
default:
rc = arch_memory_op(op, arg);
break;
diff --git a/xen/include/public/memory.h b/xen/include/public/memory.h
index 7a26dee..30cb8af 100644
--- a/xen/include/public/memory.h
+++ b/xen/include/public/memory.h
@@ -453,6 +453,7 @@ DEFINE_XEN_GUEST_HANDLE(xen_mem_sharing_op_t);
* Caller must be privileged or the hypercall fails.
*/
#define XENMEM_claim_pages 24
+#define XENMEM_get_vnuma_info 25
/*
* XENMEM_claim_pages flags - the are no flags at this time.
diff --git a/xen/include/public/vnuma.h b/xen/include/public/vnuma.h
new file mode 100644
index 0000000..a88dfe2
--- /dev/null
+++ b/xen/include/public/vnuma.h
@@ -0,0 +1,12 @@
+#ifndef __XEN_PUBLIC_VNUMA_H
+#define __XEN_PUBLIC_VNUMA_H
+
+#include "xen.h"
+
+struct vnuma_memblk {
+    uint64_t start, end;
+};
+typedef struct vnuma_memblk vnuma_memblk_t;
+DEFINE_XEN_GUEST_HANDLE(vnuma_memblk_t);
+
+#endif
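
For clarity (not part of the patch): each vnuma_memblk describes one physical
address range backing a virtual node. A purely illustrative layout for a guest
split into two 2 GiB vnodes, assuming an exclusive end address, could be:

static vnuma_memblk_t example_memblks[] = {
    { .start = 0x000000000ULL, .end = 0x080000000ULL }, /* vnode 0: 0 - 2 GiB */
    { .start = 0x080000000ULL, .end = 0x100000000ULL }, /* vnode 1: 2 - 4 GiB */
};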
diff --git a/xen/include/xen/domain.h b/xen/include/xen/domain.h
index a057069..3d39218 100644
--- a/xen/include/xen/domain.h
+++ b/xen/include/xen/domain.h
@@ -4,6 +4,7 @@
#include <public/xen.h>
#include <asm/domain.h>
+#include <public/vnuma.h>
typedef union {
struct vcpu_guest_context *nat;
@@ -89,4 +90,12 @@ extern unsigned int xen_processor_pmbits;
extern bool_t opt_dom0_vcpus_pin;
+struct domain_vnuma_info {
+    uint16_t nr_vnodes;
+    int *vdistance;
+    vnuma_memblk_t *vnuma_memblks;
+    int *vcpu_to_vnode;
+    int *vnode_to_pnode;
+};
+
#endif /* __XEN_DOMAIN_H__ */
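
Not part of this patch, but for reference on how the arrays relate:
vnuma_memblks and vnode_to_pnode have nr_vnodes entries, vdistance is a flat
nr_vnodes x nr_vnodes matrix, and vcpu_to_vnode has one entry per vcpu. The
rest of the series presumably fills these in; the sketch below only shows how
the sizes relate (helper name and error handling are illustrative).

static int vnuma_info_alloc(struct domain *d, uint16_t nr_vnodes)
{
    struct domain_vnuma_info *v = &d->vnuma;

    v->nr_vnodes = nr_vnodes;
    v->vnuma_memblks = xmalloc_array(vnuma_memblk_t, nr_vnodes);
    v->vdistance = xmalloc_array(int, nr_vnodes * nr_vnodes);
    v->vcpu_to_vnode = xmalloc_array(int, d->max_vcpus);
    v->vnode_to_pnode = xmalloc_array(int, nr_vnodes);
    if ( !v->vnuma_memblks || !v->vdistance ||
         !v->vcpu_to_vnode || !v->vnode_to_pnode )
        return -ENOMEM;

    return 0;
}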
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index ae6a3b8..cb023cf 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -377,6 +377,7 @@ struct domain
     nodemask_t node_affinity;
     unsigned int last_alloc_node;
     spinlock_t node_affinity_lock;
+    struct domain_vnuma_info vnuma;
 };
struct domain_setup_info
diff --git a/xen/include/xen/vnuma.h b/xen/include/xen/vnuma.h
new file mode 100644
index 0000000..f1ab531
--- /dev/null
+++ b/xen/include/xen/vnuma.h
@@ -0,0 +1,27 @@
+#ifndef _VNUMA_H
+#define _VNUMA_H
+#include <public/vnuma.h>
+
+/* DEFINE_XEN_GUEST_HANDLE(vnuma_memblk_t); */
+
+struct vnuma_topology_info {
+    domid_t domid;
+    uint16_t nr_vnodes;
+    XEN_GUEST_HANDLE_64(vnuma_memblk_t) vnuma_memblks;
+    XEN_GUEST_HANDLE_64(int) vdistance;
+    XEN_GUEST_HANDLE_64(int) vcpu_to_vnode;
+    XEN_GUEST_HANDLE_64(int) vnode_to_pnode;
+};
+typedef struct vnuma_topology_info vnuma_topology_info_t;
+DEFINE_XEN_GUEST_HANDLE(vnuma_topology_info_t);
+
+#define __vnode_distance_offset(_dom, _i, _j) \
+    ( ((_j) * ((_dom)->vnuma.nr_vnodes)) + (_i) )
+
+#define __vnode_distance(_dom, _i, _j) \
+    ( (_dom)->vnuma.vdistance[__vnode_distance_offset((_dom), (_i), (_j))] )
+
+#define __vnode_distance_set(_dom, _i, _j, _v) \
+    do { __vnode_distance((_dom), (_i), (_j)) = (_v); } while ( 0 )
+
+#endif
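
To illustrate the helpers above (not part of the patch): __vnode_distance_offset()
flattens the nr_vnodes x nr_vnodes matrix as vdistance[j * nr_vnodes + i].
Assuming <xen/sched.h> and this header are included and d->vnuma has already
been populated with two vnodes, a symmetric SLIT-style table could be set up as:

static void example_set_distances(struct domain *d)
{
    /* 10 = local, 20 = remote, following the usual ACPI SLIT convention. */
    __vnode_distance_set(d, 0, 0, 10);
    __vnode_distance_set(d, 1, 1, 10);
    __vnode_distance_set(d, 0, 1, 20);
    __vnode_distance_set(d, 1, 0, 20);

    ASSERT(__vnode_distance(d, 0, 1) == 20);
}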
--
1.7.10.4