|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-devel] [PATCH RFC 2/7] xen/vnuma: domctl subop for vnuma setup.
Defines domctl subop hypercall for per-domain vNUMA topology construct.
Signed-off-by: Elena Ufimtseva <ufimtseva@xxxxxxxxx>
---
xen/common/domain.c | 6 ++++
xen/common/domctl.c | 72 ++++++++++++++++++++++++++++++++++++++++++-
xen/include/public/domctl.h | 15 ++++++++-
3 files changed, 91 insertions(+), 2 deletions(-)
diff --git a/xen/common/domain.c b/xen/common/domain.c
index 9390a22..f0c0a79 100644
--- a/xen/common/domain.c
+++ b/xen/common/domain.c
@@ -227,6 +227,11 @@ struct domain *domain_create(
spin_lock_init(&d->node_affinity_lock);
d->node_affinity = NODE_MASK_ALL;
d->auto_node_affinity = 1;
+ d->vnuma.vnuma_memblks = NULL;
+ d->vnuma.vnode_to_pnode = NULL;
+ d->vnuma.vcpu_to_vnode = NULL;
+ d->vnuma.vdistance = NULL;
+ d->vnuma.nr_vnodes = 0;
spin_lock_init(&d->shutdown_lock);
d->shutdown_code = -1;
@@ -532,6 +537,7 @@ int domain_kill(struct domain *d)
tmem_destroy(d->tmem);
domain_set_outstanding_pages(d, 0);
d->tmem = NULL;
+ /* TODO: vnuma_destroy(d->vnuma); */
/* fallthrough */
case DOMDYING_dying:
rc = domain_relinquish_resources(d);
diff --git a/xen/common/domctl.c b/xen/common/domctl.c
index 9760d50..b552e60 100644
--- a/xen/common/domctl.c
+++ b/xen/common/domctl.c
@@ -29,6 +29,7 @@
#include <asm/page.h>
#include <public/domctl.h>
#include <xsm/xsm.h>
+#include <xen/vnuma.h>
static DEFINE_SPINLOCK(domctl_lock);
DEFINE_SPINLOCK(vcpu_alloc_lock);
@@ -862,7 +863,76 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl)
ret = set_global_virq_handler(d, virq);
}
break;
-
+ case XEN_DOMCTL_setvnumainfo:
+ {
+ int i, j;
+ int dist_size;
+ int dist, vmap, vntop;
+ vnuma_memblk_t vmemblk;
+
+ ret = -EFAULT;
+ dist = i = j = 0;
+ if (op->u.vnuma.nr_vnodes <= 0 || op->u.vnuma.nr_vnodes > NR_CPUS)
+ break;
+ d->vnuma.nr_vnodes = op->u.vnuma.nr_vnodes;
+ dist_size = d->vnuma.nr_vnodes * d->vnuma.nr_vnodes;
+ if ( (d->vnuma.vdistance = xmalloc_bytes(sizeof(*d->vnuma.vdistance) * dist_size) ) == NULL)
+ break;
+ for ( i = 0; i < d->vnuma.nr_vnodes; i++ )
+ for ( j = 0; j < d->vnuma.nr_vnodes; j++ )
+ {
+ if ( unlikely(__copy_from_guest_offset(&dist, op->u.vnuma.vdistance, __vnode_distance_offset(d, i, j), 1)) )
+ {
+ gdprintk(XENLOG_INFO, "vNUMA: Copy distance table error\n");
+ goto err_dom;
+ }
+ __vnode_distance_set(d, i, j, dist);
+ }
+ if ( (d->vnuma.vnuma_memblks = xmalloc_bytes(sizeof(*d->vnuma.vnuma_memblks) * d->vnuma.nr_vnodes)) == NULL )
+ goto err_dom;
+ for ( i = 0; i < d->vnuma.nr_vnodes; i++ )
+ {
+ if ( unlikely(__copy_from_guest_offset(&vmemblk, op->u.vnuma.vnuma_memblks, i, 1)) )
+ {
+ gdprintk(XENLOG_INFO, "vNUMA: memory size error\n");
+ goto err_dom;
+ }
+ d->vnuma.vnuma_memblks[i].start = vmemblk.start;
+ d->vnuma.vnuma_memblks[i].end = vmemblk.end;
+ }
+ if ( (d->vnuma.vcpu_to_vnode = xmalloc_bytes(sizeof(*d->vnuma.vcpu_to_vnode) * d->max_vcpus)) == NULL )
+ goto err_dom;
+ for ( i = 0; i < d->max_vcpus; i++ )
+ {
+ if ( unlikely(__copy_from_guest_offset(&vmap, op->u.vnuma.vcpu_to_vnode, i, 1)) )
+ {
+ gdprintk(XENLOG_INFO, "vNUMA: vcputovnode map error\n");
+ goto err_dom;
+ }
+ d->vnuma.vcpu_to_vnode[i] = vmap;
+ }
+ if ( !guest_handle_is_null(op->u.vnuma.vnode_to_pnode) )
+ {
+ if ( (d->vnuma.vnode_to_pnode = xmalloc_bytes(sizeof(*d->vnuma.vnode_to_pnode) * d->vnuma.nr_vnodes)) == NULL )
+ goto err_dom;
+ for ( i = 0; i < d->vnuma.nr_vnodes; i++ )
+ {
+ if ( unlikely(__copy_from_guest_offset(&vntop, op->u.vnuma.vnode_to_pnode, i, 1)) )
+ {
+ gdprintk(XENLOG_INFO, "vNUMA: vnode_to_pnode map error\n");
+ goto err_dom;
+ }
+ d->vnuma.vnode_to_pnode[i] = vntop;
+ }
+ }
+ else
+ d->vnuma.vnode_to_pnode = NULL;
+ ret = 0;
+ break;
+err_dom:
+ ret = -EINVAL;
+ }
+ break;
default:
ret = arch_do_domctl(op, d, u_domctl);
break;
diff --git a/xen/include/public/domctl.h b/xen/include/public/domctl.h
index 4c5b2bb..a034688 100644
--- a/xen/include/public/domctl.h
+++ b/xen/include/public/domctl.h
@@ -35,6 +35,7 @@
#include "xen.h"
#include "grant_table.h"
#include "hvm/save.h"
+#include "xen/vnuma.h"
#define XEN_DOMCTL_INTERFACE_VERSION 0x00000009
@@ -852,6 +853,17 @@ struct xen_domctl_set_broken_page_p2m {
typedef struct xen_domctl_set_broken_page_p2m xen_domctl_set_broken_page_p2m_t;
DEFINE_XEN_GUEST_HANDLE(xen_domctl_set_broken_page_p2m_t);
+struct xen_domctl_vnuma {
+ uint16_t nr_vnodes;
+ XEN_GUEST_HANDLE_64(int) vdistance;
+ XEN_GUEST_HANDLE_64(vnuma_memblk_t) vnuma_memblks;
+ XEN_GUEST_HANDLE_64(int) vcpu_to_vnode;
+ XEN_GUEST_HANDLE_64(int) vnode_to_pnode;
+};
+
+typedef struct xen_domctl_vnuma xen_domctl_vnuma_t;
+DEFINE_XEN_GUEST_HANDLE(xen_domctl_vnuma_t);
+
struct xen_domctl {
uint32_t cmd;
#define XEN_DOMCTL_createdomain 1
@@ -920,6 +932,7 @@ struct xen_domctl {
#define XEN_DOMCTL_set_broken_page_p2m 67
#define XEN_DOMCTL_setnodeaffinity 68
#define XEN_DOMCTL_getnodeaffinity 69
+#define XEN_DOMCTL_setvnumainfo 70
#define XEN_DOMCTL_gdbsx_guestmemio 1000
#define XEN_DOMCTL_gdbsx_pausevcpu 1001
#define XEN_DOMCTL_gdbsx_unpausevcpu 1002
@@ -979,6 +992,7 @@ struct xen_domctl {
struct xen_domctl_set_broken_page_p2m set_broken_page_p2m;
struct xen_domctl_gdbsx_pauseunp_vcpu gdbsx_pauseunp_vcpu;
struct xen_domctl_gdbsx_domstatus gdbsx_domstatus;
+ struct xen_domctl_vnuma vnuma;
uint8_t pad[128];
} u;
};
@@ -986,7 +1000,6 @@ typedef struct xen_domctl xen_domctl_t;
DEFINE_XEN_GUEST_HANDLE(xen_domctl_t);
#endif /* __XEN_PUBLIC_DOMCTL_H__ */
-
/*
* Local variables:
* mode: C
--
1.7.10.4
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel
|
![]() |
Lists.xenproject.org is hosted with RackSpace, monitoring our |