Signed-off-by: juergen.gross@xxxxxxxxxxxxxx

To be able to support arbitrary numbers of physical cpus it was necessary to
include the size of cpumaps in the xc interfaces for cpu pools.
The affected interfaces are:
  - the definition of xc_cpupoolinfo_t
  - xc_cpupool_getinfo()
  - xc_cpupool_freeinfo()

xc_cpupool_getinfo() now returns the info for a single cpupool in a
dynamically allocated structure which the caller must free, instead of
filling a caller-supplied array. xc_cpupool_freeinfo() takes the size of
the caller's cpumap in bytes. A short usage sketch is appended below the
patch.

diff -r d978675f3d53 tools/libxc/xc_cpupool.c
--- a/tools/libxc/xc_cpupool.c  Thu Sep 16 18:29:26 2010 +0100
+++ b/tools/libxc/xc_cpupool.c  Fri Sep 17 07:42:30 2010 +0200
@@ -34,6 +34,20 @@ static int do_sysctl_save(xc_interface *
     return ret;
 }
 
+static int get_cpumap_size(xc_interface *xch)
+{
+    static int cpumap_size = 0;
+    xc_physinfo_t physinfo;
+
+    if ( cpumap_size )
+        return cpumap_size;
+
+    if ( !xc_physinfo(xch, &physinfo) )
+        cpumap_size = (physinfo.max_cpu_id + 8) / 8;
+
+    return cpumap_size;
+}
+
 int xc_cpupool_create(xc_interface *xch,
                       uint32_t *ppoolid,
                       uint32_t sched_id)
@@ -64,50 +78,56 @@ int xc_cpupool_destroy(xc_interface *xch
     return do_sysctl_save(xch, &sysctl);
 }
 
-int xc_cpupool_getinfo(xc_interface *xch,
-                       uint32_t first_poolid,
-                       uint32_t n_max,
-                       xc_cpupoolinfo_t *info)
+xc_cpupoolinfo_t *xc_cpupool_getinfo(xc_interface *xch,
+                       uint32_t poolid)
 {
     int err = 0;
-    int p;
-    uint32_t poolid = first_poolid;
-    uint8_t local[sizeof (info->cpumap)];
+    xc_cpupoolinfo_t *info;
+    uint8_t *local;
+    int local_size;
+    int cpumap_size;
+    int size;
     DECLARE_SYSCTL;
 
-    memset(info, 0, n_max * sizeof(xc_cpupoolinfo_t));
+    local_size = get_cpumap_size(xch);
+    cpumap_size = (local_size + 7) / 8;
+    size = sizeof(xc_cpupoolinfo_t) + cpumap_size * 8 + local_size;
+    info = malloc(size);
+    if ( !info )
+        return NULL;
 
-    for (p = 0; p < n_max; p++)
+    memset(info, 0, size);
+    info->cpumap_size = local_size * 8;
+    info->cpumap = (uint64_t *)(info + 1);
+    local = (uint8_t *)(info->cpumap + cpumap_size);
+
+    sysctl.cmd = XEN_SYSCTL_cpupool_op;
+    sysctl.u.cpupool_op.op = XEN_SYSCTL_CPUPOOL_OP_INFO;
+    sysctl.u.cpupool_op.cpupool_id = poolid;
+    set_xen_guest_handle(sysctl.u.cpupool_op.cpumap.bitmap, local);
+    sysctl.u.cpupool_op.cpumap.nr_cpus = local_size * 8;
+
+    if ( (err = lock_pages(local, local_size)) != 0 )
     {
-        sysctl.cmd = XEN_SYSCTL_cpupool_op;
-        sysctl.u.cpupool_op.op = XEN_SYSCTL_CPUPOOL_OP_INFO;
-        sysctl.u.cpupool_op.cpupool_id = poolid;
-        set_xen_guest_handle(sysctl.u.cpupool_op.cpumap.bitmap, local);
-        sysctl.u.cpupool_op.cpumap.nr_cpus = sizeof(info->cpumap) * 8;
+        PERROR("Could not lock memory for Xen hypercall");
+        free(info);
+        return NULL;
+    }
+    err = do_sysctl_save(xch, &sysctl);
+    unlock_pages(local, local_size);
 
-        if ( (err = lock_pages(local, sizeof(local))) != 0 )
-        {
-            PERROR("Could not lock memory for Xen hypercall");
-            break;
-        }
-        err = do_sysctl_save(xch, &sysctl);
-        unlock_pages(local, sizeof (local));
-
-        if ( err < 0 )
-            break;
-
-        info->cpupool_id = sysctl.u.cpupool_op.cpupool_id;
-        info->sched_id = sysctl.u.cpupool_op.sched_id;
-        info->n_dom = sysctl.u.cpupool_op.n_dom;
-        bitmap_byte_to_64(&(info->cpumap), local, sizeof(local) * 8);
-        poolid = sysctl.u.cpupool_op.cpupool_id + 1;
-        info++;
+    if ( err < 0 )
+    {
+        free(info);
+        return NULL;
     }
 
-    if ( p == 0 )
-        return err;
+    info->cpupool_id = sysctl.u.cpupool_op.cpupool_id;
+    info->sched_id = sysctl.u.cpupool_op.sched_id;
+    info->n_dom = sysctl.u.cpupool_op.n_dom;
+    bitmap_byte_to_64(info->cpumap, local, local_size * 8);
 
-    return p;
+    return info;
 }
 
 int xc_cpupool_addcpu(xc_interface *xch,
@@ -150,30 +170,37 @@ int xc_cpupool_movedomain(xc_interface *
 }
 
 int xc_cpupool_freeinfo(xc_interface *xch,
-                        uint64_t *cpumap)
+                        uint64_t *cpumap,
+                        int cpusize)
 {
     int err;
-    uint8_t local[sizeof (*cpumap)];
+    uint8_t *local;
     DECLARE_SYSCTL;
+
+    local = malloc(cpusize);
+    if (local == NULL)
+        return -ENOMEM;
 
     sysctl.cmd = XEN_SYSCTL_cpupool_op;
     sysctl.u.cpupool_op.op = XEN_SYSCTL_CPUPOOL_OP_FREEINFO;
     set_xen_guest_handle(sysctl.u.cpupool_op.cpumap.bitmap, local);
-    sysctl.u.cpupool_op.cpumap.nr_cpus = sizeof(*cpumap) * 8;
+    sysctl.u.cpupool_op.cpumap.nr_cpus = cpusize * 8;
 
-    if ( (err = lock_pages(local, sizeof(local))) != 0 )
+    if ( (err = lock_pages(local, cpusize)) != 0 )
     {
         PERROR("Could not lock memory for Xen hypercall");
+        free(local);
         return err;
     }
 
     err = do_sysctl_save(xch, &sysctl);
-    unlock_pages(local, sizeof (local));
+    unlock_pages(local, cpusize);
+    bitmap_byte_to_64(cpumap, local, cpusize * 8);
+
+    free(local);
 
     if (err < 0)
         return err;
 
-    bitmap_byte_to_64(cpumap, local, sizeof(local) * 8);
-
     return 0;
 }
 
diff -r d978675f3d53 tools/libxc/xenctrl.h
--- a/tools/libxc/xenctrl.h     Thu Sep 16 18:29:26 2010 +0100
+++ b/tools/libxc/xenctrl.h     Fri Sep 17 07:42:30 2010 +0200
@@ -535,7 +535,8 @@ typedef struct xc_cpupoolinfo {
     uint32_t cpupool_id;
     uint32_t sched_id;
     uint32_t n_dom;
-    uint64_t cpumap;
+    uint32_t cpumap_size;    /* max number of cpus in map */
+    uint64_t *cpumap;
 } xc_cpupoolinfo_t;
 
 /**
@@ -564,15 +565,11 @@ int xc_cpupool_destroy(xc_interface *xch
  * Get cpupool info. Returns info for up to the specified number of cpupools
  * starting at the given id.
  * @parm xc_handle a handle to an open hypervisor interface
- * @parm first_poolid lowest id for which info is returned
- * @parm n_max maximum number of cpupools to return info
- * @parm info pointer to xc_cpupoolinfo_t array
- * return number of cpupool infos
+ * @parm poolid lowest id for which info is returned
+ * return cpupool info ptr (obtained by malloc, to be free()d by the caller)
  */
-int xc_cpupool_getinfo(xc_interface *xch,
-                       uint32_t first_poolid,
-                       uint32_t n_max,
-                       xc_cpupoolinfo_t *info);
+xc_cpupoolinfo_t *xc_cpupool_getinfo(xc_interface *xch,
+                       uint32_t poolid);
 
 /**
  * Add cpu to a cpupool. cpu may be -1 indicating the first unassigned.
@@ -615,10 +612,12 @@ int xc_cpupool_movedomain(xc_interface *
  *
  * @parm xc_handle a handle to an open hypervisor interface
  * @parm cpumap pointer where to store the cpumap
+ * @parm cpusize size of cpumap array in bytes
  * return 0 on success, -1 on failure
  */
 int xc_cpupool_freeinfo(xc_interface *xch,
-                        uint64_t *cpumap);
+                        uint64_t *cpumap,
+                        int cpusize);
 
 
 /*
diff -r d978675f3d53 tools/python/xen/lowlevel/xc/xc.c
--- a/tools/python/xen/lowlevel/xc/xc.c Thu Sep 16 18:29:26 2010 +0100
+++ b/tools/python/xen/lowlevel/xc/xc.c Fri Sep 17 07:42:30 2010 +0200
@@ -241,7 +241,7 @@ static PyObject *pyxc_vcpu_setaffinity(X
     if ( xc_physinfo(self->xc_handle, &info) != 0 )
         return pyxc_error_to_exception(self->xc_handle);
 
-    nr_cpus = info.nr_cpus;
+    nr_cpus = info.max_cpu_id + 1;
 
     size = (nr_cpus + cpumap_size * 8 - 1)/ (cpumap_size * 8);
     cpumap = malloc(cpumap_size * size);
@@ -400,7 +400,7 @@ static PyObject *pyxc_vcpu_getinfo(XcObj
     if ( xc_physinfo(self->xc_handle, &pinfo) != 0 )
         return pyxc_error_to_exception(self->xc_handle);
 
-    nr_cpus = pinfo.nr_cpus;
+    nr_cpus = pinfo.max_cpu_id + 1;
 
     rc = xc_vcpu_getinfo(self->xc_handle, dom, vcpu, &info);
     if ( rc < 0 )
@@ -1906,22 +1906,23 @@ static PyObject *pyxc_dom_set_memshr(XcO
     return zero;
 }
 
-static PyObject *cpumap_to_cpulist(uint64_t cpumap)
+static PyObject *cpumap_to_cpulist(uint64_t *cpumap, int cpusize)
 {
     PyObject *cpulist = NULL;
-    uint32_t i;
+    int i;
 
     cpulist = PyList_New(0);
-    for ( i = 0; cpumap != 0; i++ )
+    for ( i = 0; i < cpusize; i++ )
     {
-        if ( cpumap & 1 )
+        if ( *cpumap & (1ULL << (i % 64)) )
         {
             PyObject* pyint = PyInt_FromLong(i);
             PyList_Append(cpulist, pyint);
             Py_DECREF(pyint);
         }
-        cpumap >>= 1;
+        if ( (i % 64) == 63 )
+            cpumap++;
     }
     return cpulist;
 }
@@ -1966,7 +1967,7 @@ static PyObject *pyxc_cpupool_getinfo(Xc
     PyObject *list, *info_dict;
 
     uint32_t first_pool = 0;
-    int max_pools = 1024, nr_pools, i;
+    int max_pools = 1024, i;
     xc_cpupoolinfo_t *info;
 
     static char *kwd_list[] = { "first_pool", "max_pools", NULL };
@@ -1975,38 +1976,31 @@ static PyObject *pyxc_cpupool_getinfo(Xc
                                       &first_pool, &max_pools) )
        return NULL;
 
-    info = calloc(max_pools, sizeof(xc_cpupoolinfo_t));
-    if (info == NULL)
-        return PyErr_NoMemory();
-
-    nr_pools = xc_cpupool_getinfo(self->xc_handle, first_pool, max_pools, info);
-
-    if (nr_pools < 0)
+    list = PyList_New(0);
+    for (i = 0; i < max_pools; i++)
     {
-        free(info);
-        return pyxc_error_to_exception(self->xc_handle);
-    }
-
-    list = PyList_New(nr_pools);
-    for ( i = 0 ; i < nr_pools; i++ )
-    {
+        info = xc_cpupool_getinfo(self->xc_handle, first_pool);
+        if (info == NULL)
+            break;
         info_dict = Py_BuildValue(
             "{s:i,s:i,s:i,s:N}",
-            "cpupool",         (int)info[i].cpupool_id,
-            "sched",           info[i].sched_id,
-            "n_dom",           info[i].n_dom,
-            "cpulist",         cpumap_to_cpulist(info[i].cpumap));
+            "cpupool",         (int)info->cpupool_id,
+            "sched",           info->sched_id,
+            "n_dom",           info->n_dom,
+            "cpulist",         cpumap_to_cpulist(info->cpumap,
+                                                 info->cpumap_size));
+        first_pool = info->cpupool_id + 1;
+        free(info);
+
         if ( info_dict == NULL )
         {
             Py_DECREF(list);
-            if ( info_dict != NULL ) {
-                Py_DECREF(info_dict);
-            }
-            free(info);
             return NULL;
         }
-        PyList_SetItem(list, i, info_dict);
+
+        PyList_Append(list, info_dict);
+        Py_DECREF(info_dict);
     }
 
-    free(info);
-
     return list;
 }
@@ -2072,12 +2066,28 @@ static PyObject *pyxc_cpupool_movedomain
 
 static PyObject *pyxc_cpupool_freeinfo(XcObject *self)
 {
-    uint64_t cpumap;
+    uint64_t *cpumap;
+    xc_physinfo_t physinfo;
+    int ret;
+    PyObject *info = NULL;
 
-    if (xc_cpupool_freeinfo(self->xc_handle, &cpumap) != 0)
+    if (xc_physinfo(self->xc_handle, &physinfo))
         return pyxc_error_to_exception(self->xc_handle);
 
-    return cpumap_to_cpulist(cpumap);
+    cpumap = calloc((physinfo.max_cpu_id + 64) / 64, sizeof(uint64_t));
+    if (!cpumap) {
+        errno = ENOMEM;
+        return PyErr_SetFromErrno(xc_error_obj);
+    }
+
+    ret = xc_cpupool_freeinfo(self->xc_handle, cpumap,
+                              (physinfo.max_cpu_id + 8) / 8);
+    if (!ret)
+        info = cpumap_to_cpulist(cpumap, physinfo.max_cpu_id + 1);
+
+    free(cpumap);
+
+    return ret ? pyxc_error_to_exception(self->xc_handle) : info;
 }
 
 static PyObject *pyflask_context_to_sid(PyObject *self, PyObject *args,
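
For reviewers, a minimal usage sketch of the reworked interfaces follows.
It is not part of the patch: it assumes a libxc build with this patch
applied and an already opened xc_interface handle, and the helper names
dump_pools() and dump_free_cpus() are made up for illustration.

/* Usage sketch, not part of the patch; dump_pools() and
 * dump_free_cpus() are hypothetical helpers. */
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <xenctrl.h>

/* Enumerate all cpupools: xc_cpupool_getinfo() returns a malloc()ed
 * structure for the first pool with an id >= poolid, or NULL when no
 * such pool exists; the caller must free the returned buffer. */
static void dump_pools(xc_interface *xch)
{
    xc_cpupoolinfo_t *info;
    uint32_t poolid = 0;

    while ( (info = xc_cpupool_getinfo(xch, poolid)) != NULL )
    {
        printf("pool %"PRIu32": sched %"PRIu32", %"PRIu32" domain(s), "
               "cpumap holds %"PRIu32" cpus\n",
               info->cpupool_id, info->sched_id, info->n_dom,
               info->cpumap_size);
        poolid = info->cpupool_id + 1;  /* continue after this pool */
        free(info);
    }
}

/* Query the map of cpus not assigned to any pool, sizing the map from
 * physinfo with the same rounding rule the patch itself uses. */
static void dump_free_cpus(xc_interface *xch)
{
    xc_physinfo_t physinfo;
    uint64_t *cpumap;
    int bytes;

    if ( xc_physinfo(xch, &physinfo) )
        return;

    bytes = (physinfo.max_cpu_id + 8) / 8;   /* one bit per possible cpu */
    cpumap = calloc((physinfo.max_cpu_id + 64) / 64, sizeof(uint64_t));
    if ( cpumap == NULL )
        return;

    if ( xc_cpupool_freeinfo(xch, cpumap, bytes) == 0 )
        printf("free cpus, first map word: %#"PRIx64"\n", cpumap[0]);

    free(cpumap);
}

Note that enumeration advances poolid past the id of the pool just
returned, mirroring what the reworked python binding does.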