|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-devel] [PATCH 1 of 3] libxl: add internal function to get a domain's scheduler
# HG changeset patch
# User Ian Campbell <ian.campbell@xxxxxxxxxx>
# Date 1337764129 -3600
# Node ID 6533501734be011cb1f7669b7840bfdf5b2eb801
# Parent 3c9221c5b82e53d010452d1208a9f9acaf8981cd
libxl: add internal function to get a domain's scheduler.
This takes into account cpupools.
Add a helper to get the info for a single cpu pool, refactor libxl_list_cpupool
to use this. While there, fix the leaks due to not disposing the partial list on
realloc failure in that function.
Fix the failure of sched_domain_output to free the poolinfo list.
Signed-off-by: Ian Campbell <ian.campbell@xxxxxxxxxx>
diff -r 3c9221c5b82e -r 6533501734be tools/libxl/libxl.c
--- a/tools/libxl/libxl.c Tue May 22 16:15:06 2012 +0100
+++ b/tools/libxl/libxl.c Wed May 23 10:08:49 2012 +0100
@@ -552,41 +552,72 @@ int libxl_domain_info(libxl_ctx *ctx, li
return 0;
}
+static int cpupool_info(libxl__gc *gc,
+ libxl_cpupoolinfo *info,
+ uint32_t poolid,
+ bool exact /* exactly poolid or >= poolid */)
+{
+ xc_cpupoolinfo_t *xcinfo;
+ int rc = ERROR_FAIL;
+
+ xcinfo = xc_cpupool_getinfo(CTX->xch, poolid);
+ if (xcinfo == NULL)
+ return ERROR_FAIL;
+
+ if (exact && xcinfo->cpupool_id != poolid)
+ goto out;
+
+ info->poolid = xcinfo->cpupool_id;
+ info->sched = xcinfo->sched_id;
+ info->n_dom = xcinfo->n_dom;
+ if (libxl_cpumap_alloc(CTX, &info->cpumap))
+ goto out;
+ memcpy(info->cpumap.map, xcinfo->cpumap, info->cpumap.size);
+
+ rc = 0;
+out:
+ xc_cpupool_infofree(CTX->xch, xcinfo);
+ return rc;
+}
+
+int libxl_cpupool_info(libxl_ctx *ctx,
+ libxl_cpupoolinfo *info, uint32_t poolid)
+{
+ GC_INIT(ctx);
+ int rc = cpupool_info(gc, info, poolid, true);
+ GC_FREE;
+ return rc;
+}
+
libxl_cpupoolinfo * libxl_list_cpupool(libxl_ctx *ctx, int *nb_pool)
{
- libxl_cpupoolinfo *ptr, *tmp;
+ GC_INIT(ctx);
+ libxl_cpupoolinfo info, *ptr, *tmp;
int i;
- xc_cpupoolinfo_t *info;
uint32_t poolid;
ptr = NULL;
poolid = 0;
for (i = 0;; i++) {
- info = xc_cpupool_getinfo(ctx->xch, poolid);
- if (info == NULL)
+ if (cpupool_info(gc, &info, poolid, false))
break;
tmp = realloc(ptr, (i + 1) * sizeof(libxl_cpupoolinfo));
if (!tmp) {
LIBXL__LOG_ERRNO(ctx, LIBXL__LOG_ERROR, "allocating cpupool info");
+ while (i-- > 0) /* dispose every filled entry, including ptr[0] */
+ libxl_cpupoolinfo_dispose(ptr+i);
free(ptr);
- xc_cpupool_infofree(ctx->xch, info);
- return NULL;
+ goto out;
}
ptr = tmp;
- ptr[i].poolid = info->cpupool_id;
- ptr[i].sched = info->sched_id;
- ptr[i].n_dom = info->n_dom;
- if (libxl_cpumap_alloc(ctx, &ptr[i].cpumap)) {
- xc_cpupool_infofree(ctx->xch, info);
- break;
- }
- memcpy(ptr[i].cpumap.map, info->cpumap, ptr[i].cpumap.size);
- poolid = info->cpupool_id + 1;
- xc_cpupool_infofree(ctx->xch, info);
+ ptr[i] = info;
+ poolid = info.poolid + 1;
}
*nb_pool = i;
+out:
+ GC_FREE;
return ptr;
}
diff -r 3c9221c5b82e -r 6533501734be tools/libxl/libxl.h
--- a/tools/libxl/libxl.h Tue May 22 16:15:06 2012 +0100
+++ b/tools/libxl/libxl.h Wed May 23 10:08:49 2012 +0100
@@ -860,6 +860,7 @@ int libxl_cpupool_cpuadd_node(libxl_ctx
int libxl_cpupool_cpuremove(libxl_ctx *ctx, uint32_t poolid, int cpu);
int libxl_cpupool_cpuremove_node(libxl_ctx *ctx, uint32_t poolid, int node,
int *cpus);
int libxl_cpupool_movedomain(libxl_ctx *ctx, uint32_t poolid, uint32_t domid);
+int libxl_cpupool_info(libxl_ctx *ctx, libxl_cpupoolinfo *info, uint32_t
poolid);
int libxl_domid_valid_guest(uint32_t domid);
diff -r 3c9221c5b82e -r 6533501734be tools/libxl/libxl_dom.c
--- a/tools/libxl/libxl_dom.c Tue May 22 16:15:06 2012 +0100
+++ b/tools/libxl/libxl_dom.c Wed May 23 10:08:49 2012 +0100
@@ -93,6 +93,41 @@ int libxl__domain_shutdown_reason(libxl_
return (info.flags >> XEN_DOMINF_shutdownshift) & XEN_DOMINF_shutdownmask;
}
+int libxl__domain_cpupool(libxl__gc *gc, uint32_t domid)
+{
+ xc_domaininfo_t info;
+ int ret;
+
+ ret = xc_domain_getinfolist(CTX->xch, domid, 1, &info);
+ if (ret != 1)
+ return ERROR_FAIL;
+ if (info.domain != domid)
+ return ERROR_FAIL;
+
+ return info.cpupool;
+}
+
+libxl_scheduler libxl__domain_scheduler(libxl__gc *gc, uint32_t domid)
+{
+ int cpupool = libxl__domain_cpupool(gc, domid); /* signed: may be ERROR_FAIL (<0) */
+ libxl_cpupoolinfo poolinfo = { 0 }; /* zeroed so dispose is safe if info call fails */
+ libxl_scheduler sched = LIBXL_SCHEDULER_UNKNOWN;
+ int rc;
+
+ if (cpupool < 0)
+ return sched;
+
+ rc = libxl_cpupool_info(CTX, &poolinfo, cpupool);
+ if (rc < 0)
+ goto out;
+
+ sched = poolinfo.sched;
+
+out:
+ libxl_cpupoolinfo_dispose(&poolinfo);
+ return sched;
+}
+
int libxl__build_pre(libxl__gc *gc, uint32_t domid,
libxl_domain_build_info *info, libxl__domain_build_state *state)
{
diff -r 3c9221c5b82e -r 6533501734be tools/libxl/libxl_internal.h
--- a/tools/libxl/libxl_internal.h Tue May 22 16:15:06 2012 +0100
+++ b/tools/libxl/libxl_internal.h Wed May 23 10:08:49 2012 +0100
@@ -716,6 +716,8 @@ _hidden int libxl__atfork_init(libxl_ctx
/* from xl_dom */
_hidden libxl_domain_type libxl__domain_type(libxl__gc *gc, uint32_t domid);
_hidden int libxl__domain_shutdown_reason(libxl__gc *gc, uint32_t domid);
+_hidden int libxl__domain_cpupool(libxl__gc *gc, uint32_t domid);
+_hidden libxl_scheduler libxl__domain_scheduler(libxl__gc *gc, uint32_t domid);
_hidden int libxl__sched_set_params(libxl__gc *gc, uint32_t domid,
libxl_sched_params *scparams);
#define LIBXL__DOMAIN_IS_TYPE(gc, domid, type) \
libxl__domain_type((gc), (domid)) == LIBXL_DOMAIN_TYPE_##type
diff -r 3c9221c5b82e -r 6533501734be tools/libxl/libxl_types.idl
--- a/tools/libxl/libxl_types.idl Tue May 22 16:15:06 2012 +0100
+++ b/tools/libxl/libxl_types.idl Wed May 23 10:08:49 2012 +0100
@@ -109,7 +109,9 @@ libxl_bios_type = Enumeration("bios_type
])
# Consistent with values defined in domctl.h
+# Except unknown which we have made up
libxl_scheduler = Enumeration("scheduler", [
+ (0, "unknown"),
(4, "sedf"),
(5, "credit"),
(6, "credit2"),
diff -r 3c9221c5b82e -r 6533501734be tools/libxl/xl_cmdimpl.c
--- a/tools/libxl/xl_cmdimpl.c Tue May 22 16:15:06 2012 +0100
+++ b/tools/libxl/xl_cmdimpl.c Wed May 23 10:08:49 2012 +0100
@@ -4603,6 +4603,7 @@ static int sched_domain_output(libxl_sch
for (p = 0; p < n_pools; p++) {
libxl_cpupoolinfo_dispose(poolinfo + p);
}
+ free(poolinfo);
}
return 0;
}
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel
|
![]() |
Lists.xenproject.org is hosted with RackSpace, monitoring our |