[Xen-devel] Re: [PATCH 3 of 7] xl: don't use libxl allocator for vcpu_list

On Wed, 2010-08-11 at 13:11 +0100, Stefano Stabellini wrote:
> On Tue, 10 Aug 2010, Gianni Tedesco (3P) wrote:
> >  tools/libxl/libxl.c      |  17 ++++++++++++++---
> >  tools/libxl/libxl.h      |   1 +
> >  tools/libxl/xl_cmdimpl.c |   8 +++++---
> >  3 files changed, 20 insertions(+), 6 deletions(-)
> > 
> > 
> > This also fixes a bug with an erroneous call to libxl_free(). A destructor
> > for the vcpu list is also implemented, which is called from xl.
> > 
> > Signed-off-by: Gianni Tedesco <gianni.tedesco@xxxxxxxxxx>
> 
> I applied all the patches of this series apart from this one that
> doesn't apply correctly. Please update and resend.

diff -r fffedd3d70e1 tools/libxl/libxl.c
--- a/tools/libxl/libxl.c       Wed Aug 11 13:18:05 2010 +0100
+++ b/tools/libxl/libxl.c       Wed Aug 11 14:16:26 2010 +0100
@@ -2722,6 +2722,8 @@ libxl_vcpuinfo *libxl_list_vcpu(libxl_ct
     xc_domaininfo_t domaininfo;
     xc_vcpuinfo_t vcpuinfo;
     xc_physinfo_t physinfo = { 0 };
+    uint64_t *cpumaps;
+    unsigned num_cpuwords;
 
     if (xc_domain_getinfolist(ctx->xch, domid, 1, &domaininfo) != 1) {
         XL_LOG_ERRNO(ctx, XL_LOG_ERROR, "getting infolist");
@@ -2732,14 +2734,15 @@ libxl_vcpuinfo *libxl_list_vcpu(libxl_ct
         return NULL;
     }
     *nrcpus = physinfo.max_cpu_id + 1;
-    ptr = libxl_calloc(ctx, domaininfo.max_vcpu_id + 1, sizeof (libxl_vcpuinfo));
+    ret = ptr = calloc(domaininfo.max_vcpu_id + 1, sizeof (libxl_vcpuinfo));
     if (!ptr) {
         return NULL;
     }
 
-    ret = ptr;
+    num_cpuwords = ((physinfo.max_cpu_id + 64) / 64);
+    cpumaps = calloc(num_cpuwords * sizeof(*cpumaps), domaininfo.max_vcpu_id + 1);
     for (*nb_vcpu = 0; *nb_vcpu <= domaininfo.max_vcpu_id; ++*nb_vcpu, ++ptr) {
-        ptr->cpumap = libxl_calloc(ctx, (*nrcpus + 63) / 64, sizeof (uint64_t));
+        ptr->cpumap = cpumaps + (num_cpuwords * *nb_vcpu);
         if (!ptr->cpumap) {
             return NULL;
         }
@@ -2762,6 +2765,13 @@ libxl_vcpuinfo *libxl_list_vcpu(libxl_ct
     return ret;
 }
 
+void libxl_free_vcpu_list(libxl_vcpuinfo *vcpu)
+{
+    if ( vcpu )
+        free(vcpu[0].cpumap);
+    free(vcpu);
+}
+
 int libxl_set_vcpuaffinity(libxl_ctx *ctx, uint32_t domid, uint32_t vcpuid,
                            uint64_t *cpumap, int nrcpus)
 {
diff -r fffedd3d70e1 tools/libxl/libxl.h
--- a/tools/libxl/libxl.h       Wed Aug 11 13:18:05 2010 +0100
+++ b/tools/libxl/libxl.h       Wed Aug 11 14:16:26 2010 +0100
@@ -598,6 +598,7 @@ typedef struct {
 int libxl_get_physinfo(libxl_ctx *ctx, libxl_physinfo *physinfo);
 libxl_vcpuinfo *libxl_list_vcpu(libxl_ctx *ctx, uint32_t domid,
                                        int *nb_vcpu, int *nrcpus);
+void libxl_free_vcpu_list(libxl_vcpuinfo *vcpu);
 int libxl_set_vcpuaffinity(libxl_ctx *ctx, uint32_t domid, uint32_t vcpuid,
                            uint64_t *cpumap, int nrcpus);
 int libxl_set_vcpucount(libxl_ctx *ctx, uint32_t domid, uint32_t count);
diff -r fffedd3d70e1 tools/libxl/xl_cmdimpl.c
--- a/tools/libxl/xl_cmdimpl.c  Wed Aug 11 13:18:05 2010 +0100
+++ b/tools/libxl/xl_cmdimpl.c  Wed Aug 11 14:16:26 2010 +0100
@@ -3268,7 +3268,7 @@ static void print_vcpuinfo(uint32_t tdom
 void vcpulist(int argc, char **argv)
 {
     libxl_dominfo *dominfo;
-    libxl_vcpuinfo *vcpuinfo;
+    libxl_vcpuinfo *vcpuinfo, *list = NULL;
     libxl_physinfo physinfo;
     int nb_vcpu, nb_domain, nrcpus;
 
@@ -3284,7 +3284,7 @@ void vcpulist(int argc, char **argv)
             goto vcpulist_out;
         }
         for (; nb_domain > 0; --nb_domain, ++dominfo) {
-            if (!(vcpuinfo = libxl_list_vcpu(&ctx, dominfo->domid, &nb_vcpu,
+            if (!(list = vcpuinfo = libxl_list_vcpu(&ctx, dominfo->domid, &nb_vcpu,
                 &nrcpus))) {
                 fprintf(stderr, "libxl_list_vcpu failed.\n");
                 goto vcpulist_out;
@@ -3292,19 +3292,21 @@ void vcpulist(int argc, char **argv)
             for (; nb_vcpu > 0; --nb_vcpu, ++vcpuinfo) {
                 print_vcpuinfo(dominfo->domid, vcpuinfo, physinfo.nr_cpus);
             }
+            libxl_free_vcpu_list(list);
         }
     } else {
         for (; argc > 0; ++argv, --argc) {
             if (domain_qualifier_to_domid(*argv, &domid, 0) < 0) {
                 fprintf(stderr, "%s is an invalid domain identifier\n", *argv);
             }
-            if (!(vcpuinfo = libxl_list_vcpu(&ctx, domid, &nb_vcpu, &nrcpus))) {
+            if (!(list = vcpuinfo = libxl_list_vcpu(&ctx, domid, &nb_vcpu, &nrcpus))) {
                 fprintf(stderr, "libxl_list_vcpu failed.\n");
                 goto vcpulist_out;
             }
             for (; nb_vcpu > 0; --nb_vcpu, ++vcpuinfo) {
                 print_vcpuinfo(domid, vcpuinfo, physinfo.nr_cpus);
             }
+            libxl_free_vcpu_list(list);
         }
     }
   vcpulist_out:

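For reference, a minimal caller-side sketch of how the new pairing is meant to be
used: one libxl_list_vcpu() call balanced by one libxl_free_vcpu_list() call. This
is illustrative only and not part of the patch; the dump_vcpus() helper is
hypothetical, "ctx" is assumed to be an already-initialised libxl_ctx, and only the
cpumap field (which the patch touches) is read from each entry.

#include <stdio.h>
#include <stdint.h>
#include "libxl.h"

static void dump_vcpus(libxl_ctx *ctx, uint32_t domid)
{
    libxl_vcpuinfo *list, *vi;
    int nb_vcpu, nrcpus, i;

    /* One calloc for the vcpuinfo array, one shared block for all cpumaps. */
    list = libxl_list_vcpu(ctx, domid, &nb_vcpu, &nrcpus);
    if (!list) {
        fprintf(stderr, "libxl_list_vcpu failed.\n");
        return;
    }

    for (i = 0, vi = list; i < nb_vcpu; i++, vi++)
        printf("vcpu %d: affinity includes cpu0: %s\n", i,
               (vi->cpumap[0] & 1) ? "yes" : "no");

    /* Frees the shared cpumap block (vcpu[0].cpumap) and the array itself. */
    libxl_free_vcpu_list(list);
}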