
[Xen-devel] [Patch 2/6] xen: cpupool support - support in libxc


  • To: "xen-devel@xxxxxxxxxxxxxxxxxxx" <xen-devel@xxxxxxxxxxxxxxxxxxx>
  • From: Juergen Gross <juergen.gross@xxxxxxxxxxxxxx>
  • Date: Fri, 17 Apr 2009 11:53:57 +0200
  • Delivery-date: Fri, 17 Apr 2009 02:55:17 -0700
  • List-id: Xen developer discussion <xen-devel.lists.xensource.com>

Support cpupools in libxc: add xc_cpupool_* interfaces for creating and
destroying pools, querying pool and free-cpu information, adding and removing
cpus, and moving domains between pools, and extend xc_domain_create() to
accept an optional target cpupool.

Signed-off-by: juergen.gross@xxxxxxxxxxxxxx
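
For reference, a minimal usage sketch of the new control interfaces as a
toolstack might call them. This is illustrative only and not part of the
patch: the scheduler id, the domain id and the simplified error handling are
assumptions.

#include <stdint.h>
#include <xenctrl.h>

/* Illustrative sketch only; the ids below are assumed example values. */
static int cpupool_example(void)
{
    int xc = xc_interface_open();
    uint32_t poolid = 0;        /* 0: let Xen choose the new pool id */
    uint32_t sched_id = 5;      /* assumed: id of the credit scheduler */
    uint32_t domid = 5;         /* assumed: an already existing domain */
    xc_cpupoolinfo_t info;
    uint64_t free_cpus;

    if ( xc < 0 )
        return -1;

    /* Create a pool and give it the first unassigned cpu (-1). */
    if ( xc_cpupool_create(xc, &poolid, sched_id) ||
         xc_cpupool_addcpu(xc, poolid, -1) )
        goto out;

    /* Query the new pool and the cpus currently outside any pool. */
    xc_cpupool_getinfo(xc, poolid, 1, &info);
    xc_cpupool_freeinfo(xc, &free_cpus);

    /* Move a domain into the pool, then back to pool 0 again. */
    xc_cpupool_movedomain(xc, poolid, domid);
    xc_cpupool_movedomain(xc, 0, domid);

    /* Tear down: the pool must be empty before it can be destroyed. */
    xc_cpupool_removecpu(xc, poolid, -1);
    xc_cpupool_destroy(xc, poolid);

 out:
    xc_interface_close(xc);
    return 0;
}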

-- 
Juergen Gross                 Principal Developer Operating Systems
TSP ES&S SWE OS6                       Telephone: +49 (0) 89 636 47950
Fujitsu Technology Solutions              e-mail: juergen.gross@xxxxxxxxxxxxxx
Otto-Hahn-Ring 6                        Internet: ts.fujitsu.com
D-81739 Muenchen                 Company details: ts.fujitsu.com/imprint.html
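
Not part of the patch, just a sketch of how a caller might use the extended
xc_domain_create(): when XEN_DOMCTL_CDF_pool (introduced by the hypervisor
part of this series) is set in flags, the target pool id is passed as the
additional variadic argument; without the flag the extra argument is simply
omitted as before. The pool id used below is an assumed example value.

#include <xenctrl.h>

/* Illustrative only; pool id 1 is assumed to already exist. */
static int create_in_pool(int xc)
{
    xen_domain_handle_t handle = { 0 };
    uint32_t domid = 0;         /* 0: let Xen pick the domain id */
    uint32_t poolid = 1;

    /* With XEN_DOMCTL_CDF_pool, libxc reads the pool id from the
     * variadic argument and the domain is created in that pool. */
    if ( xc_domain_create(xc, 0 /* ssidref */, handle,
                          XEN_DOMCTL_CDF_pool, &domid, poolid) )
        return -1;

    /* Existing callers are unchanged: no flag, no extra argument. */
    return xc_domain_create(xc, 0, handle, 0, &domid);
}
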
diff -r 655dc3bc1d8e tools/libxc/Makefile
--- a/tools/libxc/Makefile      Thu Apr 16 11:54:06 2009 +0100
+++ b/tools/libxc/Makefile      Thu Apr 09 10:02:30 2009 +0200
@@ -8,6 +8,7 @@ CTRL_SRCS-y       += xc_core.c
 CTRL_SRCS-y       += xc_core.c
 CTRL_SRCS-$(CONFIG_X86) += xc_core_x86.c
 CTRL_SRCS-$(CONFIG_IA64) += xc_core_ia64.c
+CTRL_SRCS-y       += xc_cpupool.c
 CTRL_SRCS-y       += xc_domain.c
 CTRL_SRCS-y       += xc_evtchn.c
 CTRL_SRCS-y       += xc_misc.c
diff -r 655dc3bc1d8e tools/libxc/xc_domain.c
--- a/tools/libxc/xc_domain.c   Thu Apr 16 11:54:06 2009 +0100
+++ b/tools/libxc/xc_domain.c   Thu Apr 09 10:04:51 2009 +0200
@@ -6,6 +6,7 @@
  * Copyright (c) 2003, K A Fraser.
  */
 
+#include <stdarg.h>
 #include "xc_private.h"
 #include "xg_save_restore.h"
 #include <xen/memory.h>
@@ -15,15 +16,21 @@ int xc_domain_create(int xc_handle,
                      uint32_t ssidref,
                      xen_domain_handle_t handle,
                      uint32_t flags,
-                     uint32_t *pdomid)
+                     uint32_t *pdomid, ...)
 {
     int err;
+    va_list ap;
     DECLARE_DOMCTL;
 
     domctl.cmd = XEN_DOMCTL_createdomain;
     domctl.domain = (domid_t)*pdomid;
     domctl.u.createdomain.ssidref = ssidref;
     domctl.u.createdomain.flags   = flags;
+    if ( flags & XEN_DOMCTL_CDF_pool ) {
+        va_start(ap, pdomid);
+        domctl.u.createdomain.cpupool = va_arg(ap, uint32_t);
+        va_end(ap);
+    }
     memcpy(domctl.u.createdomain.handle, handle, sizeof(xen_domain_handle_t));
     if ( (err = do_domctl(xc_handle, &domctl)) != 0 )
         return err;
@@ -205,6 +212,7 @@ int xc_domain_getinfo(int xc_handle,
         info->cpu_time = domctl.u.getdomaininfo.cpu_time;
         info->nr_online_vcpus = domctl.u.getdomaininfo.nr_online_vcpus;
         info->max_vcpu_id = domctl.u.getdomaininfo.max_vcpu_id;
+        info->cpupool = domctl.u.getdomaininfo.cpupool;
 
         memcpy(info->handle, domctl.u.getdomaininfo.handle,
                sizeof(xen_domain_handle_t));
diff -r 655dc3bc1d8e tools/libxc/xc_private.h
--- a/tools/libxc/xc_private.h  Thu Apr 16 11:54:06 2009 +0100
+++ b/tools/libxc/xc_private.h  Thu Apr 09 10:06:34 2009 +0200
@@ -155,6 +155,19 @@ static inline int do_domctl(int xc_handl
     unlock_pages(domctl, sizeof(*domctl));
 
  out1:
+    return ret;
+}
+
+static inline int do_domctl_save(int xc_handle, struct xen_domctl *domctl)
+{
+    int ret;
+
+    do
+    {
+        ret = do_domctl(xc_handle, domctl);
+    }
+    while ( (ret < 0) && (errno == EAGAIN) );
+
     return ret;
 }
 
diff -r 655dc3bc1d8e tools/libxc/xenctrl.h
--- a/tools/libxc/xenctrl.h     Thu Apr 16 11:54:06 2009 +0100
+++ b/tools/libxc/xenctrl.h     Thu Apr 09 10:11:31 2009 +0200
@@ -165,6 +165,7 @@ typedef struct xc_dominfo {
     unsigned int  nr_online_vcpus;
     unsigned int  max_vcpu_id;
     xen_domain_handle_t handle;
+    unsigned int  cpupool;
 } xc_dominfo_t;
 
 typedef xen_domctl_getdomaininfo_t xc_domaininfo_t;
@@ -201,7 +202,7 @@ int xc_domain_create(int xc_handle,
                      uint32_t ssidref,
                      xen_domain_handle_t handle,
                      uint32_t flags,
-                     uint32_t *pdomid);
+                     uint32_t *pdomid, ...);
 
 
 /* Functions to produce a dump of a given domain
@@ -492,6 +493,99 @@ int xc_domain_setdebugging(int xc_handle
 int xc_domain_setdebugging(int xc_handle,
                            uint32_t domid,
                            unsigned int enable);
+
+/*
+ * CPUPOOL MANAGEMENT FUNCTIONS
+ */
+
+typedef struct xc_cpupoolinfo {
+    uint32_t cpupool_id;
+    uint32_t sched_id;
+    uint32_t n_dom;
+    uint64_t cpumap;
+} xc_cpupoolinfo_t;
+
+/**
+ * Create a new cpupool.
+ *
+ * @parm xc_handle a handle to an open hypervisor interface
+ * @parm ppoolid pointer to the new cpupool id (in/out)
+ * @parm sched_id id of scheduler to use for pool
+ * return 0 on success, -1 on failure
+ */
+int xc_cpupool_create(int xc_handle,
+                      uint32_t *ppoolid,
+                      uint32_t sched_id);
+
+/**
+ * Destroy a cpupool. The pool must contain no domains and have no cpus assigned.
+ *
+ * @parm xc_handle a handle to an open hypervisor interface
+ * @parm poolid id of the cpupool to destroy
+ * return 0 on success, -1 on failure
+ */
+int xc_cpupool_destroy(int xc_handle,
+                       uint32_t poolid);
+
+/**
+ * Get cpupool info. Returns info for up to the specified number of cpupools
+ * starting at the given id.
+ * @parm xc_handle a handle to an open hypervisor interface
+ * @parm first_poolid lowest id for which info is returned
+ * @parm n_max maximum number of cpupools for which to return info
+ * @parm info pointer to xc_cpupoolinfo_t array
+ * return number of cpupool infos returned on success, -1 on failure
+ */
+int xc_cpupool_getinfo(int xc_handle,
+                       uint32_t first_poolid,
+                       uint32_t n_max,
+                       xc_cpupoolinfo_t *info);
+
+/**
+ * Add a cpu to a cpupool. cpu may be -1, indicating the first unassigned cpu.
+ *
+ * @parm xc_handle a handle to an open hypervisor interface
+ * @parm poolid id of the cpupool
+ * @parm cpu cpu number to add
+ * return 0 on success, -1 on failure
+ */
+int xc_cpupool_addcpu(int xc_handle,
+                      uint32_t poolid,
+                      int cpu);
+
+/**
+ * Remove a cpu from a cpupool. cpu may be -1, indicating the last cpu of the pool.
+ *
+ * @parm xc_handle a handle to an open hypervisor interface
+ * @parm poolid id of the cpupool
+ * @parm cpu cpu number to remove
+ * return 0 on success, -1 on failure
+ */
+int xc_cpupool_removecpu(int xc_handle,
+                         uint32_t poolid,
+                         int cpu);
+
+/**
+ * Move domain to another cpupool.
+ *
+ * @parm xc_handle a handle to an open hypervisor interface
+ * @parm poolid id of the destination cpupool
+ * @parm domid id of the domain to move
+ * return 0 on success, -1 on failure
+ */
+int xc_cpupool_movedomain(int xc_handle,
+                          uint32_t poolid,
+                          uint32_t domid);
+
+/**
+ * Return map of cpus not in any cpupool.
+ *
+ * @parm xc_handle a handle to an open hypervisor interface
+ * @parm cpumap pointer to where the cpumap is stored
+ * return 0 on success, -1 on failure
+ */
+int xc_cpupool_freeinfo(int xc_handle,
+                        uint64_t *cpumap);
 
 /*
  * EVENT CHANNEL FUNCTIONS
diff -r 655dc3bc1d8e tools/libxc/xc_cpupool.c
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/tools/libxc/xc_cpupool.c  Thu Apr 09 10:32:04 2009 +0200
@@ -0,0 +1,154 @@
+/******************************************************************************
+ * xc_cpupool.c
+ *
+ * API for manipulating and obtaining information on cpupools.
+ *
+ * Copyright (c) 2009, J Gross.
+ */
+
+#include <stdarg.h>
+#include "xc_private.h"
+
+int xc_cpupool_create(int xc_handle,
+                      uint32_t *ppoolid,
+                      uint32_t sched_id)
+{
+    int err;
+    DECLARE_DOMCTL;
+
+    domctl.cmd = XEN_DOMCTL_cpupool_op;
+    domctl.u.cpupool_op.op = XEN_DOMCTL_CPUPOOL_OP_CREATE;
+    domctl.u.cpupool_op.cpupool_id = (*ppoolid == 0) ?
+        XEN_DOMCTL_CPUPOOL_PAR_ANY : *ppoolid;
+    domctl.u.cpupool_op.sched_id = sched_id;
+    if ( (err = do_domctl_save(xc_handle, &domctl)) != 0 )
+        return err;
+
+    *ppoolid = domctl.u.cpupool_op.cpupool_id;
+    return 0;
+}
+
+int xc_cpupool_destroy(int xc_handle,
+                       uint32_t poolid)
+{
+    DECLARE_DOMCTL;
+
+    domctl.cmd = XEN_DOMCTL_cpupool_op;
+    domctl.u.cpupool_op.op = XEN_DOMCTL_CPUPOOL_OP_DESTROY;
+    domctl.u.cpupool_op.cpupool_id = poolid;
+    return do_domctl_save(xc_handle, &domctl);
+}
+
+int xc_cpupool_getinfo(int xc_handle, 
+                       uint32_t first_poolid,
+                       uint32_t n_max, 
+                       xc_cpupoolinfo_t *info)
+{
+    int err = 0;
+    int p;
+    uint32_t poolid = first_poolid;
+    uint8_t local[sizeof(info->cpumap)];
+    DECLARE_DOMCTL;
+
+    memset(info, 0, n_max * sizeof(xc_cpupoolinfo_t));
+
+    for ( p = 0; p < n_max; p++ )
+    {
+        domctl.cmd = XEN_DOMCTL_cpupool_op;
+        domctl.u.cpupool_op.op = XEN_DOMCTL_CPUPOOL_OP_INFO;
+        domctl.u.cpupool_op.cpupool_id = poolid;
+        set_xen_guest_handle(domctl.u.cpupool_op.cpumap.bitmap, local);
+        domctl.u.cpupool_op.cpumap.nr_cpus = sizeof(info->cpumap) * 8;
+
+        if ( (err = lock_pages(local, sizeof(local))) != 0 )
+        {
+            PERROR("Could not lock memory for Xen hypercall");
+            break;
+        }
+        err = do_domctl_save(xc_handle, &domctl);
+        unlock_pages(local, sizeof(local));
+
+        if ( err < 0 )
+            break;
+
+        info->cpupool_id = domctl.u.cpupool_op.cpupool_id;
+        info->sched_id = domctl.u.cpupool_op.sched_id;
+        info->n_dom = domctl.u.cpupool_op.n_dom;
+        bitmap_byte_to_64(&(info->cpumap), local, sizeof(local) * 8);
+        poolid = domctl.u.cpupool_op.cpupool_id + 1;
+        info++;
+    }
+
+    if ( p == 0 )
+        return err;
+
+    return p;
+}
+
+int xc_cpupool_addcpu(int xc_handle,
+                      uint32_t poolid,
+                      int cpu)
+{
+    DECLARE_DOMCTL;
+
+    domctl.cmd = XEN_DOMCTL_cpupool_op;
+    domctl.u.cpupool_op.op = XEN_DOMCTL_CPUPOOL_OP_ADDCPU;
+    domctl.u.cpupool_op.cpupool_id = poolid;
+    domctl.u.cpupool_op.cpu = (cpu < 0) ? XEN_DOMCTL_CPUPOOL_PAR_ANY : cpu;
+    return do_domctl_save(xc_handle, &domctl);
+}
+
+int xc_cpupool_removecpu(int xc_handle,
+                         uint32_t poolid,
+                         int cpu)
+{
+    DECLARE_DOMCTL;
+
+    domctl.cmd = XEN_DOMCTL_cpupool_op;
+    domctl.u.cpupool_op.op = XEN_DOMCTL_CPUPOOL_OP_RMCPU;
+    domctl.u.cpupool_op.cpupool_id = poolid;
+    domctl.u.cpupool_op.cpu = (cpu < 0) ? XEN_DOMCTL_CPUPOOL_PAR_ANY : cpu;
+    return do_domctl_save(xc_handle, &domctl);
+}
+
+int xc_cpupool_movedomain(int xc_handle,
+                          uint32_t poolid,
+                          uint32_t domid)
+{
+    DECLARE_DOMCTL;
+
+    domctl.cmd = XEN_DOMCTL_cpupool_op;
+    domctl.u.cpupool_op.op = XEN_DOMCTL_CPUPOOL_OP_MOVEDOMAIN;
+    domctl.u.cpupool_op.cpupool_id = poolid;
+    domctl.u.cpupool_op.domid = domid;
+    return do_domctl_save(xc_handle, &domctl);
+}
+
+int xc_cpupool_freeinfo(int xc_handle,
+                        uint64_t *cpumap)
+{
+    int err;
+    uint8_t local[sizeof(*cpumap)];
+    DECLARE_DOMCTL;
+
+    domctl.cmd = XEN_DOMCTL_cpupool_op;
+    domctl.u.cpupool_op.op = XEN_DOMCTL_CPUPOOL_OP_FREEINFO;
+    set_xen_guest_handle(domctl.u.cpupool_op.cpumap.bitmap, local);
+    domctl.u.cpupool_op.cpumap.nr_cpus = sizeof(*cpumap) * 8;
+
+    if ( (err = lock_pages(local, sizeof(local))) != 0 )
+    {
+        PERROR("Could not lock memory for Xen hypercall");
+        return err;
+    }
+
+    err = do_domctl_save(xc_handle, &domctl);
+    unlock_pages(local, sizeof(local));
+
+    if ( err < 0 )
+        return err;
+
+    bitmap_byte_to_64(cpumap, local, sizeof(local) * 8);
+
+    return 0;
+}