WARNING - OLD ARCHIVES

This is an archived copy of the Xen.org mailing list, which we have preserved to ensure that existing links to archives are not broken. The live archive, which contains the latest emails, can be found at http://lists.xen.org/
   
 
 
Xen 
 
Home Products Support Community News
 
   
 

xen-devel

[Xen-devel] [Patch 2/6] Cpupools: libxc part

To: "xen-devel@xxxxxxxxxxxxxxxxxxx" <xen-devel@xxxxxxxxxxxxxxxxxxx>
Subject: [Xen-devel] [Patch 2/6] Cpupools: libxc part
From: Juergen Gross <juergen.gross@xxxxxxxxxxxxxx>
Date: Wed, 21 Apr 2010 13:16:49 +0200
Delivery-date: Wed, 21 Apr 2010 04:21:02 -0700
Dkim-signature: v=1; a=rsa-sha256; c=simple/simple; d=ts.fujitsu.com; i=juergen.gross@xxxxxxxxxxxxxx; q=dns/txt; s=s1536b; t=1271848672; x=1303384672; h=message-id:date:from:mime-version:to:subject; z=Message-ID:=20<4BCEDEA1.2030902@xxxxxxxxxxxxxx>|Date:=20 Wed,=2021=20Apr=202010=2013:16:49=20+0200|From:=20Juergen =20Gross=20<juergen.gross@xxxxxxxxxxxxxx>|MIME-Version: =201.0|To:=20"xen-devel@xxxxxxxxxxxxxxxxxxx"=20<xen-devel @lists.xensource.com>|Subject:=20[Patch=202/6]=20Cpupools :=20libxc=20part; bh=ZbIccKsS6vGfSgjw3lcUqxzdfsAJ4yqrNWP5bB3tCxc=; b=rqeCrK+ioWoXCCVIvclE6a6swNTvEsv3IqVk7bfcFJ00iuTklOXS/nJ3 vcu6QrEgt3EcQ8DwBheCLh9ecM4vEJAO4JmSFJyWqa40i1Vf6er6uFhqS 37P7zuDOPg1NnN55vHjpTSkZ4Q7ZZkj5nTXHfBXJUca7LD/UasY1XNEbV hTb5W8nuX/9cZwM1K0k5wMjKZ4wkCr9X7/bDNV0W+ZKeqxcy3RXEFKkVH xfB1sdVEgxjMAR5uJ3mL0+Hla8qhv;
Domainkey-signature: s=s1536a; d=ts.fujitsu.com; c=nofws; q=dns; h=X-SBRSScore:X-IronPort-AV:Received:X-IronPort-AV: Received:Received:Message-ID:Date:From:Organization: User-Agent:MIME-Version:To:Subject:X-Enigmail-Version: Content-Type; b=UCoBUpU+SEa4cPo75eUl2BvckVQzZQNu6KmyVCdLeZrye9XOxBKvidQc WQatO+gcEJjin3Ky9He4MgtVNGFnlSqGYpCjRcR4GYpK0qqdChcIsuctK 2Gd/RXWl1P/7Z0v2rlYOsrI8tSf1ZtJHyPTKP96XL0e4i4cGTU3IEmhYR e9WWMaj1sqEL4hy9ZS7rTuxvM3yHV+dJFYdjX2DjGyHQdgUOpfX2lhh6s c42NwsQlVRGecZfANNm/5QQwiCTeG;
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-devel-request@lists.xensource.com?subject=help>
List-id: Xen developer discussion <xen-devel.lists.xensource.com>
List-post: <mailto:xen-devel@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=unsubscribe>
Organization: Fujitsu Technology Solutions
Sender: xen-devel-bounces@xxxxxxxxxxxxxxxxxxx
User-agent: Mozilla-Thunderbird 2.0.0.24 (X11/20100329)
-- 
Juergen Gross                 Principal Developer Operating Systems
TSP ES&S SWE OS6                       Telephone: +49 (0) 89 3222 2967
Fujitsu Technology Solutions              e-mail: juergen.gross@xxxxxxxxxxxxxx
Domagkstr. 28                           Internet: ts.fujitsu.com
D-80807 Muenchen                 Company details: ts.fujitsu.com/imprint.html
Signed-off-by: juergen.gross@xxxxxxxxxxxxxx

diff -r dbf0fd95180f tools/libxc/Makefile
--- a/tools/libxc/Makefile      Tue Apr 20 14:32:53 2010 +0100
+++ b/tools/libxc/Makefile      Wed Apr 21 13:08:38 2010 +0200
@@ -8,6 +8,7 @@ CTRL_SRCS-y       += xc_core.c
 CTRL_SRCS-y       += xc_core.c
 CTRL_SRCS-$(CONFIG_X86) += xc_core_x86.c
 CTRL_SRCS-$(CONFIG_IA64) += xc_core_ia64.c
+CTRL_SRCS-y       += xc_cpupool.c
 CTRL_SRCS-y       += xc_domain.c
 CTRL_SRCS-y       += xc_evtchn.c
 CTRL_SRCS-y       += xc_misc.c
diff -r dbf0fd95180f tools/libxc/xc_cpupool.c
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/tools/libxc/xc_cpupool.c  Wed Apr 21 13:08:38 2010 +0200
@@ -0,0 +1,154 @@
+/******************************************************************************
+ * xc_cpupool.c
+ *
+ * API for manipulating and obtaining information on cpupools.
+ *
+ * Copyright (c) 2009, J Gross.
+ */
+
+#include <stdarg.h>
+#include "xc_private.h"
+
+/*
+ * Create a new cpupool.
+ *
+ * xc_handle: handle to an open hypervisor interface.
+ * ppoolid:   in/out. Pass 0 to let Xen choose a free pool id
+ *            (mapped to XEN_DOMCTL_CPUPOOL_PAR_ANY below); on success
+ *            the id actually assigned is written back.
+ * sched_id:  id of the scheduler to use for the new pool.
+ * Returns 0 on success, the non-zero domctl result on failure.
+ */
+int xc_cpupool_create(int xc_handle,
+                      uint32_t *ppoolid,
+                      uint32_t sched_id)
+{
+    int err;
+    DECLARE_DOMCTL;
+
+    domctl.cmd = XEN_DOMCTL_cpupool_op;
+    domctl.u.cpupool_op.op = XEN_DOMCTL_CPUPOOL_OP_CREATE;
+    /* A caller-supplied id of 0 means "any free id". */
+    domctl.u.cpupool_op.cpupool_id = (*ppoolid == 0) ?
+        XEN_DOMCTL_CPUPOOL_PAR_ANY : *ppoolid;
+    domctl.u.cpupool_op.sched_id = sched_id;
+    /* do_domctl_save retries the hypercall while it fails with EAGAIN. */
+    if ( (err = do_domctl_save(xc_handle, &domctl)) != 0 )
+        return err;
+
+    /* Report back the pool id the hypervisor actually picked. */
+    *ppoolid = domctl.u.cpupool_op.cpupool_id;
+    return 0;
+}
+
+/*
+ * Destroy the cpupool identified by poolid.
+ *
+ * The hypervisor is expected to reject the request if the pool is
+ * still in use (see the header comment: pool must be unused and have
+ * no cpu assigned).
+ * Returns the domctl result: 0 on success, non-zero on failure.
+ */
+int xc_cpupool_destroy(int xc_handle,
+                       uint32_t poolid)
+{
+    DECLARE_DOMCTL;
+
+    domctl.cmd = XEN_DOMCTL_cpupool_op;
+    domctl.u.cpupool_op.op = XEN_DOMCTL_CPUPOOL_OP_DESTROY;
+    domctl.u.cpupool_op.cpupool_id = poolid;
+    return do_domctl_save(xc_handle, &domctl);
+}
+
+/*
+ * Fetch info for up to n_max cpupools, starting at first_poolid and
+ * walking upwards through existing pool ids.
+ *
+ * info is an array of at least n_max entries; it is zeroed up front so
+ * unused trailing entries are all-zero.
+ * Returns the number of entries filled in (p), or, if the very first
+ * query already failed, the (negative) error from do_domctl_save.
+ * A failure after at least one successful query terminates the walk
+ * and returns the count gathered so far.
+ */
+int xc_cpupool_getinfo(int xc_handle, 
+                       uint32_t first_poolid,
+                       uint32_t n_max, 
+                       xc_cpupoolinfo_t *info)
+{
+    int err = 0;
+    int p;
+    uint32_t poolid = first_poolid;
+    /* Bounce buffer for the cpumap bitmap returned by Xen; sized to
+     * match the caller-visible uint64_t cpumap field. */
+    uint8_t local[sizeof (info->cpumap)];
+    DECLARE_DOMCTL;
+
+    memset(info, 0, n_max * sizeof(xc_cpupoolinfo_t));
+
+    for (p = 0; p < n_max; p++)
+    {
+        domctl.cmd = XEN_DOMCTL_cpupool_op;
+        domctl.u.cpupool_op.op = XEN_DOMCTL_CPUPOOL_OP_INFO;
+        domctl.u.cpupool_op.cpupool_id = poolid;
+        set_xen_guest_handle(domctl.u.cpupool_op.cpumap.bitmap, local);
+        domctl.u.cpupool_op.cpumap.nr_cpus = sizeof(info->cpumap) * 8;
+
+        /* The bitmap buffer must stay resident while Xen writes to it. */
+        if ( (err = lock_pages(local, sizeof(local))) != 0 )
+        {
+            PERROR("Could not lock memory for Xen hypercall");
+            break;
+        }
+        err = do_domctl_save(xc_handle, &domctl);
+        unlock_pages(local, sizeof (local));
+
+        if ( err < 0 )
+            break;
+
+        info->cpupool_id = domctl.u.cpupool_op.cpupool_id;
+        info->sched_id = domctl.u.cpupool_op.sched_id;
+        info->n_dom = domctl.u.cpupool_op.n_dom;
+        /* Convert the byte-wise bitmap from Xen into the uint64_t map. */
+        bitmap_byte_to_64(&(info->cpumap), local, sizeof(local) * 8);
+        /* Xen may have returned a pool with id > poolid (ids need not be
+         * contiguous); continue the walk just past the id we got back. */
+        poolid = domctl.u.cpupool_op.cpupool_id + 1;
+        info++;
+    }
+
+    /* Nothing fetched at all: propagate the error instead of a count. */
+    if ( p == 0 )
+        return err;
+
+    return p;
+}
+
+/*
+ * Assign a physical cpu to the cpupool identified by poolid.
+ *
+ * cpu: cpu number to add; a negative value is translated to
+ *      XEN_DOMCTL_CPUPOOL_PAR_ANY, asking Xen to pick one (per the
+ *      header docs: the first unassigned cpu).
+ * Returns the domctl result: 0 on success, non-zero on failure.
+ */
+int xc_cpupool_addcpu(int xc_handle,
+                      uint32_t poolid,
+                      int cpu)
+{
+    DECLARE_DOMCTL;
+
+    domctl.cmd = XEN_DOMCTL_cpupool_op;
+    domctl.u.cpupool_op.op = XEN_DOMCTL_CPUPOOL_OP_ADDCPU;
+    domctl.u.cpupool_op.cpupool_id = poolid;
+    domctl.u.cpupool_op.cpu = (cpu < 0) ? XEN_DOMCTL_CPUPOOL_PAR_ANY : cpu;
+    return do_domctl_save(xc_handle, &domctl);
+}
+
+/*
+ * Remove a physical cpu from the cpupool identified by poolid.
+ *
+ * cpu: cpu number to remove; a negative value is translated to
+ *      XEN_DOMCTL_CPUPOOL_PAR_ANY, letting Xen choose (per the header
+ *      docs: the last cpu of the pool).
+ * Returns the domctl result: 0 on success, non-zero on failure.
+ */
+int xc_cpupool_removecpu(int xc_handle,
+                         uint32_t poolid,
+                         int cpu)
+{
+    DECLARE_DOMCTL;
+
+    domctl.cmd = XEN_DOMCTL_cpupool_op;
+    domctl.u.cpupool_op.op = XEN_DOMCTL_CPUPOOL_OP_RMCPU;
+    domctl.u.cpupool_op.cpupool_id = poolid;
+    domctl.u.cpupool_op.cpu = (cpu < 0) ? XEN_DOMCTL_CPUPOOL_PAR_ANY : cpu;
+    return do_domctl_save(xc_handle, &domctl);
+}
+
+/*
+ * Move the domain identified by domid into the cpupool identified by
+ * poolid (the destination pool).
+ *
+ * Returns the domctl result: 0 on success, non-zero on failure.
+ */
+int xc_cpupool_movedomain(int xc_handle,
+                          uint32_t poolid,
+                          uint32_t domid)
+{
+    DECLARE_DOMCTL;
+
+    domctl.cmd = XEN_DOMCTL_cpupool_op;
+    domctl.u.cpupool_op.op = XEN_DOMCTL_CPUPOOL_OP_MOVEDOMAIN;
+    domctl.u.cpupool_op.cpupool_id = poolid;
+    domctl.u.cpupool_op.domid = domid;
+    return do_domctl_save(xc_handle, &domctl);
+}
+
+/*
+ * Retrieve the map of physical cpus not assigned to any cpupool.
+ *
+ * cpumap: out parameter receiving the bitmap of free cpus.
+ * Returns 0 on success, a negative error from do_domctl_save on
+ * failure, or the non-zero result of lock_pages if the bounce buffer
+ * could not be pinned.
+ */
+int xc_cpupool_freeinfo(int xc_handle,
+                        uint64_t *cpumap)
+{
+    int err;
+    /* Byte-wise bounce buffer for the bitmap filled in by Xen. */
+    uint8_t local[sizeof (*cpumap)];
+    DECLARE_DOMCTL;
+
+    domctl.cmd = XEN_DOMCTL_cpupool_op;
+    domctl.u.cpupool_op.op = XEN_DOMCTL_CPUPOOL_OP_FREEINFO;
+    set_xen_guest_handle(domctl.u.cpupool_op.cpumap.bitmap, local);
+    domctl.u.cpupool_op.cpumap.nr_cpus = sizeof(*cpumap) * 8;
+
+    /* The buffer must stay resident while Xen writes the bitmap. */
+    if ( (err = lock_pages(local, sizeof(local))) != 0 )
+    {
+        PERROR("Could not lock memory for Xen hypercall");
+        return err;
+    }
+
+    err = do_domctl_save(xc_handle, &domctl);
+    unlock_pages(local, sizeof (local));
+
+    if (err < 0)
+        return err;
+
+    /* Convert the byte-wise bitmap from Xen into the uint64_t map. */
+    bitmap_byte_to_64(cpumap, local, sizeof(local) * 8);
+
+    return 0;
+}
diff -r dbf0fd95180f tools/libxc/xc_domain.c
--- a/tools/libxc/xc_domain.c   Tue Apr 20 14:32:53 2010 +0100
+++ b/tools/libxc/xc_domain.c   Wed Apr 21 13:08:38 2010 +0200
@@ -220,6 +220,7 @@ int xc_domain_getinfo(int xc_handle,
         info->cpu_time = domctl.u.getdomaininfo.cpu_time;
         info->nr_online_vcpus = domctl.u.getdomaininfo.nr_online_vcpus;
         info->max_vcpu_id = domctl.u.getdomaininfo.max_vcpu_id;
+        info->cpupool = domctl.u.getdomaininfo.cpupool;
 
         memcpy(info->handle, domctl.u.getdomaininfo.handle,
                sizeof(xen_domain_handle_t));
diff -r dbf0fd95180f tools/libxc/xc_private.h
--- a/tools/libxc/xc_private.h  Tue Apr 20 14:32:53 2010 +0100
+++ b/tools/libxc/xc_private.h  Wed Apr 21 13:08:38 2010 +0200
@@ -164,6 +164,19 @@ static inline int do_domctl(int xc_handl
     return ret;
 }
 
+/*
+ * Wrapper around do_domctl that retries the hypercall as long as it
+ * fails with errno == EAGAIN (e.g. while the target is temporarily
+ * busy), returning the final result otherwise.
+ */
+static inline int do_domctl_save(int xc_handle, struct xen_domctl *domctl)
+{
+    int ret;
+
+    do
+    {
+        ret = do_domctl(xc_handle, domctl);
+    }
+    while ( (ret < 0 ) && (errno == EAGAIN) );
+
+    return ret;
+}
+
 static inline int do_sysctl(int xc_handle, struct xen_sysctl *sysctl)
 {
     int ret = -1;
diff -r dbf0fd95180f tools/libxc/xenctrl.h
--- a/tools/libxc/xenctrl.h     Tue Apr 20 14:32:53 2010 +0100
+++ b/tools/libxc/xenctrl.h     Wed Apr 21 13:08:38 2010 +0200
@@ -171,6 +171,7 @@ typedef struct xc_dominfo {
     unsigned int  nr_online_vcpus;
     unsigned int  max_vcpu_id;
     xen_domain_handle_t handle;
+    unsigned int  cpupool;
 } xc_dominfo_t;
 
 typedef xen_domctl_getdomaininfo_t xc_domaininfo_t;
@@ -508,6 +509,100 @@ int xc_domain_setdebugging(int xc_handle
 int xc_domain_setdebugging(int xc_handle,
                            uint32_t domid,
                            unsigned int enable);
+
+/*
+ * CPUPOOL MANAGEMENT FUNCTIONS
+ */
+
+/* Per-cpupool information as returned by xc_cpupool_getinfo(). */
+typedef struct xc_cpupoolinfo {
+    uint32_t cpupool_id;   /* id of this cpupool */
+    uint32_t sched_id;     /* scheduler used by this pool */
+    uint32_t n_dom;        /* number of domains in this pool */
+    uint64_t cpumap;       /* bitmap of cpus assigned to this pool */
+} xc_cpupoolinfo_t;
+
+/**
+ * Create a new cpupool.
+ *
+ * @parm xc_handle a handle to an open hypervisor interface
+ * @parm ppoolid pointer to the new cpupool id (in/out)
+ * @parm sched_id id of scheduler to use for pool
+ * return 0 on success, -1 on failure
+ */
+int xc_cpupool_create(int xc_handle,
+                      uint32_t *ppoolid,
+                      uint32_t sched_id);
+
+/**
+ * Destroy a cpupool. Pool must be unused and have no cpu assigned.
+ *
+ * @parm xc_handle a handle to an open hypervisor interface
+ * @parm poolid id of the cpupool to destroy
+ * return 0 on success, -1 on failure
+ */
+int xc_cpupool_destroy(int xc_handle,
+                       uint32_t poolid);
+
+/**
+ * Get cpupool info. Returns info for up to the specified number of cpupools
+ * starting at the given id.
+ * @parm xc_handle a handle to an open hypervisor interface
+ * @parm first_poolid lowest id for which info is returned
+ * @parm n_max maximum number of cpupools to return info
+ * @parm info pointer to xc_cpupoolinfo_t array
+ * return number of cpupool infos
+ */
+int xc_cpupool_getinfo(int xc_handle,
+                       uint32_t first_poolid,
+                       uint32_t n_max,
+                       xc_cpupoolinfo_t *info);
+
+/**
+ * Add cpu to a cpupool. cpu may be -1 indicating the first unassigned.
+ *
+ * @parm xc_handle a handle to an open hypervisor interface
+ * @parm poolid id of the cpupool
+ * @parm cpu cpu number to add
+ * return 0 on success, -1 on failure
+ */
+int xc_cpupool_addcpu(int xc_handle,
+                      uint32_t poolid,
+                      int cpu);
+
+/**
+ * Remove cpu from cpupool. cpu may be -1 indicating the last cpu of the pool.
+ *
+ * @parm xc_handle a handle to an open hypervisor interface
+ * @parm poolid id of the cpupool
+ * @parm cpu cpu number to remove
+ * return 0 on success, -1 on failure
+ */
+int xc_cpupool_removecpu(int xc_handle,
+                         uint32_t poolid,
+                         int cpu);
+
+/**
+ * Move domain to another cpupool.
+ *
+ * @parm xc_handle a handle to an open hypervisor interface
+ * @parm poolid id of the destination cpupool
+ * @parm domid id of the domain to move
+ * return 0 on success, -1 on failure
+ */
+int xc_cpupool_movedomain(int xc_handle,
+                          uint32_t poolid,
+                          uint32_t domid);
+
+/**
+ * Return map of cpus not in any cpupool.
+ *
+ * @parm xc_handle a handle to an open hypervisor interface
+ * @parm cpumap pointer where to store the cpumap
+ * return 0 on success, -1 on failure
+ */
+int xc_cpupool_freeinfo(int xc_handle,
+                        uint64_t *cpumap);
+
 
 /*
  * EVENT CHANNEL FUNCTIONS
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel
[Prev in Thread] Current Thread [Next in Thread]