
[Xen-devel] [Patch 3/6] Cpupools: libxen part


  • To: "xen-devel@xxxxxxxxxxxxxxxxxxx" <xen-devel@xxxxxxxxxxxxxxxxxxx>
  • From: Juergen Gross <juergen.gross@xxxxxxxxxxxxxx>
  • Date: Tue, 20 Apr 2010 11:39:44 +0200
  • Delivery-date: Tue, 20 Apr 2010 02:43:25 -0700
  • List-id: Xen developer discussion <xen-devel.lists.xensource.com>

-- 
Juergen Gross                 Principal Developer Operating Systems
TSP ES&S SWE OS6                       Telephone: +49 (0) 89 3222 2967
Fujitsu Technology Solutions              e-mail: juergen.gross@xxxxxxxxxxxxxx
Domagkstr. 28                           Internet: ts.fujitsu.com
D-80807 Muenchen                 Company details: ts.fujitsu.com/imprint.html
Signed-off-by: juergen.gross@xxxxxxxxxxxxxx
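
As a quick reference for reviewers, here is a compressed usage sketch of the new
cpu_pool bindings, condensed from the pool_tests() code added to test_bindings.c
at the end of this patch. It assumes an already logged-in xen_session, omits most
error handling, and all names and values ("example-pool", "credit", CPU "3") are
purely illustrative:

#include <string.h>
#include <xen/api/xen_all.h>

/* Create an inactive pool, activate it, attach one free physical CPU,
 * detach it again, then tear everything down.  'session' is assumed to
 * be an already authenticated xen_session. */
static void cpu_pool_example(xen_session *session)
{
    /* Propose physical CPU 3 and tag the pool via other_config. */
    xen_string_set *proposed = xen_string_set_alloc(1);
    proposed->contents[0] = strdup("3");
    xen_string_string_map *other = xen_string_string_map_alloc(1);
    other->contents[0].key = strdup("type");
    other->contents[0].val = strdup("example");

    xen_cpu_pool_record rec = {
        .name_label       = "example-pool",
        .name_description = "illustrative pool",
        .auto_power_on    = false,
        .ncpu             = 1,
        .sched_policy     = "credit",
        .proposed_cpus    = proposed,
        .other_config     = other,
    };

    xen_cpu_pool pool = NULL;
    if (xen_cpu_pool_create(session, &pool, &rec) &&
        xen_cpu_pool_activate(session, pool))
    {
        /* Attach one currently unassigned physical CPU, then detach it. */
        xen_host_cpu_set *free_cpus = NULL;
        if (xen_host_cpu_get_unassigned_cpus(session, &free_cpus) &&
            free_cpus->size > 0)
        {
            xen_cpu_pool_add_host_CPU_live(session, pool, free_cpus->contents[0]);
            xen_cpu_pool_remove_host_CPU_live(session, pool, free_cpus->contents[0]);
        }
        xen_host_cpu_set_free(free_cpus);

        xen_cpu_pool_deactivate(session, pool);
        xen_cpu_pool_destroy(session, pool);
    }

    xen_cpu_pool_free(pool);
    xen_string_set_free(proposed);
    xen_string_string_map_free(other);
}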

diff -r fadf63ab49e7 tools/libxen/include/xen/api/xen_all.h
--- a/tools/libxen/include/xen/api/xen_all.h    Mon Apr 19 17:57:28 2010 +0100
+++ b/tools/libxen/include/xen/api/xen_all.h    Tue Apr 20 11:10:40 2010 +0200
@@ -37,4 +37,5 @@
 #include <xen/api/xen_vm_power_state.h>
 #include <xen/api/xen_vtpm.h>
 #include <xen/api/xen_xspolicy.h>
+#include <xen/api/xen_cpu_pool.h>
 #endif
diff -r fadf63ab49e7 tools/libxen/include/xen/api/xen_cpu_pool.h
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/tools/libxen/include/xen/api/xen_cpu_pool.h       Tue Apr 20 11:10:40 2010 +0200
@@ -0,0 +1,424 @@
+/*
+ * Copyright (c) 2006-2007, XenSource Inc.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307  USA
+ */
+
+#ifndef XEN_CPU_POOL_H
+#define XEN_CPU_POOL_H
+
+#include <stddef.h>
+#include <stdlib.h>
+
+#include <xen/api/xen_common.h>
+#include <xen/api/xen_string_set.h>
+#include <xen/api/xen_string_string_map.h>
+#include <xen/api/xen_host_cpu_decl.h>
+#include <xen/api/xen_host.h>
+#include <xen/api/xen_vm_decl.h>
+#include <xen/api/xen_vm.h>
+#include <xen/api/xen_cpu_pool_decl.h>
+
+/*
+ * The cpu_pool class.
+ *
+ * Management of CPU pools.
+ */
+
+
+/**
+ * Free the given xen_cpu_pool.  The given handle must have been allocated
+ * by this library.
+ */
+extern void
+xen_cpu_pool_free(xen_cpu_pool cpu_pool);
+
+
+typedef struct xen_cpu_pool_set
+{
+    size_t size;
+    xen_cpu_pool *contents[];
+} xen_cpu_pool_set;
+
+/**
+ * Allocate a xen_cpu_pool_set of the given size.
+ */
+extern xen_cpu_pool_set *
+xen_cpu_pool_set_alloc(size_t size);
+
+/**
+ * Free the given xen_cpu_pool_set.  The given set must have been allocated
+ * by this library.
+ */
+extern void
+xen_cpu_pool_set_free(xen_cpu_pool_set *set);
+
+
+typedef struct xen_cpu_pool_record
+{
+    xen_cpu_pool handle;
+    char *uuid;
+    char *name_label;
+    char *name_description;
+    struct xen_host_record_opt *resident_on;
+    bool auto_power_on;
+    struct xen_vm_record_opt_set *started_vms;
+    int64_t ncpu;
+    char *sched_policy;
+    struct xen_string_set *proposed_cpus;
+    struct xen_host_cpu_record_opt_set *host_cpus;
+    bool activated;
+    xen_string_string_map *other_config;
+} xen_cpu_pool_record;
+
+/**
+ * Allocate a xen_cpu_pool_record.
+ */
+extern xen_cpu_pool_record *
+xen_cpu_pool_record_alloc(void);
+
+/**
+ * Free the given xen_cpu_pool_record, and all referenced values.  The given
+ * record must have been allocated by this library.
+ */
+extern void
+xen_cpu_pool_record_free(xen_cpu_pool_record *record);
+
+
+typedef struct xen_cpu_pool_record_opt
+{
+    bool is_record;
+    union
+    {
+        xen_cpu_pool handle;
+        xen_cpu_pool_record *record;
+    } u;
+} xen_cpu_pool_record_opt;
+
+/**
+ * Allocate a xen_cpu_pool_record_opt.
+ */
+extern xen_cpu_pool_record_opt *
+xen_cpu_pool_record_opt_alloc(void);
+
+/**
+ * Free the given xen_cpu_pool_record_opt, and all referenced values.  The
+ * given record_opt must have been allocated by this library.
+ */
+extern void
+xen_cpu_pool_record_opt_free(xen_cpu_pool_record_opt *record_opt);
+
+
+typedef struct xen_cpu_pool_record_set
+{
+    size_t size;
+    xen_cpu_pool_record *contents[];
+} xen_cpu_pool_record_set;
+
+/**
+ * Allocate a xen_cpu_pool_record_set of the given size.
+ */
+extern xen_cpu_pool_record_set *
+xen_cpu_pool_record_set_alloc(size_t size);
+
+/**
+ * Free the given xen_cpu_pool_record_set, and all referenced values.  The
+ * given set must have been allocated by this library.
+ */
+extern void
+xen_cpu_pool_record_set_free(xen_cpu_pool_record_set *set);
+
+
+
+typedef struct xen_cpu_pool_record_opt_set
+{
+    size_t size;
+    xen_cpu_pool_record_opt *contents[];
+} xen_cpu_pool_record_opt_set;
+
+/**
+ * Allocate a xen_cpu_pool_record_opt_set of the given size.
+ */
+extern xen_cpu_pool_record_opt_set *
+xen_cpu_pool_record_opt_set_alloc(size_t size);
+
+/**
+ * Free the given xen_cpu_pool_record_opt_set, and all referenced values.
+ * The given set must have been allocated by this library.
+ */
+extern void
+xen_cpu_pool_record_opt_set_free(xen_cpu_pool_record_opt_set *set);
+
+
+/**
+ * Get a record containing the current state of the given cpu_pool.
+ */
+extern bool
+xen_cpu_pool_get_record(xen_session *session, xen_cpu_pool_record **result,
+    xen_cpu_pool cpu_pool);
+
+
+/**
+ * Get a reference to the cpu_pool instance with the specified UUID.
+ */
+extern bool
+xen_cpu_pool_get_by_uuid(xen_session *session, xen_cpu_pool *result, char *uuid);
+
+
+/**
+ * Create a new cpu_pool instance, and return its handle.
+ */
+extern bool
+xen_cpu_pool_create(xen_session *session, xen_cpu_pool *result,
+    xen_cpu_pool_record *record);
+
+
+/**
+ * Destroy the specified cpu_pool instance.
+ */
+extern bool
+xen_cpu_pool_destroy(xen_session *session, xen_cpu_pool cpu_pool);
+
+
+/**
+ * Get the uuid field of the given cpu_pool.
+ */
+extern bool
+xen_cpu_pool_get_uuid(xen_session *session, char **result, xen_cpu_pool cpu_pool);
+
+
+/**
+ * Deactivate the given cpu_pool.
+ */
+extern bool
+xen_cpu_pool_deactivate(xen_session *session, xen_cpu_pool cpu_pool);
+
+
+/**
+ * Activate the given cpu_pool.
+ */
+extern bool
+xen_cpu_pool_activate(xen_session *session, xen_cpu_pool cpu_pool);
+
+
+/**
+ * Add a physical cpu to the active pool.
+ */
+extern bool
+xen_cpu_pool_add_host_CPU_live(xen_session *session, xen_cpu_pool cpu_pool,
+    xen_host_cpu host_cpu);
+
+
+/**
+ * Remove a physical cpu from the active pool.
+ */
+extern bool
+xen_cpu_pool_remove_host_CPU_live(xen_session *session, xen_cpu_pool cpu_pool,
+    xen_host_cpu host_cpu);
+
+
+/**
+ * Return a list of all the cpu_pools known to the system.
+ */
+extern bool
+xen_cpu_pool_get_all(xen_session *session, struct xen_cpu_pool_set **result);
+
+
+/**
+ * Get all the cpu_pool instances with the given name label.
+ */
+extern bool
+xen_cpu_pool_get_by_name_label(xen_session *session,
+    struct xen_cpu_pool_set **result, char *label);
+
+
+/**
+ * Get activation state of given cpu_pool.
+ */
+extern bool
+xen_cpu_pool_get_activated(xen_session *session, bool *result,
+    xen_cpu_pool cpu_pool);
+
+
+/**
+ * Get auto_power_on option of given cpu_pool.
+ */
+extern bool
+xen_cpu_pool_get_auto_power_on(xen_session *session, bool *result,
+    xen_cpu_pool cpu_pool);
+
+
+/**
+ * Get host_cpu refs of all physical cpus of cpu_pool.
+ */
+extern bool
+xen_cpu_pool_get_host_CPUs(xen_session *session, struct xen_host_cpu_set **result,
+    xen_cpu_pool cpu_pool);
+
+
+/**
+ * Get name description field of given cpu_pool.
+ */
+extern bool
+xen_cpu_pool_get_name_description(xen_session *session, char **result,
+    xen_cpu_pool cpu_pool);
+
+
+/**
+ * Get name label field of given cpu_pool.
+ */
+extern bool
+xen_cpu_pool_get_name_label(xen_session *session, char **result,
+    xen_cpu_pool cpu_pool);
+
+
+/**
+ * Get count of physical cpus to attach to cpu_pool on activation.
+ */
+extern bool
+xen_cpu_pool_get_ncpu(xen_session *session, int64_t *result,
+    xen_cpu_pool cpu_pool);
+
+
+/**
+ * Get proposed_CPUs field of given cpu_pool.
+ */
+extern bool
+xen_cpu_pool_get_proposed_CPUs(xen_session *session, struct xen_string_set **result,
+    xen_cpu_pool cpu_pool);
+
+
+/**
+ * Get the other_config field of the given cpu_pool.
+ */
+extern bool
+xen_cpu_pool_get_other_config(xen_session *session, xen_string_string_map **result,
+    xen_cpu_pool cpu_pool);
+
+
+/**
+ * Get host the cpu_pool is resident on.
+ */
+extern bool
+xen_cpu_pool_get_resident_on(xen_session *session, xen_host *result,
+    xen_cpu_pool cpu_pool);
+
+
+/**
+ * Get sched_policy field of given cpu_pool.
+ */
+extern bool
+xen_cpu_pool_get_sched_policy(xen_session *session, char **result,
+    xen_cpu_pool cpu_pool);
+
+
+/**
+ * Get set of started vms in given cpu_pool.
+ */
+extern bool
+xen_cpu_pool_get_started_VMs(xen_session *session, xen_vm_set **result,
+    xen_cpu_pool cpu_pool);
+
+
+/**
+ *  Set auto_power_on field of given cpu_pool.
+ */
+extern bool
+xen_cpu_pool_set_auto_power_on(xen_session *session, xen_cpu_pool cpu_pool,
+    bool auto_power_on);
+
+
+/**
+ * Set proposed_CPUs field of given cpu_pool.
+ */
+extern bool
+xen_cpu_pool_set_proposed_CPUs(xen_session *session, xen_cpu_pool cpu_pool,
+    xen_string_set *proposed_cpus);
+
+
+/**
+ * Add a proposed cpu to proposed_CPUs field of given cpu_pool.
+ */
+extern bool
+xen_cpu_pool_add_to_proposed_CPUs(xen_session *session, xen_cpu_pool cpu_pool,
+    char* proposed_cpu);
+
+
+/**
+ * Remove a proposed cpu from proposed_CPUs field of given cpu_pool.
+ */
+extern bool
+xen_cpu_pool_remove_from_proposed_CPUs(xen_session *session, xen_cpu_pool cpu_pool,
+    char* proposed_cpu);
+
+
+/**
+ * Set name_label field of given cpu_pool.
+ */
+extern bool
+xen_cpu_pool_set_name_label(xen_session *session, xen_cpu_pool cpu_pool,
+    char *label);
+
+
+/**
+ * Set name_description field of given cpu_pool.
+ */
+extern bool
+xen_cpu_pool_set_name_description(xen_session *session, xen_cpu_pool cpu_pool,
+    char *descr);
+
+
+/**
+ * Set ncpu field of given cpu_pool.
+ */
+extern bool
+xen_cpu_pool_set_ncpu(xen_session *session, xen_cpu_pool cpu_pool, int64_t ncpu);
+
+
+/**
+ * Set the other_config field of the given cpu_pool.
+ */
+extern bool
+xen_cpu_pool_set_other_config(xen_session *session, xen_cpu_pool cpu_pool,
+    xen_string_string_map *other_config);
+
+
+/**
+ * Add the given key-value pair to the other_config field of the given
+ * cpu_pool.
+ */
+extern bool
+xen_cpu_pool_add_to_other_config(xen_session *session, xen_cpu_pool cpu_pool,
+    char *key, char *value);
+
+
+/**
+ * Remove the given key and its corresponding value from the
+ * other_config field of the given cpu_pool. If the key is not in that Map, then
+ * do nothing.
+ */
+extern bool
+xen_cpu_pool_remove_from_other_config(xen_session *session, xen_cpu_pool cpu_pool,
+    char *key);
+
+/**
+ * Set sched_policy of given cpu_pool.
+ */
+extern bool
+xen_cpu_pool_set_sched_policy(xen_session *session, xen_cpu_pool cpu_pool,
+    char *sched_policy);
+
+
+#endif
diff -r fadf63ab49e7 tools/libxen/include/xen/api/xen_cpu_pool_decl.h
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/tools/libxen/include/xen/api/xen_cpu_pool_decl.h  Tue Apr 20 11:10:40 2010 +0200
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2006-2007, XenSource Inc.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307  USA
+ */
+
+#ifndef XEN_CPU_POOL_DECL_H
+#define XEN_CPU_POOL_DECL_H
+
+typedef void *xen_cpu_pool;
+
+struct xen_cpu_pool_set;
+struct xen_cpu_pool_record;
+struct xen_cpu_pool_record_set;
+struct xen_cpu_pool_record_opt;
+struct xen_cpu_pool_record_opt_set;
+
+#endif
diff -r fadf63ab49e7 tools/libxen/include/xen/api/xen_host.h
--- a/tools/libxen/include/xen/api/xen_host.h   Mon Apr 19 17:57:28 2010 +0100
+++ b/tools/libxen/include/xen/api/xen_host.h   Tue Apr 20 11:10:40 2010 +0200
@@ -29,7 +29,7 @@
 #include <xen/api/xen_string_set.h>
 #include <xen/api/xen_string_string_map.h>
 #include <xen/api/xen_vm_decl.h>
-
+#include <xen/api/xen_cpu_pool_decl.h>
 
 /*
  * The host class.
@@ -91,6 +91,7 @@ typedef struct xen_host_record
     struct xen_pbd_record_opt_set *pbds;
     struct xen_host_cpu_record_opt_set *host_cpus;
     struct xen_host_metrics_record_opt *metrics;
+    struct xen_cpu_pool_record_opt_set *resident_cpu_pools;
 } xen_host_record;
 
 /**
@@ -494,4 +495,11 @@ xen_host_get_all(xen_session *session, s
 xen_host_get_all(xen_session *session, struct xen_host_set **result);
 
 
+/**
+ * Get list of resident cpu pools.
+ */
+extern bool
+xen_host_get_resident_cpu_pools(xen_session *session, struct xen_cpu_pool_set **result,
+       xen_host host);
+
 #endif
diff -r fadf63ab49e7 tools/libxen/include/xen/api/xen_host_cpu.h
--- a/tools/libxen/include/xen/api/xen_host_cpu.h       Mon Apr 19 17:57:28 2010 +0100
+++ b/tools/libxen/include/xen/api/xen_host_cpu.h       Tue Apr 20 11:10:40 2010 +0200
@@ -22,6 +22,7 @@
 #include <xen/api/xen_common.h>
 #include <xen/api/xen_host_cpu_decl.h>
 #include <xen/api/xen_host_decl.h>
+#include <xen/api/xen_cpu_pool_decl.h>
 
 
 /*
@@ -72,6 +73,7 @@ typedef struct xen_host_cpu_record
     char *flags;
     char *features;
     double utilisation;
+    struct xen_cpu_pool_record_opt_set *cpu_pools;
 } xen_host_cpu_record;
 
 /**
@@ -244,4 +246,18 @@ xen_host_cpu_get_all(xen_session *sessio
 xen_host_cpu_get_all(xen_session *session, struct xen_host_cpu_set **result);
 
 
+/**
+ * Get the ref of the cpu_pool to which the host_cpu belongs.
+ */
+extern bool
+xen_host_cpu_get_cpu_pool(xen_session *session, struct xen_cpu_pool_set **result, xen_host_cpu host_cpu);
+
+
+/**
+ * Return a list of all the host_cpus not assigned to a cpu_pool.
+ */
+extern bool
+xen_host_cpu_get_unassigned_cpus(xen_session *session, struct xen_host_cpu_set **result);
+
+
 #endif
diff -r fadf63ab49e7 tools/libxen/include/xen/api/xen_vm.h
--- a/tools/libxen/include/xen/api/xen_vm.h     Mon Apr 19 17:57:28 2010 +0100
+++ b/tools/libxen/include/xen/api/xen_vm.h     Tue Apr 20 11:10:40 2010 +0200
@@ -34,6 +34,7 @@
 #include <xen/api/xen_vm_metrics_decl.h>
 #include <xen/api/xen_vm_power_state.h>
 #include <xen/api/xen_vtpm_decl.h>
+#include <xen/api/xen_cpu_pool_decl.h>
 
 
 /*
@@ -113,6 +114,8 @@ typedef struct xen_vm_record
     struct xen_vm_metrics_record_opt *metrics;
     struct xen_vm_guest_metrics_record_opt *guest_metrics;
     char *security_label;
+    char *pool_name;
+    struct xen_cpu_pool_record_opt_set *cpu_pool;
 } xen_vm_record;
 
 /**
@@ -905,4 +908,33 @@ extern bool
 extern bool
 xen_vm_get_security_label(xen_session *session, char **result, xen_vm vm);
 
+
+/**
+ * Get the cpu_pool ref field of a domain.
+ */
+extern bool
+xen_vm_get_cpu_pool(xen_session *session, struct xen_cpu_pool_set **result, xen_vm vm);
+
+
+/**
+ * Get the pool_name field of a domain.
+ */
+extern bool
+xen_vm_get_pool_name(xen_session *session, char **result, xen_vm vm);
+
+
+/**
+ * Set the pool_name field of a domain.
+ */
+extern bool
+xen_vm_set_pool_name(xen_session *session, xen_vm vm, char *pool_name);
+
+
+/**
+ * Migrate the VM to another cpu_pool (on the same host). This can only be
+ * called when the specified VM is in the Running state.
+ */
+extern bool
+xen_vm_cpu_pool_migrate(xen_session *session, xen_vm vm, xen_cpu_pool cpu_pool);
+
 #endif
diff -r fadf63ab49e7 tools/libxen/src/xen_cpu_pool.c
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/tools/libxen/src/xen_cpu_pool.c   Tue Apr 20 11:10:40 2010 +0200
@@ -0,0 +1,671 @@
+/*
+ * Copyright (c) 2006-2007, XenSource Inc.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307  USA
+ */
+
+
+#include <stddef.h>
+#include <stdlib.h>
+
+#include "xen_internal.h"
+#include <xen/api/xen_common.h>
+#include <xen/api/xen_cpu_pool.h>
+#include <xen/api/xen_host_cpu.h>
+
+XEN_FREE(xen_cpu_pool)
+XEN_SET_ALLOC_FREE(xen_cpu_pool)
+XEN_ALLOC(xen_cpu_pool_record)
+XEN_SET_ALLOC_FREE(xen_cpu_pool_record)
+XEN_ALLOC(xen_cpu_pool_record_opt)
+XEN_RECORD_OPT_FREE(xen_cpu_pool)
+XEN_SET_ALLOC_FREE(xen_cpu_pool_record_opt)
+
+
+static const struct_member xen_cpu_pool_record_struct_members[] =
+    {
+        { .key = "uuid",
+          .type = &abstract_type_string,
+          .offset = offsetof(xen_cpu_pool_record, uuid) },
+        { .key = "name_label",
+          .type = &abstract_type_string,
+          .offset = offsetof(xen_cpu_pool_record, name_label) },
+        { .key = "name_description",
+          .type = &abstract_type_string,
+          .offset = offsetof(xen_cpu_pool_record, name_description) },
+        { .key = "resident_on",
+          .type = &abstract_type_ref,
+          .offset = offsetof(xen_cpu_pool_record, resident_on) },
+        { .key = "auto_power_on",
+          .type = &abstract_type_bool,
+          .offset = offsetof(xen_cpu_pool_record, auto_power_on) },
+        { .key = "started_VMs",
+          .type = &abstract_type_ref_set,
+          .offset = offsetof(xen_cpu_pool_record, started_vms) },
+        { .key = "ncpu",
+          .type = &abstract_type_int,
+          .offset = offsetof(xen_cpu_pool_record, ncpu) },
+        { .key = "sched_policy",
+          .type = &abstract_type_string,
+          .offset = offsetof(xen_cpu_pool_record, sched_policy) },
+        { .key = "proposed_CPUs",
+          .type = &abstract_type_string_set,
+          .offset = offsetof(xen_cpu_pool_record, proposed_cpus) },
+        { .key = "host_CPUs",
+          .type = &abstract_type_ref_set,
+          .offset = offsetof(xen_cpu_pool_record, host_cpus) },
+        { .key = "activated",
+          .type = &abstract_type_bool,
+          .offset = offsetof(xen_cpu_pool_record, activated) },
+        { .key = "other_config",
+          .type = &abstract_type_string_string_map,
+          .offset = offsetof(xen_cpu_pool_record, other_config) },
+    };
+
+
+const abstract_type xen_cpu_pool_record_abstract_type_ =
+    {
+       .typename = STRUCT,
+       .struct_size = sizeof(xen_cpu_pool_record),
+       .member_count =
+           sizeof(xen_cpu_pool_record_struct_members) / sizeof(struct_member),
+       .members = xen_cpu_pool_record_struct_members
+    };
+
+
+void
+xen_cpu_pool_record_free(xen_cpu_pool_record *record)
+{
+    if (record == NULL)
+    {
+        return;
+    }
+    free(record->handle);
+    free(record->uuid);
+    free(record->name_label);
+    free(record->name_description);
+    xen_host_record_opt_free(record->resident_on);
+    xen_vm_record_opt_set_free(record->started_vms);
+    free(record->sched_policy);
+    xen_string_set_free(record->proposed_cpus);
+    xen_host_cpu_record_opt_set_free(record->host_cpus);
+    xen_string_string_map_free(record->other_config);
+    free(record);
+}
+
+
+bool
+xen_cpu_pool_get_record(xen_session *session, xen_cpu_pool_record **result,
+    xen_cpu_pool cpu_pool)
+{
+    abstract_value param_values[] =
+        {
+            { .type = &abstract_type_string,
+              .u.string_val = cpu_pool }
+        };
+
+    abstract_type result_type = xen_cpu_pool_record_abstract_type_;
+
+    *result = NULL;
+    XEN_CALL_("cpu_pool.get_record");
+
+    if (session->ok)
+    {
+       (*result)->handle = xen_strdup_((*result)->uuid);
+    }
+
+    return session->ok;
+}
+
+
+bool
+xen_cpu_pool_get_by_uuid(xen_session *session, xen_cpu_pool *result, char *uuid)
+{
+    abstract_value param_values[] =
+        {
+            { .type = &abstract_type_string,
+              .u.string_val = uuid }
+        };
+
+    abstract_type result_type = abstract_type_string;
+
+    *result = NULL;
+    XEN_CALL_("cpu_pool.get_by_uuid");
+    return session->ok;
+}
+
+
+bool
+xen_cpu_pool_create(xen_session *session, xen_cpu_pool *result,
+    xen_cpu_pool_record *record)
+{
+    abstract_value param_values[] =
+        {
+            { .type = &xen_cpu_pool_record_abstract_type_,
+              .u.struct_val = record }
+        };
+
+    abstract_type result_type = abstract_type_string;
+
+    *result = NULL;
+    XEN_CALL_("cpu_pool.create");
+    return session->ok;
+}
+
+
+bool
+xen_cpu_pool_destroy(xen_session *session, xen_cpu_pool cpu_pool)
+{
+    abstract_value param_values[] =
+        {
+            { .type = &abstract_type_string,
+              .u.string_val = cpu_pool }
+        };
+
+    xen_call_(session, "cpu_pool.destroy", param_values, 1, NULL, NULL);
+    return session->ok;
+}
+
+
+bool
+xen_cpu_pool_get_uuid(xen_session *session, char **result, xen_cpu_pool cpu_pool)
+{
+    abstract_value param_values[] =
+        {
+            { .type = &abstract_type_string,
+              .u.string_val = cpu_pool }
+        };
+
+    abstract_type result_type = abstract_type_string;
+
+    *result = NULL;
+    XEN_CALL_("cpu_pool.get_uuid");
+    return session->ok;
+}
+
+
+bool
+xen_cpu_pool_deactivate(xen_session *session, xen_cpu_pool cpu_pool)
+{
+    abstract_value param_values[] =
+        {
+            { .type = &abstract_type_string,
+              .u.string_val = cpu_pool },
+        };
+
+    xen_call_(session, "cpu_pool.deactivate", param_values, 1, NULL, NULL);
+    return session->ok;
+}
+
+
+bool
+xen_cpu_pool_activate(xen_session *session, xen_cpu_pool cpu_pool)
+{
+    abstract_value param_values[] =
+        {
+            { .type = &abstract_type_string,
+              .u.string_val = cpu_pool },
+        };
+
+    xen_call_(session, "cpu_pool.activate", param_values, 1, NULL, NULL);
+    return session->ok;
+}
+
+
+bool
+xen_cpu_pool_add_host_CPU_live(xen_session *session, xen_cpu_pool cpu_pool,
+    xen_host_cpu host_cpu)
+{
+    abstract_value param_values[] =
+        {
+            { .type = &abstract_type_string,
+              .u.string_val = cpu_pool },
+            { .type = &abstract_type_string,
+              .u.string_val = host_cpu },
+        };
+
+    xen_call_(session, "cpu_pool.add_host_CPU_live", param_values, 2, NULL, NULL);
+    return session->ok;
+}
+
+
+bool
+xen_cpu_pool_remove_host_CPU_live(xen_session *session, xen_cpu_pool cpu_pool,
+    xen_host_cpu host_cpu)
+{
+    abstract_value param_values[] =
+        {
+            { .type = &abstract_type_string,
+              .u.string_val = cpu_pool },
+            { .type = &abstract_type_string,
+              .u.string_val = host_cpu },
+        };
+
+    xen_call_(session, "cpu_pool.remove_host_CPU_live", param_values, 2, NULL, NULL);
+    return session->ok;
+}
+
+
+bool
+xen_cpu_pool_get_all(xen_session *session, struct xen_cpu_pool_set **result)
+{
+    abstract_type result_type = abstract_type_string_set;
+
+    *result = NULL;
+    xen_call_(session, "cpu_pool.get_all", NULL, 0, &result_type, result);
+    return session->ok;
+}
+
+
+bool
+xen_cpu_pool_get_by_name_label(xen_session *session,
+    struct xen_cpu_pool_set **result, char *label)
+{
+    abstract_value param_values[] =
+        {
+            { .type = &abstract_type_string,
+              .u.string_val = label }
+        };
+
+    abstract_type result_type = abstract_type_string_set;
+
+    *result = NULL;
+    XEN_CALL_("cpu_pool.get_by_name_label");
+    return session->ok;
+}
+
+
+bool
+xen_cpu_pool_get_activated(xen_session *session, bool *result,
+    xen_cpu_pool cpu_pool)
+{
+    abstract_value param_values[] =
+        {
+            { .type = &abstract_type_string,
+              .u.string_val = cpu_pool }
+        };
+
+    abstract_type result_type = abstract_type_bool;
+
+    XEN_CALL_("cpu_pool.get_activated");
+    return session->ok;
+}
+
+
+bool
+xen_cpu_pool_get_auto_power_on(xen_session *session, bool *result,
+    xen_cpu_pool cpu_pool)
+{
+    abstract_value param_values[] =
+        {
+            { .type = &abstract_type_string,
+              .u.string_val = cpu_pool }
+        };
+
+    abstract_type result_type = abstract_type_bool;
+
+    XEN_CALL_("cpu_pool.get_auto_power_on");
+    return session->ok;
+}
+
+
+bool
+xen_cpu_pool_get_host_CPUs(xen_session *session, struct xen_host_cpu_set **result,
+    xen_cpu_pool cpu_pool)
+{
+    abstract_value param_values[] =
+        {
+            { .type = &abstract_type_string,
+              .u.string_val = cpu_pool }
+        };
+
+    abstract_type result_type = abstract_type_string_set;
+
+    *result = NULL;
+    XEN_CALL_("cpu_pool.get_host_CPUs");
+    return session->ok;
+}
+
+
+bool
+xen_cpu_pool_get_name_description(xen_session *session, char **result,
+    xen_cpu_pool cpu_pool)
+{
+    abstract_value param_values[] =
+        {
+            { .type = &abstract_type_string,
+              .u.string_val = cpu_pool }
+        };
+
+    abstract_type result_type = abstract_type_string;
+
+    *result = NULL;
+    XEN_CALL_("cpu_pool.get_name_description");
+    return session->ok;
+}
+
+
+bool
+xen_cpu_pool_get_name_label(xen_session *session, char **result,
+    xen_cpu_pool cpu_pool)
+{
+    abstract_value param_values[] =
+        {
+            { .type = &abstract_type_string,
+              .u.string_val = cpu_pool }
+        };
+
+    abstract_type result_type = abstract_type_string;
+
+    *result = NULL;
+    XEN_CALL_("cpu_pool.get_name_label");
+    return session->ok;
+}
+
+
+bool
+xen_cpu_pool_get_ncpu(xen_session *session, int64_t *result,
+    xen_cpu_pool cpu_pool)
+{
+    abstract_value param_values[] =
+        {
+            { .type = &abstract_type_string,
+              .u.string_val = cpu_pool }
+        };
+
+    abstract_type result_type = abstract_type_int;
+
+    XEN_CALL_("cpu_pool.get_ncpu");
+    return session->ok;
+}
+
+
+bool
+xen_cpu_pool_get_proposed_CPUs(xen_session *session, struct xen_string_set **result,
+    xen_cpu_pool cpu_pool)
+{
+    abstract_value param_values[] =
+        {
+            { .type = &abstract_type_string,
+              .u.string_val = cpu_pool }
+        };
+
+    abstract_type result_type = abstract_type_string_set;
+
+    *result = NULL;
+    XEN_CALL_("cpu_pool.get_proposed_CPUs");
+    return session->ok;
+}
+
+
+bool
+xen_cpu_pool_get_other_config(xen_session *session, xen_string_string_map **result,
+    xen_cpu_pool cpu_pool)
+{
+    abstract_value param_values[] =
+        {
+            { .type = &abstract_type_string,
+              .u.string_val = cpu_pool }
+        };
+
+    abstract_type result_type = abstract_type_string_string_map;
+
+    *result = NULL;
+    XEN_CALL_("cpu_pool.get_other_config");
+    return session->ok;
+}
+
+
+bool
+xen_cpu_pool_get_resident_on(xen_session *session, xen_host *result,
+    xen_cpu_pool cpu_pool)
+{
+    abstract_value param_values[] =
+        {
+            { .type = &abstract_type_string,
+              .u.string_val = cpu_pool }
+        };
+
+    abstract_type result_type = abstract_type_string;
+
+    *result = NULL;
+    XEN_CALL_("cpu_pool.get_resident_on");
+    return session->ok;
+}
+
+
+bool
+xen_cpu_pool_get_sched_policy(xen_session *session, char **result,
+    xen_cpu_pool cpu_pool)
+{
+    abstract_value param_values[] =
+        {
+            { .type = &abstract_type_string,
+              .u.string_val = cpu_pool }
+        };
+
+    abstract_type result_type = abstract_type_string;
+
+    *result = NULL;
+    XEN_CALL_("cpu_pool.get_sched_policy");
+    return session->ok;
+}
+
+
+bool
+xen_cpu_pool_get_started_VMs(xen_session *session, xen_vm_set **result,
+    xen_cpu_pool cpu_pool)
+{
+    abstract_value param_values[] =
+        {
+            { .type = &abstract_type_string,
+              .u.string_val = cpu_pool }
+        };
+
+    abstract_type result_type = abstract_type_string_set;
+
+    *result = NULL;
+    XEN_CALL_("cpu_pool.get_started_VMs");
+    return session->ok;
+}
+
+
+bool
+xen_cpu_pool_set_auto_power_on(xen_session *session, xen_cpu_pool cpu_pool,
+    bool auto_power_on)
+{
+    abstract_value param_values[] =
+        {
+            { .type = &abstract_type_string,
+              .u.string_val = cpu_pool },
+            { .type = &abstract_type_bool,
+              .u.bool_val = auto_power_on }
+        };
+
+    xen_call_(session, "cpu_pool.set_auto_power_on", param_values, 2, NULL, NULL);
+    return session->ok;
+}
+
+
+bool
+xen_cpu_pool_set_proposed_CPUs(xen_session *session, xen_cpu_pool cpu_pool,
+    xen_string_set *proposed_cpus)
+{
+    abstract_value param_values[] =
+        {
+            { .type = &abstract_type_string,
+              .u.string_val = cpu_pool },
+            { .type = &abstract_type_string_set,
+              .u.set_val = (arbitrary_set *)proposed_cpus }
+        };
+
+    xen_call_(session, "cpu_pool.set_proposed_CPUs", param_values, 2, NULL, NULL);
+    return session->ok;
+}
+
+
+bool
+xen_cpu_pool_add_to_proposed_CPUs(xen_session *session, xen_cpu_pool cpu_pool,
+    char* proposed_cpu)
+{
+    abstract_value param_values[] =
+        {
+            { .type = &abstract_type_string,
+              .u.string_val = cpu_pool },
+            { .type = &abstract_type_string,
+              .u.string_val = proposed_cpu }
+        };
+
+    xen_call_(session, "cpu_pool.add_to_proposed_CPUs", param_values, 2, NULL, NULL);
+    return session->ok;
+}
+
+
+bool
+xen_cpu_pool_remove_from_proposed_CPUs(xen_session *session, xen_cpu_pool cpu_pool,
+    char* proposed_cpu)
+{
+    abstract_value param_values[] =
+        {
+            { .type = &abstract_type_string,
+              .u.string_val = cpu_pool },
+            { .type = &abstract_type_string,
+              .u.string_val = proposed_cpu }
+        };
+
+    xen_call_(session, "cpu_pool.remove_from_proposed_CPUs", param_values, 2, NULL, NULL);
+    return session->ok;
+}
+
+
+bool
+xen_cpu_pool_set_name_label(xen_session *session, xen_cpu_pool cpu_pool,
+    char *label)
+{
+    abstract_value param_values[] =
+        {
+            { .type = &abstract_type_string,
+              .u.string_val = cpu_pool },
+            { .type = &abstract_type_string,
+              .u.string_val = label }
+        };
+
+    xen_call_(session, "cpu_pool.set_name_label", param_values, 2, NULL, NULL);
+    return session->ok;
+}
+
+
+bool
+xen_cpu_pool_set_name_description(xen_session *session, xen_cpu_pool cpu_pool,
+    char *descr)
+{
+    abstract_value param_values[] =
+        {
+            { .type = &abstract_type_string,
+              .u.string_val = cpu_pool },
+            { .type = &abstract_type_string,
+              .u.string_val = descr }
+        };
+
+    xen_call_(session, "cpu_pool.set_name_description", param_values, 2, NULL, NULL);
+    return session->ok;
+}
+
+
+bool
+xen_cpu_pool_set_ncpu(xen_session *session, xen_cpu_pool cpu_pool, int64_t ncpu)
+{
+    abstract_value param_values[] =
+        {
+            { .type = &abstract_type_string,
+              .u.string_val = cpu_pool },
+            { .type = &abstract_type_int,
+              .u.int_val = ncpu }
+        };
+
+    xen_call_(session, "cpu_pool.set_ncpu", param_values, 2, NULL, NULL);
+    return session->ok;
+}
+
+
+bool
+xen_cpu_pool_set_other_config(xen_session *session, xen_cpu_pool cpu_pool,
+    xen_string_string_map *other_config)
+{
+    abstract_value param_values[] =
+        {
+            { .type = &abstract_type_string,
+              .u.string_val = cpu_pool },
+            { .type = &abstract_type_string_string_map,
+              .u.set_val = (arbitrary_set *)other_config }
+        };
+
+    xen_call_(session, "cpu_pool.set_other_config", param_values, 2, NULL, NULL);
+    return session->ok;
+}
+
+
+bool
+xen_cpu_pool_add_to_other_config(xen_session *session, xen_cpu_pool cpu_pool,
+    char *key, char *value)
+{
+    abstract_value param_values[] =
+        {
+            { .type = &abstract_type_string,
+              .u.string_val = cpu_pool },
+            { .type = &abstract_type_string,
+              .u.string_val = key },
+            { .type = &abstract_type_string,
+              .u.string_val = value }
+        };
+
+    xen_call_(session, "cpu_pool.add_to_other_config", param_values, 3, NULL, NULL);
+    return session->ok;
+}
+
+
+bool
+xen_cpu_pool_remove_from_other_config(xen_session *session, xen_cpu_pool cpu_pool,
+    char *key)
+{
+    abstract_value param_values[] =
+        {
+            { .type = &abstract_type_string,
+              .u.string_val = cpu_pool },
+            { .type = &abstract_type_string,
+              .u.string_val = key }
+        };
+
+    xen_call_(session, "cpu_pool.remove_from_other_config", param_values, 2, NULL, NULL);
+    return session->ok;
+}
+
+
+bool
+xen_cpu_pool_set_sched_policy(xen_session *session, xen_cpu_pool cpu_pool,
+    char *sched_policy)
+{
+    abstract_value param_values[] =
+        {
+            { .type = &abstract_type_string,
+              .u.string_val = cpu_pool },
+            { .type = &abstract_type_string,
+              .u.string_val = sched_policy }
+        };
+
+    xen_call_(session, "cpu_pool.set_sched_policy", param_values, 2, NULL, NULL);
+    return session->ok;
+}
+
diff -r fadf63ab49e7 tools/libxen/src/xen_host.c
--- a/tools/libxen/src/xen_host.c       Mon Apr 19 17:57:28 2010 +0100
+++ b/tools/libxen/src/xen_host.c       Tue Apr 20 11:10:40 2010 +0200
@@ -30,6 +30,7 @@
 #include <xen/api/xen_sr.h>
 #include <xen/api/xen_string_string_map.h>
 #include <xen/api/xen_vm.h>
+#include <xen/api/xen_cpu_pool.h>
 
 
 XEN_FREE(xen_host)
@@ -108,7 +109,10 @@ static const struct_member xen_host_reco
           .offset = offsetof(xen_host_record, host_cpus) },
         { .key = "metrics",
           .type = &abstract_type_ref,
-          .offset = offsetof(xen_host_record, metrics) }
+          .offset = offsetof(xen_host_record, metrics) },
+        { .key = "resident_cpu_pools",
+          .type = &abstract_type_ref_set,
+          .offset = offsetof(xen_host_record, resident_cpu_pools) }
     };
 
 const abstract_type xen_host_record_abstract_type_ =
@@ -148,6 +152,7 @@ xen_host_record_free(xen_host_record *re
     xen_pbd_record_opt_set_free(record->pbds);
     xen_host_cpu_record_opt_set_free(record->host_cpus);
     xen_host_metrics_record_opt_free(record->metrics);
+    xen_cpu_pool_record_opt_set_free(record->resident_cpu_pools);
     free(record);
 }
 
@@ -889,3 +894,22 @@ xen_host_get_uuid(xen_session *session, 
     XEN_CALL_("host.get_uuid");
     return session->ok;
 }
+
+
+bool
+xen_host_get_resident_cpu_pools(xen_session *session, struct xen_cpu_pool_set **result,
+        xen_host host)
+{
+    abstract_value param_values[] =
+        {
+            { .type = &abstract_type_string,
+              .u.string_val = host }
+        };
+
+    abstract_type result_type = abstract_type_string_set;
+
+    *result = NULL;
+    XEN_CALL_("host.get_resident_cpu_pools");
+    return session->ok;
+}
+
diff -r fadf63ab49e7 tools/libxen/src/xen_host_cpu.c
--- a/tools/libxen/src/xen_host_cpu.c   Mon Apr 19 17:57:28 2010 +0100
+++ b/tools/libxen/src/xen_host_cpu.c   Tue Apr 20 11:10:40 2010 +0200
@@ -24,6 +24,7 @@
 #include <xen/api/xen_common.h>
 #include <xen/api/xen_host.h>
 #include <xen/api/xen_host_cpu.h>
+#include <xen/api/xen_cpu_pool.h>
 
 
 XEN_FREE(xen_host_cpu)
@@ -66,7 +67,10 @@ static const struct_member xen_host_cpu_
           .offset = offsetof(xen_host_cpu_record, features) },
         { .key = "utilisation",
           .type = &abstract_type_float,
-          .offset = offsetof(xen_host_cpu_record, utilisation) }
+          .offset = offsetof(xen_host_cpu_record, utilisation) },
+        { .key = "cpu_pool",
+          .type = &abstract_type_ref_set,
+          .offset = offsetof(xen_host_cpu_record, cpu_pools) },
     };
 
 const abstract_type xen_host_cpu_record_abstract_type_ =
@@ -94,6 +98,7 @@ xen_host_cpu_record_free(xen_host_cpu_re
     free(record->stepping);
     free(record->flags);
     free(record->features);
+    xen_cpu_pool_record_opt_set_free(record->cpu_pools);
     free(record);
 }
 
@@ -315,3 +320,34 @@ xen_host_cpu_get_uuid(xen_session *sessi
     XEN_CALL_("host_cpu.get_uuid");
     return session->ok;
 }
+
+
+bool
+xen_host_cpu_get_cpu_pool(xen_session *session, struct xen_cpu_pool_set **result, xen_host_cpu host_cpu)
+{
+    abstract_value param_values[] =
+        {
+            { .type = &abstract_type_string,
+              .u.string_val = host_cpu }
+        };
+
+    abstract_type result_type = abstract_type_string_set;
+
+    *result = NULL;
+    XEN_CALL_("host_cpu.get_cpu_pool");
+    return session->ok;
+}
+
+
+bool
+xen_host_cpu_get_unassigned_cpus(xen_session *session, struct xen_host_cpu_set **result)
+{
+    abstract_type result_type = abstract_type_string_set;
+
+    *result = NULL;
+    xen_call_(session, "host_cpu.get_unassigned_cpus", NULL, 0, &result_type, result);
+    return session->ok;
+}
+
+
+
diff -r fadf63ab49e7 tools/libxen/src/xen_vm.c
--- a/tools/libxen/src/xen_vm.c Mon Apr 19 17:57:28 2010 +0100
+++ b/tools/libxen/src/xen_vm.c Tue Apr 20 11:10:40 2010 +0200
@@ -36,6 +36,7 @@
 #include <xen/api/xen_vm_guest_metrics.h>
 #include <xen/api/xen_vm_metrics.h>
 #include <xen/api/xen_vtpm.h>
+#include <xen/api/xen_cpu_pool.h>
 
 
 XEN_FREE(xen_vm)
@@ -165,7 +166,13 @@ static const struct_member xen_vm_record
           .offset = offsetof(xen_vm_record, guest_metrics) },
         { .key = "security_label",
           .type = &abstract_type_string,
-          .offset = offsetof(xen_vm_record, security_label) }
+          .offset = offsetof(xen_vm_record, security_label) },
+        { .key = "pool_name",
+          .type = &abstract_type_string,
+          .offset = offsetof(xen_vm_record, pool_name) },
+        { .key = "cpu_pool",
+          .type = &abstract_type_ref_set,
+          .offset = offsetof(xen_vm_record, cpu_pool) },
     };
 
 const abstract_type xen_vm_record_abstract_type_ =
@@ -209,6 +216,7 @@ xen_vm_record_free(xen_vm_record *record
     xen_string_string_map_free(record->other_config);
     xen_vm_metrics_record_opt_free(record->metrics);
     xen_vm_guest_metrics_record_opt_free(record->guest_metrics);
+    xen_cpu_pool_record_opt_set_free(record->cpu_pool);
     free(record->security_label);
     free(record);
 }
@@ -1781,3 +1789,71 @@ xen_vm_get_security_label(xen_session *s
     XEN_CALL_("VM.get_security_label");
     return session->ok;
 }
+
+
+bool
+xen_vm_get_cpu_pool(xen_session *session, struct xen_cpu_pool_set **result, xen_vm vm)
+{
+    abstract_value param_values[] =
+        {
+            { .type = &abstract_type_string,
+              .u.string_val = vm },
+        };
+
+    abstract_type result_type = abstract_type_string_set;
+
+    *result = NULL;
+    XEN_CALL_("VM.get_cpu_pool");
+    return session->ok;
+}
+
+
+bool
+xen_vm_get_pool_name(xen_session *session, char **result, xen_vm vm)
+{
+    abstract_value param_values[] =
+        {
+            { .type = &abstract_type_string,
+              .u.string_val = vm },
+        };
+
+    abstract_type result_type = abstract_type_string;
+
+    *result = NULL;
+    XEN_CALL_("VM.get_pool_name");
+    return session->ok;
+}
+
+
+bool
+xen_vm_set_pool_name(xen_session *session, xen_vm vm, char *pool_name)
+{
+    abstract_value param_values[] =
+        {
+            { .type = &abstract_type_string,
+              .u.string_val = vm },
+            { .type = &abstract_type_string,
+              .u.string_val = pool_name }
+        };
+
+    xen_call_(session, "VM.set_pool_name", param_values, 2, NULL, NULL);
+    return session->ok;
+}
+
+
+bool
+xen_vm_cpu_pool_migrate(xen_session *session, xen_vm vm, xen_cpu_pool cpu_pool)
+{
+    abstract_value param_values[] =
+        {
+            { .type = &abstract_type_string,
+              .u.string_val = vm },
+            { .type = &abstract_type_string,
+              .u.string_val = cpu_pool }
+        };
+
+    xen_call_(session, "VM.cpu_pool_migrate", param_values, 2, NULL, NULL);
+    return session->ok;
+}
+
+
diff -r fadf63ab49e7 tools/libxen/test/test_bindings.c
--- a/tools/libxen/test/test_bindings.c Mon Apr 19 17:57:28 2010 +0100
+++ b/tools/libxen/test/test_bindings.c Tue Apr 20 11:10:40 2010 +0200
@@ -28,6 +28,7 @@
 #include <xen/api/xen_all.h>
 
 //#define PRINT_XML
+//////////////#define POOL_TESTS
 
 static void usage()
 {
@@ -123,6 +124,649 @@ static void print_error(xen_session *ses
     }
     fprintf(stderr, "\n");
 }
+
+
+#ifdef POOL_TESTS
+#define NAME_DESCRIPTION "TestPool"
+#define NAME_DESCRIPTION_2 "TestPool-2"
+#define NAME_LABEL "Pool-1"
+#define NAME_LABEL_2 "Pool-2"
+#define SCHED_NAME "credit"
+#define NCPU_VAL   2
+#define NCPU_VAL_2   1
+
+
+static int pool_tests(xen_session *session, xen_host host)
+{
+    int rc = 1;
+    xen_cpu_pool_set            *pools = NULL;
+    xen_host_record             *host_record = NULL;
+    xen_cpu_pool_record_opt     *cpu_pool_opt = NULL;
+    xen_cpu_pool_record         *cpu_pool_rec = NULL;
+    xen_host_cpu_set            *host_cpu_set = NULL;
+    xen_host_cpu_record         *host_cpu_record = NULL;
+    xen_vm_set                  *vm_set = NULL;
+    xen_cpu_pool                pool = NULL;
+    xen_cpu_pool                pool_out = NULL;
+    xen_string_string_map       *pool_other_config = NULL;
+    xen_vm_record               *vm_record = NULL;
+    xen_string_set              *proposed_cpus = NULL;
+    xen_host                    res_host = NULL;
+    char                        *name_description = NULL;
+    char                        *name_label = NULL;
+    char                        *sched_policy = NULL;
+    char                        *pool_uuid = NULL;
+    int64_t                     ncpu;
+
+    for (int loop= 0; loop < 1; loop++)
+    {
+        // Test extensions of class host
+        printf("Test cpu_pool extension of host class 
-----------------------------------------\n");
+
+        printf("host.get_resident_cpu_pools\n");
+        if (!xen_host_get_resident_cpu_pools(session, &pools, host))
+        {
+            break;
+        }
+        if (pools->size != 1)
+        {
+            printf("Wrong pool count; only one pool expected\n");
+            break;
+        }
+        printf("Pool UUID %s\n", (char*)pools->contents[0]);
+        xen_cpu_pool_set_free(pools);
+        pools = NULL;
+
+        printf("host.get_record\n");
+        if (!xen_host_get_record(session, &host_record, host))
+        {
+            break;
+        }
+        printf("Pool count %d\n", (int)host_record->resident_cpu_pools->size);
+        if (host_record->resident_cpu_pools->size != 1)
+        {
+            break;
+        }
+        cpu_pool_opt = host_record->resident_cpu_pools->contents[0];
+        printf("Pool UUID %s\n", (char*)cpu_pool_opt->u.handle);
+        xen_host_record_free(host_record);
+        host_record = NULL;
+        cpu_pool_opt = NULL;
+
+
+        // Test extensions of class host_cpu
+        printf("host_cpu.get_all\n");
+        if (!xen_host_cpu_get_all(session, &host_cpu_set))
+        {
+            break;
+        }
+
+        printf("host_cpu.get_cpu_pool & host_cpu.get_record\n");
+        for (int i= 0; i < host_cpu_set->size; i++)
+        {
+            if (!xen_host_cpu_get_cpu_pool(session, &pools, host_cpu_set->contents[i]))
+            {
+                break;
+            }
+            if (pools->size > 1)
+            {
+                printf("Wrong pool count (xen_host_cpu_get_cpu_pool)\n");
+                break;
+            }
+
+            printf("host_cpu (get_cpu_pool) %s, cpu_pool %s\n", (char*)host_cpu_set->contents[i],
+                pools->size != 0 ? (char*)pools->contents[0] : "(None)");
+
+            if (!xen_host_cpu_get_record(session, &host_cpu_record, host_cpu_set->contents[i]))
+            {
+                break;
+            }
+            if (host_cpu_record->cpu_pools->size > 1)
+            {
+                printf("Wrong pool count (xen_host_cpu_get_record)\n");
+                break;
+            }
+
+            printf("host_cpu (get_record) %s, cpu_pool %s\n", (char*)host_cpu_set->contents[i],
+                host_cpu_record->cpu_pools->size != 0
+                ? (char*)((xen_cpu_pool_record_opt*)(host_cpu_record->cpu_pools->contents[0])->u.handle)
+                : "(None)");
+
+        }
+        xen_host_cpu_record_free(host_cpu_record);
+        host_cpu_record = NULL;
+        xen_host_cpu_set_free(host_cpu_set);
+        host_cpu_set = NULL;
+        xen_cpu_pool_set_free(pools);
+        pools = NULL;
+
+        printf("host_cpu.get_unassigned_cpus\n");
+        if (!xen_host_cpu_get_unassigned_cpus(session, &host_cpu_set))
+        {
+            break;
+        }
+        printf("Free cpus (not bound to a pool)\n");
+        for (int i= 0; i < host_cpu_set->size; i++)
+        {
+            printf("  cpu UUID %s\n", (char*)host_cpu_set->contents[i]);
+        }
+        xen_host_cpu_set_free(host_cpu_set);
+        host_cpu_set = NULL;
+
+
+        printf("vm.get_record\n");
+        if (!xen_vm_get_all(session, &vm_set))
+        {
+            break;
+        }
+
+        if (!xen_vm_get_record(session, &vm_record, vm_set->contents[0]))
+        {
+            break;
+        }
+        printf("VM %s, pool_name %s, cpu_pool %s\n", (char*)vm_set->contents[0],
+            vm_record->pool_name, (char*)vm_record->cpu_pool->contents[0]);
+
+        xen_vm_record_free(vm_record);
+        vm_record = NULL;
+
+        printf("vm.get_cpu_pool\n");
+        if (!xen_vm_get_cpu_pool(session, &pools, vm_set->contents[0]))
+        {
+            break;
+        }
+        printf("vm_get_cpu_pool %s\n", (char*)pools->contents[0]);
+
+        xen_vm_set_free(vm_set);
+        xen_cpu_pool_set_free(pools);
+        vm_set = NULL;
+        pools = NULL;
+
+
+        // Class cpu_pool
+
+        // create
+        pool_other_config = xen_string_string_map_alloc(1);
+        pool_other_config->contents[0].key = strdup("type");
+        pool_other_config->contents[0].val = strdup("bs2000");
+        xen_string_set *proposed_CPUs_set = xen_string_set_alloc(1);
+        proposed_CPUs_set->contents[0] = strdup("3");
+
+        xen_cpu_pool_record new_cpu_pool_record =
+        {
+            .name_label = NAME_LABEL,
+            .name_description = NAME_DESCRIPTION,
+            .auto_power_on = false,
+            .ncpu = NCPU_VAL,
+            .sched_policy = SCHED_NAME,
+            .proposed_cpus = proposed_CPUs_set,
+            .other_config = pool_other_config,
+        };
+
+        printf("cpu_pool.create\n");
+        if (!xen_cpu_pool_create(session, &pool, &new_cpu_pool_record))
+        {
+            break;
+        }
+        printf("New Pool UUID %s\n", (char*)pool);
+        xen_string_set_free(proposed_CPUs_set);
+        proposed_CPUs_set = NULL;
+        xen_string_string_map_free(pool_other_config);
+        pool_other_config = NULL;
+
+        // get_by_name_label
+        printf("cpu_pool.get_by_name_label\n");
+        if (!xen_cpu_pool_get_by_name_label(session, &pools, "Pool-1"))
+        {
+            break;
+        }
+        if (strcmp((char*)pools->contents[0], (char*)pool) != 0)
+        {
+            break;
+        }
+        xen_cpu_pool_set_free(pools);
+        pools = NULL;
+
+
+        // get_by_uuid
+        printf("cpu_pool.get_by_uuid\n");
+        if (!xen_cpu_pool_get_by_uuid(session, &pool_out, pool))
+        {
+            break;
+        }
+        if (strcmp((char*)pool_out, (char*)pool) != 0)
+        {
+            printf("Wrong pool returned\n");
+            break;
+        }
+        xen_cpu_pool_free(pool_out);
+        pool_out = NULL;
+
+        // get_all
+        printf("cpu_pool.get_all\n");
+        if (!xen_cpu_pool_get_all(session, &pools))
+        {
+            break;
+        }
+        if (pools->size != 2)
+        {
+            printf("Wrong pool count (%d)\n", (int)pools->size);
+            break;
+        }
+        xen_cpu_pool_set_free(pools);
+        pools = NULL;
+
+
+        // get_activated
+        printf("cpu_pool.get_activated\n");
+        bool activated_state = true;
+        if (!xen_cpu_pool_get_activated(session, &activated_state, pool))
+        {
+            break;
+        }
+        if (activated_state)
+        {
+            printf("Pool must not be activated\n");
+            break;
+        }
+
+
+        // get_auto_power_on
+        printf("cpu_pool.get_auto_power_on\n");
+        bool power_state = true;
+        if (!xen_cpu_pool_get_auto_power_on(session, &power_state, pool))
+        {
+            break;
+        }
+        if (power_state)
+        {
+            printf("Pool must not have attribute 'auto_power_on'\n");
+            break;
+        }
+
+        // get_host_CPUs
+        printf("cpu_pool.get_host_CPUs\n");
+        if (!xen_cpu_pool_get_host_CPUs(session, &host_cpu_set, pool))
+        {
+            break;
+        }
+        if (host_cpu_set->size != 0)
+        {
+            printf("Pool must not have any attached cpus\n");
+            break;
+        }
+        xen_host_cpu_set_free(host_cpu_set);
+        host_cpu_set = NULL;
+
+
+        // get_name_description
+        printf("cpu_pool.get_name_description\n");
+        if (!xen_cpu_pool_get_name_description(session, &name_description, pool))
+        {
+            break;
+        }
+        if (strcmp(NAME_DESCRIPTION, name_description) != 0)
+        {
+            printf("Pool has wrong name_description\n");
+            break;
+        }
+        free(name_description);
+        name_description = NULL;
+
+
+        // get_name_label
+        printf("cpu_pool.get_name_label\n");
+        if (!xen_cpu_pool_get_name_label(session, &name_label, pool))
+        {
+            break;
+        }
+        if (strcmp(NAME_LABEL, name_label) != 0)
+        {
+            printf("Pool has wrong name_label\n");
+            break;
+        }
+        free(name_label);
+        name_label = NULL;
+
+        // get_ncpu
+        printf("cpu_pool.get_ncpu\n");
+        if (!xen_cpu_pool_get_ncpu(session, &ncpu, pool))
+        {
+            break;
+        }
+        if (NCPU_VAL != ncpu)
+        {
+            printf("Pool has wrong ncpu\n");
+            break;
+        }
+
+        // get_proposed_CPUs
+        printf("cpu_pool.get_proposed_CPUs\n");
+        if (!xen_cpu_pool_get_proposed_CPUs(session, &proposed_cpus, pool))
+        {
+            break;
+        }
+        if (proposed_cpus->size != 1)
+        {
+            printf("Pool has wrong proposed_cpus count\n");
+            break;
+        }
+        xen_string_set_free(proposed_cpus);
+        proposed_cpus = NULL;
+
+
+        // get_other_config
+        printf("cpu_pool.get_other_config\n");
+        if (!xen_cpu_pool_get_other_config(session, &pool_other_config, pool))
+        {
+            break;
+        }
+        if (pool_other_config->size != 1)
+        {
+            printf("Pool has wrong other_config element count\n");
+            break;
+        }
+        if ((strcmp(pool_other_config->contents[0].key, "type") != 0) ||
+            (strcmp(pool_other_config->contents[0].val, "bs2000") != 0))
+        {
+            printf("Pool has wrong other_config attributes\n");
+            break;
+        }
+        xen_string_string_map_free(pool_other_config);
+        pool_other_config = NULL;
+
+
+        // get_record
+        printf("cpu_pool.get_record\n");
+        if (!xen_cpu_pool_get_record(session, &cpu_pool_rec, pool))
+        {
+            break;
+        }
+        if ( (strcmp(cpu_pool_rec->name_label, NAME_LABEL) != 0) ||
+             (strcmp(cpu_pool_rec->name_description, NAME_DESCRIPTION) != 0) ||
+             (cpu_pool_rec->auto_power_on) ||
+             (cpu_pool_rec->ncpu != NCPU_VAL) ||
+             (cpu_pool_rec->started_vms->size != 0) ||
+             (strcmp(cpu_pool_rec->sched_policy, SCHED_NAME) != 0) ||
+             (cpu_pool_rec->proposed_cpus->size != 1) ||
+             (cpu_pool_rec->host_cpus->size != 0) ||
+             (cpu_pool_rec->activated) ||
+             (strcmp(cpu_pool_rec->resident_on->u.handle, host) != 0) ||
+             (strcmp(cpu_pool_rec->uuid, pool) != 0) ||
+             (cpu_pool_rec->other_config->size != 1))
+        {
+            printf("Wrong record output\n");
+            break;
+        }
+        xen_cpu_pool_record_free(cpu_pool_rec);
+        cpu_pool_rec = NULL;
+
+
+        // get_resident_on
+        printf("cpu_pool.get_resident_on\n");
+        if (!xen_cpu_pool_get_resident_on(session, &res_host, pool))
+        {
+            break;
+        }
+        if (strcmp(res_host, host) != 0)
+        {
+            printf("Wrong resident host returned\n");
+            break;
+        }
+        xen_host_free(res_host);
+        res_host = NULL;
+
+
+        // get_sched_policy
+        printf("cpu_pool.get_sched_policy\n");
+        if (!xen_cpu_pool_get_sched_policy(session, &sched_policy, pool))
+        {
+            break;
+        }
+        if (strcmp(sched_policy, SCHED_NAME) != 0)
+        {
+            printf("Wrong sched_policy returned\n");
+            break;
+        }
+        free(sched_policy);
+        sched_policy = NULL;
+
+
+        // get_started_VMs
+        printf("cpu_pool.get_started_VMs\n");
+        if (!xen_cpu_pool_get_started_VMs(session, &vm_set, pool))
+        {
+            break;
+        }
+        if (vm_set->size != 0)
+        {
+            printf("Wrong count of started VMs\n");
+            break;
+        }
+        xen_vm_set_free(vm_set);
+        vm_set = NULL;
+
+
+        // get_uuid
+        printf("cpu_pool.get_uuid\n");
+        if (!xen_cpu_pool_get_uuid(session, &pool_uuid, pool))
+        {
+            break;
+        }
+        if (strcmp(pool_uuid, pool) != 0)
+        {
+            printf("Wrong Pool UUID returned\n");
+            break;
+        }
+        free(pool_uuid);
+        pool_uuid = NULL;
+
+
+        // set_auto_power_on
+        printf("cpu_pool.set_auto_power_on\n");
+        if (!xen_cpu_pool_set_auto_power_on(session, pool, true))
+            break;
+
+
+        // set_proposed_CPUs
+        printf("cpu_pool.set_proposed_CPUs\n");
+        proposed_CPUs_set = xen_string_set_alloc(2);
+        proposed_CPUs_set->contents[0] = strdup("2");
+        proposed_CPUs_set->contents[1] = strdup("4");
+        if (!xen_cpu_pool_set_proposed_CPUs(session, pool, proposed_CPUs_set))
+            break;
+        xen_string_set_free(proposed_CPUs_set);
+        proposed_CPUs_set = NULL;
+
+
+        // add_to_proposed_CPUs
+        printf("cpu_pool.add_to_proposed_CPUs\n");
+        if (!xen_cpu_pool_add_to_proposed_CPUs(session, pool, "3"))
+            break;
+
+
+        // remove_from_proposed_CPUs
+        printf("cpu_pool.remove_from_proposed_CPUs\n");
+        if (!xen_cpu_pool_remove_from_proposed_CPUs(session, pool, "4"))
+            break;
+
+
+        // set_name_label
+        printf("cpu_pool.set_name_label\n");
+        if (!xen_cpu_pool_set_name_label(session, pool, NAME_LABEL_2))
+            break;
+
+
+        // set_name_description
+        printf("cpu_pool.set_name_description\n");
+        if (!xen_cpu_pool_set_name_description(session, pool, NAME_DESCRIPTION_2))
+            break;
+
+
+        // set_ncpu
+        printf("cpu_pool.set_ncpu\n");
+        if (!xen_cpu_pool_set_ncpu(session, pool, NCPU_VAL_2))
+            break;
+
+
+        // set_other_config
+        printf("cpu_pool.set_other_config\n");
+        pool_other_config = xen_string_string_map_alloc(2);
+        pool_other_config->contents[0].key = strdup("test1");
+        pool_other_config->contents[0].val = strdup("field1");
+        pool_other_config->contents[1].key = strdup("test2");
+        pool_other_config->contents[1].val = strdup("field2");
+        if (!xen_cpu_pool_set_other_config(session, pool, pool_other_config))
+            break;
+        xen_string_string_map_free(pool_other_config);
+        pool_other_config = NULL;
+
+
+        // add_to_other_config
+        printf("cpu_pool.add_to_other_config\n");
+        if (!xen_cpu_pool_add_to_other_config(session, pool, "test3", "field3"))
+            break;
+
+
+        // remove_from_other_config
+        printf("cpu_pool.remove_from_other_config\n");
+        if (!xen_cpu_pool_remove_from_other_config(session, pool, "test2"))
+            break;
+
+
+        // set_sched_policy
+        printf("cpu_pool.set_sched_policy\n");
+        if (!xen_cpu_pool_set_sched_policy(session, pool, SCHED_NAME))
+            break;
+
+
+        // check get_record again
+        printf("check cpu_pool record\n");
+        if (!xen_cpu_pool_get_record(session, &cpu_pool_rec, pool))
+        {
+            break;
+        }
+        if ( (strcmp(cpu_pool_rec->name_label, NAME_LABEL_2) != 0) ||
+             (strcmp(cpu_pool_rec->name_description, NAME_DESCRIPTION_2) != 0) ||
+             (!cpu_pool_rec->auto_power_on) ||
+             (cpu_pool_rec->ncpu != NCPU_VAL_2) ||
+             (cpu_pool_rec->started_vms->size != 0) ||
+             (strcmp(cpu_pool_rec->sched_policy, SCHED_NAME) != 0) ||
+             (cpu_pool_rec->proposed_cpus->size != 2) ||
+             (cpu_pool_rec->host_cpus->size != 0) ||
+             (cpu_pool_rec->activated) ||
+             (strcmp(cpu_pool_rec->resident_on->u.handle, host) != 0) ||
+             (strcmp(cpu_pool_rec->uuid, pool) != 0) ||
+             (cpu_pool_rec->other_config->size != 2))
+        {
+            printf("Wrong record output\n");
+            break;
+        }
+        xen_cpu_pool_record_free(cpu_pool_rec);
+        cpu_pool_rec = NULL;
+
+
+        // activate pool
+        printf("cpu_pool.activate\n");
+        if (!xen_cpu_pool_activate(session, pool))
+            break;
+
+
+        // add_host_CPU_live
+        printf("cpu_pool.add_host_CPU_live\n");
+        if (!xen_host_cpu_get_unassigned_cpus(session, &host_cpu_set))
+        {
+            break;
+        }
+        if (host_cpu_set->size < 1)
+        {
+            printf("No free CPU found\n");
+            break;
+        }
+        if (!xen_cpu_pool_add_host_CPU_live(session, pool, host_cpu_set->contents[0]))
+            break;
+
+
+        // remove_host_CPU_live
+        printf("cpu_pool.remove_host_CPU_live\n");
+        if (!xen_cpu_pool_remove_host_CPU_live(session, pool, host_cpu_set->contents[0]))
+            break;
+
+        xen_host_cpu_set_free(host_cpu_set);
+        host_cpu_set = NULL;
+
+
+        // check get_record again
+        printf("check cpu_pool record\n");
+        if (!xen_cpu_pool_get_record(session, &cpu_pool_rec, pool))
+        {
+            break;
+        }
+        if ( (strcmp(cpu_pool_rec->name_label, NAME_LABEL_2) != 0) ||
+             (strcmp(cpu_pool_rec->name_description, NAME_DESCRIPTION_2) != 0) ||
+             (!cpu_pool_rec->auto_power_on) ||
+             (cpu_pool_rec->ncpu != NCPU_VAL_2) ||
+             (cpu_pool_rec->started_vms->size != 0) ||
+             (strcmp(cpu_pool_rec->sched_policy, SCHED_NAME) != 0) ||
+             (cpu_pool_rec->proposed_cpus->size != 2) ||
+             (cpu_pool_rec->host_cpus->size != 1) ||
+             (!cpu_pool_rec->activated) ||
+             (strcmp(cpu_pool_rec->resident_on->u.handle, host) != 0) ||
+             (strcmp(cpu_pool_rec->uuid, pool) != 0) ||
+             (cpu_pool_rec->other_config->size != 2))
+        {
+            printf("Wrong record output\n");
+            break;
+        }
+        xen_cpu_pool_record_free(cpu_pool_rec);
+        cpu_pool_rec = NULL;
+
+
+        // deactivate pool
+        printf("cpu_pool.deactivate\n");
+        if (!xen_cpu_pool_deactivate(session, pool))
+            break;
+
+
+        // Pool delete
+        if (!xen_cpu_pool_destroy(session, pool))
+        {
+            break;
+        }
+        xen_cpu_pool_free(pool);
+        pool = NULL;
+
+        // Tests OK
+        printf("Pool Tests OK\n");
+        rc= 0;
+    }
+
+    if (rc != 0)
+    {
+        print_error(session);
+    }
+
+    xen_cpu_pool_set_free(pools);
+    xen_host_record_free(host_record);
+    xen_cpu_pool_record_opt_free(cpu_pool_opt);
+    xen_host_cpu_set_free(host_cpu_set);
+    xen_host_cpu_record_free(host_cpu_record);
+    xen_vm_set_free(vm_set);
+    xen_cpu_pool_free(pool);
+    xen_cpu_pool_free(pool_out);
+    xen_string_string_map_free(pool_other_config);
+    xen_vm_record_free(vm_record);
+    xen_string_set_free(proposed_cpus);
+    free(name_description);
+    free(name_label);
+    free(sched_policy);
+    free(pool_uuid);
+    xen_cpu_pool_record_free(cpu_pool_rec);
+    xen_host_free(res_host);
+
+    return rc;
+}
+#endif
 
 
 int main(int argc, char **argv)
@@ -365,6 +1009,11 @@ int main(int argc, char **argv)
 
     xen_vm_record_free(vm_record);
 
+#ifdef POOL_TESTS
+    if (pool_tests(session, host) != 0)
+        return 1;
+#endif
+
     xen_host_free(host);
     xen_string_string_map_free(versions);
     free(dmesg);
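
For completeness, a short query-side sketch using the accessors added to
xen_host.h and xen_vm.h above. Here, session, host and vm are assumed to be
valid handles obtained as in test_bindings.c, and the output wording is
illustrative only:

#include <stdio.h>
#include <stdlib.h>
#include <xen/api/xen_all.h>

/* List the pools resident on a host and report which pool a VM runs in. */
static void cpu_pool_query_example(xen_session *session, xen_host host, xen_vm vm)
{
    /* Pool refs resident on this host. */
    xen_cpu_pool_set *pools = NULL;
    if (xen_host_get_resident_cpu_pools(session, &pools, host))
    {
        for (size_t i = 0; i < pools->size; i++)
            printf("pool on host: %s\n", (char *)pools->contents[i]);
    }
    xen_cpu_pool_set_free(pools);

    /* Name of the pool the VM is assigned to. */
    char *pool_name = NULL;
    if (xen_vm_get_pool_name(session, &pool_name, vm))
    {
        printf("VM runs in pool '%s'\n", pool_name);
        free(pool_name);
    }
}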