WARNING - OLD ARCHIVES

This is an archived copy of the Xen.org mailing list, which we have preserved to ensure that existing links to archives are not broken. The live archive, which contains the latest emails, can be found at http://lists.xen.org/
   
 
 
Xen 
 
Home Products Support Community News
 
   
 

xen-devel

[Xen-devel] [PATCH][RESUBMIT] add dom0 vcpu hotplug control

To: xen-devel@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-devel] [PATCH][RESUBMIT] add dom0 vcpu hotplug control
From: Ryan Harper <ryanh@xxxxxxxxxx>
Date: Tue, 14 Jun 2005 11:37:18 -0500
Delivery-date: Tue, 14 Jun 2005 16:36:26 +0000
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
In-reply-to: <20050607220246.GD28421@xxxxxxxxxx>
List-help: <mailto:xen-devel-request@lists.xensource.com?subject=help>
List-id: Xen developer discussion <xen-devel.lists.xensource.com>
List-post: <mailto:xen-devel@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=unsubscribe>
References: <20050607220246.GD28421@xxxxxxxxxx>
Sender: xen-devel-bounces@xxxxxxxxxxxxxxxxxxx
User-agent: Mutt/1.5.6+20040907i
* Ryan Harper <ryanh@xxxxxxxxxx> [2005-06-07 17:05]:
> This patch adds new control messages for vcpu hotplug events.  Via the
> xm vcpu_hotplug sub-program, VCPUS in domains can be enabled/disabled
> when CONFIG_HOTPLUG_CPU is enabled in the target domain's kernel.
> 

Updated this patch to use vcpu_to_cpu up/down info to control whether
state changes are sent when hotplugging vcpus.

Built and tested with vcpu_down patch against 20050614 nightly unstable
snapshot.

--
Ryan Harper
Software Engineer; Linux Technology Center
IBM Corp., Austin, Tx
(512) 838-9253   T/L: 678-9253
ryanh@xxxxxxxxxx

diffstat output:
 linux-2.6.11-xen-sparse/arch/xen/i386/kernel/smpboot.c |   81 +++++++++++++++++
 tools/python/xen/lowlevel/xu/xu.c                      |   16 +++
 tools/python/xen/xend/XendClient.py                    |    6 +
 tools/python/xen/xend/XendDomain.py                    |   12 ++
 tools/python/xen/xend/XendDomainInfo.py                |   12 ++
 tools/python/xen/xend/server/SrvDomain.py              |    8 +
 tools/python/xen/xend/server/messages.py               |   18 +++
 tools/python/xen/xm/main.py                            |   30 ++++++
 xen/include/public/io/domain_controller.h              |   20 ++++
 9 files changed, 203 insertions(+)

Signed-off-by: Ryan Harper <ryanh@xxxxxxxxxx>
---
diff -urN vcpu_down/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/smpboot.c 
vcpu_cntl/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/smpboot.c
--- vcpu_down/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/smpboot.c    
2005-06-13 10:41:56.000000000 -0500
+++ vcpu_cntl/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/smpboot.c    
2005-06-13 13:41:55.000000000 -0500
@@ -85,6 +85,13 @@
 /* Set when the idlers are all forked */
 int smp_threads_ready;
 
+#ifdef CONFIG_HOTPLUG_CPU
+struct vcpu_hotplug_handler_t {
+    void (*fn)();
+    u32 vcpu;
+};
+#endif
+
 #if 0
 /*
  * Trampoline 80x86 program as an array.
@@ -1297,6 +1304,9 @@
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
+#include <asm-xen/ctrl_if.h>
+/* hotplug down/up function pointer and target vcpu */
+struct vcpu_hotplug_handler_t vcpu_hotplug_handler;
 
 /* must be called with the cpucontrol mutex held */
 static int __devinit cpu_enable(unsigned int cpu)
@@ -1357,6 +1367,77 @@
        }
        printk(KERN_ERR "CPU %u didn't die...\n", cpu);
 }
+
+static int vcpu_hotplug_cpu_process(void *unused)
+{
+    struct vcpu_hotplug_handler_t *handler = &vcpu_hotplug_handler;
+
+    if ( handler->fn ) {
+        (*(handler->fn))(handler->vcpu);
+        handler->fn = NULL;
+    }
+    return 0;
+}
+
+static void __vcpu_hotplug_handler(void *unused)
+{
+    int err;
+
+    err = kernel_thread(vcpu_hotplug_cpu_process, 
+                                         NULL, CLONE_FS | CLONE_FILES);
+    if ( err < 0 )
+        printk(KERN_ALERT "Error creating hotplug_cpu process!\n");
+
+}
+
+static void vcpu_hotplug_event_handler(ctrl_msg_t *msg, unsigned long id)
+{
+    static DECLARE_WORK(vcpu_hotplug_work, __vcpu_hotplug_handler, NULL);
+    vcpu_hotplug_t *req = (vcpu_hotplug_t *)&msg->msg[0];
+    struct vcpu_hotplug_handler_t *handler = &vcpu_hotplug_handler;
+    ssize_t ret;
+
+    if ( msg->length != sizeof(vcpu_hotplug_t) )
+        goto parse_error;
+
+    /* grab target vcpu from msg */
+    handler->vcpu = req->vcpu;
+
+    /* determine which function to call based on msg subtype */
+    switch ( msg->subtype ) {
+        case CMSG_VCPU_HOTPLUG_OFF:
+            handler->fn = (void *)&cpu_down;
+            ret = schedule_work(&vcpu_hotplug_work);
+            req->status = (u32) ret;
+        break;
+        case CMSG_VCPU_HOTPLUG_ON:
+            handler->fn = (void *)&cpu_up;
+            ret = schedule_work(&vcpu_hotplug_work);
+            req->status = (u32) ret;
+        break;
+        default:
+            goto parse_error;
+    }
+
+    ctrl_if_send_response(msg);
+    return;
+ parse_error:
+    msg->length = 0;
+    ctrl_if_send_response(msg);
+}
+
+static int __init setup_vcpu_hotplug_event(void)
+{
+    struct vcpu_hotplug_handler_t *handler = &vcpu_hotplug_handler;
+
+    handler->fn = NULL;
+    ctrl_if_register_receiver(CMSG_VCPU_HOTPLUG, vcpu_hotplug_event_handler, 
0);
+
+    return 0;
+}
+
+__initcall(setup_vcpu_hotplug_event);
+
 #else /* ... !CONFIG_HOTPLUG_CPU */
 int __cpu_disable(void)
 {
diff -urN vcpu_down/tools/python/xen/lowlevel/xu/xu.c 
vcpu_cntl/tools/python/xen/lowlevel/xu/xu.c
--- vcpu_down/tools/python/xen/lowlevel/xu/xu.c 2005-06-12 22:13:37.000000000 
-0500
+++ vcpu_cntl/tools/python/xen/lowlevel/xu/xu.c 2005-06-13 13:41:55.000000000 
-0500
@@ -744,6 +744,14 @@
         C2P(mem_request_t, target, Int, Long);
         C2P(mem_request_t, status, Int, Long);
         return dict;
+    case TYPE(CMSG_VCPU_HOTPLUG, CMSG_VCPU_HOTPLUG_OFF):
+        C2P(vcpu_hotplug_t, vcpu, Int, Long);
+        C2P(vcpu_hotplug_t, status, Int, Long);
+        return dict;
+    case TYPE(CMSG_VCPU_HOTPLUG, CMSG_VCPU_HOTPLUG_ON):
+        C2P(vcpu_hotplug_t, vcpu, Int, Long);
+        C2P(vcpu_hotplug_t, status, Int, Long);
+        return dict;
     }
 
     return PyString_FromStringAndSize((char *)xum->msg.msg, xum->msg.length);
@@ -909,6 +917,14 @@
     case TYPE(CMSG_MEM_REQUEST, CMSG_MEM_REQUEST_SET):
         P2C(mem_request_t, target, u32);
         break;
+    case TYPE(CMSG_VCPU_HOTPLUG, CMSG_VCPU_HOTPLUG_OFF):
+        P2C(vcpu_hotplug_t, vcpu, u32);
+        P2C(vcpu_hotplug_t, status, u32);
+        break;
+    case TYPE(CMSG_VCPU_HOTPLUG, CMSG_VCPU_HOTPLUG_ON):
+        P2C(vcpu_hotplug_t, vcpu, u32);
+        P2C(vcpu_hotplug_t, status, u32);
+        break;
     case TYPE(CMSG_USBIF_FE, CMSG_USBIF_FE_INTERFACE_STATUS_CHANGED):
         P2C(usbif_fe_interface_status_changed_t, status, u32);
         P2C(usbif_fe_interface_status_changed_t, evtchn, u16);
diff -urN vcpu_down/tools/python/xen/xend/server/messages.py 
vcpu_cntl/tools/python/xen/xend/server/messages.py
--- vcpu_down/tools/python/xen/xend/server/messages.py  2005-06-12 
22:13:37.000000000 -0500
+++ vcpu_cntl/tools/python/xen/xend/server/messages.py  2005-06-13 
13:41:55.000000000 -0500
@@ -309,6 +309,24 @@
 msg_formats.update(mem_request_formats)
 
 #============================================================================
+# Domain vcpu hotplug message.
+#============================================================================
+
+CMSG_VCPU_HOTPLUG     = 10
+CMSG_VCPU_HOTPLUG_OFF = 0
+CMSG_VCPU_HOTPLUG_ON  = 1
+
+vcpu_hotplug_formats = {
+    'vcpu_hotplug_off_t':
+    (CMSG_VCPU_HOTPLUG, CMSG_VCPU_HOTPLUG_OFF),
+
+    'vcpu_hotplug_on_t':
+    (CMSG_VCPU_HOTPLUG, CMSG_VCPU_HOTPLUG_ON)
+    }
+
+msg_formats.update(vcpu_hotplug_formats)
+
+#============================================================================
 class Msg:
     pass
 
diff -urN vcpu_down/tools/python/xen/xend/server/SrvDomain.py 
vcpu_cntl/tools/python/xen/xend/server/SrvDomain.py
--- vcpu_down/tools/python/xen/xend/server/SrvDomain.py 2005-06-12 
22:13:41.000000000 -0500
+++ vcpu_cntl/tools/python/xen/xend/server/SrvDomain.py 2005-06-13 
16:53:32.265760766 -0500
@@ -180,6 +180,14 @@
         val = fn(req.args, {'dom': self.dom.id})
         return val
 
+    def op_vcpu_hotplug(self, op, req):
+        fn = FormFn(self.xd.domain_vcpu_hotplug,
+                    [['dom', 'int'],
+                     ['vcpu', 'int'],
+                     ['state', 'int']])
+        val = fn(req.args, {'dom': self.dom.id})
+        return val
+
     def render_POST(self, req):
         return self.perform(req)
         
diff -urN vcpu_down/tools/python/xen/xend/XendClient.py 
vcpu_cntl/tools/python/xen/xend/XendClient.py
--- vcpu_down/tools/python/xen/xend/XendClient.py       2005-06-12 
22:13:36.000000000 -0500
+++ vcpu_cntl/tools/python/xen/xend/XendClient.py       2005-06-13 
13:41:55.000000000 -0500
@@ -271,6 +271,12 @@
                              'target'    : mem_target })
         return val
 
+    def xend_domain_vcpu_hotplug(self, id, vcpu, state):
+        return self.xendPost(self.domainurl(id),
+                            {'op'         : 'vcpu_hotplug',
+                             'vcpu'       : vcpu,
+                             'state'      : state })
+
     def xend_domain_vif_limit(self, id, vif, credit, period):
         return self.xendPost(self.domainurl(id),
                             { 'op'      : 'vif_limit_set',
diff -urN vcpu_down/tools/python/xen/xend/XendDomainInfo.py 
vcpu_cntl/tools/python/xen/xend/XendDomainInfo.py
--- vcpu_down/tools/python/xen/xend/XendDomainInfo.py   2005-06-12 
22:13:43.000000000 -0500
+++ vcpu_cntl/tools/python/xen/xend/XendDomainInfo.py   2005-06-13 
13:41:55.000000000 -0500
@@ -949,6 +949,18 @@
             msg = messages.packMsg('mem_request_t', { 'target' : target * (1 
<< 8)} )
             self.channel.writeRequest(msg)
 
+    def vcpu_hotplug(self, vcpu, state):
+        """Disable or enable VCPU in domain.
+        """
+        if self.channel:
+            if int(state) == 0:
+                msg = messages.packMsg('vcpu_hotplug_off_t', { 'vcpu' : vcpu} )
+            else:
+                msg = messages.packMsg('vcpu_hotplug_on_t',  { 'vcpu' : vcpu} )
+
+            self.channel.writeRequest(msg)
+
+
     def shutdown(self, reason, key=0):
         msgtype = shutdown_messages.get(reason)
         if not msgtype:
diff -urN vcpu_down/tools/python/xen/xend/XendDomain.py 
vcpu_cntl/tools/python/xen/xend/XendDomain.py
--- vcpu_down/tools/python/xen/xend/XendDomain.py       2005-06-12 
22:13:43.000000000 -0500
+++ vcpu_cntl/tools/python/xen/xend/XendDomain.py       2005-06-13 
13:45:29.000000000 -0500
@@ -710,6 +710,18 @@
         dominfo = self.domain_lookup(id)
         return dominfo.mem_target_set(mem)
 
+    def domain_vcpu_hotplug(self, id, vcpu, state):
+        """Enable or disable VCPU vcpu in DOM id
+
+        @param id: domain
+        @param vcpu: target VCPU in domain
+        @param state: which state VCPU will become
+        @return: 0 on success, -1 on error
+        """
+
+        dominfo = self.domain_lookup(id)
+        return dominfo.vcpu_hotplug(vcpu, state)
+
     def domain_dumpcore(self, id):
         """Save a core dump for a crashed domain.
 
diff -urN vcpu_down/tools/python/xen/xm/main.py 
vcpu_cntl/tools/python/xen/xm/main.py
--- vcpu_down/tools/python/xen/xm/main.py       2005-06-13 14:46:16.000000000 
-0500
+++ vcpu_cntl/tools/python/xen/xm/main.py       2005-06-13 17:28:55.159285868 
-0500
@@ -571,6 +571,36 @@
 
 xm.prog(ProgBalloon)
 
+
+class ProgVcpuhotplug(Prog):
+    group = 'domain'
+    name  = 'vcpu_hotplug'
+    info  = """Enable or disable a VCPU in a domain."""
+
+    def help(self, args):
+        print args[0], "DOM VCPU [0|1]"
+        print """\nRequest virtual processor VCPU to be disabled or enabled in
+domain DOM"""
+
+    def main(self, args):
+        if len(args) != 4: self.err("%s: Invalid arguments(s)" % args[0])
+        dom = args[1]
+        vcpu = args[2]
+        state = args[3]
+        info = server.xend_domain(dom)
+        vcpu_to_cpu = sxp.child_value(info, 'vcpu_to_cpu', 
'-1').replace('-1','#')
+        # only send state change if states differ 
+        try:
+            # (down going up) or (up going down)
+            if (vcpu_to_cpu[int(vcpu)] == "#" and state == "1") or \
+               (vcpu_to_cpu[int(vcpu)] != "#" and state == "0"):
+                server.xend_domain_vcpu_hotplug(int(dom), int(vcpu), 
int(state))
+        except IndexError:
+            print "Invalid VCPU(%s)"%(vcpu)
+
+xm.prog(ProgVcpuhotplug)
+
+
 class ProgDomid(Prog):
     group = 'domain'
     name = 'domid'
diff -urN vcpu_down/xen/include/public/io/domain_controller.h 
vcpu_cntl/xen/include/public/io/domain_controller.h
--- vcpu_down/xen/include/public/io/domain_controller.h 2005-06-12 
22:13:41.000000000 -0500
+++ vcpu_cntl/xen/include/public/io/domain_controller.h 2005-06-13 
13:41:55.000000000 -0500
@@ -61,6 +61,7 @@
 #define CMSG_MEM_REQUEST    7  /* Memory reservation reqs */
 #define CMSG_USBIF_BE       8  /* USB controller backend  */
 #define CMSG_USBIF_FE       9  /* USB controller frontend */
+#define CMSG_VCPU_HOTPLUG  10  /* Hotplug VCPU messages   */
 
 /******************************************************************************
  * CONSOLE DEFINITIONS
@@ -758,6 +759,25 @@
 } PACKED shutdown_sysrq_t; /* 4 bytes */
 
 /******************************************************************************
+ * VCPU HOTPLUG CONTROLS
+ */
+
+/*
+ * Subtypes for vcpu hotplug messages.
+ */
+#define CMSG_VCPU_HOTPLUG_OFF   0   /* turn vcpu off */
+#define CMSG_VCPU_HOTPLUG_ON    1   /* turn vcpu on  */
+
+/*
+ * CMSG_VCPU_HOTPLUG:
+ *  Indicate which vcpu's state should change
+ */
+typedef struct {
+    u32 vcpu;         /* 0: VCPU whose state will change */
+    u32 status;       /* 4: Return code indicates success or failure. */
+} PACKED vcpu_hotplug_t;
+
+/******************************************************************************
  * MEMORY CONTROLS
  */
 

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel