
RE: [Xen-devel] [PATCH] add dom0 vcpu hotplug control


  • To: "Ryan Harper" <ryanh@xxxxxxxxxx>, <xen-devel@xxxxxxxxxxxxxxxxxxx>
  • From: "Ian Pratt" <m+Ian.Pratt@xxxxxxxxxxxx>
  • Date: Tue, 7 Jun 2005 23:26:42 +0100
  • Delivery-date: Tue, 07 Jun 2005 22:25:54 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xensource.com>
  • Thread-index: AcVrrLiJZS/T9F+TQCmApkfdPHxYOAAAsxtQ
  • Thread-topic: [Xen-devel] [PATCH] add dom0 vcpu hotplug control

 

> -----Original Message-----
> From: xen-devel-bounces@xxxxxxxxxxxxxxxxxxx 
> [mailto:xen-devel-bounces@xxxxxxxxxxxxxxxxxxx] On Behalf Of 
> Ryan Harper
> Sent: 07 June 2005 23:03
> To: xen-devel@xxxxxxxxxxxxxxxxxxx
> Subject: [Xen-devel] [PATCH] add dom0 vcpu hotplug control
> 
> This patch adds new control messages for vcpu hotplug events.  Via the
> xm vcpu_hotplug sub-program, VCPUs in domains can be enabled/disabled
> when CONFIG_HOTPLUG_CPU is enabled in the target domain's kernel.
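> 
> For example (illustrative invocations only; the DOM VCPU [0|1] syntax
> comes from the xm help text in the patch below, and the domain and
> vcpu numbers here are made up):
> 
>     xm vcpu_hotplug 3 1 0    # take VCPU 1 in domain 3 down
>     xm vcpu_hotplug 3 1 1    # bring VCPU 1 in domain 3 back up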
> 
> Currently there is nothing that tracks whether a VCPU is up or down.
> My previous patch [1] added a new per-VCPU flag (VCPUF_down) which
> could be used to keep track of which VCPUs are up and down.  Right
> now, there isn't a hypercall that returns the per-VCPU flag status
> (something equivalent to the per-domain flags in get_dominfo).  Have
> we thought about a get_vcpu_info hypercall?  I'd like to get that
> implemented so I can report VCPU state in the xm list --vcpus output.
> That would also make it easier to skip sending control messages that
> don't change the VCPU's state (e.g. sending a down message to a vcpu
> that is already down).
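> 
> To make that concrete, the sort of thing I have in mind (purely a
> sketch -- nothing like this exists in the tree yet, and every name in
> it is hypothetical):
> 
>     typedef struct {
>         /* IN variables. */
>         domid_t domain;   /* domain to query                       */
>         u32     vcpu;     /* which VCPU                            */
>         /* OUT variables. */
>         u32     up;       /* nonzero unless VCPUF_down is set      */
>         u32     flags;    /* per-VCPU flags, analogous to the      */
>                           /* per-domain flags in get_dominfo       */
>     } dom0_getvcpuinfo_t;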

I haven't looked at the code, but can you not use DOM0_GETVCPUCONTEXT to
iterate over all the VCPUs for a domain? E.g. if you ask for VCPU N and
it doesn't exist (isn't up), you get the state for the next highest
numbered one, or ESRCH if there isn't one. That's the behaviour I'd
expect.
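
In code, the iteration I'm describing would look something like this (a
sketch only: get_vcpu_context() stands in for whatever libxc wrapper
ends up issuing DOM0_GETVCPUCONTEXT, and the next-highest/ESRCH
semantics are the ones assumed above, not a documented guarantee):

    #include <errno.h>
    #include <stdio.h>

    /* Stand-in wrapper: on success returns 0 and rewrites *vcpu to the
     * lowest existing VCPU >= the one asked for; fails with errno set
     * to ESRCH when no such VCPU exists. */
    extern int get_vcpu_context(unsigned int dom, unsigned int *vcpu);

    static void for_each_vcpu(unsigned int dom)
    {
        unsigned int vcpu = 0;

        while (get_vcpu_context(dom, &vcpu) == 0) {
            printf("dom%u: vcpu%u is up\n", dom, vcpu);
            vcpu++;                  /* resume the scan past this VCPU */
        }
        if (errno != ESRCH)
            perror("get_vcpu_context");  /* real error, not end of scan */
    }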

Ian


> 
> Please apply.
> 
> 1. http://lists.xensource.com/archives/html/xen-devel/2005-06/msg00192.html
> 
> --
> Ryan Harper
> Software Engineer; Linux Technology Center IBM Corp., Austin, Tx
> (512) 838-9253   T/L: 678-9253
> ryanh@xxxxxxxxxx
> 
> diffstat output:
>  linux-2.6.11-xen-sparse/arch/xen/i386/kernel/smpboot.c |   81 +++++++++++++++++
>  tools/python/xen/lowlevel/xu/xu.c                      |   16 +++
>  tools/python/xen/xend/XendClient.py                    |    6 +
>  tools/python/xen/xend/XendDomain.py                    |    4 
>  tools/python/xen/xend/XendDomainInfo.py                |   12 ++
>  tools/python/xen/xend/server/SrvDomain.py              |    8 +
>  tools/python/xen/xend/server/messages.py               |   18 +++
>  tools/python/xen/xm/main.py                            |   21 ++++
>  xen/include/public/io/domain_controller.h              |   20 ++++
>  9 files changed, 186 insertions(+)
> 
> Signed-off-by: Ryan Harper <ryanh@xxxxxxxxxx>
> ---
> diff -urN a/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/smpboot.c b/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/smpboot.c
> --- a/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/smpboot.c  2005-06-06 22:05:33.000000000 -0500
> +++ b/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/smpboot.c  2005-06-07 16:53:49.362987126 -0500
> @@ -85,6 +85,13 @@
>  /* Set when the idlers are all forked */
>  int smp_threads_ready;
>  
> +#ifdef CONFIG_HOTPLUG_CPU
> +struct vcpu_hotplug_handler_t {
> +    void (*fn)();
> +    u32 vcpu;
> +};
> +#endif
> +
>  #if 0
>  /*
>   * Trampoline 80x86 program as an array.
> @@ -1297,6 +1304,9 @@
>  }
>  
>  #ifdef CONFIG_HOTPLUG_CPU
> +#include <asm-xen/ctrl_if.h>
> +/* hotplug down/up function pointer and target vcpu */
> +struct vcpu_hotplug_handler_t vcpu_hotplug_handler;
>  
>  /* must be called with the cpucontrol mutex held */
>  static int __devinit cpu_enable(unsigned int cpu)
> @@ -1357,6 +1367,77 @@
>       }
>       printk(KERN_ERR "CPU %u didn't die...\n", cpu);
>  }
> +
> +static int vcpu_hotplug_cpu_process(void *unused)
> +{
> +    struct vcpu_hotplug_handler_t *handler = &vcpu_hotplug_handler;
> +
> +    if ( handler->fn ) {
> +        (*(handler->fn))(handler->vcpu);
> +        handler->fn = NULL;
> +    }
> +    return 0;
> +}
> +
> +static void __vcpu_hotplug_handler(void *unused)
> +{
> +    int err;
> +
> +    err = kernel_thread(vcpu_hotplug_cpu_process, NULL,
> +                        CLONE_FS | CLONE_FILES);
> +    if ( err < 0 )
> +        printk(KERN_ALERT "Error creating hotplug_cpu process!\n");
> +
> +}
> +
> +static void vcpu_hotplug_event_handler(ctrl_msg_t *msg, unsigned long id)
> +{
> +    static DECLARE_WORK(vcpu_hotplug_work, __vcpu_hotplug_handler, NULL);
> +    vcpu_hotplug_t *req = (vcpu_hotplug_t *)&msg->msg[0];
> +    struct vcpu_hotplug_handler_t *handler = &vcpu_hotplug_handler;
> +    ssize_t ret;
> +
> +    if ( msg->length != sizeof(vcpu_hotplug_t) )
> +        goto parse_error;
> +
> +    /* grab target vcpu from msg */
> +    handler->vcpu = req->vcpu;
> +
> +    /* determine which function to call based on msg subtype */
> +    switch ( msg->subtype ) {
> +        case CMSG_VCPU_HOTPLUG_OFF:
> +            handler->fn = (void *)&cpu_down;
> +            ret = schedule_work(&vcpu_hotplug_work);
> +            req->status = (u32) ret;
> +        break;
> +        case CMSG_VCPU_HOTPLUG_ON:
> +            handler->fn = (void *)&cpu_up;
> +            ret = schedule_work(&vcpu_hotplug_work);
> +            req->status = (u32) ret;
> +        break;
> +        default:
> +            goto parse_error;
> +    }
> +
> +    ctrl_if_send_response(msg);
> +    return;
> + parse_error:
> +    msg->length = 0;
> +    ctrl_if_send_response(msg);
> +}
> +
> +static int __init setup_vcpu_hotplug_event(void)
> +{
> +    struct vcpu_hotplug_handler_t *handler = &vcpu_hotplug_handler;
> +
> +    handler->fn = NULL;
> +    ctrl_if_register_receiver(CMSG_VCPU_HOTPLUG, vcpu_hotplug_event_handler, 0);
> +
> +    return 0;
> +}
> +
> +__initcall(setup_vcpu_hotplug_event);
> +
>  #else /* ... !CONFIG_HOTPLUG_CPU */
>  int __cpu_disable(void)
>  {
> diff -urN a/tools/python/xen/lowlevel/xu/xu.c b/tools/python/xen/lowlevel/xu/xu.c
> --- a/tools/python/xen/lowlevel/xu/xu.c       2005-06-06 22:05:28.000000000 -0500
> +++ b/tools/python/xen/lowlevel/xu/xu.c       2005-06-07 16:51:43.084342691 -0500
> @@ -744,6 +744,14 @@
>          C2P(mem_request_t, target, Int, Long);
>          C2P(mem_request_t, status, Int, Long);
>          return dict;
> +    case TYPE(CMSG_VCPU_HOTPLUG, CMSG_VCPU_HOTPLUG_OFF):
> +        C2P(vcpu_hotplug_t, vcpu, Int, Long);
> +        C2P(vcpu_hotplug_t, status, Int, Long);
> +        return dict;
> +    case TYPE(CMSG_VCPU_HOTPLUG, CMSG_VCPU_HOTPLUG_ON):
> +        C2P(vcpu_hotplug_t, vcpu, Int, Long);
> +        C2P(vcpu_hotplug_t, status, Int, Long);
> +        return dict;
>      }
>  
>      return PyString_FromStringAndSize((char *)xum->msg.msg, xum->msg.length);
> @@ -910,6 +918,14 @@
>          P2C(mem_request_t, target, u32);
>          P2C(mem_request_t, status, u32);
>          break;
> +    case TYPE(CMSG_VCPU_HOTPLUG, CMSG_VCPU_HOTPLUG_OFF):
> +        P2C(vcpu_hotplug_t, vcpu, u32);
> +        P2C(vcpu_hotplug_t, status, u32);
> +        break;
> +    case TYPE(CMSG_VCPU_HOTPLUG, CMSG_VCPU_HOTPLUG_ON):
> +        P2C(vcpu_hotplug_t, vcpu, u32);
> +        P2C(vcpu_hotplug_t, status, u32);
> +        break;
>      case TYPE(CMSG_USBIF_FE, CMSG_USBIF_FE_INTERFACE_STATUS_CHANGED):
>          P2C(usbif_fe_interface_status_changed_t, status, u32);
>          P2C(usbif_fe_interface_status_changed_t, evtchn, u16);
> diff -urN a/tools/python/xen/xend/XendClient.py b/tools/python/xen/xend/XendClient.py
> --- a/tools/python/xen/xend/XendClient.py     2005-06-06 22:05:27.000000000 -0500
> +++ b/tools/python/xen/xend/XendClient.py     2005-06-07 16:51:43.086342400 -0500
> @@ -271,6 +271,12 @@
>                               'target'    : mem_target })
>          return val
>  
> +    def xend_domain_vcpu_hotplug(self, id, vcpu, state):
> +        return self.xendPost(self.domainurl(id),
> +                            {'op'         : 'vcpu_hotplug',
> +                             'vcpu'       : vcpu,
> +                             'state'      : state })
> +
>      def xend_domain_vif_limit(self, id, vif, credit, period):
>          return self.xendPost(self.domainurl(id),
>                              { 'op'      : 'vif_limit_set',
> diff -urN a/tools/python/xen/xend/XendDomain.py b/tools/python/xen/xend/XendDomain.py
> --- a/tools/python/xen/xend/XendDomain.py     2005-06-06 22:05:32.000000000 -0500
> +++ b/tools/python/xen/xend/XendDomain.py     2005-06-07 16:51:43.090341819 -0500
> @@ -703,6 +703,10 @@
>          dominfo = self.domain_lookup(id)
>          return dominfo.mem_target_set(target)
>          
> +    def domain_vcpu_hotplug(self, id, vcpu, state):
> +        dominfo = self.domain_lookup(id)
> +        return dominfo.vcpu_hotplug(vcpu, state)
> +        
>  
>  
>  def instance():
> diff -urN a/tools/python/xen/xend/XendDomainInfo.py b/tools/python/xen/xend/XendDomainInfo.py
> --- a/tools/python/xen/xend/XendDomainInfo.py 2005-06-06 22:05:33.000000000 -0500
> +++ b/tools/python/xen/xend/XendDomainInfo.py 2005-06-07 16:51:43.092341528 -0500
> @@ -1071,6 +1071,18 @@
>              msg = messages.packMsg('mem_request_t', { 'target' : target * (1 << 8)} )
>              self.channel.writeRequest(msg)
>  
> +    def vcpu_hotplug(self, vcpu, state):
> +        """Disable or enable VCPU in domain.
> +        """
> +        if self.channel:
> +            if int(state) == 0:
> +                msg = messages.packMsg('vcpu_hotplug_off_t', { 'vcpu' : vcpu} )
> +            else:
> +                msg = messages.packMsg('vcpu_hotplug_on_t',  { 'vcpu' : vcpu} )
> +
> +            self.channel.writeRequest(msg)
> +
> +
>      def shutdown(self, reason, key=0):
>          msgtype = shutdown_messages.get(reason)
>          if not msgtype:
> diff -urN a/tools/python/xen/xend/server/SrvDomain.py b/tools/python/xen/xend/server/SrvDomain.py
> --- a/tools/python/xen/xend/server/SrvDomain.py       2005-06-06 22:05:31.000000000 -0500
> +++ b/tools/python/xen/xend/server/SrvDomain.py       2005-06-07 16:51:43.095341092 -0500
> @@ -180,6 +180,14 @@
>          val = fn(req.args, {'dom': self.dom.id})
>          return val
>  
> +    def op_vcpu_hotplug(self, op, req):
> +        fn = FormFn(self.xd.domain_vcpu_hotplug,
> +                    [['dom', 'str'],
> +                     ['vcpu', 'int'],
> +                     ['state', 'int']])
> +        val = fn(req.args, {'dom': self.dom.id})
> +        return val
> +
>      def render_POST(self, req):
>          return self.perform(req)
>          
> diff -urN a/tools/python/xen/xend/server/messages.py b/tools/python/xen/xend/server/messages.py
> --- a/tools/python/xen/xend/server/messages.py        2005-06-06 22:05:28.000000000 -0500
> +++ b/tools/python/xen/xend/server/messages.py        2005-06-07 16:51:43.125336730 -0500
> @@ -309,6 +309,24 @@
>  msg_formats.update(mem_request_formats)
>  
>  
>  #============================================================================
> +# Domain vcpu hotplug message.
> +#============================================================================
> +
> +CMSG_VCPU_HOTPLUG     = 10
> +CMSG_VCPU_HOTPLUG_OFF = 0
> +CMSG_VCPU_HOTPLUG_ON  = 1
> +
> +vcpu_hotplug_formats = {
> +    'vcpu_hotplug_off_t':
> +    (CMSG_VCPU_HOTPLUG, CMSG_VCPU_HOTPLUG_OFF),
> +
> +    'vcpu_hotplug_on_t':
> +    (CMSG_VCPU_HOTPLUG, CMSG_VCPU_HOTPLUG_ON)
> +    }
> +
> +msg_formats.update(vcpu_hotplug_formats)
> +
> +#============================================================================
>  class Msg:
>      pass
>  
> diff -urN a/tools/python/xen/xm/main.py b/tools/python/xen/xm/main.py
> --- a/tools/python/xen/xm/main.py     2005-06-06 22:05:31.000000000 -0500
> +++ b/tools/python/xen/xm/main.py     2005-06-07 16:51:43.127336439 -0500
> @@ -568,6 +568,27 @@
>  
>  xm.prog(ProgBalloon)
>  
> +
> +class ProgVcpuhotplug(Prog):
> +    group = 'domain'
> +    name  = 'vcpu_hotplug'
> +    info  = """Enable or disable a VCPU in a domain."""
> +
> +    def help(self, args):
> +        print args[0], "DOM VCPU [0|1]"
> +        print """\nRequest virtual processor VCPU to be disabled or 
> +enabled in domain DOM"""
> +
> +    def main(self, args):
> +        if len(args) != 4: self.err("%s: Invalid argument(s)" % args[0])
> +        dom = args[1]
> +        vcpu = args[2]
> +        state = args[3]
> +        server.xend_domain_vcpu_hotplug(dom, vcpu, state)
> +
> +xm.prog(ProgVcpuhotplug)
> +
> +
>  class ProgDomid(Prog):
>      group = 'domain'
>      name = 'domid'
> diff -urN a/xen/include/public/io/domain_controller.h b/xen/include/public/io/domain_controller.h
> --- a/xen/include/public/io/domain_controller.h       2005-06-06 22:05:31.000000000 -0500
> +++ b/xen/include/public/io/domain_controller.h       2005-06-07 16:51:43.139334694 -0500
> @@ -61,6 +61,7 @@
>  #define CMSG_MEM_REQUEST    7  /* Memory reservation reqs */
>  #define CMSG_USBIF_BE       8  /* USB controller backend  */
>  #define CMSG_USBIF_FE       9  /* USB controller frontend */
> +#define CMSG_VCPU_HOTPLUG  10  /* Hotplug VCPU messages   */
>  
>  
>  /******************************************************************************
>   * CONSOLE DEFINITIONS
> @@ -758,6 +759,25 @@
>  } PACKED shutdown_sysrq_t; /* 4 bytes */
>  
>  
>  /******************************************************************************
> + * VCPU HOTPLUG CONTROLS
> + */
> +
> +/*
> + * Subtypes for vcpu hotplug messages.
> + */
> +#define CMSG_VCPU_HOTPLUG_OFF   0   /* turn vcpu off */
> +#define CMSG_VCPU_HOTPLUG_ON    1   /* turn vcpu on  */
> +
> +/*
> + * CMSG_VCPU_HOTPLUG:
> + *  Indicate which vcpu's state should change
> + */
> +typedef struct {
> +    u32 vcpu;         /* 0: VCPU whose state will change */
> +    u32 status;       /* 4: Return code indicates success or failure. */
> +} PACKED vcpu_hotplug_t;
> +
> +/************************************************************
> **********
> +********
>   * MEMORY CONTROLS
>   */
>  
> 

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel


 

