
Re: [Xen-devel] [PATCH 4/8] x86: dynamically attach/detach CQM service for a guest


  • To: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
  • From: "Xu, Dongxiao" <dongxiao.xu@xxxxxxxxx>
  • Date: Wed, 20 Nov 2013 13:19:34 +0000
  • Accept-language: en-US
  • Cc: "xen-devel@xxxxxxxxxxxxx" <xen-devel@xxxxxxxxxxxxx>
  • Delivery-date: Wed, 20 Nov 2013 13:19:45 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xen.org>
  • Thread-index: AQHO5eX0/XHG7y6olkmsKH0leckmgZouEf8Q
  • Thread-topic: [Xen-devel] [PATCH 4/8] x86: dynamically attach/detach CQM service for a guest

> -----Original Message-----
> From: Andrew Cooper [mailto:andrew.cooper3@xxxxxxxxxx]
> Sent: Wednesday, November 20, 2013 7:45 PM
> To: Xu, Dongxiao
> Cc: xen-devel@xxxxxxxxxxxxx
> Subject: Re: [Xen-devel] [PATCH 4/8] x86: dynamically attach/detach CQM service for a guest
> 
> On 20/11/13 03:27, dongxiao.xu@xxxxxxxxx wrote:
> > From: Dongxiao Xu <dongxiao.xu@xxxxxxxxx>
> >
> > Add hypervisor-side support for dynamically attaching and detaching the
> > CQM service for a given guest.
> >
> > Signed-off-by: Jiongxi Li <jiongxi.li@xxxxxxxxx>
> > Signed-off-by: Dongxiao Xu <dongxiao.xu@xxxxxxxxx>
> > ---
> >  xen/arch/x86/domctl.c       |   66 +++++++++++++++++++++++++++++++++++++++++++
> >  xen/include/public/domctl.h |   15 ++++++++++
> >  2 files changed, 81 insertions(+)
> >
> > diff --git a/xen/arch/x86/domctl.c b/xen/arch/x86/domctl.c
> > index f7e4586..df6a373 100644
> > --- a/xen/arch/x86/domctl.c
> > +++ b/xen/arch/x86/domctl.c
> > @@ -35,6 +35,7 @@
> >  #include <asm/mem_sharing.h>
> >  #include <asm/xstate.h>
> >  #include <asm/debugger.h>
> > +#include <asm/pqos.h>
> >
> >  static int gdbsx_guest_mem_io(
> >      domid_t domid, struct xen_domctl_gdbsx_memio *iop)
> > @@ -1223,6 +1224,71 @@ long arch_do_domctl(
> >      }
> >      break;
> >
> > +    case XEN_DOMCTL_attach_pqos:
> > +    {
> > +        struct domain *d;
> > +
> > +        if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL )
> > +        {
> > +            ret = -EBUSY;
> > +            break;
> > +        }
> 
> do_domctl has done this for us.  There is also no need to shadow 'd'.

OK, I will remove the redundant locking logic.
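
For reference, a rough sketch of how the case could then look (untested, and assuming the 'd' that do_domctl() looks up and passes into arch_do_domctl(), as the existing iommu_do_domctl() default case already uses):

    case XEN_DOMCTL_attach_pqos:
        /* 'd' has already been looked up and locked by do_domctl(). */
        if ( domctl->u.qos_res.flags & XEN_DOMCTL_ADF_pqos_cqm )
        {
            if ( !system_support_cqm() )
                ret = -ENODEV;
            else if ( d->arch.pqos_cqm_rmid > 0 )
                ret = -EINVAL;
            else
            {
                d->arch.pqos_cqm_rmid = alloc_cqm_resource(d->domain_id);
                if ( d->arch.pqos_cqm_rmid <= 0 )
                {
                    /* Allocation failed: reset to the default (unused) value. */
                    d->arch.pqos_cqm_rmid = 0;
                    ret = -ENODEV;
                }
                else
                    ret = 0;
            }
        }
        else
            ret = -EINVAL;
        break;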

> 
> > +
> > +        if ( domctl->u.qos_res.flags & XEN_DOMCTL_ADF_pqos_cqm )
> > +        {
> > +            if ( !system_support_cqm() )
> > +                ret = -ENODEV;
> 
> Surely this is generic, rather than conditional on the caller specifying
> XEN_DOMCTL_ADF_pqos_cqm?

CQM is just one type of monitoring resource; there may be other types later, so the system_support_cqm() check applies only when the CQM flag is set.
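
Just to illustrate (hypothetical example, not part of this series): a later monitoring resource would only add another flag bit in the same field, e.g.

 /* Attach or detach flag for cqm */
#define _XEN_DOMCTL_ADF_pqos_cqm      0
#define XEN_DOMCTL_ADF_pqos_cqm       (1U<<_XEN_DOMCTL_ADF_pqos_cqm)
 /* Hypothetical flag for a future monitoring resource, e.g. memory bandwidth */
#define _XEN_DOMCTL_ADF_pqos_mbm      1
#define XEN_DOMCTL_ADF_pqos_mbm       (1U<<_XEN_DOMCTL_ADF_pqos_mbm)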

> 
> > +            else if ( d->arch.pqos_cqm_rmid > 0 )
> > +                ret = -EINVAL;
> > +            else
> > +            {
> > +                ret = 0;
> > +                d->arch.pqos_cqm_rmid = alloc_cqm_resource(d->domain_id);
> > +                if ( d->arch.pqos_cqm_rmid <= 0 )
> > +                {
> > +                    /* set to default value */
> > +                    d->arch.pqos_cqm_rmid = 0;
> > +                    ret = -ENODEV;
> 
> Perhaps E2BIG or EBUSY ?  This error can be fixed by detaching cqm from
> other domains.

OK, maybe EBUSY is a better choice.
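
I.e. roughly (sketch of the changed error path only):

                if ( d->arch.pqos_cqm_rmid <= 0 )
                {
                    /* No free RMID; detaching CQM from another domain frees one. */
                    d->arch.pqos_cqm_rmid = 0;
                    ret = -EBUSY;
                }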

> 
> > +                }
> > +            }
> > +        }
> > +        else
> > +            ret = -EINVAL;
> > +
> > +        rcu_unlock_domain(d);
> > +    }
> > +    break;
> > +
> > +    case XEN_DOMCTL_detach_pqos:
> > +    {
> > +        struct domain *d;
> > +
> > +        if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL )
> > +        {
> > +            ret = -EBUSY;
> > +            break;
> > +        }
> > +
> > +        if ( domctl->u.qos_res.flags & XEN_DOMCTL_ADF_pqos_cqm )
> > +        {
> > +            if ( !system_support_cqm() )
> > +                ret = -ENODEV;
> > +            else if ( d->arch.pqos_cqm_rmid > 0 )
> > +            {
> > +                free_cqm_resource(d->domain_id);
> > +                d->arch.pqos_cqm_rmid = 0;
> > +                ret = 0;
> > +            }
> > +            else
> > +                ret = -EINVAL;
> > +        }
> > +        else
> > +            ret = -EINVAL;
> > +
> > +        rcu_unlock_domain(d);
> > +    }
> > +    break;
> > +
> >      default:
> >          ret = iommu_do_domctl(domctl, d, u_domctl);
> >          break;
> > diff --git a/xen/include/public/domctl.h b/xen/include/public/domctl.h
> > index 47a850a..4fe21db 100644
> > --- a/xen/include/public/domctl.h
> > +++ b/xen/include/public/domctl.h
> > @@ -872,6 +872,17 @@ struct xen_domctl_set_max_evtchn {
> >  typedef struct xen_domctl_set_max_evtchn xen_domctl_set_max_evtchn_t;
> >  DEFINE_XEN_GUEST_HANDLE(xen_domctl_set_max_evtchn_t);
> >
> > +/* XEN_DOMCTL_attach_pqos */
> > +/* XEN_DOMCTL_detach_pqos */
> > +struct xen_domctl_qos_resource {
> > + /* Attach or detach flag for cqm */
> > +#define _XEN_DOMCTL_ADF_pqos_cqm      0
> > +#define XEN_DOMCTL_ADF_pqos_cqm       (1U<<_XEN_DOMCTL_ADF_pqos_cqm)
> > +    uint32_t flags;
> 
> The implementation semantics of this flag are redundant.  You
> differentiate attach/detach both on the domctl subop and on the flag in
> this structure, leaving two invalid states.
> 
> It might be better to have a XEN_DOMCTL_pqos_op and a sub-command, if
> more of these hypercalls are on their way.

The flag identifies the type of QoS monitoring resource; CQM is just one example.
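
To make sure I understand the XEN_DOMCTL_pqos_op suggestion, such a consolidated interface could look roughly like this (hypothetical names, for illustration only):

/* XEN_DOMCTL_pqos_op: single domctl with attach/detach/list sub-commands */
struct xen_domctl_pqos_op {
#define XEN_DOMCTL_PQOS_ATTACH   0
#define XEN_DOMCTL_PQOS_DETACH   1
#define XEN_DOMCTL_PQOS_LIST     2
    uint32_t cmd;    /* IN: sub-command */
    uint32_t flags;  /* IN: resource type, e.g. XEN_DOMCTL_ADF_pqos_cqm */
};
typedef struct xen_domctl_pqos_op xen_domctl_pqos_op_t;
DEFINE_XEN_GUEST_HANDLE(xen_domctl_pqos_op_t);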

> 
> > +};
> > +typedef struct xen_domctl_qos_resource xen_domctl_qos_resource_t;
> > +DEFINE_XEN_GUEST_HANDLE(xen_domctl_qos_resource_t);
> > +
> >  struct xen_domctl {
> >      uint32_t cmd;
> >  #define XEN_DOMCTL_createdomain                   1
> > @@ -941,6 +952,9 @@ struct xen_domctl {
> >  #define XEN_DOMCTL_setnodeaffinity               68
> >  #define XEN_DOMCTL_getnodeaffinity               69
> >  #define XEN_DOMCTL_set_max_evtchn                70
> > +#define XEN_DOMCTL_attach_pqos                   71
> > +#define XEN_DOMCTL_detach_pqos                   72
> > +#define XEN_DOMCTL_list_pqos                     73
> 
> You introduce this list_pqos domctl with no implementation.

Yes, its implementation should be in patch 6/8.

Thanks,
Dongxiao

> 
> >  #define XEN_DOMCTL_gdbsx_guestmemio            1000
> >  #define XEN_DOMCTL_gdbsx_pausevcpu             1001
> >  #define XEN_DOMCTL_gdbsx_unpausevcpu           1002
> > @@ -1001,6 +1015,7 @@ struct xen_domctl {
> >          struct xen_domctl_set_broken_page_p2m set_broken_page_p2m;
> >          struct xen_domctl_gdbsx_pauseunp_vcpu gdbsx_pauseunp_vcpu;
> >          struct xen_domctl_gdbsx_domstatus   gdbsx_domstatus;
> > +        struct xen_domctl_qos_resource      qos_res;
> >          uint8_t                             pad[128];
> >      } u;
> >  };


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel


 

