[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [Xen-devel] [PATCH v3 2/7] x86: handle CQM resource when creating/destroying guests



On 29/11/13 14:18, Andrew Cooper wrote:
> On 29/11/13 05:48, dongxiao.xu@xxxxxxxxx wrote:
>> From: Dongxiao Xu <dongxiao.xu@xxxxxxxxx>
>>
>> Allocate an RMID for a guest when it is created. This per-guest
>> RMID will be used to monitor Cache QoS related data. The RMID will
>> be relinquished when guest is destroyed.
>>
>> Signed-off-by: Jiongxi Li <jiongxi.li@xxxxxxxxx>
>> Signed-off-by: Dongxiao Xu <dongxiao.xu@xxxxxxxxx>
>> ---
>>  xen/arch/x86/domain.c        |    8 ++++++
>>  xen/arch/x86/pqos.c          |   55 
>> ++++++++++++++++++++++++++++++++++++++++++
>>  xen/common/domctl.c          |    5 +++-
>>  xen/include/asm-x86/domain.h |    2 ++
>>  xen/include/asm-x86/pqos.h   |    5 ++++
>>  xen/include/public/domctl.h  |    3 +++
>>  xen/include/xen/sched.h      |    3 +++
>>  7 files changed, 80 insertions(+), 1 deletion(-)
>>
>> diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
>> index a3868f9..41e1fc6 100644
>> --- a/xen/arch/x86/domain.c
>> +++ b/xen/arch/x86/domain.c
>> @@ -60,6 +60,7 @@
>>  #include <xen/numa.h>
>>  #include <xen/iommu.h>
>>  #include <compat/vcpu.h>
>> +#include <asm/pqos.h>
>>  
>>  DEFINE_PER_CPU(struct vcpu *, curr_vcpu);
>>  DEFINE_PER_CPU(unsigned long, cr4);
>> @@ -579,6 +580,11 @@ int arch_domain_create(struct domain *d, unsigned int 
>> domcr_flags)
>>      tsc_set_info(d, TSC_MODE_DEFAULT, 0UL, 0, 0);
>>      spin_lock_init(&d->arch.vtsc_lock);
>>  
>> +    /* Allocate CQM RMID for guest */
>> +    d->arch.pqos_cqm_rmid = 0;
>> +    if ( system_supports_cqm() && (domcr_flags & DOMCRF_pqos_cqm) )
>> +        alloc_cqm_rmid(d);
>> +
> Should we fail domain creation if an rmid cannot be allocated?  I really
> can't decide which would be better.
>
> In other words, if the toolstack issues a build with DOMCRF_pqos_cqm,
> and the build hypercall returns success, should the toolstack reasonably
> expect everything to be set up?  I would think so.
>
> If others agree wrt this expectation, then domain creation needs to fail
> also if "(domcr_flags & DOMCRF_pqos_cqm) && !system_supports_cqm()"

On further consideration, I am not so sure that DOMCRF_pqos_cqm should
exist.

If a toolstack wants to make a domain with pqos from the start, it can
create the domain paused and issue the domctl from the next patch.  This
means that there is only one canonical way to turn on pqos, which does
give substantially more useful error information in failure cases.

~Andrew

>
>>      return 0;
>>  
>>   fail:
>> @@ -612,6 +618,8 @@ void arch_domain_destroy(struct domain *d)
>>  
>>      free_xenheap_page(d->shared_info);
>>      cleanup_domain_irq_mapping(d);
>> +
>> +    free_cqm_rmid(d);
>>  }
>>  
>>  unsigned long pv_guest_cr4_fixup(const struct vcpu *v, unsigned long 
>> guest_cr4)
>> diff --git a/xen/arch/x86/pqos.c b/xen/arch/x86/pqos.c
>> index e2172c4..1148f3b 100644
>> --- a/xen/arch/x86/pqos.c
>> +++ b/xen/arch/x86/pqos.c
>> @@ -20,6 +20,7 @@
>>   */
>>  #include <asm/processor.h>
>>  #include <xen/init.h>
>> +#include <xen/spinlock.h>
>>  #include <asm/pqos.h>
>>  
>>  static bool_t __initdata pqos_enabled = 1;
>> @@ -31,6 +32,7 @@ integer_param("cqm_rmid_count", cqm_rmid_count);
>>  unsigned int cqm_upscaling_factor = 0;
>>  bool_t cqm_enabled = 0;
>>  domid_t *cqm_rmid_array = NULL;
>> +static DEFINE_SPINLOCK(cqm_lock);
>>  
>>  static void __init init_cqm(void)
>>  {
>> @@ -84,6 +86,59 @@ void __init init_platform_qos(void)
>>      init_qos_monitor();
>>  }
>>  
>> +bool_t system_supports_cqm(void)
>> +{
>> +    return cqm_enabled;
>> +}
>> +
>> +int alloc_cqm_rmid(struct domain *d)
>> +{
>> +    int rc = 0;
>> +    unsigned int rmid;
>> +    unsigned long flags;
>> +
>> +    ASSERT(system_supports_cqm());
>> +
>> +    spin_lock_irqsave(&cqm_lock, flags);
>> +    /* RMID=0 is reserved, enumerate from 1 */
>> +    for ( rmid = 1; rmid < cqm_rmid_count; rmid++ )
>> +    {
>> +        if ( cqm_rmid_array[rmid] != DOMID_INVALID)
>> +            continue;
>> +
>> +        cqm_rmid_array[rmid] = d->domain_id;
>> +        break;
>> +    }
>> +    spin_unlock_irqrestore(&cqm_lock, flags);
>> +
>> +    /* No CQM RMID available, assign RMID=0 by default */
>> +    if ( rmid == cqm_rmid_count )
>> +    {
>> +        rmid = 0;
>> +        rc = -1;
>> +    }
>> +
>> +    d->arch.pqos_cqm_rmid = rmid;
>> +
>> +    return rc;
>> +}
>> +
>> +void free_cqm_rmid(struct domain *d)
>> +{
>> +    unsigned int rmid = d->arch.pqos_cqm_rmid;
>> +    unsigned long flags;
>> +
>> +    /* We do not free system reserved "RMID=0" */
>> +    if ( rmid == 0 )
>> +        return;
>> +
>> +    spin_lock_irqsave(&cqm_lock, flags);
>> +    cqm_rmid_array[rmid] = DOMID_INVALID;
>> +    spin_unlock_irqrestore(&cqm_lock, flags);
> Does this need the spinlock? Given rmid in the range 1 to max_rmids,
> there can't be any competition over who owns the entry, and setting it
> back to DOMID_INVALID won't race with the allocation loop.
>
>> +
>> +    d->arch.pqos_cqm_rmid = 0;
>> +}
>> +
>>  /*
>>   * Local variables:
>>   * mode: C
>> diff --git a/xen/common/domctl.c b/xen/common/domctl.c
>> index 904d27b..1c2e320 100644
>> --- a/xen/common/domctl.c
>> +++ b/xen/common/domctl.c
>> @@ -425,7 +425,8 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) 
>> u_domctl)
>>                 | XEN_DOMCTL_CDF_pvh_guest
>>                 | XEN_DOMCTL_CDF_hap
>>                 | XEN_DOMCTL_CDF_s3_integrity
>> -               | XEN_DOMCTL_CDF_oos_off)) )
>> +               | XEN_DOMCTL_CDF_oos_off
>> +               | XEN_DOMCTL_CDF_pqos_cqm)) )
> If you move the final bracket onto a new line now, future additions will
> be a single line addition rather than one removal and two additions.
>
> I am not sure what the precedent is with that in terms of the Xen
> coding style.
>
> ~Andrew
>
>>              break;
>>  
>>          dom = op->domain;
>> @@ -467,6 +468,8 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) 
>> u_domctl)
>>              domcr_flags |= DOMCRF_s3_integrity;
>>          if ( op->u.createdomain.flags & XEN_DOMCTL_CDF_oos_off )
>>              domcr_flags |= DOMCRF_oos_off;
>> +        if ( op->u.createdomain.flags & XEN_DOMCTL_CDF_pqos_cqm )
>> +            domcr_flags |= DOMCRF_pqos_cqm;
>>  
>>          d = domain_create(dom, domcr_flags, op->u.createdomain.ssidref);
>>          if ( IS_ERR(d) )
>> diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
>> index 9d39061..9487251 100644
>> --- a/xen/include/asm-x86/domain.h
>> +++ b/xen/include/asm-x86/domain.h
>> @@ -313,6 +313,8 @@ struct arch_domain
>>      spinlock_t e820_lock;
>>      struct e820entry *e820;
>>      unsigned int nr_e820;
>> +
>> +    unsigned int pqos_cqm_rmid;       /* CQM RMID assigned to the domain */
>>  } __cacheline_aligned;
>>  
>>  #define has_arch_pdevs(d)    (!list_empty(&(d)->arch.pdev_list))
>> diff --git a/xen/include/asm-x86/pqos.h b/xen/include/asm-x86/pqos.h
>> index e8caca2..c54905b 100644
>> --- a/xen/include/asm-x86/pqos.h
>> +++ b/xen/include/asm-x86/pqos.h
>> @@ -20,6 +20,7 @@
>>   */
>>  #ifndef ASM_PQOS_H
>>  #define ASM_PQOS_H
>> +#include <xen/sched.h>
>>  
>>  /* QoS Resource Type Enumeration */
>>  #define QOS_MONITOR_TYPE_L3            0x2
>> @@ -29,4 +30,8 @@
>>  
>>  void init_platform_qos(void);
>>  
>> +bool_t system_supports_cqm(void);
>> +int alloc_cqm_rmid(struct domain *d);
>> +void free_cqm_rmid(struct domain *d);
>> +
>>  #endif
>> diff --git a/xen/include/public/domctl.h b/xen/include/public/domctl.h
>> index 01a3652..47a850a 100644
>> --- a/xen/include/public/domctl.h
>> +++ b/xen/include/public/domctl.h
>> @@ -62,6 +62,9 @@ struct xen_domctl_createdomain {
>>   /* Is this a PVH guest (as opposed to an HVM or PV guest)? */
>>  #define _XEN_DOMCTL_CDF_pvh_guest     4
>>  #define XEN_DOMCTL_CDF_pvh_guest      (1U<<_XEN_DOMCTL_CDF_pvh_guest)
>> + /* Enable pqos-cqm? */
>> +#define _XEN_DOMCTL_CDF_pqos_cqm      5
>> +#define XEN_DOMCTL_CDF_pqos_cqm       (1U<<_XEN_DOMCTL_CDF_pqos_cqm)
>>      uint32_t flags;
>>  };
>>  typedef struct xen_domctl_createdomain xen_domctl_createdomain_t;
>> diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
>> index cbdf377..3a42656 100644
>> --- a/xen/include/xen/sched.h
>> +++ b/xen/include/xen/sched.h
>> @@ -507,6 +507,9 @@ struct domain *domain_create(
>>   /* DOMCRF_pvh: Create PV domain in HVM container. */
>>  #define _DOMCRF_pvh             5
>>  #define DOMCRF_pvh              (1U<<_DOMCRF_pvh)
>> + /* DOMCRF_pqos_cqm: Create a domain with CQM support */
>> +#define _DOMCRF_pqos_cqm        6
>> +#define DOMCRF_pqos_cqm         (1U<<_DOMCRF_pqos_cqm)
>>  
>>  /*
>>   * rcu_lock_domain_by_id() is more efficient than get_domain_by_id().
>
> _______________________________________________
> Xen-devel mailing list
> Xen-devel@xxxxxxxxxxxxx
> http://lists.xen.org/xen-devel


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.