
Re: [Xen-devel] [PATCH v4 17/26] x86/pv: Provide custom cpumasks for PV domains



On Wed, Mar 23, 2016 at 04:36:20PM +0000, Andrew Cooper wrote:
> And use them in preference to cpumask_defaults on context switch.  HVM domains

Nit: double space before "HVM".
> must not be masked (to avoid interfering with cpuid calls within the guest),
> so always lazily context switch to the host default.

Could you please add:
"The host default is set by the cpuid_mask_* boot parameters."
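
(Context for readers: those parameters feed the boot-time defaults. A
rough sketch of the seeding below -- the packing into ._1cd and the
exact option names are my assumptions, not quotes from this series:

    /* Sketch: the defaults start from the values currently in the
     * masking MSRs and are ANDed down by any cpuid_mask_* command
     * line options. */
    ecx &= opt_cpuid_mask_ecx;
    edx &= opt_cpuid_mask_edx;
    cpuidmask_defaults._1cd = ((uint64_t)edx << 32) | ecx;

so a PV domain without custom masks simply levels to whatever the
admin asked for on the command line.)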

> 
> Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
> Reviewed-by: Jan Beulich <JBeulich@xxxxxxxx>
> ---
> v2:
>  * s/cpumasks/cpuidmasks/
>  * Use structure assignment
>  * Fix error path in arch_domain_create()
> v3:
>  * Indentation fixes.
>  * Only allocate PV cpuidmasks if the host has cpumasks to use.
> ---
>  xen/arch/x86/cpu/amd.c       |  4 +++-
>  xen/arch/x86/cpu/intel.c     |  5 ++++-
>  xen/arch/x86/domain.c        | 14 ++++++++++++++
>  xen/include/asm-x86/domain.h |  2 ++
>  4 files changed, 23 insertions(+), 2 deletions(-)
> 
> diff --git a/xen/arch/x86/cpu/amd.c b/xen/arch/x86/cpu/amd.c
> index 484d4b0..8cb04f0 100644
> --- a/xen/arch/x86/cpu/amd.c
> +++ b/xen/arch/x86/cpu/amd.c
> @@ -206,7 +206,9 @@ static void __init noinline probe_masking_msrs(void)
>  static void amd_ctxt_switch_levelling(const struct domain *nextd)
>  {
>       struct cpuidmasks *these_masks = &this_cpu(cpuidmasks);
> -     const struct cpuidmasks *masks = &cpuidmask_defaults;
> +     const struct cpuidmasks *masks =
> +             (nextd && is_pv_domain(nextd) && nextd->arch.pv_domain.cpuidmasks)
> +             ? nextd->arch.pv_domain.cpuidmasks : &cpuidmask_defaults;
>  
>  #define LAZY(cap, msr, field)                                                \
>       ({                                                              \
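
(For readers: the LAZY() macro, quoted more fully in the intel.c hunk
below, boils down to "only write the masking MSR when the wanted value
actually changes".  A sketch of roughly what one expansion does -- the
MSR name and wrmsr_amd() helper are my assumptions from reading the
series, not quoted from this patch:

    /* Sketch of LAZY(LCAP_1cd, MSR_K8_FEATURE_MASK, _1cd): skip the
     * (slow) MSR write if this CPU already holds the wanted mask, or
     * if the hardware lacks that masking MSR entirely. */
    if ( unlikely(these_masks->_1cd != masks->_1cd) &&
         ((levelling_caps & LCAP_1cd) == LCAP_1cd) )
    {
        wrmsr_amd(MSR_K8_FEATURE_MASK, masks->_1cd);
        these_masks->_1cd = masks->_1cd;
    }
)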
> diff --git a/xen/arch/x86/cpu/intel.c b/xen/arch/x86/cpu/intel.c
> index 71b1199..00a9987 100644
> --- a/xen/arch/x86/cpu/intel.c
> +++ b/xen/arch/x86/cpu/intel.c
> @@ -154,13 +154,16 @@ static void __init probe_masking_msrs(void)
>  static void intel_ctxt_switch_levelling(const struct domain *nextd)
>  {
>       struct cpuidmasks *these_masks = &this_cpu(cpuidmasks);
> -     const struct cpuidmasks *masks = &cpuidmask_defaults;
> +     const struct cpuidmasks *masks;
>  
>       if (cpu_has_cpuid_faulting) {
>               set_cpuid_faulting(nextd && is_pv_domain(nextd));
>               return;
>       }
>  
> +     masks = (nextd && is_pv_domain(nextd) && nextd->arch.pv_domain.cpuidmasks)
> +             ? nextd->arch.pv_domain.cpuidmasks : &cpuidmask_defaults;
> +
>  #define LAZY(msr, field)                                             \
>       ({                                                              \
>               if (unlikely(these_masks->field != masks->field) &&     \
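
(Also worth spelling out: with CPUID faulting available, the per-domain
masks are never consulted at all -- guest CPUID traps to Xen instead,
which is strictly more flexible than masking.  Architecturally the
control is MSR 0x140 bit 0, probed via MSR_PLATFORM_INFO bit 31.  A
simplified sketch; names are assumed, and the real helper presumably
caches the current state to avoid redundant writes:

    #define MSR_MISC_FEATURES_ENABLES   0x00000140
    #define MISC_ENABLES_CPUID_FAULTING (1ULL << 0)

    static void set_cpuid_faulting_sketch(bool enable)
    {
        wrmsrl(MSR_MISC_FEATURES_ENABLES,
               enable ? MISC_ENABLES_CPUID_FAULTING : 0);
    }
)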
> diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
> index abc7194..d0d9773 100644
> --- a/xen/arch/x86/domain.c
> +++ b/xen/arch/x86/domain.c
> @@ -577,6 +577,14 @@ int arch_domain_create(struct domain *d, unsigned int domcr_flags,
>              goto fail;
>          clear_page(d->arch.pv_domain.gdt_ldt_l1tab);
>  
> +        if ( levelling_caps & ~LCAP_faulting )
> +        {
> +            d->arch.pv_domain.cpuidmasks = xmalloc(struct cpuidmasks);
> +            if ( !d->arch.pv_domain.cpuidmasks )
> +                goto fail;
> +            *d->arch.pv_domain.cpuidmasks = cpuidmask_defaults;
> +        }
> +
>          rc = create_perdomain_mapping(d, GDT_LDT_VIRT_START,
>                                        GDT_LDT_MBYTES << (20 - PAGE_SHIFT),
>                                        NULL, NULL);
> @@ -672,7 +680,10 @@ int arch_domain_create(struct domain *d, unsigned int domcr_flags,
>          paging_final_teardown(d);
>      free_perdomain_mappings(d);
>      if ( is_pv_domain(d) )
> +    {
> +        xfree(d->arch.pv_domain.cpuidmasks);
>          free_xenheap_page(d->arch.pv_domain.gdt_ldt_l1tab);
> +    }
>      psr_domain_free(d);
>      return rc;
>  }
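
(One observation on the error path: the xfree() can be called
unconditionally because xfree(NULL) is a no-op, so no extra "did we
allocate" flag is needed for the levelling_caps-gated allocation above.
The idiom, for reference:

    /* Sketch: unconditional cleanup relies on xfree() tolerating
     * NULL, matching free() semantics. */
    xfree(d->arch.pv_domain.cpuidmasks);
    d->arch.pv_domain.cpuidmasks = NULL;
)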
> @@ -692,7 +703,10 @@ void arch_domain_destroy(struct domain *d)
>  
>      free_perdomain_mappings(d);
>      if ( is_pv_domain(d) )
> +    {
>          free_xenheap_page(d->arch.pv_domain.gdt_ldt_l1tab);
> +        xfree(d->arch.pv_domain.cpuidmasks);
> +    }
>  
>      free_xenheap_page(d->shared_info);
>      cleanup_domain_irq_mapping(d);
> diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
> index de60def..90f021f 100644
> --- a/xen/include/asm-x86/domain.h
> +++ b/xen/include/asm-x86/domain.h
> @@ -252,6 +252,8 @@ struct pv_domain
>  
>      /* map_domain_page() mapping cache. */
>      struct mapcache_domain mapcache;
> +
> +    struct cpuidmasks *cpuidmasks;
>  };
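
(For context: struct cpuidmasks, introduced earlier in this series, is
just one 64-bit value per masking MSR.  From my reading it is shaped
roughly as below -- field names approximate, not quoted:

    struct cpuidmasks {
        uint64_t _1cd;   /* leaf 1, ecx/edx */
        uint64_t e1cd;   /* extended leaf 0x80000001, ecx/edx */
        uint64_t Da1;    /* leaf 0xD, subleaf 1, eax */
        uint64_t _7ab0;  /* leaf 7, subleaf 0, eax/ebx */
        uint64_t _6c;    /* leaf 6, ecx */
    };

The structure assignment in arch_domain_create() above then copies the
whole set of defaults in one go.)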
>  
>  struct monitor_write_data {
> -- 
> 2.1.4
