[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [Xen-devel] [PATCH v24 10/15] x86/VPMU: Use pre-computed masks when checking validity of MSRs



> From: Boris Ostrovsky [mailto:boris.ostrovsky@xxxxxxxxxx]
> Sent: Wednesday, June 10, 2015 11:04 PM
> 
> No need to compute those masks on every MSR access.
> 
> Also, when checking MSR_P6_EVNTSELx registers make sure that bit 21
> (which is a reserved bit) is not set.
> 
> Signed-off-by: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>

Acked-by: Kevin Tian <kevin.tian@xxxxxxxxx>

> ---
> Changes in v24:
> * Mentioned in the commit message that newly-checked bit 21 is reserved
> 
>  xen/arch/x86/hvm/vmx/vpmu_core2.c | 28 ++++++++++++++++++----------
>  1 file changed, 18 insertions(+), 10 deletions(-)
> 
> diff --git a/xen/arch/x86/hvm/vmx/vpmu_core2.c
> b/xen/arch/x86/hvm/vmx/vpmu_core2.c
> index 089154e..166277a 100644
> --- a/xen/arch/x86/hvm/vmx/vpmu_core2.c
> +++ b/xen/arch/x86/hvm/vmx/vpmu_core2.c
> @@ -80,9 +80,16 @@ static bool_t __read_mostly full_width_write;
>  #define FIXED_CTR_CTRL_BITS 4
>  #define FIXED_CTR_CTRL_MASK ((1 << FIXED_CTR_CTRL_BITS) - 1)
> 
> +#define ARCH_CNTR_ENABLED   (1ULL << 22)
> +
>  /* Number of general-purpose and fixed performance counters */
>  static unsigned int __read_mostly arch_pmc_cnt, fixed_pmc_cnt;
> 
> +/* Masks used for testing whether an MSR is valid */
> +#define ARCH_CTRL_MASK  (~((1ull << 32) - 1) | (1ull << 21))
> +static uint64_t __read_mostly fixed_ctrl_mask, fixed_counters_mask;
> +static uint64_t __read_mostly global_ovf_ctrl_mask;
> +
>  /*
>   * QUIRK to workaround an issue on various family 6 cpus.
>   * The issue leads to endless PMC interrupt loops on the processor.
> @@ -479,9 +486,7 @@ static int core2_vpmu_do_wrmsr(unsigned int msr, uint64_t
> msr_content,
> 
>      ASSERT(!supported);
> 
> -    if ( type == MSR_TYPE_COUNTER &&
> -         (msr_content &
> -          ~((1ull << core2_get_bitwidth_fix_count()) - 1)) )
> +    if ( (type == MSR_TYPE_COUNTER) && (msr_content & fixed_counters_mask) )
>          /* Writing unsupported bits to a fixed counter */
>          return -EINVAL;
> 
> @@ -490,9 +495,7 @@ static int core2_vpmu_do_wrmsr(unsigned int msr, uint64_t
> msr_content,
>      switch ( msr )
>      {
>      case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
> -        if ( msr_content & ~(0xC000000000000000 |
> -                             (((1ULL << fixed_pmc_cnt) - 1) << 32) |
> -                             ((1ULL << arch_pmc_cnt) - 1)) )
> +        if ( msr_content & global_ovf_ctrl_mask )
>              return -EINVAL;
>          core2_vpmu_cxt->global_status &= ~msr_content;
>          wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, msr_content);
> @@ -526,8 +529,7 @@ static int core2_vpmu_do_wrmsr(unsigned int msr, uint64_t
> msr_content,
>          core2_vpmu_cxt->global_ctrl = msr_content;
>          break;
>      case MSR_CORE_PERF_FIXED_CTR_CTRL:
> -        if ( msr_content &
> -             ( ~((1ull << (fixed_pmc_cnt * FIXED_CTR_CTRL_BITS)) - 1)) )
> +        if ( msr_content & fixed_ctrl_mask )
>              return -EINVAL;
> 
>          if ( has_hvm_container_vcpu(v) )
> @@ -556,7 +558,7 @@ static int core2_vpmu_do_wrmsr(unsigned int msr, uint64_t
> msr_content,
>              struct xen_pmu_cntr_pair *xen_pmu_cntr_pair =
>                  vpmu_reg_pointer(core2_vpmu_cxt, arch_counters);
> 
> -            if ( msr_content & (~((1ull << 32) - 1)) )
> +            if ( msr_content & ARCH_CTRL_MASK )
>                  return -EINVAL;
> 
>              if ( has_hvm_container_vcpu(v) )
> @@ -565,7 +567,7 @@ static int core2_vpmu_do_wrmsr(unsigned int msr, uint64_t
> msr_content,
>              else
>                  rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 
> core2_vpmu_cxt->global_ctrl);
> 
> -            if ( msr_content & (1ULL << 22) )
> +            if ( msr_content & ARCH_CNTR_ENABLED )
>                  *enabled_cntrs |= 1ULL << tmp;
>              else
>                  *enabled_cntrs &= ~(1ULL << tmp);
> @@ -915,6 +917,12 @@ int __init core2_vpmu_init(void)
>      rdmsrl(MSR_IA32_PERF_CAPABILITIES, caps);
>      full_width_write = (caps >> 13) & 1;
> 
> +    fixed_ctrl_mask = ~((1ull << (fixed_pmc_cnt * FIXED_CTR_CTRL_BITS)) - 1);
> +    fixed_counters_mask = ~((1ull << core2_get_bitwidth_fix_count()) - 1);
> +    global_ovf_ctrl_mask = ~(0xC000000000000000 |
> +                             (((1ULL << fixed_pmc_cnt) - 1) << 32) |
> +                             ((1ULL << arch_pmc_cnt) - 1));
> +
>      check_pmc_quirk();
> 
>      if ( sizeof(struct xen_pmu_data) + sizeof(uint64_t) * fixed_pmc_cnt +
> --
> 1.8.1.4


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.