
Re: [Xen-devel] [PATCH v10 13/20] x86/VPMU: When handling MSR accesses, leave fault injection to callers



> From: Boris Ostrovsky [mailto:boris.ostrovsky@xxxxxxxxxx]
> Sent: Wednesday, September 03, 2014 8:41 PM
> 
> With this patch, a return value of 1 from vpmu_do_msr() now indicates that an
> error was encountered during MSR processing (instead of indicating that the
> access was to a VPMU register).
> 
> As part of this patch we also check the validity of certain MSR accesses right
> when we determine which register is being written, as opposed to postponing
> this until later.
> 
> Signed-off-by: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>

Acked-by: Kevin Tian <kevin.tian@xxxxxxxxx>

> ---
>  xen/arch/x86/hvm/svm/svm.c        |  6 ++-
>  xen/arch/x86/hvm/svm/vpmu.c       |  6 +--
>  xen/arch/x86/hvm/vmx/vmx.c        | 24 +++++++++---
>  xen/arch/x86/hvm/vmx/vpmu_core2.c | 78 ++++++++++++++-------------------------
>  4 files changed, 53 insertions(+), 61 deletions(-)
> 
> diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
> index 8b9a34e..326cad9 100644
> --- a/xen/arch/x86/hvm/svm/svm.c
> +++ b/xen/arch/x86/hvm/svm/svm.c
> @@ -1654,7 +1654,8 @@ static int svm_msr_read_intercept(unsigned int msr, uint64_t *msr_content)
>      case MSR_AMD_FAM15H_EVNTSEL3:
>      case MSR_AMD_FAM15H_EVNTSEL4:
>      case MSR_AMD_FAM15H_EVNTSEL5:
> -        vpmu_do_rdmsr(msr, msr_content);
> +        if ( vpmu_do_rdmsr(msr, msr_content) )
> +            goto gpf;
>          break;
> 
>      case MSR_AMD64_DR0_ADDRESS_MASK:
> @@ -1805,7 +1806,8 @@ static int svm_msr_write_intercept(unsigned int msr, uint64_t msr_content)
>      case MSR_AMD_FAM15H_EVNTSEL3:
>      case MSR_AMD_FAM15H_EVNTSEL4:
>      case MSR_AMD_FAM15H_EVNTSEL5:
> -        vpmu_do_wrmsr(msr, msr_content, 0);
> +        if ( vpmu_do_wrmsr(msr, msr_content, 0) )
> +            goto gpf;
>          break;
> 
>      case MSR_IA32_MCx_MISC(4): /* Threshold register */
> diff --git a/xen/arch/x86/hvm/svm/vpmu.c b/xen/arch/x86/hvm/svm/vpmu.c
> index be3ab27..63c099c 100644
> --- a/xen/arch/x86/hvm/svm/vpmu.c
> +++ b/xen/arch/x86/hvm/svm/vpmu.c
> @@ -306,7 +306,7 @@ static int amd_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content,
>          is_pmu_enabled(msr_content) && !vpmu_is_set(vpmu, VPMU_RUNNING) )
>      {
>          if ( !acquire_pmu_ownership(PMU_OWNER_HVM) )
> -            return 1;
> +            return 0;
>          vpmu_set(vpmu, VPMU_RUNNING);
> 
>          if ( has_hvm_container_domain(v->domain) && is_msr_bitmap_on(vpmu) )
> @@ -336,7 +336,7 @@ static int amd_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content,
> 
>      /* Write to hw counters */
>      wrmsrl(msr, msr_content);
> -    return 1;
> +    return 0;
>  }
> 
>  static int amd_vpmu_do_rdmsr(unsigned int msr, uint64_t *msr_content)
> @@ -354,7 +354,7 @@ static int amd_vpmu_do_rdmsr(unsigned int msr, uint64_t *msr_content)
> 
>      rdmsrl(msr, *msr_content);
> 
> -    return 1;
> +    return 0;
>  }
> 
>  static int amd_vpmu_initialise(struct vcpu *v)
> diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
> index 6ca7c32..3c63bb0 100644
> --- a/xen/arch/x86/hvm/vmx/vmx.c
> +++ b/xen/arch/x86/hvm/vmx/vmx.c
> @@ -2076,12 +2076,17 @@ static int vmx_msr_read_intercept(unsigned int msr, uint64_t *msr_content)
>          *msr_content |= MSR_IA32_MISC_ENABLE_BTS_UNAVAIL |
>                         MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL;
>          /* Perhaps vpmu will change some bits. */
> +        /* FALLTHROUGH */
> +    case MSR_P6_PERFCTR0...MSR_P6_PERFCTR1:
> +    case MSR_P6_EVNTSEL0...MSR_P6_EVNTSEL1:
> +    case MSR_CORE_PERF_FIXED_CTR0...MSR_CORE_PERF_FIXED_CTR2:
> +    case MSR_CORE_PERF_FIXED_CTR_CTRL...MSR_CORE_PERF_GLOBAL_OVF_CTRL:
> +    case MSR_IA32_PEBS_ENABLE:
> +    case MSR_IA32_DS_AREA:
>          if ( vpmu_do_rdmsr(msr, msr_content) )
> -            goto done;
> +            goto gp_fault;
>          break;
>      default:
> -        if ( vpmu_do_rdmsr(msr, msr_content) )
> -            break;
>          if ( passive_domain_do_rdmsr(msr, msr_content) )
>              goto done;
>          switch ( long_mode_do_msr_read(msr, msr_content) )
> @@ -2254,7 +2259,7 @@ static int vmx_msr_write_intercept(unsigned int msr, uint64_t msr_content)
>          if ( msr_content & ~supported )
>          {
>              /* Perhaps some other bits are supported in vpmu. */
> -            if ( !vpmu_do_wrmsr(msr, msr_content, supported) )
> +            if ( vpmu_do_wrmsr(msr, msr_content, supported) )
>                  break;
>          }
>          if ( msr_content & IA32_DEBUGCTLMSR_LBR )
> @@ -2282,9 +2287,16 @@ static int vmx_msr_write_intercept(unsigned int msr, uint64_t msr_content)
>          if ( !nvmx_msr_write_intercept(msr, msr_content) )
>              goto gp_fault;
>          break;
> +    case MSR_P6_PERFCTR0...MSR_P6_PERFCTR1:
> +    case MSR_P6_EVNTSEL0...MSR_P6_EVNTSEL1:
> +    case MSR_CORE_PERF_FIXED_CTR0...MSR_CORE_PERF_FIXED_CTR2:
> +    case MSR_CORE_PERF_FIXED_CTR_CTRL...MSR_CORE_PERF_GLOBAL_OVF_CTRL:
> +    case MSR_IA32_PEBS_ENABLE:
> +    case MSR_IA32_DS_AREA:
> +         if ( vpmu_do_wrmsr(msr, msr_content, 0) )
> +            goto gp_fault;
> +        break;
>      default:
> -        if ( vpmu_do_wrmsr(msr, msr_content, 0) )
> -            return X86EMUL_OKAY;
>          if ( passive_domain_do_wrmsr(msr, msr_content) )
>              return X86EMUL_OKAY;
> 
> diff --git a/xen/arch/x86/hvm/vmx/vpmu_core2.c b/xen/arch/x86/hvm/vmx/vpmu_core2.c
> index 5c9fa19..8d8ce97 100644
> --- a/xen/arch/x86/hvm/vmx/vpmu_core2.c
> +++ b/xen/arch/x86/hvm/vmx/vpmu_core2.c
> @@ -468,36 +468,41 @@ static int core2_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content,
>                               IA32_DEBUGCTLMSR_BTS_OFF_USR;
>              if ( !(msr_content & ~supported) &&
>                   vpmu_is_set(vpmu, VPMU_CPU_HAS_BTS) )
> -                return 1;
> +                return 0;
>              if ( (msr_content & supported) &&
>                   !vpmu_is_set(vpmu, VPMU_CPU_HAS_BTS) )
>                  printk(XENLOG_G_WARNING
>                         "%pv: Debug Store unsupported on this CPU\n",
>                         current);
>          }
> -        return 0;
> +        return 1;
>      }
> 
>      ASSERT(!supported);
> 
> +    if ( type == MSR_TYPE_COUNTER &&
> +         (msr_content &
> +          ~((1ull << core2_get_bitwidth_fix_count()) - 1)) )
> +        /* Writing unsupported bits to a fixed counter */
> +        return 1;
> +
>      core2_vpmu_cxt = vpmu->context;
>      enabled_cntrs = vpmu->priv_context;
>      switch ( msr )
>      {
>      case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
>          core2_vpmu_cxt->global_status &= ~msr_content;
> -        return 1;
> +        return 0;
>      case MSR_CORE_PERF_GLOBAL_STATUS:
>          gdprintk(XENLOG_INFO, "Can not write readonly MSR: "
>                   "MSR_PERF_GLOBAL_STATUS(0x38E)!\n");
> -        hvm_inject_hw_exception(TRAP_gp_fault, 0);
>          return 1;
>      case MSR_IA32_PEBS_ENABLE:
>          if ( msr_content & 1 )
>              gdprintk(XENLOG_WARNING, "Guest is trying to enable PEBS, "
>                       "which is not supported.\n");
>          core2_vpmu_cxt->pebs_enable = msr_content;
> -        return 1;
> +        return 0;
>      case MSR_IA32_DS_AREA:
>          if ( vpmu_is_set(vpmu, VPMU_CPU_HAS_DS) )
>          {
> @@ -506,18 +511,21 @@ static int core2_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content,
>                  gdprintk(XENLOG_WARNING,
>                           "Illegal address for IA32_DS_AREA: %#" PRIx64 "x\n",
>                           msr_content);
> -                hvm_inject_hw_exception(TRAP_gp_fault, 0);
>                  return 1;
>              }
>              core2_vpmu_cxt->ds_area = msr_content;
>              break;
>          }
>          gdprintk(XENLOG_WARNING, "Guest setting of DTS is ignored.\n");
> -        return 1;
> +        return 0;
>      case MSR_CORE_PERF_GLOBAL_CTRL:
>          global_ctrl = msr_content;
>          break;
>      case MSR_CORE_PERF_FIXED_CTR_CTRL:
> +        if ( msr_content &
> +             ( ~((1ull << (fixed_pmc_cnt * FIXED_CTR_CTRL_BITS)) - 1)) )
> +            return 1;
> +
>          vmx_read_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL, &global_ctrl);
>          *enabled_cntrs &= ~(((1ULL << fixed_pmc_cnt) - 1) << 32);
>          if ( msr_content != 0 )
> @@ -540,6 +548,9 @@ static int core2_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content,
>              struct xen_pmu_cntr_pair *xen_pmu_cntr_pair =
>                  vpmu_reg_pointer(core2_vpmu_cxt, arch_counters);
> 
> +            if ( msr_content & (~((1ull << 32) - 1)) )
> +                return 1;
> +
>              vmx_read_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL, &global_ctrl);
> 
>              if ( msr_content & (1ULL << 22) )
> @@ -551,45 +562,17 @@ static int core2_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content,
>          }
>      }
> 
> +    if ( type != MSR_TYPE_GLOBAL )
> +        wrmsrl(msr, msr_content);
> +    else
> +        vmx_write_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL, msr_content);
> +
>      if ( (global_ctrl & *enabled_cntrs) || (core2_vpmu_cxt->ds_area != 0) )
>          vpmu_set(vpmu, VPMU_RUNNING);
>      else
>          vpmu_reset(vpmu, VPMU_RUNNING);
> 
> -    if ( type != MSR_TYPE_GLOBAL )
> -    {
> -        u64 mask;
> -        int inject_gp = 0;
> -        switch ( type )
> -        {
> -        case MSR_TYPE_ARCH_CTRL:      /* MSR_P6_EVNTSEL[0,...] */
> -            mask = ~((1ull << 32) - 1);
> -            if (msr_content & mask)
> -                inject_gp = 1;
> -            break;
> -        case MSR_TYPE_CTRL:           /* IA32_FIXED_CTR_CTRL */
> -            if  ( msr == MSR_IA32_DS_AREA )
> -                break;
> -            /* 4 bits per counter, currently 3 fixed counters implemented. */
> -            mask = ~((1ull << (fixed_pmc_cnt * FIXED_CTR_CTRL_BITS)) - 1);
> -            if (msr_content & mask)
> -                inject_gp = 1;
> -            break;
> -        case MSR_TYPE_COUNTER:        /* IA32_FIXED_CTR[0-2] */
> -            mask = ~((1ull << core2_get_bitwidth_fix_count()) - 1);
> -            if (msr_content & mask)
> -                inject_gp = 1;
> -            break;
> -        }
> -        if (inject_gp)
> -            hvm_inject_hw_exception(TRAP_gp_fault, 0);
> -        else
> -            wrmsrl(msr, msr_content);
> -    }
> -    else
> -        vmx_write_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL, msr_content);
> -
> -    return 1;
> +    return 0;
>  }
> 
>  static int core2_vpmu_do_rdmsr(unsigned int msr, uint64_t *msr_content)
> @@ -617,19 +600,14 @@ static int core2_vpmu_do_rdmsr(unsigned int msr, uint64_t *msr_content)
>              rdmsrl(msr, *msr_content);
>          }
>      }
> -    else
> +    else if ( msr == MSR_IA32_MISC_ENABLE )
>      {
>          /* Extension for BTS */
> -        if ( msr == MSR_IA32_MISC_ENABLE )
> -        {
> -            if ( vpmu_is_set(vpmu, VPMU_CPU_HAS_BTS) )
> -                *msr_content &= ~MSR_IA32_MISC_ENABLE_BTS_UNAVAIL;
> -        }
> -        else
> -            return 0;
> +        if ( vpmu_is_set(vpmu, VPMU_CPU_HAS_BTS) )
> +            *msr_content &= ~MSR_IA32_MISC_ENABLE_BTS_UNAVAIL;
>      }
> 
> -    return 1;
> +    return 0;
>  }
> 
>  static void core2_vpmu_do_cpuid(unsigned int input,
> --
> 1.8.1.4
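
For reference, here is a minimal caller-side sketch of the convention this patch establishes: vpmu_do_rdmsr()/vpmu_do_wrmsr() now return 0 on success and non-zero on error, and the MSR intercept caller is responsible for injecting the #GP fault. This is only an illustrative sketch, not a verbatim excerpt from svm.c or vmx.c; it assumes the usual Xen-internal definitions (vpmu_do_rdmsr(), hvm_inject_hw_exception(), TRAP_gp_fault, the X86EMUL_* return codes and the MSR constants), and example_msr_read_intercept() is a made-up name.

/*
 * Illustrative only -- mirrors the caller-side pattern used by
 * svm_msr_read_intercept()/vmx_msr_read_intercept() in the patch above.
 */
static int example_msr_read_intercept(unsigned int msr, uint64_t *msr_content)
{
    switch ( msr )
    {
    case MSR_AMD_FAM15H_EVNTSEL3:
    case MSR_AMD_FAM15H_EVNTSEL4:
    case MSR_AMD_FAM15H_EVNTSEL5:
        /* A non-zero return now means "error", not "VPMU register handled". */
        if ( vpmu_do_rdmsr(msr, msr_content) )
            goto gpf;
        break;

    default:
        goto gpf;
    }

    return X86EMUL_OKAY;

 gpf:
    /* Fault injection is the caller's job after this patch. */
    hvm_inject_hw_exception(TRAP_gp_fault, 0);
    return X86EMUL_EXCEPTION;
}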


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel


 

