[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [Xen-devel] [PATCH v2 10/13] x86/PMU: Add support for PMU registers handling on PV guests



Am Freitag 20 September 2013, 05:42:09 schrieb Boris Ostrovsky:
> Intercept accesses to PMU MSRs and LVTPC APIC vector (only
> APIC_LVT_MASKED bit is processed) and process them in VPMU
> module.
> 
> Dump VPMU state for all domains (HVM and PV) when requested.
> 
> Signed-off-by: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>
> ---
>  xen/arch/x86/domain.c             |  3 +-
>  xen/arch/x86/hvm/vmx/vpmu_core2.c | 90 
> ++++++++++++++++++++++++++++++---------
>  xen/arch/x86/hvm/vpmu.c           | 16 +++++++
>  xen/arch/x86/traps.c              | 39 ++++++++++++++++-
>  xen/include/public/xenpmu.h       |  1 +
>  5 files changed, 125 insertions(+), 24 deletions(-)
> 
> diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
> index e119d7b..36f4192 100644
> --- a/xen/arch/x86/domain.c
> +++ b/xen/arch/x86/domain.c
> @@ -1940,8 +1940,7 @@ void arch_dump_vcpu_info(struct vcpu *v)
>  {
>      paging_dump_vcpu_info(v);
>  
> -    if ( is_hvm_vcpu(v) )
> -        vpmu_dump(v);
> +    vpmu_dump(v);
>  }
>  
>  void domain_cpuid(
> diff --git a/xen/arch/x86/hvm/vmx/vpmu_core2.c 
> b/xen/arch/x86/hvm/vmx/vpmu_core2.c
> index 5726610..ebbb516 100644
> --- a/xen/arch/x86/hvm/vmx/vpmu_core2.c
> +++ b/xen/arch/x86/hvm/vmx/vpmu_core2.c
> @@ -27,6 +27,7 @@
>  #include <asm/regs.h>
>  #include <asm/types.h>
>  #include <asm/apic.h>
> +#include <asm/traps.h>
>  #include <asm/msr.h>
>  #include <asm/msr-index.h>
>  #include <asm/hvm/support.h>
> @@ -281,6 +282,9 @@ static inline void __core2_vpmu_save(struct vcpu *v)
>          rdmsrl(MSR_CORE_PERF_FIXED_CTR0 + i, 
> core2_vpmu_cxt->fix_counters[i]);
>      for ( i = 0; i < arch_pmc_cnt; i++ )
>          rdmsrl(MSR_IA32_PERFCTR0 + i, 
> core2_vpmu_cxt->arch_msr_pair[i].counter);
> +
> +    if ( !is_hvm_domain(v->domain) )
> +        rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, core2_vpmu_cxt->global_status);
>  }
>  
>  static int core2_vpmu_save(struct vcpu *v)
> @@ -290,10 +294,14 @@ static int core2_vpmu_save(struct vcpu *v)
>      if ( !vpmu_is_set_all(vpmu, VPMU_CONTEXT_SAVE | VPMU_CONTEXT_LOADED) )
>          return 0;
>  
> +    if ( !is_hvm_domain(v->domain) )
> +        wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
> +
>      __core2_vpmu_save(v);
>  
>      /* Unset PMU MSR bitmap to trap lazy load. */
> -    if ( !vpmu_is_set(vpmu, VPMU_RUNNING) && cpu_has_vmx_msr_bitmap )
> +    if ( !vpmu_is_set(vpmu, VPMU_RUNNING) && cpu_has_vmx_msr_bitmap 
> +        && is_hvm_domain(v->domain) )
>          core2_vpmu_unset_msr_bitmap(v->arch.hvm_vmx.msr_bitmap);
>  
>      return 1;
> @@ -315,6 +323,12 @@ static inline void __core2_vpmu_load(struct vcpu *v)
>  
>      for ( i = 0; i < arch_pmc_cnt; i++ )
>          wrmsrl(MSR_P6_EVNTSEL0+i, core2_vpmu_cxt->arch_msr_pair[i].control);
> +
> +    if ( !is_hvm_domain(v->domain) )
> +    {
> +        wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, 
> core2_vpmu_cxt->global_ovf_ctrl);
> +        wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, core2_vpmu_cxt->global_ctrl);
> +    }
>  }
>  
>  static void core2_vpmu_load(struct vcpu *v)
> @@ -421,7 +435,12 @@ static int core2_vpmu_do_wrmsr(unsigned int msr, 
> uint64_t msr_content)
>                  if ( vpmu_is_set(vpmu, VPMU_CPU_HAS_BTS) )
>                      return 1;
>                  gdprintk(XENLOG_WARNING, "Debug Store is not supported on 
> this cpu\n");
> -                hvm_inject_hw_exception(TRAP_gp_fault, 0);
> +
> +                if ( is_hvm_domain(v->domain) )
> +                    hvm_inject_hw_exception(TRAP_gp_fault, 0);
> +                else
> +                    send_guest_trap(v->domain, v->vcpu_id, TRAP_gp_fault);

Maybe use a macro or function for these 4 lines?

> +
>                  return 0;
>              }
>          }
> @@ -433,11 +452,15 @@ static int core2_vpmu_do_wrmsr(unsigned int msr, 
> uint64_t msr_content)
>      {
>      case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
>          core2_vpmu_cxt->global_ovf_status &= ~msr_content;
> +        core2_vpmu_cxt->global_ovf_ctrl = msr_content;
>          return 1;
>      case MSR_CORE_PERF_GLOBAL_STATUS:
>          gdprintk(XENLOG_INFO, "Can not write readonly MSR: "
>                   "MSR_PERF_GLOBAL_STATUS(0x38E)!\n");
> -        hvm_inject_hw_exception(TRAP_gp_fault, 0);
> +        if ( is_hvm_domain(v->domain) )
> +            hvm_inject_hw_exception(TRAP_gp_fault, 0);
> +        else
> +            send_guest_trap(v->domain, v->vcpu_id, TRAP_gp_fault);

Macro/function?

>          return 1;
>      case MSR_IA32_PEBS_ENABLE:
>          if ( msr_content & 1 )
> @@ -453,7 +476,10 @@ static int core2_vpmu_do_wrmsr(unsigned int msr, 
> uint64_t msr_content)
>                  gdprintk(XENLOG_WARNING,
>                           "Illegal address for IA32_DS_AREA: %#" PRIx64 "x\n",
>                           msr_content);
> -                hvm_inject_hw_exception(TRAP_gp_fault, 0);
> +                if ( is_hvm_domain(v->domain) )
> +                    hvm_inject_hw_exception(TRAP_gp_fault, 0);
> +                else
> +                    send_guest_trap(v->domain, v->vcpu_id, TRAP_gp_fault);   
>     

Macro/function here as well? Also, there are trailing spaces on the line above.

>                  return 1;
>              }
>              core2_vpmu_cxt->ds_area = msr_content;
> @@ -478,10 +504,14 @@ static int core2_vpmu_do_wrmsr(unsigned int msr, 
> uint64_t msr_content)
>              non_global_ctrl >>= FIXED_CTR_CTRL_BITS;
>              global_ctrl >>= 1;
>          }
> +        core2_vpmu_cxt->global_ctrl = msr_content;
>          break;
>      case MSR_CORE_PERF_FIXED_CTR_CTRL:
>          non_global_ctrl = msr_content;
> -        vmx_read_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL, &global_ctrl);
> +        if ( is_hvm_domain(v->domain) )
> +            vmx_read_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL, &global_ctrl);
> +        else
> +            rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, global_ctrl);
>          global_ctrl >>= 32;
>          for ( i = 0; i < fixed_pmc_cnt; i++ )
>          {
> @@ -495,7 +525,10 @@ static int core2_vpmu_do_wrmsr(unsigned int msr, 
> uint64_t msr_content)
>          tmp = msr - MSR_P6_EVNTSEL0;
>          if ( tmp >= 0 && tmp < arch_pmc_cnt )
>          {
> -            vmx_read_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL, &global_ctrl);
> +            if ( is_hvm_domain(v->domain) )
> +                vmx_read_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL, &global_ctrl);
> +            else
> +                rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, global_ctrl);
>              core2_vpmu_cxt->arch_msr_pair[tmp].control = msr_content;
>              for ( i = 0; i < arch_pmc_cnt && !pmu_enable; i++ )
>                  pmu_enable += (global_ctrl >> i) &
> @@ -509,17 +542,20 @@ static int core2_vpmu_do_wrmsr(unsigned int msr, 
> uint64_t msr_content)
>      else
>          vpmu_reset(vpmu, VPMU_RUNNING);
>  
> -    /* Setup LVTPC in local apic */
> -    if ( vpmu_is_set(vpmu, VPMU_RUNNING) &&
> -         is_vlapic_lvtpc_enabled(vcpu_vlapic(v)) )
> -    {
> -        apic_write_around(APIC_LVTPC, PMU_APIC_VECTOR);
> -        vpmu->hw_lapic_lvtpc = PMU_APIC_VECTOR;
> -    }
> -    else
> +    if ( is_hvm_domain(v->domain) )
>      {
> -        apic_write_around(APIC_LVTPC, PMU_APIC_VECTOR | APIC_LVT_MASKED);
> -        vpmu->hw_lapic_lvtpc = PMU_APIC_VECTOR | APIC_LVT_MASKED;
> +        /* Setup LVTPC in local apic */
> +        if ( vpmu_is_set(vpmu, VPMU_RUNNING) &&
> +             is_vlapic_lvtpc_enabled(vcpu_vlapic(v)) )
> +        {
> +            apic_write_around(APIC_LVTPC, PMU_APIC_VECTOR);
> +            vpmu->hw_lapic_lvtpc = PMU_APIC_VECTOR;
> +        }
> +        else
> +        {
> +            apic_write_around(APIC_LVTPC, PMU_APIC_VECTOR | APIC_LVT_MASKED);
> +            vpmu->hw_lapic_lvtpc = PMU_APIC_VECTOR | APIC_LVT_MASKED;
> +        }
>      }
>  
>      if ( type != MSR_TYPE_GLOBAL )
> @@ -547,13 +583,24 @@ static int core2_vpmu_do_wrmsr(unsigned int msr, 
> uint64_t msr_content)
>                  inject_gp = 1;
>              break;
>          }
> -        if (inject_gp)
> -            hvm_inject_hw_exception(TRAP_gp_fault, 0);
> +
> +        if (inject_gp) 
> +        {
> +           if ( is_hvm_domain(v->domain) )
> +               hvm_inject_hw_exception(TRAP_gp_fault, 0);
> +           else
> +               send_guest_trap(v->domain, v->vcpu_id, TRAP_gp_fault);

Macro/function?

Dietmar.

> +        }
>          else
>              wrmsrl(msr, msr_content);
>      }
>      else
> -        vmx_write_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL, msr_content);
> +    {
> +       if ( is_hvm_domain(v->domain) )
> +           vmx_write_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL, msr_content);
> +       else
> +           wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, msr_content);
> +    }
>  
>      return 1;
>  }
> @@ -577,7 +624,10 @@ static int core2_vpmu_do_rdmsr(unsigned int msr, 
> uint64_t *msr_content)
>              *msr_content = core2_vpmu_cxt->global_ovf_status;
>              break;
>          case MSR_CORE_PERF_GLOBAL_CTRL:
> -            vmx_read_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL, msr_content);
> +            if ( is_hvm_domain(v->domain) )
> +                vmx_read_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL, msr_content);
> +            else
> +                rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, *msr_content);
>              break;
>          default:
>              rdmsrl(msr, *msr_content);
> diff --git a/xen/arch/x86/hvm/vpmu.c b/xen/arch/x86/hvm/vpmu.c
> index 69aaa7b..4638193 100644
> --- a/xen/arch/x86/hvm/vpmu.c
> +++ b/xen/arch/x86/hvm/vpmu.c
> @@ -70,6 +70,14 @@ static void __init parse_vpmu_param(char *s)
>      }
>  }
>  
> +static void vpmu_lvtpc_update(uint32_t val)
> +{
> +     struct vpmu_struct *vpmu = vcpu_vpmu(current);
> +
> +     vpmu->hw_lapic_lvtpc = PMU_APIC_VECTOR | (val & APIC_LVT_MASKED);
> +     apic_write(APIC_LVTPC, vpmu->hw_lapic_lvtpc);
> +}
> +
>  int vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content)
>  {
>      struct vpmu_struct *vpmu = vcpu_vpmu(current);
> @@ -428,6 +436,14 @@ long do_xenpmu_op(int op, XEN_GUEST_HANDLE_PARAM(void) 
> arg)
>              return -EFAULT;
>          pvpmu_finish(current->domain, &pmu_params);
>          break;
> +
> +    case XENPMU_lvtpc_set:
> +        if ( copy_from_guest(&pmu_params, arg, 1) )
> +            return -EFAULT;
> +
> +        vpmu_lvtpc_update((uint32_t)pmu_params.val);
> +        ret = 0;
> +        break;
>      }
>  
>      return ret;
> diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
> index 57dbd0c..f378a24 100644
> --- a/xen/arch/x86/traps.c
> +++ b/xen/arch/x86/traps.c
> @@ -71,6 +71,7 @@
>  #include <asm/apic.h>
>  #include <asm/mc146818rtc.h>
>  #include <asm/hpet.h>
> +#include <asm/hvm/vpmu.h>
>  #include <public/arch-x86/cpuid.h>
>  #include <xsm/xsm.h>
>  
> @@ -871,7 +872,6 @@ static void pv_cpuid(struct cpu_user_regs *regs)
>          break;
>  
>      case 0x00000005: /* MONITOR/MWAIT */
> -    case 0x0000000a: /* Architectural Performance Monitor Features */
>      case 0x0000000b: /* Extended Topology Enumeration */
>      case 0x8000000a: /* SVM revision and features */
>      case 0x8000001b: /* Instruction Based Sampling */
> @@ -880,7 +880,9 @@ static void pv_cpuid(struct cpu_user_regs *regs)
>      unsupported:
>          a = b = c = d = 0;
>          break;
> -
> +    case 0x0000000a: /* Architectural Performance Monitor Features (Intel) */
> +        vpmu_do_cpuid(0xa, &a, &b, &c, &d);
> +        break;
>      default:
>          (void)cpuid_hypervisor_leaves(regs->eax, 0, &a, &b, &c, &d);
>          break;
> @@ -2486,6 +2488,17 @@ static int emulate_privileged_op(struct cpu_user_regs 
> *regs)
>              if ( wrmsr_safe(regs->ecx, msr_content) != 0 )
>                  goto fail;
>              break;
> +        case MSR_P6_PERFCTR0...MSR_P6_PERFCTR1:
> +        case MSR_P6_EVNTSEL0...MSR_P6_EVNTSEL1:
> +        case MSR_CORE_PERF_FIXED_CTR0...MSR_CORE_PERF_FIXED_CTR2:
> +        case MSR_CORE_PERF_FIXED_CTR_CTRL...MSR_CORE_PERF_GLOBAL_OVF_CTRL:
> +        case MSR_AMD_FAM15H_EVNTSEL0...MSR_AMD_FAM15H_PERFCTR5:
> +            if ( !vpmu_do_wrmsr(regs->ecx, msr_content) )
> +            {
> +                if ( (vpmu_mode & XENPMU_MODE_PRIV) && (v->domain == dom0) )
> +                    goto invalid;
> +            }
> +            break;
>          default:
>              if ( wrmsr_hypervisor_regs(regs->ecx, msr_content) == 1 )
>                  break;
> @@ -2574,6 +2587,24 @@ static int emulate_privileged_op(struct cpu_user_regs 
> *regs)
>              regs->eax = (uint32_t)msr_content;
>              regs->edx = (uint32_t)(msr_content >> 32);
>              break;
> +        case MSR_IA32_PERF_CAPABILITIES:
> +            if ( rdmsr_safe(regs->ecx, msr_content) )
> +                goto fail;
> +            /* Full-Width Writes not supported */
> +            regs->eax = (uint32_t)msr_content & ~(1 << 13);
> +            regs->edx = (uint32_t)(msr_content >> 32);
> +            break;
> +        case MSR_P6_PERFCTR0...MSR_P6_PERFCTR1:
> +        case MSR_P6_EVNTSEL0...MSR_P6_EVNTSEL1:
> +        case MSR_CORE_PERF_FIXED_CTR0...MSR_CORE_PERF_FIXED_CTR2:
> +        case MSR_CORE_PERF_FIXED_CTR_CTRL...MSR_CORE_PERF_GLOBAL_OVF_CTRL:
> +        case MSR_AMD_FAM15H_EVNTSEL0...MSR_AMD_FAM15H_PERFCTR5:
> +            if ( vpmu_do_rdmsr(regs->ecx, &msr_content) ) {
> +                regs->eax = (uint32_t)msr_content;
> +                regs->edx = (uint32_t)(msr_content >> 32);
> +                break;
> +            }
> +            goto rdmsr_normal;
>          default:
>              if ( rdmsr_hypervisor_regs(regs->ecx, &val) )
>              {
> @@ -2606,6 +2637,10 @@ static int emulate_privileged_op(struct cpu_user_regs 
> *regs)
>          pv_cpuid(regs);
>          break;
>  
> +    case 0x33: /* RDPMC */
> +        rdpmc(regs->ecx, regs->eax, regs->edx);
> +        break;
> +
>      default:
>          goto fail;
>      }
> diff --git a/xen/include/public/xenpmu.h b/xen/include/public/xenpmu.h
> index ec49097..0060670 100644
> --- a/xen/include/public/xenpmu.h
> +++ b/xen/include/public/xenpmu.h
> @@ -27,6 +27,7 @@
>  #define XENPMU_flags_set       3
>  #define XENPMU_init            4
>  #define XENPMU_finish          5
> +#define XENPMU_lvtpc_set       6
>  /* ` } */
>  
>  /* Parameters structure for HYPERVISOR_xenpmu_op call */
> 
-- 
Company details: http://ts.fujitsu.com/imprint.html

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.