Re: [Xen-devel] [PATCH 3/8] x86/AMD: Read VPMU MSRs from context when it is not loaded into HW
Boris,

I tried booting the HVM guest after applying the patch, but I still see perf working in software mode only. I'll look more into this.

Suravee

On 4/9/2013 12:26 PM, Boris Ostrovsky wrote:

Mark context as LOADED on any MSR write (and on vpmu_restore()). This will allow us to know whether to read an MSR from HW or from the context: the guest may write into an MSR without enabling it (thus not marking the context as RUNNING) and then be migrated. Reading this MSR again would not match the previous write, since the registers were never loaded into HW.

In addition, we should be saving the context when it is LOADED, not RUNNING --- otherwise we would need to save it any time it becomes non-RUNNING, which may be a frequent occurrence.

Signed-off-by: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>
---
 xen/arch/x86/hvm/svm/vpmu.c | 48 +++++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 46 insertions(+), 2 deletions(-)

diff --git a/xen/arch/x86/hvm/svm/vpmu.c b/xen/arch/x86/hvm/svm/vpmu.c
index f194975..3115923 100644
--- a/xen/arch/x86/hvm/svm/vpmu.c
+++ b/xen/arch/x86/hvm/svm/vpmu.c
@@ -225,6 +225,8 @@ static void amd_vpmu_restore(struct vcpu *v)
 
     context_restore(v);
     apic_write(APIC_LVTPC, ctxt->hw_lapic_lvtpc);
+
+    vpmu_set(vpmu, VPMU_CONTEXT_LOADED);
 }
 
 static inline void context_save(struct vcpu *v)
@@ -246,7 +248,7 @@ static void amd_vpmu_save(struct vcpu *v)
     struct amd_vpmu_context *ctx = vpmu->context;
 
     if ( !(vpmu_is_set(vpmu, VPMU_CONTEXT_ALLOCATED) &&
-           vpmu_is_set(vpmu, VPMU_RUNNING)) )
+           vpmu_is_set(vpmu, VPMU_CONTEXT_LOADED)) )
         return;
 
     context_save(v);
@@ -256,6 +258,35 @@ static void amd_vpmu_save(struct vcpu *v)
 
     ctx->hw_lapic_lvtpc = apic_read(APIC_LVTPC);
     apic_write(APIC_LVTPC, ctx->hw_lapic_lvtpc | APIC_LVT_MASKED);
+    vpmu_reset(vpmu, VPMU_CONTEXT_LOADED);
+}
+
+static void context_read(unsigned int msr, u64 *msr_content)
+{
+    unsigned int i;
+    struct vcpu *v = current;
+    struct vpmu_struct *vpmu = vcpu_vpmu(v);
+    struct amd_vpmu_context *ctxt = vpmu->context;
+
+    if ( k7_counters_mirrored &&
+         ((msr >= MSR_K7_EVNTSEL0) && (msr <= MSR_K7_PERFCTR3)) )
+    {
+        msr = get_fam15h_addr(msr);
+    }
+
+    for ( i = 0; i < num_counters; i++ )
+    {
+        if ( msr == ctrls[i] )
+        {
+            *msr_content = ctxt->ctrls[i];
+            return;
+        }
+        else if ( msr == counters[i] )
+        {
+            *msr_content = ctxt->counters[i];
+            return;
+        }
+    }
 }
 
 static void context_update(unsigned int msr, u64 msr_content)
@@ -318,6 +349,12 @@ static int amd_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content)
         release_pmu_ownship(PMU_OWNER_HVM);
     }
 
+    if ( !vpmu_is_set(vpmu, VPMU_CONTEXT_LOADED) )
+    {
+        context_restore(v);
+        vpmu_set(vpmu, VPMU_CONTEXT_LOADED);
+    }
+
     /* Update vpmu context immediately */
     context_update(msr, msr_content);
 
@@ -328,7 +365,14 @@ static int amd_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content)
 
 static int amd_vpmu_do_rdmsr(unsigned int msr, uint64_t *msr_content)
 {
-    rdmsrl(msr, *msr_content);
+    struct vcpu *v = current;
+    struct vpmu_struct *vpmu = vcpu_vpmu(v);
+
+    if ( !vpmu_is_set(vpmu, VPMU_CONTEXT_LOADED) )
+        context_read(msr, msr_content);
+    else
+        rdmsrl(msr, *msr_content);
+
     return 1;
 }

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel
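As an aside for readers tracing the flag logic rather than the diff itself: the patch makes VPMU_CONTEXT_LOADED the sole indicator of whether a vCPU's PMU state currently lives in hardware or only in its saved context. Below is a minimal, self-contained C sketch of that protocol; the names (vpmu_flags, hw_msr, ctxt_msr, guest_rdmsr, and so on) are invented stand-ins for illustration, not Xen's actual structures or API.

#include <stdio.h>
#include <stdint.h>

/* Toy model of the LOADED-flag protocol; names are illustrative
 * stand-ins, not Xen's real API. */
#define VPMU_CONTEXT_LOADED 0x1

static unsigned int vpmu_flags;   /* per-vCPU flag word              */
static uint64_t     hw_msr;       /* stands in for the physical MSR  */
static uint64_t     ctxt_msr;     /* saved software context copy     */

/* vCPU scheduled in: copy context into HW and mark it LOADED. */
static void vpmu_load(void)
{
    hw_msr = ctxt_msr;
    vpmu_flags |= VPMU_CONTEXT_LOADED;
}

/* vCPU scheduled out (or migrated): snapshot HW, clear LOADED. */
static void vpmu_save(void)
{
    ctxt_msr = hw_msr;
    vpmu_flags &= ~VPMU_CONTEXT_LOADED;
}

/* Guest WRMSR: mirror the patch by loading the context first if it
 * is not in HW, so HW and context cannot diverge after the write. */
static void guest_wrmsr(uint64_t val)
{
    if ( !(vpmu_flags & VPMU_CONTEXT_LOADED) )
        vpmu_load();
    ctxt_msr = val;
    hw_msr   = val;
}

/* Guest RDMSR: read HW only if the context is actually in HW;
 * otherwise serve the value from the saved context. */
static uint64_t guest_rdmsr(void)
{
    if ( !(vpmu_flags & VPMU_CONTEXT_LOADED) )
        return ctxt_msr;          /* context not in HW: use the copy */
    return hw_msr;
}

int main(void)
{
    guest_wrmsr(0xabcd);   /* guest programs a counter ...          */
    vpmu_save();           /* ... then is descheduled/migrated      */
    hw_msr = 0;            /* another vCPU now owns the real MSR    */

    /* Without the LOADED check this would read 0 from "HW";
     * with it, the guest sees its own 0xabcd back. */
    printf("guest reads %#lx\n", (unsigned long)guest_rdmsr());
    return 0;
}

Running the sketch prints 0xabcd: because guest_rdmsr() consults the saved context while LOADED is clear, the guest gets back what it wrote even though the physical MSR has since been repurposed, which is exactly the migration scenario the commit message describes.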