# HG changeset patch
# User Wei Huang
# Date 1302812238 18000
# Branch lwp3
# Node ID ee8656e10937121d2780862351a245ed874273fa
# Parent  bccbc5ecf62e49482c06149bb94dfc3dee8882f1
SVM: enable LWP for SVM guest

This patch enables LWP support for SVM guests. It saves and restores
LWP_CFG MSRs on each VCPU context switch. Additionally it handles LWP
CPUIDs and MSRs.

Signed-off-by: Wei Huang

diff -r bccbc5ecf62e -r ee8656e10937 xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c	Thu Apr 14 15:15:36 2011 -0500
+++ b/xen/arch/x86/hvm/svm/svm.c	Thu Apr 14 15:17:18 2011 -0500
@@ -680,6 +680,26 @@
     *(u16 *)(hypercall_page + (__HYPERVISOR_iret * 32)) = 0x0b0f; /* ud2 */
 }
 
+static void svm_lwp_save(struct vcpu *v)
+{
+    if ( cpu_has_lwp )
+    {
+        rdmsrl(MSR_AMD64_LWP_CFG, v->arch.hvm_svm.guest_lwp_cfg);
+        wrmsrl(MSR_AMD64_LWP_CFG, 0x0);
+        /* Disable LWP for next VCPU */
+        wrmsrl(MSR_AMD64_LWP_CBADDR, 0x0);
+    }
+}
+
+static void svm_lwp_load(struct vcpu *v)
+{
+    if ( cpu_has_lwp )
+    {
+        /* Only LWP_CFG is reloaded. LWP_CBADDR will be reloaded via xrstor */
+        wrmsrl(MSR_AMD64_LWP_CFG, v->arch.hvm_svm.guest_lwp_cfg);
+    }
+}
+
 static void svm_ctxt_switch_from(struct vcpu *v)
 {
     int cpu = smp_processor_id();
@@ -688,6 +708,7 @@
     svm_save_dr(v);
     vpmu_save(v);
+    svm_lwp_save(v);
 
     svm_sync_vmcb(v);
     svm_vmload(per_cpu(root_vmcb, cpu));
 
@@ -731,6 +752,7 @@
     svm_vmload(vmcb);
     vmcb->cleanbits.bytes = 0;
     vpmu_load(v);
+    svm_lwp_load(v);
 
     if ( cpu_has_rdtscp )
         wrmsrl(MSR_TSC_AUX, hvm_msr_tsc_aux(v));
@@ -1100,6 +1122,24 @@
         if ( vlapic_hw_disabled(vcpu_vlapic(v)) )
             __clear_bit(X86_FEATURE_APIC & 31, edx);
         break;
+    case 0x8000001c:
+    {
+        /* LWP capability CPUID */
+        uint64_t lwp_cfg = v->arch.hvm_svm.guest_lwp_cfg;
+
+        if ( cpu_has_lwp )
+        {
+            if ( !(v->arch.xcr0 & XSTATE_LWP) )
+            {
+                *eax = 0x0;
+                break;
+            }
+
+            /* turn on avail bit and other features specified in lwp_cfg */
+            *eax = (*edx & lwp_cfg) | 0x00000001;
+        }
+        break;
+    }
     default:
         break;
     }
@@ -1189,6 +1229,10 @@
 
     case MSR_IA32_LASTINTTOIP:
         *msr_content = vmcb_get_lastinttoip(vmcb);
+        break;
+
+    case MSR_AMD64_LWP_CFG:
+        *msr_content = v->arch.hvm_svm.guest_lwp_cfg;
         break;
 
     case MSR_K7_PERFCTR0:
@@ -1287,6 +1331,24 @@
 
     case MSR_IA32_LASTINTTOIP:
         vmcb_set_lastinttoip(vmcb, msr_content);
+        break;
+
+    case MSR_AMD64_LWP_CFG:
+        if ( cpu_has_lwp )
+        {
+            unsigned int eax, ebx, ecx, edx;
+            uint32_t msr_low;
+
+            hvm_cpuid(0x8000001c, &eax, &ebx, &ecx, &edx);
+            msr_low = (uint32_t)msr_content;
+
+            /* generate #GP if guest tries to turn on unsupported features. */
+            if ( msr_low & ~edx )
+                goto gpf;
+
+            wrmsrl(msr, msr_content);
+            v->arch.hvm_svm.guest_lwp_cfg = msr_content;
+        }
         break;
 
     case MSR_K7_PERFCTR0:
diff -r bccbc5ecf62e -r ee8656e10937 xen/arch/x86/hvm/svm/vmcb.c
--- a/xen/arch/x86/hvm/svm/vmcb.c	Thu Apr 14 15:15:36 2011 -0500
+++ b/xen/arch/x86/hvm/svm/vmcb.c	Thu Apr 14 15:17:18 2011 -0500
@@ -120,6 +120,8 @@
     svm_disable_intercept_for_msr(v, MSR_LSTAR);
     svm_disable_intercept_for_msr(v, MSR_STAR);
     svm_disable_intercept_for_msr(v, MSR_SYSCALL_MASK);
+    if ( cpu_has_lwp )
+        svm_disable_intercept_for_msr(v, MSR_AMD64_LWP_CBADDR);
 
     vmcb->_msrpm_base_pa = (u64)virt_to_maddr(arch_svm->msrpm);
     vmcb->_iopm_base_pa = (u64)virt_to_maddr(hvm_io_bitmap);
diff -r bccbc5ecf62e -r ee8656e10937 xen/include/asm-x86/cpufeature.h
--- a/xen/include/asm-x86/cpufeature.h	Thu Apr 14 15:15:36 2011 -0500
+++ b/xen/include/asm-x86/cpufeature.h	Thu Apr 14 15:17:18 2011 -0500
@@ -208,6 +208,8 @@
 
 #define cpu_has_xsave           boot_cpu_has(X86_FEATURE_XSAVE)
 
+#define cpu_has_lwp             boot_cpu_has(X86_FEATURE_LWP)
+
 #define cpu_has_arch_perfmon    boot_cpu_has(X86_FEATURE_ARCH_PERFMON)
 
 #define cpu_has_rdtscp          boot_cpu_has(X86_FEATURE_RDTSCP)
diff -r bccbc5ecf62e -r ee8656e10937 xen/include/asm-x86/hvm/svm/vmcb.h
--- a/xen/include/asm-x86/hvm/svm/vmcb.h	Thu Apr 14 15:15:36 2011 -0500
+++ b/xen/include/asm-x86/hvm/svm/vmcb.h	Thu Apr 14 15:17:18 2011 -0500
@@ -507,6 +507,9 @@
     uint64_t guest_sysenter_cs;
     uint64_t guest_sysenter_esp;
     uint64_t guest_sysenter_eip;
+
+    /* AMD lightweight profiling MSR */
+    uint64_t guest_lwp_cfg;
 };
 
 struct vmcb_struct *alloc_vmcb(void);
diff -r bccbc5ecf62e -r ee8656e10937 xen/include/asm-x86/msr-index.h
--- a/xen/include/asm-x86/msr-index.h	Thu Apr 14 15:15:36 2011 -0500
+++ b/xen/include/asm-x86/msr-index.h	Thu Apr 14 15:17:18 2011 -0500
@@ -253,6 +253,10 @@
 #define MSR_AMD_PATCHLEVEL		0x0000008b
 #define MSR_AMD_PATCHLOADER		0xc0010020
 
+/* AMD Lightweight Profiling MSRs */
+#define MSR_AMD64_LWP_CFG		0xc0000105
+#define MSR_AMD64_LWP_CBADDR		0xc0000106
+
 /* AMD OS Visible Workaround MSRs */
 #define MSR_AMD_OSVW_ID_LENGTH		0xc0010140
 #define MSR_AMD_OSVW_STATUS		0xc0010141