[Xen-changelog] [xen-unstable] x86/LWP: Add LWP support for SVM guests

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] x86/LWP: Add LWP support for SVM guests
From: Xen patchbot-unstable <patchbot@xxxxxxx>
Date: Wed, 11 May 2011 04:40:21 +0100
Delivery-date: Tue, 10 May 2011 20:49:02 -0700
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User Wei Huang <wei.huang2@xxxxxxx>
# Date 1304937642 -3600
# Node ID af8d8408a8d2ddd658a7ca75e67aae364cbf1eed
# Parent  750eb14af93ec1ce8e04fd1afccbe422c095f57a
x86/LWP: Add LWP support for SVM guests

This patch enables SVM to handle the LWP-related MSRs and CPUID leaf. It
intercepts guest reads and writes of LWP_CFG, and saves/restores LWP_CFG
across context switches for guests that have touched this MSR. The
LWP_CBADDR MSR is not intercepted because it is handled by xsave/xrstor.

Signed-off-by: Wei Huang <wei.huang2@xxxxxxx>
---
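For illustration (not part of the patch), here is a minimal guest-side
sketch of how the new handling is exercised. It assumes the guest runs at
ring 0 and has already set the LWP bit in XCR0 via xsetbv; the cpuid() and
wrmsr64() helpers are spelled out inline rather than taken from any
particular kernel, so the snippet is self-contained.

#include <stdint.h>

#define MSR_AMD64_LWP_CFG 0xc0000105

static inline void cpuid(uint32_t leaf, uint32_t *a, uint32_t *b,
                         uint32_t *c, uint32_t *d)
{
    asm volatile ( "cpuid"
                   : "=a" (*a), "=b" (*b), "=c" (*c), "=d" (*d)
                   : "0" (leaf) );
}

static inline void wrmsr64(uint32_t msr, uint64_t val)
{
    asm volatile ( "wrmsr"
                   : /* no outputs */
                   : "c" (msr), "a" ((uint32_t)val),
                     "d" ((uint32_t)(val >> 32)) );
}

/* Enable every LWP feature the (virtual) CPU advertises. */
static void guest_enable_lwp(void)
{
    uint32_t eax, ebx, ecx, edx;

    /* Leaf 0x8000001c: EDX holds the supported LWP feature bits. */
    cpuid(0x8000001c, &eax, &ebx, &ecx, &edx);

    /* Request exactly the supported set. Per the hunks below, any bit
     * outside EDX makes svm_update_lwp_cfg() fail, and the write is
     * turned into a #GP injected into the guest. */
    wrmsr64(MSR_AMD64_LWP_CFG, edx);
}

After a successful write, re-reading the leaf would show EAX reporting the
enabled features plus the "available" bit, as computed in the hvm_cpuid
hunk below.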


diff -r 750eb14af93e -r af8d8408a8d2 xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c        Mon May 09 11:40:05 2011 +0100
+++ b/xen/arch/x86/hvm/svm/svm.c        Mon May 09 11:40:42 2011 +0100
@@ -58,7 +58,8 @@
 #include <asm/hvm/trace.h>
 #include <asm/hap.h>
 #include <asm/apic.h>
-#include <asm/debugger.h>       
+#include <asm/debugger.h>
+#include <asm/xstate.h>
 
 u32 svm_feature_flags;
 
@@ -695,6 +696,50 @@
     *(u16 *)(hypercall_page + (__HYPERVISOR_iret * 32)) = 0x0b0f; /* ud2 */
 }
 
+static inline void svm_lwp_save(struct vcpu *v)
+{
+    /* Don't mess with other guests' state. Disable LWP for the next VCPU. */
+    if ( v->arch.hvm_svm.guest_lwp_cfg )
+    {
+        wrmsrl(MSR_AMD64_LWP_CFG, 0x0);
+        wrmsrl(MSR_AMD64_LWP_CBADDR, 0x0);
+    }
+}
+
+static inline void svm_lwp_load(struct vcpu *v)
+{
+    /* Only LWP_CFG is reloaded. LWP_CBADDR will be reloaded via xrstor. */
+    if ( v->arch.hvm_svm.guest_lwp_cfg )
+        wrmsrl(MSR_AMD64_LWP_CFG, v->arch.hvm_svm.guest_lwp_cfg);
+}
+
+/* Update LWP_CFG MSR (0xc0000105). Returns -1 on error, 0 on success. */
+static int svm_update_lwp_cfg(struct vcpu *v, uint64_t msr_content)
+{
+    unsigned int eax, ebx, ecx, edx;
+    uint32_t msr_low;
+
+    if ( xsave_enabled(v) && cpu_has_lwp )
+    {
+        hvm_cpuid(0x8000001c, &eax, &ebx, &ecx, &edx);
+        msr_low = (uint32_t)msr_content;
+
+        /* Generate #GP if the guest tries to turn on unsupported features. */
+        if ( msr_low & ~edx )
+            return -1;
+
+        wrmsrl(MSR_AMD64_LWP_CFG, msr_content);
+        /* CPU might automatically correct reserved bits. So read it back. */
+        rdmsrl(MSR_AMD64_LWP_CFG, msr_content);
+        v->arch.hvm_svm.guest_lwp_cfg = msr_content;
+
+        /* Track non-lazy xsave state if LWP_CFG is non-zero. */
+        v->arch.nonlazy_xstate_used = !!(msr_content);
+    }
+
+    return 0;
+}
+
 static void svm_ctxt_switch_from(struct vcpu *v)
 {
     int cpu = smp_processor_id();
@@ -703,6 +748,7 @@
 
     svm_save_dr(v);
     vpmu_save(v);
+    svm_lwp_save(v);
 
     svm_sync_vmcb(v);
     svm_vmload(per_cpu(root_vmcb, cpu));
@@ -746,6 +792,7 @@
     svm_vmload(vmcb);
     vmcb->cleanbits.bytes = 0;
     vpmu_load(v);
+    svm_lwp_load(v);
 
     if ( cpu_has_rdtscp )
         wrmsrl(MSR_TSC_AUX, hvm_msr_tsc_aux(v));
@@ -1120,6 +1167,24 @@
         if ( vlapic_hw_disabled(vcpu_vlapic(v)) )
             __clear_bit(X86_FEATURE_APIC & 31, edx);
         break;
+    case 0x8000001c:
+    {
+        /* LWP capability CPUID leaf */
+        uint64_t lwp_cfg = v->arch.hvm_svm.guest_lwp_cfg;
+
+        if ( cpu_has_lwp )
+        {
+            if ( !(v->arch.xcr0 & XSTATE_LWP) )
+            {
+                *eax = 0x0;
+                break;
+            }
+
+            /* Turn on the available bit plus the features enabled in lwp_cfg. */
+            *eax = (*edx & lwp_cfg) | 0x00000001;
+        }
+        break;
+    }
     default:
         break;
     }
@@ -1227,6 +1292,10 @@
         *msr_content = vmcb_get_lastinttoip(vmcb);
         break;
 
+    case MSR_AMD64_LWP_CFG:
+        *msr_content = v->arch.hvm_svm.guest_lwp_cfg;
+        break;
+
     case MSR_K7_PERFCTR0:
     case MSR_K7_PERFCTR1:
     case MSR_K7_PERFCTR2:
@@ -1337,6 +1406,11 @@
         vmcb_set_lastinttoip(vmcb, msr_content);
         break;
 
+    case MSR_AMD64_LWP_CFG:
+        if ( svm_update_lwp_cfg(v, msr_content) < 0 )
+            goto gpf;
+        break;
+
     case MSR_K7_PERFCTR0:
     case MSR_K7_PERFCTR1:
     case MSR_K7_PERFCTR2:
diff -r 750eb14af93e -r af8d8408a8d2 xen/arch/x86/hvm/svm/vmcb.c
--- a/xen/arch/x86/hvm/svm/vmcb.c       Mon May 09 11:40:05 2011 +0100
+++ b/xen/arch/x86/hvm/svm/vmcb.c       Mon May 09 11:40:42 2011 +0100
@@ -121,6 +121,11 @@
     svm_disable_intercept_for_msr(v, MSR_STAR);
     svm_disable_intercept_for_msr(v, MSR_SYSCALL_MASK);
 
+    /* The LWP_CBADDR MSR is saved/restored by the FPU (xsave) code, so
+     * SVM does not need to intercept it. */
+    if ( cpu_has_lwp )
+        svm_disable_intercept_for_msr(v, MSR_AMD64_LWP_CBADDR);
+
     vmcb->_msrpm_base_pa = (u64)virt_to_maddr(arch_svm->msrpm);
     vmcb->_iopm_base_pa  = (u64)virt_to_maddr(hvm_io_bitmap);
 
diff -r 750eb14af93e -r af8d8408a8d2 xen/include/asm-x86/cpufeature.h
--- a/xen/include/asm-x86/cpufeature.h  Mon May 09 11:40:05 2011 +0100
+++ b/xen/include/asm-x86/cpufeature.h  Mon May 09 11:40:42 2011 +0100
@@ -208,6 +208,8 @@
 
 #define cpu_has_xsave           boot_cpu_has(X86_FEATURE_XSAVE)
 
+#define cpu_has_lwp             boot_cpu_has(X86_FEATURE_LWP)
+
 #define cpu_has_arch_perfmon    boot_cpu_has(X86_FEATURE_ARCH_PERFMON)
 
 #define cpu_has_rdtscp          boot_cpu_has(X86_FEATURE_RDTSCP)
diff -r 750eb14af93e -r af8d8408a8d2 xen/include/asm-x86/hvm/svm/vmcb.h
--- a/xen/include/asm-x86/hvm/svm/vmcb.h        Mon May 09 11:40:05 2011 +0100
+++ b/xen/include/asm-x86/hvm/svm/vmcb.h        Mon May 09 11:40:42 2011 +0100
@@ -512,6 +512,9 @@
     uint64_t guest_sysenter_cs;
     uint64_t guest_sysenter_esp;
     uint64_t guest_sysenter_eip;
+
+    /* AMD lightweight profiling MSR */
+    uint64_t guest_lwp_cfg;
 };
 
 struct vmcb_struct *alloc_vmcb(void);
diff -r 750eb14af93e -r af8d8408a8d2 xen/include/asm-x86/msr-index.h
--- a/xen/include/asm-x86/msr-index.h   Mon May 09 11:40:05 2011 +0100
+++ b/xen/include/asm-x86/msr-index.h   Mon May 09 11:40:42 2011 +0100
@@ -266,6 +266,10 @@
 #define MSR_AMD_PATCHLEVEL             0x0000008b
 #define MSR_AMD_PATCHLOADER            0xc0010020
 
+/* AMD Lightweight Profiling MSRs */
+#define MSR_AMD64_LWP_CFG              0xc0000105
+#define MSR_AMD64_LWP_CBADDR           0xc0000106
+
 /* AMD OS Visible Workaround MSRs */
 #define MSR_AMD_OSVW_ID_LENGTH          0xc0010140
 #define MSR_AMD_OSVW_STATUS             0xc0010141

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
