WARNING - OLD ARCHIVES

This is an archived copy of the Xen.org mailing list, which we have preserved to ensure that existing links to archives are not broken. The live archive, which contains the latest emails, can be found at http://lists.xen.org/
   
 
 
Xen 
 
Home Products Support Community News
 
   
 

xen-changelog

[Xen-changelog] [xen-unstable] svm: last branch recording MSR emulation

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] svm: last branch recording MSR emulation
From: Xen patchbot-unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Fri, 12 Oct 2007 08:30:15 -0700
Delivery-date: Fri, 12 Oct 2007 08:30:54 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User Keir Fraser <keir@xxxxxxxxxxxxx>
# Date 1192180795 -3600
# Node ID 4746c8c9372fd1b2c9db48210a9be2c3eb0bdb77
# Parent  f6a06b2eefc5931a9cd9ec71baec94d67fe06909
svm: last branch recording MSR emulation
Signed-off-by: Jan Beulich <jbeulich@xxxxxxxxxx>
Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
---
 xen/arch/x86/hvm/svm/svm.c         |   55 ++++++++++++++++++++++++++++++++++++-
 xen/arch/x86/hvm/svm/vmcb.c        |   26 +++++++++--------
 xen/include/asm-x86/hvm/svm/svm.h  |   12 ++++++++
 xen/include/asm-x86/hvm/svm/vmcb.h |   35 ++++++++++++++++-------
 4 files changed, 104 insertions(+), 24 deletions(-)

diff -r f6a06b2eefc5 -r 4746c8c9372f xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c        Thu Oct 11 19:23:40 2007 +0100
+++ b/xen/arch/x86/hvm/svm/svm.c        Fri Oct 12 10:19:55 2007 +0100
@@ -50,6 +50,8 @@
 #include <asm/hvm/trace.h>
 #include <asm/hap.h>
 
+u32 svm_feature_flags;
+
 #define set_segment_register(name, value)  \
     asm volatile ( "movw %%ax ,%%" STR(name) "" : : "a" (value) )
 
@@ -935,13 +937,16 @@ int start_svm(struct cpuinfo_x86 *c)
 
     setup_vmcb_dump();
 
+    svm_feature_flags = ((cpuid_eax(0x80000000) >= 0x8000000A) ?
+                         cpuid_edx(0x8000000A) : 0);
+
 #ifdef __x86_64__
     /*
      * Check CPUID for nested paging support. We support NPT only on 64-bit
      * hosts since the phys-to-machine table is in host format. Hence 32-bit
      * Xen could only support guests using NPT with up to a 4GB memory map.
      */
-    svm_function_table.hap_supported = (cpuid_edx(0x8000000A) & 1);
+    svm_function_table.hap_supported = cpu_has_svm_npt;
 #endif
 
     hvm_enable(&svm_function_table);
@@ -1810,6 +1815,26 @@ static void svm_do_msr_access(
             msr_content = 0;
             break;
 
+        case MSR_IA32_DEBUGCTLMSR:
+            msr_content = vmcb->debugctlmsr;
+            break;
+
+        case MSR_IA32_LASTBRANCHFROMIP:
+            msr_content = vmcb->lastbranchfromip;
+            break;
+
+        case MSR_IA32_LASTBRANCHTOIP:
+            msr_content = vmcb->lastbranchtoip;
+            break;
+
+        case MSR_IA32_LASTINTFROMIP:
+            msr_content = vmcb->lastintfromip;
+            break;
+
+        case MSR_IA32_LASTINTTOIP:
+            msr_content = vmcb->lastinttoip;
+            break;
+
         default:
             if ( rdmsr_hypervisor_regs(ecx, &eax, &edx) ||
                  rdmsr_safe(ecx, eax, edx) == 0 )
@@ -1850,6 +1875,34 @@ static void svm_do_msr_access(
 
         case MSR_K8_VM_HSAVE_PA:
             svm_inject_exception(v, TRAP_gp_fault, 1, 0);
+            break;
+
+        case MSR_IA32_DEBUGCTLMSR:
+            vmcb->debugctlmsr = msr_content;
+            if ( !msr_content || !cpu_has_svm_lbrv )
+                break;
+            vmcb->lbr_control.fields.enable = 1;
+            svm_disable_intercept_for_msr(v, MSR_IA32_DEBUGCTLMSR);
+            svm_disable_intercept_for_msr(v, MSR_IA32_LASTBRANCHFROMIP);
+            svm_disable_intercept_for_msr(v, MSR_IA32_LASTBRANCHTOIP);
+            svm_disable_intercept_for_msr(v, MSR_IA32_LASTINTFROMIP);
+            svm_disable_intercept_for_msr(v, MSR_IA32_LASTINTTOIP);
+            break;
+
+        case MSR_IA32_LASTBRANCHFROMIP:
+            vmcb->lastbranchfromip = msr_content;
+            break;
+
+        case MSR_IA32_LASTBRANCHTOIP:
+            vmcb->lastbranchtoip = msr_content;
+            break;
+
+        case MSR_IA32_LASTINTFROMIP:
+            vmcb->lastintfromip = msr_content;
+            break;
+
+        case MSR_IA32_LASTINTTOIP:
+            vmcb->lastinttoip = msr_content;
             break;
 
         default:
diff -r f6a06b2eefc5 -r 4746c8c9372f xen/arch/x86/hvm/svm/vmcb.c
--- a/xen/arch/x86/hvm/svm/vmcb.c       Thu Oct 11 19:23:40 2007 +0100
+++ b/xen/arch/x86/hvm/svm/vmcb.c       Fri Oct 12 10:19:55 2007 +0100
@@ -80,8 +80,10 @@ struct host_save_area *alloc_host_save_a
     return hsa;
 }
 
-static void disable_intercept_for_msr(char *msr_bitmap, u32 msr)
-{
+void svm_disable_intercept_for_msr(struct vcpu *v, u32 msr)
+{
+    char *msr_bitmap = v->arch.hvm_svm.msrpm;
+
     /*
      * See AMD64 Programmers Manual, Vol 2, Section 15.10 (MSR-Bitmap Address).
      */
@@ -142,16 +144,16 @@ static int construct_vmcb(struct vcpu *v
         return -ENOMEM;
     memset(arch_svm->msrpm, 0xff, MSRPM_SIZE);
 
-    disable_intercept_for_msr((char *)arch_svm->msrpm, MSR_FS_BASE);
-    disable_intercept_for_msr((char *)arch_svm->msrpm, MSR_GS_BASE);
-    disable_intercept_for_msr((char *)arch_svm->msrpm, MSR_SHADOW_GS_BASE);
-    disable_intercept_for_msr((char *)arch_svm->msrpm, MSR_CSTAR);
-    disable_intercept_for_msr((char *)arch_svm->msrpm, MSR_LSTAR);
-    disable_intercept_for_msr((char *)arch_svm->msrpm, MSR_STAR);
-    disable_intercept_for_msr((char *)arch_svm->msrpm, MSR_SYSCALL_MASK);
-    disable_intercept_for_msr((char *)arch_svm->msrpm, MSR_IA32_SYSENTER_CS);
-    disable_intercept_for_msr((char *)arch_svm->msrpm, MSR_IA32_SYSENTER_ESP);
-    disable_intercept_for_msr((char *)arch_svm->msrpm, MSR_IA32_SYSENTER_EIP);
+    svm_disable_intercept_for_msr(v, MSR_FS_BASE);
+    svm_disable_intercept_for_msr(v, MSR_GS_BASE);
+    svm_disable_intercept_for_msr(v, MSR_SHADOW_GS_BASE);
+    svm_disable_intercept_for_msr(v, MSR_CSTAR);
+    svm_disable_intercept_for_msr(v, MSR_LSTAR);
+    svm_disable_intercept_for_msr(v, MSR_STAR);
+    svm_disable_intercept_for_msr(v, MSR_SYSCALL_MASK);
+    svm_disable_intercept_for_msr(v, MSR_IA32_SYSENTER_CS);
+    svm_disable_intercept_for_msr(v, MSR_IA32_SYSENTER_ESP);
+    svm_disable_intercept_for_msr(v, MSR_IA32_SYSENTER_EIP);
 
     vmcb->msrpm_base_pa = (u64)virt_to_maddr(arch_svm->msrpm);
     vmcb->iopm_base_pa  = (u64)virt_to_maddr(hvm_io_bitmap);
diff -r f6a06b2eefc5 -r 4746c8c9372f xen/include/asm-x86/hvm/svm/svm.h
--- a/xen/include/asm-x86/hvm/svm/svm.h Thu Oct 11 19:23:40 2007 +0100
+++ b/xen/include/asm-x86/hvm/svm/svm.h Fri Oct 12 10:19:55 2007 +0100
@@ -61,4 +61,16 @@ static inline void svm_vmsave(void *vmcb
         : : "a" (__pa(vmcb)) : "memory" );
 }
 
+extern u32 svm_feature_flags;
+
+#define SVM_FEATURE_NPT     0
+#define SVM_FEATURE_LBRV    1
+#define SVM_FEATURE_SVML    2
+#define SVM_FEATURE_NRIPS   3
+
+#define cpu_has_svm_npt     test_bit(SVM_FEATURE_NPT, &svm_feature_flags)
+#define cpu_has_svm_lbrv    test_bit(SVM_FEATURE_LBRV, &svm_feature_flags)
+#define cpu_has_svm_svml    test_bit(SVM_FEATURE_SVML, &svm_feature_flags)
+#define cpu_has_svm_nrips   test_bit(SVM_FEATURE_NRIPS, &svm_feature_flags)
+
 #endif /* __ASM_X86_HVM_SVM_H__ */
diff -r f6a06b2eefc5 -r 4746c8c9372f xen/include/asm-x86/hvm/svm/vmcb.h
--- a/xen/include/asm-x86/hvm/svm/vmcb.h        Thu Oct 11 19:23:40 2007 +0100
+++ b/xen/include/asm-x86/hvm/svm/vmcb.h        Fri Oct 12 10:19:55 2007 +0100
@@ -355,6 +355,15 @@ typedef union
     } fields;
 } __attribute__ ((packed)) ioio_info_t;
 
+typedef union
+{
+    u64 bytes;
+    struct
+    {
+        u64 enable:1;
+    } fields;
+} __attribute__ ((packed)) lbrctrl_t;
+
 struct vmcb_struct {
     u32 cr_intercepts;          /* offset 0x00 */
     u32 dr_intercepts;          /* offset 0x04 */
@@ -383,7 +392,8 @@ struct vmcb_struct {
     u64 res08[2];
     eventinj_t  eventinj;       /* offset 0xA8 */
     u64 h_cr3;                  /* offset 0xB0 */
-    u64 res09[105];             /* offset 0xB8 pad to save area */
+    lbrctrl_t lbr_control;      /* offset 0xB8 */
+    u64 res09[104];             /* offset 0xC0 pad to save area */
 
     svm_segment_register_t es;      /* offset 1024 */
     svm_segment_register_t cs;
@@ -426,20 +436,21 @@ struct vmcb_struct {
     u64 pdpe2;
     u64 pdpe3;
     u64 g_pat;
-    u64 res16[50];
-    u64 res17[128];
-    u64 res18[128];
+    u64 debugctlmsr;
+    u64 lastbranchfromip;
+    u64 lastbranchtoip;
+    u64 lastintfromip;
+    u64 lastinttoip;
+    u64 res16[301];
 } __attribute__ ((packed));
-
 
 struct arch_svm_struct {
     struct vmcb_struct *vmcb;
-    u64                 vmcb_pa;
-    u64                 asid_generation; /* ASID tracking, moved here to
-                                            prevent cacheline misses. */
-    u32                *msrpm;
-    int                 launch_core;
-    bool_t              vmcb_in_sync;     /* VMCB sync'ed with VMSAVE? */
+    u64    vmcb_pa;
+    u64    asid_generation; /* ASID tracking, moved here for cache locality. */
+    char  *msrpm;
+    int    launch_core;
+    bool_t vmcb_in_sync;    /* VMCB sync'ed with VMSAVE? */
 };
 
 struct vmcb_struct *alloc_vmcb(void);
@@ -450,6 +461,8 @@ void svm_destroy_vmcb(struct vcpu *v);
 void svm_destroy_vmcb(struct vcpu *v);
 
 void setup_vmcb_dump(void);
+
+void svm_disable_intercept_for_msr(struct vcpu *v, u32 msr);
 
 #endif /* ASM_X86_HVM_SVM_VMCS_H__ */
 

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog

[<Prev in Thread] [Current Thread] [Next in Thread>]
  • [Xen-changelog] [xen-unstable] svm: last branch recording MSR emulation, Xen patchbot-unstable <=