To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] x86 hvm: msr-handling cleanup
From: Xen patchbot-unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Thu, 10 Jun 2010 02:25:20 -0700
Delivery-date: Thu, 10 Jun 2010 02:27:13 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx

# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1276154647 -3600
# Node ID f70c8a65653c589e283d4100232e28d697b109c6
# Parent  875a7ba3247e57193c313b31ad85799b5cf5f2f4
x86 hvm: msr-handling cleanup

Signed-off-by: Christoph Egger <Christoph.Egger@xxxxxxx>
---
 xen/arch/x86/hvm/emulate.c        |   19 ----
 xen/arch/x86/hvm/hvm.c            |   67 ++++++--------
 xen/arch/x86/hvm/svm/svm.c        |   96 ++++++++++-----------
 xen/arch/x86/hvm/vmx/vmx.c        |  171 ++++++++++++++++++--------------------
 xen/arch/x86/oprofile/nmi_int.c   |   37 +++-----
 xen/include/asm-x86/hvm/hvm.h     |    4 
 xen/include/asm-x86/hvm/support.h |    4 
 xen/include/asm-x86/xenoprof.h    |    4 
 8 files changed, 186 insertions(+), 216 deletions(-)
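
In short: the MSR intercept interface changes from passing a guest register
block to passing the MSR index and a 64-bit value explicitly. A condensed
before/after sketch of the affected prototypes (an editorial illustration,
not part of the patch; the real change is in the hvm/support.h hunk below):

    /* Before: MSR index and value travel in the guest register block. */
    int hvm_msr_read_intercept(struct cpu_user_regs *regs);  /* in: ecx; out: edx:eax */
    int hvm_msr_write_intercept(struct cpu_user_regs *regs); /* in: ecx, edx:eax */

    /* After: explicit index, explicit 64-bit payload. */
    int hvm_msr_read_intercept(unsigned int msr, uint64_t *msr_content);
    int hvm_msr_write_intercept(unsigned int msr, uint64_t msr_content);

Register marshalling is now confined to the SVM/VMX exit handlers, which
unpack edx:eax at the boundary, e.g. (condensed from the RDMSR paths below):

    uint64_t msr_content;

    if ( hvm_msr_read_intercept(regs->ecx, &msr_content) == X86EMUL_OKAY )
    {
        regs->eax = (uint32_t)msr_content;
        regs->edx = (uint32_t)(msr_content >> 32);
    }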

diff -r 875a7ba3247e -r f70c8a65653c xen/arch/x86/hvm/emulate.c
--- a/xen/arch/x86/hvm/emulate.c        Thu Jun 10 08:19:58 2010 +0100
+++ b/xen/arch/x86/hvm/emulate.c        Thu Jun 10 08:24:07 2010 +0100
@@ -825,16 +825,7 @@ static int hvmemul_read_msr(
     uint64_t *val,
     struct x86_emulate_ctxt *ctxt)
 {
-    struct cpu_user_regs _regs;
-    int rc;
-
-    _regs.ecx = (uint32_t)reg;
-
-    if ( (rc = hvm_msr_read_intercept(&_regs)) != X86EMUL_OKAY )
-        return rc;
-
-    *val = ((uint64_t)(uint32_t)_regs.edx << 32) | (uint32_t)_regs.eax;
-    return X86EMUL_OKAY;
+    return hvm_msr_read_intercept(reg, val);
 }
 
 static int hvmemul_write_msr(
@@ -842,13 +833,7 @@ static int hvmemul_write_msr(
     uint64_t val,
     struct x86_emulate_ctxt *ctxt)
 {
-    struct cpu_user_regs _regs;
-
-    _regs.edx = (uint32_t)(val >> 32);
-    _regs.eax = (uint32_t)val;
-    _regs.ecx = (uint32_t)reg;
-
-    return hvm_msr_write_intercept(&_regs);
+    return hvm_msr_write_intercept(reg, val);
 }
 
 static int hvmemul_wbinvd(
diff -r 875a7ba3247e -r f70c8a65653c xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c    Thu Jun 10 08:19:58 2010 +0100
+++ b/xen/arch/x86/hvm/hvm.c    Thu Jun 10 08:24:07 2010 +0100
@@ -2010,10 +2010,8 @@ void hvm_rdtsc_intercept(struct cpu_user
     regs->edx = (uint32_t)(tsc >> 32);
 }
 
-int hvm_msr_read_intercept(struct cpu_user_regs *regs)
-{
-    uint32_t ecx = regs->ecx;
-    uint64_t msr_content = 0;
+int hvm_msr_read_intercept(unsigned int msr, uint64_t *msr_content)
+{
     struct vcpu *v = current;
     uint64_t *var_range_base, *fixed_range_base;
     int index, mtrr;
@@ -2026,58 +2024,58 @@ int hvm_msr_read_intercept(struct cpu_us
     hvm_cpuid(1, &cpuid[0], &cpuid[1], &cpuid[2], &cpuid[3]);
     mtrr = !!(cpuid[3] & bitmaskof(X86_FEATURE_MTRR));
 
-    switch ( ecx )
+    switch ( msr )
     {
     case MSR_IA32_TSC:
-        msr_content = hvm_get_guest_tsc(v);
+        *msr_content = hvm_get_guest_tsc(v);
         break;
 
     case MSR_TSC_AUX:
-        msr_content = hvm_msr_tsc_aux(v);
+        *msr_content = hvm_msr_tsc_aux(v);
         break;
 
     case MSR_IA32_APICBASE:
-        msr_content = vcpu_vlapic(v)->hw.apic_base_msr;
+        *msr_content = vcpu_vlapic(v)->hw.apic_base_msr;
         break;
 
     case MSR_IA32_CR_PAT:
-        msr_content = v->arch.hvm_vcpu.pat_cr;
+        *msr_content = v->arch.hvm_vcpu.pat_cr;
         break;
 
     case MSR_MTRRcap:
         if ( !mtrr )
             goto gp_fault;
-        msr_content = v->arch.hvm_vcpu.mtrr.mtrr_cap;
+        *msr_content = v->arch.hvm_vcpu.mtrr.mtrr_cap;
         break;
     case MSR_MTRRdefType:
         if ( !mtrr )
             goto gp_fault;
-        msr_content = v->arch.hvm_vcpu.mtrr.def_type
+        *msr_content = v->arch.hvm_vcpu.mtrr.def_type
                         | (v->arch.hvm_vcpu.mtrr.enabled << 10);
         break;
     case MSR_MTRRfix64K_00000:
         if ( !mtrr )
             goto gp_fault;
-        msr_content = fixed_range_base[0];
+        *msr_content = fixed_range_base[0];
         break;
     case MSR_MTRRfix16K_80000:
     case MSR_MTRRfix16K_A0000:
         if ( !mtrr )
             goto gp_fault;
-        index = regs->ecx - MSR_MTRRfix16K_80000;
-        msr_content = fixed_range_base[index + 1];
+        index = msr - MSR_MTRRfix16K_80000;
+        *msr_content = fixed_range_base[index + 1];
         break;
     case MSR_MTRRfix4K_C0000...MSR_MTRRfix4K_F8000:
         if ( !mtrr )
             goto gp_fault;
-        index = regs->ecx - MSR_MTRRfix4K_C0000;
-        msr_content = fixed_range_base[index + 3];
+        index = msr - MSR_MTRRfix4K_C0000;
+        *msr_content = fixed_range_base[index + 3];
         break;
     case MSR_IA32_MTRR_PHYSBASE0...MSR_IA32_MTRR_PHYSMASK7:
         if ( !mtrr )
             goto gp_fault;
-        index = regs->ecx - MSR_IA32_MTRR_PHYSBASE0;
-        msr_content = var_range_base[index];
+        index = msr - MSR_IA32_MTRR_PHYSBASE0;
+        *msr_content = var_range_base[index];
         break;
 
     case MSR_K8_ENABLE_C1E:
@@ -2087,22 +2085,21 @@ int hvm_msr_read_intercept(struct cpu_us
           * has been migrated to an Intel host. This fixes a guest crash
           * in this case.
           */
-         msr_content = 0;
+         *msr_content = 0;
          break;
 
     default:
-        ret = vmce_rdmsr(ecx, &msr_content);
+        ret = vmce_rdmsr(msr, msr_content);
         if ( ret < 0 )
             goto gp_fault;
         else if ( ret )
             break;
         /* ret == 0, This is not an MCE MSR, see other MSRs */
-        else if (!ret)
-            return hvm_funcs.msr_read_intercept(regs);
-    }
-
-    regs->eax = (uint32_t)msr_content;
-    regs->edx = (uint32_t)(msr_content >> 32);
+        else if (!ret) {
+            return hvm_funcs.msr_read_intercept(msr, msr_content);
+        }
+    }
+
     return X86EMUL_OKAY;
 
 gp_fault:
@@ -2110,10 +2107,8 @@ gp_fault:
     return X86EMUL_EXCEPTION;
 }
 
-int hvm_msr_write_intercept(struct cpu_user_regs *regs)
-{
-    uint32_t ecx = regs->ecx;
-    uint64_t msr_content = (uint32_t)regs->eax | ((uint64_t)regs->edx << 32);
+int hvm_msr_write_intercept(unsigned int msr, uint64_t msr_content)
+{
     struct vcpu *v = current;
     int index, mtrr;
     uint32_t cpuid[4];
@@ -2122,7 +2117,7 @@ int hvm_msr_write_intercept(struct cpu_u
     hvm_cpuid(1, &cpuid[0], &cpuid[1], &cpuid[2], &cpuid[3]);
     mtrr = !!(cpuid[3] & bitmaskof(X86_FEATURE_MTRR));
 
-    switch ( ecx )
+    switch ( msr )
     {
     case MSR_IA32_TSC:
         hvm_set_guest_tsc(v, msr_content);
@@ -2164,7 +2159,7 @@ int hvm_msr_write_intercept(struct cpu_u
     case MSR_MTRRfix16K_A0000:
         if ( !mtrr )
             goto gp_fault;
-        index = regs->ecx - MSR_MTRRfix16K_80000 + 1;
+        index = msr - MSR_MTRRfix16K_80000 + 1;
         if ( !mtrr_fix_range_msr_set(&v->arch.hvm_vcpu.mtrr,
                                      index, msr_content) )
             goto gp_fault;
@@ -2172,7 +2167,7 @@ int hvm_msr_write_intercept(struct cpu_u
     case MSR_MTRRfix4K_C0000...MSR_MTRRfix4K_F8000:
         if ( !mtrr )
             goto gp_fault;
-        index = regs->ecx - MSR_MTRRfix4K_C0000 + 3;
+        index = msr - MSR_MTRRfix4K_C0000 + 3;
         if ( !mtrr_fix_range_msr_set(&v->arch.hvm_vcpu.mtrr,
                                      index, msr_content) )
             goto gp_fault;
@@ -2181,7 +2176,7 @@ int hvm_msr_write_intercept(struct cpu_u
         if ( !mtrr )
             goto gp_fault;
         if ( !mtrr_var_range_msr_set(&v->arch.hvm_vcpu.mtrr,
-                                     regs->ecx, msr_content) )
+                                     msr, msr_content) )
             goto gp_fault;
         break;
 
@@ -2190,13 +2185,13 @@ int hvm_msr_write_intercept(struct cpu_u
         break;
 
     default:
-        ret = vmce_wrmsr(ecx, msr_content);
+        ret = vmce_wrmsr(msr, msr_content);
         if ( ret < 0 )
             goto gp_fault;
         else if ( ret )
             break;
         else if (!ret)
-            return hvm_funcs.msr_write_intercept(regs);
+            return hvm_funcs.msr_write_intercept(msr, msr_content);
     }
 
     return X86EMUL_OKAY;
diff -r 875a7ba3247e -r f70c8a65653c xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c        Thu Jun 10 08:19:58 2010 +0100
+++ b/xen/arch/x86/hvm/svm/svm.c        Thu Jun 10 08:24:07 2010 +0100
@@ -106,15 +106,13 @@ static void svm_cpu_down(void)
     write_efer(read_efer() & ~EFER_SVME);
 }
 
-static enum handler_return long_mode_do_msr_write(struct cpu_user_regs *regs)
-{
-    u64 msr_content = (u32)regs->eax | ((u64)regs->edx << 32);
-    u32 ecx = regs->ecx;
-
+static enum handler_return
+long_mode_do_msr_write(unsigned int msr, uint64_t msr_content)
+{
     HVM_DBG_LOG(DBG_LEVEL_0, "msr %x msr_content %"PRIx64,
-                ecx, msr_content);
-
-    switch ( ecx )
+                msr, msr_content);
+
+    switch ( msr )
     {
     case MSR_EFER:
         if ( hvm_set_efer(msr_content) )
@@ -1033,27 +1031,26 @@ static void svm_dr_access(struct vcpu *v
     __restore_debug_registers(v);
 }
 
-static int svm_msr_read_intercept(struct cpu_user_regs *regs)
-{
-    u64 msr_content = 0;
-    u32 ecx = regs->ecx, eax, edx;
+static int svm_msr_read_intercept(unsigned int msr, uint64_t *msr_content)
+{
+    u32 eax, edx;
     struct vcpu *v = current;
     struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
 
-    switch ( ecx )
+    switch ( msr )
     {
     case MSR_EFER:
-        msr_content = v->arch.hvm_vcpu.guest_efer;
+        *msr_content = v->arch.hvm_vcpu.guest_efer;
         break;
 
     case MSR_IA32_SYSENTER_CS:
-        msr_content = v->arch.hvm_svm.guest_sysenter_cs;
+        *msr_content = v->arch.hvm_svm.guest_sysenter_cs;
         break;
     case MSR_IA32_SYSENTER_ESP:
-        msr_content = v->arch.hvm_svm.guest_sysenter_esp;
+        *msr_content = v->arch.hvm_svm.guest_sysenter_esp;
         break;
     case MSR_IA32_SYSENTER_EIP:
-        msr_content = v->arch.hvm_svm.guest_sysenter_eip;
+        *msr_content = v->arch.hvm_svm.guest_sysenter_eip;
         break;
 
     case MSR_IA32_MC4_MISC: /* Threshold register */
@@ -1062,7 +1059,7 @@ static int svm_msr_read_intercept(struct
          * MCA/MCE: We report that the threshold register is unavailable
          * for OS use (locked by the BIOS).
          */
-        msr_content = 1ULL << 61; /* MC4_MISC.Locked */
+        *msr_content = 1ULL << 61; /* MC4_MISC.Locked */
         break;
 
     case MSR_IA32_EBC_FREQUENCY_ID:
@@ -1071,30 +1068,30 @@ static int svm_msr_read_intercept(struct
          * has been migrated from an Intel host. The value zero is not
          * particularly meaningful, but at least avoids the guest crashing!
          */
-        msr_content = 0;
+        *msr_content = 0;
         break;
 
     case MSR_K8_VM_HSAVE_PA:
         goto gpf;
 
     case MSR_IA32_DEBUGCTLMSR:
-        msr_content = vmcb->debugctlmsr;
+        *msr_content = vmcb->debugctlmsr;
         break;
 
     case MSR_IA32_LASTBRANCHFROMIP:
-        msr_content = vmcb->lastbranchfromip;
+        *msr_content = vmcb->lastbranchfromip;
         break;
 
     case MSR_IA32_LASTBRANCHTOIP:
-        msr_content = vmcb->lastbranchtoip;
+        *msr_content = vmcb->lastbranchtoip;
         break;
 
     case MSR_IA32_LASTINTFROMIP:
-        msr_content = vmcb->lastintfromip;
+        *msr_content = vmcb->lastintfromip;
         break;
 
     case MSR_IA32_LASTINTTOIP:
-        msr_content = vmcb->lastinttoip;
+        *msr_content = vmcb->lastinttoip;
         break;
 
     case MSR_K7_PERFCTR0:
@@ -1105,30 +1102,28 @@ static int svm_msr_read_intercept(struct
     case MSR_K7_EVNTSEL1:
     case MSR_K7_EVNTSEL2:
     case MSR_K7_EVNTSEL3:
-        vpmu_do_rdmsr(ecx, &msr_content);
+        vpmu_do_rdmsr(msr, msr_content);
         break;
 
     default:
 
-        if ( rdmsr_viridian_regs(ecx, &msr_content) ||
-             rdmsr_hypervisor_regs(ecx, &msr_content) )
+        if ( rdmsr_viridian_regs(msr, msr_content) ||
+             rdmsr_hypervisor_regs(msr, msr_content) )
             break;
 
-        if ( rdmsr_safe(ecx, eax, edx) == 0 )
+        if ( rdmsr_safe(msr, eax, edx) == 0 )
         {
-            msr_content = ((uint64_t)edx << 32) | eax;
+            *msr_content = ((uint64_t)edx << 32) | eax;
             break;
         }
 
         goto gpf;
     }
 
-    regs->eax = (uint32_t)msr_content;
-    regs->edx = (uint32_t)(msr_content >> 32);
-
-    HVMTRACE_3D (MSR_READ, ecx, regs->eax, regs->edx);
-    HVM_DBG_LOG(DBG_LEVEL_1, "returns: ecx=%x, eax=%lx, edx=%lx",
-                ecx, (unsigned long)regs->eax, (unsigned long)regs->edx);
+    HVMTRACE_3D (MSR_READ, msr,
+                (uint32_t)*msr_content, (uint32_t)(*msr_content>>32));
+    HVM_DBG_LOG(DBG_LEVEL_1, "returns: ecx=%x, msr_value=%"PRIx64,
+                msr, *msr_content);
     return X86EMUL_OKAY;
 
  gpf:
@@ -1136,18 +1131,15 @@ static int svm_msr_read_intercept(struct
     return X86EMUL_EXCEPTION;
 }
 
-static int svm_msr_write_intercept(struct cpu_user_regs *regs)
-{
-    u64 msr_content = 0;
-    u32 ecx = regs->ecx;
+static int svm_msr_write_intercept(unsigned int msr, uint64_t msr_content)
+{
     struct vcpu *v = current;
     struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
 
-    msr_content = (u32)regs->eax | ((u64)regs->edx << 32);
-
-    HVMTRACE_3D (MSR_WRITE, ecx, regs->eax, regs->edx);
-
-    switch ( ecx )
+    HVMTRACE_3D(MSR_WRITE, msr,
+               (uint32_t)msr_content, (uint32_t)(msr_content >> 32)); 
+
+    switch ( msr )
     {
     case MSR_K8_VM_HSAVE_PA:
         goto gpf;
@@ -1198,17 +1190,17 @@ static int svm_msr_write_intercept(struc
     case MSR_K7_EVNTSEL1:
     case MSR_K7_EVNTSEL2:
     case MSR_K7_EVNTSEL3:
-        vpmu_do_wrmsr(ecx, msr_content);
+        vpmu_do_wrmsr(msr, msr_content);
         break;
 
     default:
-        if ( wrmsr_viridian_regs(ecx, msr_content) )
+        if ( wrmsr_viridian_regs(msr, msr_content) )
             break;
 
-        switch ( long_mode_do_msr_write(regs) )
+        switch ( long_mode_do_msr_write(msr, msr_content) )
         {
         case HNDL_unhandled:
-            wrmsr_hypervisor_regs(ecx, msr_content);
+            wrmsr_hypervisor_regs(msr, msr_content);
             break;
         case HNDL_exception_raised:
             return X86EMUL_EXCEPTION;
@@ -1229,18 +1221,22 @@ static void svm_do_msr_access(struct cpu
     int rc, inst_len;
     struct vcpu *v = current;
     struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+    uint64_t msr_content;
 
     if ( vmcb->exitinfo1 == 0 )
     {
         if ( (inst_len = __get_instruction_length(v, INSTR_RDMSR)) == 0 )
             return;
-        rc = hvm_msr_read_intercept(regs);
+        rc = hvm_msr_read_intercept(regs->ecx, &msr_content);
+        regs->eax = (uint32_t)msr_content;
+        regs->edx = (uint32_t)(msr_content >> 32);
     }
     else
     {
         if ( (inst_len = __get_instruction_length(v, INSTR_WRMSR)) == 0 )
             return;
-        rc = hvm_msr_write_intercept(regs);
+        msr_content = ((uint64_t)regs->edx << 32) | (uint32_t)regs->eax;
+        rc = hvm_msr_write_intercept(regs->ecx, msr_content);
     }
 
     if ( rc == X86EMUL_OKAY )
diff -r 875a7ba3247e -r f70c8a65653c xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c        Thu Jun 10 08:19:58 2010 +0100
+++ b/xen/arch/x86/hvm/vmx/vmx.c        Thu Jun 10 08:24:07 2010 +0100
@@ -69,8 +69,8 @@ static void vmx_cpuid_intercept(
     unsigned int *ecx, unsigned int *edx);
 static void vmx_wbinvd_intercept(void);
 static void vmx_fpu_dirty_intercept(void);
-static int vmx_msr_read_intercept(struct cpu_user_regs *regs);
-static int vmx_msr_write_intercept(struct cpu_user_regs *regs);
+static int vmx_msr_read_intercept(unsigned int msr, uint64_t *msr_content);
+static int vmx_msr_write_intercept(unsigned int msr, uint64_t msr_content);
 static void vmx_invlpg_intercept(unsigned long vaddr);
 static void __ept_sync_domain(void *info);
 
@@ -160,70 +160,65 @@ void vmx_save_host_msrs(void)
         set_bit(VMX_INDEX_MSR_ ## address, &host_msr_state->flags);     \
         break
 
-static enum handler_return long_mode_do_msr_read(struct cpu_user_regs *regs)
-{
-    u64 msr_content = 0;
-    u32 ecx = regs->ecx;
+static enum handler_return
+long_mode_do_msr_read(unsigned int msr, uint64_t *msr_content)
+{
     struct vcpu *v = current;
     struct vmx_msr_state *guest_msr_state = &v->arch.hvm_vmx.msr_state;
 
-    switch ( ecx )
+    switch ( msr )
     {
     case MSR_EFER:
-        msr_content = v->arch.hvm_vcpu.guest_efer;
+        *msr_content = v->arch.hvm_vcpu.guest_efer;
         break;
 
     case MSR_FS_BASE:
-        msr_content = __vmread(GUEST_FS_BASE);
+        *msr_content = __vmread(GUEST_FS_BASE);
         break;
 
     case MSR_GS_BASE:
-        msr_content = __vmread(GUEST_GS_BASE);
+        *msr_content = __vmread(GUEST_GS_BASE);
         break;
 
     case MSR_SHADOW_GS_BASE:
-        rdmsrl(MSR_SHADOW_GS_BASE, msr_content);
+        rdmsrl(MSR_SHADOW_GS_BASE, *msr_content);
         break;
 
     case MSR_STAR:
-        msr_content = guest_msr_state->msrs[VMX_INDEX_MSR_STAR];
+        *msr_content = guest_msr_state->msrs[VMX_INDEX_MSR_STAR];
         break;
 
     case MSR_LSTAR:
-        msr_content = guest_msr_state->msrs[VMX_INDEX_MSR_LSTAR];
+        *msr_content = guest_msr_state->msrs[VMX_INDEX_MSR_LSTAR];
         break;
 
     case MSR_CSTAR:
-        msr_content = v->arch.hvm_vmx.cstar;
+        *msr_content = v->arch.hvm_vmx.cstar;
         break;
 
     case MSR_SYSCALL_MASK:
-        msr_content = guest_msr_state->msrs[VMX_INDEX_MSR_SYSCALL_MASK];
+        *msr_content = guest_msr_state->msrs[VMX_INDEX_MSR_SYSCALL_MASK];
         break;
 
     default:
         return HNDL_unhandled;
     }
 
-    HVM_DBG_LOG(DBG_LEVEL_0, "msr 0x%x content 0x%"PRIx64, ecx, msr_content);
-
-    regs->eax = (u32)(msr_content >>  0);
-    regs->edx = (u32)(msr_content >> 32);
+    HVM_DBG_LOG(DBG_LEVEL_0, "msr 0x%x content 0x%"PRIx64, msr, *msr_content);
 
     return HNDL_done;
 }
 
-static enum handler_return long_mode_do_msr_write(struct cpu_user_regs *regs)
-{
-    u64 msr_content = (u32)regs->eax | ((u64)regs->edx << 32);
-    u32 ecx = regs->ecx;
+static enum handler_return
+long_mode_do_msr_write(unsigned int msr, uint64_t msr_content)
+{
     struct vcpu *v = current;
     struct vmx_msr_state *guest_msr_state = &v->arch.hvm_vmx.msr_state;
     struct vmx_msr_state *host_msr_state = &this_cpu(host_msr_state);
 
-    HVM_DBG_LOG(DBG_LEVEL_0, "msr 0x%x content 0x%"PRIx64, ecx, msr_content);
-
-    switch ( ecx )
+    HVM_DBG_LOG(DBG_LEVEL_0, "msr 0x%x content 0x%"PRIx64, msr, msr_content);
+
+    switch ( msr )
     {
     case MSR_EFER:
         if ( hvm_set_efer(msr_content) )
@@ -236,9 +231,9 @@ static enum handler_return long_mode_do_
         if ( !is_canonical_address(msr_content) )
             goto uncanonical_address;
 
-        if ( ecx == MSR_FS_BASE )
+        if ( msr == MSR_FS_BASE )
             __vmwrite(GUEST_FS_BASE, msr_content);
-        else if ( ecx == MSR_GS_BASE )
+        else if ( msr == MSR_GS_BASE )
             __vmwrite(GUEST_GS_BASE, msr_content);
         else
             wrmsrl(MSR_SHADOW_GS_BASE, msr_content);
@@ -269,7 +264,7 @@ static enum handler_return long_mode_do_
     return HNDL_done;
 
  uncanonical_address:
-    HVM_DBG_LOG(DBG_LEVEL_0, "Not cano address of msr write %x", ecx);
+    HVM_DBG_LOG(DBG_LEVEL_0, "Not cano address of msr write %x", msr);
     vmx_inject_hw_exception(TRAP_gp_fault, 0);
  exception_raised:
     return HNDL_exception_raised;
@@ -1813,44 +1808,43 @@ static int is_last_branch_msr(u32 ecx)
     return 0;
 }
 
-static int vmx_msr_read_intercept(struct cpu_user_regs *regs)
-{
-    u64 msr_content = 0;
-    u32 ecx = regs->ecx, eax, edx;
-
-    HVM_DBG_LOG(DBG_LEVEL_1, "ecx=%x", ecx);
-
-    switch ( ecx )
+static int vmx_msr_read_intercept(unsigned int msr, uint64_t *msr_content)
+{
+    u32 eax, edx;
+
+    HVM_DBG_LOG(DBG_LEVEL_1, "ecx=%x", msr);
+
+    switch ( msr )
     {
     case MSR_IA32_SYSENTER_CS:
-        msr_content = (u32)__vmread(GUEST_SYSENTER_CS);
+        *msr_content = (u32)__vmread(GUEST_SYSENTER_CS);
         break;
     case MSR_IA32_SYSENTER_ESP:
-        msr_content = __vmread(GUEST_SYSENTER_ESP);
+        *msr_content = __vmread(GUEST_SYSENTER_ESP);
         break;
     case MSR_IA32_SYSENTER_EIP:
-        msr_content = __vmread(GUEST_SYSENTER_EIP);
+        *msr_content = __vmread(GUEST_SYSENTER_EIP);
         break;
     case MSR_IA32_DEBUGCTLMSR:
-        msr_content = __vmread(GUEST_IA32_DEBUGCTL);
+        *msr_content = __vmread(GUEST_IA32_DEBUGCTL);
 #ifdef __i386__
-        msr_content |= (u64)__vmread(GUEST_IA32_DEBUGCTL_HIGH) << 32;
+        *msr_content |= (u64)__vmread(GUEST_IA32_DEBUGCTL_HIGH) << 32;
 #endif
         break;
     case MSR_IA32_VMX_BASIC...MSR_IA32_VMX_PROCBASED_CTLS2:
         goto gp_fault;
     case MSR_IA32_MISC_ENABLE:
-        rdmsrl(MSR_IA32_MISC_ENABLE, msr_content);
+        rdmsrl(MSR_IA32_MISC_ENABLE, *msr_content);
         /* Debug Trace Store is not supported. */
-        msr_content |= MSR_IA32_MISC_ENABLE_BTS_UNAVAIL |
+        *msr_content |= MSR_IA32_MISC_ENABLE_BTS_UNAVAIL |
                        MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL;
         break;
     default:
-        if ( vpmu_do_rdmsr(ecx, &msr_content) )
+        if ( vpmu_do_rdmsr(msr, msr_content) )
             break;
-        if ( passive_domain_do_rdmsr(regs) )
+        if ( passive_domain_do_rdmsr(msr, msr_content) )
             goto done;
-        switch ( long_mode_do_msr_read(regs) )
+        switch ( long_mode_do_msr_read(msr, msr_content) )
         {
             case HNDL_unhandled:
                 break;
@@ -1860,36 +1854,33 @@ static int vmx_msr_read_intercept(struct
                 goto done;
         }
 
-        if ( vmx_read_guest_msr(ecx, &msr_content) == 0 )
+        if ( vmx_read_guest_msr(msr, msr_content) == 0 )
             break;
 
-        if ( is_last_branch_msr(ecx) )
-        {
-            msr_content = 0;
+        if ( is_last_branch_msr(msr) )
+        {
+            *msr_content = 0;
             break;
         }
 
-        if ( rdmsr_viridian_regs(ecx, &msr_content) ||
-             rdmsr_hypervisor_regs(ecx, &msr_content) )
+        if ( rdmsr_viridian_regs(msr, msr_content) ||
+             rdmsr_hypervisor_regs(msr, msr_content) )
             break;
 
-        if ( rdmsr_safe(ecx, eax, edx) == 0 )
-        {
-            msr_content = ((uint64_t)edx << 32) | eax;
+        if ( rdmsr_safe(msr, eax, edx) == 0 )
+        {
+            *msr_content = ((uint64_t)edx << 32) | eax;
             break;
         }
 
         goto gp_fault;
     }
 
-    regs->eax = (uint32_t)msr_content;
-    regs->edx = (uint32_t)(msr_content >> 32);
-
 done:
-    HVMTRACE_3D (MSR_READ, ecx, regs->eax, regs->edx);
-    HVM_DBG_LOG(DBG_LEVEL_1, "returns: ecx=%x, eax=%lx, edx=%lx",
-                ecx, (unsigned long)regs->eax,
-                (unsigned long)regs->edx);
+    HVMTRACE_3D(MSR_READ, msr,
+                (uint32_t)*msr_content, (uint32_t)(*msr_content >> 32));
+    HVM_DBG_LOG(DBG_LEVEL_1, "returns: ecx=%x, msr_value=0x%"PRIx64,
+                msr, *msr_content);
     return X86EMUL_OKAY;
 
 gp_fault:
@@ -1957,20 +1948,17 @@ void vmx_vlapic_msr_changed(struct vcpu 
     vmx_vmcs_exit(v);
 }
 
-static int vmx_msr_write_intercept(struct cpu_user_regs *regs)
-{
-    u32 ecx = regs->ecx;
-    u64 msr_content;
+static int vmx_msr_write_intercept(unsigned int msr, uint64_t msr_content)
+{
     struct vcpu *v = current;
 
-    HVM_DBG_LOG(DBG_LEVEL_1, "ecx=%x, eax=%x, edx=%x",
-                ecx, (u32)regs->eax, (u32)regs->edx);
-
-    msr_content = (u32)regs->eax | ((u64)regs->edx << 32);
-
-    HVMTRACE_3D (MSR_WRITE, ecx, regs->eax, regs->edx);
-
-    switch ( ecx )
+    HVM_DBG_LOG(DBG_LEVEL_1, "ecx=%x, msr_value=0x%"PRIx64,
+                msr, msr_content);
+
+    HVMTRACE_3D(MSR_WRITE, msr,
+               (uint32_t)msr_content, (uint32_t)(msr_content >> 32));
+
+    switch ( msr )
     {
     case MSR_IA32_SYSENTER_CS:
         __vmwrite(GUEST_SYSENTER_CS, msr_content);
@@ -2000,7 +1988,7 @@ static int vmx_msr_write_intercept(struc
         }
 
         if ( (rc < 0) ||
-             (vmx_add_host_load_msr(ecx) < 0) )
+             (vmx_add_host_load_msr(msr) < 0) )
             vmx_inject_hw_exception(TRAP_machine_check, 0);
         else
         {
@@ -2015,20 +2003,20 @@ static int vmx_msr_write_intercept(struc
     case MSR_IA32_VMX_BASIC...MSR_IA32_VMX_PROCBASED_CTLS2:
         goto gp_fault;
     default:
-        if ( vpmu_do_wrmsr(ecx, msr_content) )
+        if ( vpmu_do_wrmsr(msr, msr_content) )
             return X86EMUL_OKAY;
-        if ( passive_domain_do_wrmsr(regs) )
+        if ( passive_domain_do_wrmsr(msr, msr_content) )
             return X86EMUL_OKAY;
 
-        if ( wrmsr_viridian_regs(ecx, msr_content) ) 
+        if ( wrmsr_viridian_regs(msr, msr_content) ) 
             break;
 
-        switch ( long_mode_do_msr_write(regs) )
+        switch ( long_mode_do_msr_write(msr, msr_content) )
         {
             case HNDL_unhandled:
-                if ( (vmx_write_guest_msr(ecx, msr_content) != 0) &&
-                     !is_last_branch_msr(ecx) )
-                    wrmsr_hypervisor_regs(ecx, msr_content);
+                if ( (vmx_write_guest_msr(msr, msr_content) != 0) &&
+                     !is_last_branch_msr(msr) )
+                    wrmsr_hypervisor_regs(msr, msr_content);
                 break;
             case HNDL_exception_raised:
                 return X86EMUL_EXCEPTION;
@@ -2572,15 +2560,26 @@ asmlinkage void vmx_vmexit_handler(struc
         vmx_dr_access(exit_qualification, regs);
         break;
     case EXIT_REASON_MSR_READ:
+    {
+        uint64_t msr_content;
         inst_len = __get_instruction_length(); /* Safe: RDMSR */
-        if ( hvm_msr_read_intercept(regs) == X86EMUL_OKAY )
+        if ( hvm_msr_read_intercept(regs->ecx, &msr_content) == X86EMUL_OKAY )
+        {
+            regs->eax = (uint32_t)msr_content;
+            regs->edx = (uint32_t)(msr_content >> 32);
             __update_guest_eip(inst_len);
-        break;
+        }
+        break;
+    }
     case EXIT_REASON_MSR_WRITE:
+    {
+        uint64_t msr_content;
         inst_len = __get_instruction_length(); /* Safe: WRMSR */
-        if ( hvm_msr_write_intercept(regs) == X86EMUL_OKAY )
+        msr_content = ((uint64_t)regs->edx << 32) | (uint32_t)regs->eax;
+        if ( hvm_msr_write_intercept(regs->ecx, msr_content) == X86EMUL_OKAY )
             __update_guest_eip(inst_len);
         break;
+    }
 
     case EXIT_REASON_MWAIT_INSTRUCTION:
     case EXIT_REASON_MONITOR_INSTRUCTION:
diff -r 875a7ba3247e -r f70c8a65653c xen/arch/x86/oprofile/nmi_int.c
--- a/xen/arch/x86/oprofile/nmi_int.c   Thu Jun 10 08:19:58 2010 +0100
+++ b/xen/arch/x86/oprofile/nmi_int.c   Thu Jun 10 08:24:07 2010 +0100
@@ -35,14 +35,14 @@ static unsigned long saved_lvtpc[NR_CPUS
 
 static char *cpu_type;
 
-static int passive_domain_msr_op_checks(struct cpu_user_regs *regs ,int *typep, int *indexp)
+static int passive_domain_msr_op_checks(unsigned int msr, int *typep, int *indexp)
 {
        struct vpmu_struct *vpmu = vcpu_vpmu(current);
        if ( model == NULL )
                return 0;
        if ( model->is_arch_pmu_msr == NULL )
                return 0;
-       if ( !model->is_arch_pmu_msr((u64)regs->ecx, typep, indexp) )
+       if ( !model->is_arch_pmu_msr(msr, typep, indexp) )
                return 0;
 
        if ( !(vpmu->flags & PASSIVE_DOMAIN_ALLOCATED) )
@@ -51,29 +51,24 @@ static int passive_domain_msr_op_checks(
        return 1;
 }
 
-int passive_domain_do_rdmsr(struct cpu_user_regs *regs)
-{
-       u64 msr_content;
+int passive_domain_do_rdmsr(unsigned int msr, uint64_t *msr_content)
+{
        int type, index;
 
-       if ( !passive_domain_msr_op_checks(regs, &type, &index))
-               return 0;
-
-       model->load_msr(current, type, index, &msr_content);
-       regs->eax = msr_content & 0xFFFFFFFF;
-       regs->edx = msr_content >> 32;
-       return 1;
-}
-
-int passive_domain_do_wrmsr(struct cpu_user_regs *regs)
-{
-       u64 msr_content;
+       if ( !passive_domain_msr_op_checks(msr, &type, &index))
+               return 0;
+
+       model->load_msr(current, type, index, msr_content);
+       return 1;
+}
+
+int passive_domain_do_wrmsr(unsigned int msr, uint64_t msr_content)
+{
        int type, index;
 
-       if ( !passive_domain_msr_op_checks(regs, &type, &index))
-               return 0;
-
-       msr_content = (u32)regs->eax | ((u64)regs->edx << 32);
+       if ( !passive_domain_msr_op_checks(msr, &type, &index))
+               return 0;
+
        model->save_msr(current, type, index, msr_content);
        return 1;
 }
diff -r 875a7ba3247e -r f70c8a65653c xen/include/asm-x86/hvm/hvm.h
--- a/xen/include/asm-x86/hvm/hvm.h     Thu Jun 10 08:19:58 2010 +0100
+++ b/xen/include/asm-x86/hvm/hvm.h     Thu Jun 10 08:24:07 2010 +0100
@@ -138,8 +138,8 @@ struct hvm_function_table {
         unsigned int *ecx, unsigned int *edx);
     void (*wbinvd_intercept)(void);
     void (*fpu_dirty_intercept)(void);
-    int (*msr_read_intercept)(struct cpu_user_regs *regs);
-    int (*msr_write_intercept)(struct cpu_user_regs *regs);
+    int (*msr_read_intercept)(unsigned int msr, uint64_t *msr_content);
+    int (*msr_write_intercept)(unsigned int msr, uint64_t msr_content);
     void (*invlpg_intercept)(unsigned long vaddr);
     void (*set_uc_mode)(struct vcpu *v);
     void (*set_info_guest)(struct vcpu *v);
diff -r 875a7ba3247e -r f70c8a65653c xen/include/asm-x86/hvm/support.h
--- a/xen/include/asm-x86/hvm/support.h Thu Jun 10 08:19:58 2010 +0100
+++ b/xen/include/asm-x86/hvm/support.h Thu Jun 10 08:24:07 2010 +0100
@@ -133,7 +133,7 @@ int hvm_set_cr0(unsigned long value);
 int hvm_set_cr0(unsigned long value);
 int hvm_set_cr3(unsigned long value);
 int hvm_set_cr4(unsigned long value);
-int hvm_msr_read_intercept(struct cpu_user_regs *regs);
-int hvm_msr_write_intercept(struct cpu_user_regs *regs);
+int hvm_msr_read_intercept(unsigned int msr, uint64_t *msr_content);
+int hvm_msr_write_intercept(unsigned int msr, uint64_t msr_content);
 
 #endif /* __ASM_X86_HVM_SUPPORT_H__ */
diff -r 875a7ba3247e -r f70c8a65653c xen/include/asm-x86/xenoprof.h
--- a/xen/include/asm-x86/xenoprof.h    Thu Jun 10 08:19:58 2010 +0100
+++ b/xen/include/asm-x86/xenoprof.h    Thu Jun 10 08:24:07 2010 +0100
@@ -64,8 +64,8 @@ void xenoprof_backtrace(
                  "xenoprof/x86 with autotranslated mode enabled"    \
                  "isn't supported yet\n");                          \
     } while (0)
-int passive_domain_do_rdmsr(struct cpu_user_regs *regs);
-int passive_domain_do_wrmsr(struct cpu_user_regs *regs);
+int passive_domain_do_rdmsr(unsigned int msr, uint64_t *msr_content);
+int passive_domain_do_wrmsr(unsigned int msr, uint64_t msr_content);
 void passive_domain_destroy(struct vcpu *v);
 
 #endif /* __ASM_X86_XENOPROF_H__ */
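
With the marshalling confined to the two vmexit paths, every other
in-hypervisor consumer now deals purely in (msr, value) pairs. A hypothetical
illustration of a new caller (not part of the patch; it must run in HVM vcpu
context, since the intercept handlers use 'current', and a failed read has
already injected #GP into the guest by the time X86EMUL_EXCEPTION is
returned):

    static void dump_guest_pat(void)
    {
        uint64_t pat;

        /* MSR_IA32_CR_PAT is handled directly in hvm_msr_read_intercept(). */
        if ( hvm_msr_read_intercept(MSR_IA32_CR_PAT, &pat) == X86EMUL_OKAY )
            gdprintk(XENLOG_DEBUG, "guest PAT = 0x%"PRIx64"\n", pat);
    }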

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
