diff -r 04e81a65d27e xen/arch/x86/cpu/amd.c
--- a/xen/arch/x86/cpu/amd.c    Thu Jun 10 22:39:52 2010 +0100
+++ b/xen/arch/x86/cpu/amd.c    Fri Jun 11 11:31:13 2010 +0200
@@ -235,18 +235,18 @@ int force_mwait __cpuinitdata;
 
 static void disable_c1e(void *unused)
 {
-    u32 lo, hi;
+    uint64_t msr_content;
 
     /*
     * Disable C1E mode, as the APIC timer stops in that mode.
     * The MSR does not exist in all FamilyF CPUs (only Rev F and above),
     * but we safely catch the #GP in that case.
     */
-    if ((rdmsr_safe(MSR_K8_ENABLE_C1E, lo, hi) == 0) &&
-        (lo & (3u << 27)) &&
-        (wrmsr_safe(MSR_K8_ENABLE_C1E, lo & ~(3u << 27), hi) != 0))
-        printk(KERN_ERR "Failed to disable C1E on CPU#%u (%08x)\n",
-               smp_processor_id(), lo);
+    if ((rdmsr_safe(MSR_K8_ENABLE_C1E, msr_content) == 0) &&
+        (msr_content & (3ULL << 27)) &&
+        (wrmsr_safe(MSR_K8_ENABLE_C1E, msr_content & ~(3ULL << 27)) != 0))
+        printk(KERN_ERR "Failed to disable C1E on CPU#%u (%016"PRIx64")\n",
+               smp_processor_id(), msr_content);
 }
 
 static void check_disable_c1e(unsigned int port, u8 value)
diff -r 04e81a65d27e xen/arch/x86/cpu/mtrr/generic.c
--- a/xen/arch/x86/cpu/mtrr/generic.c   Thu Jun 10 22:39:52 2010 +0100
+++ b/xen/arch/x86/cpu/mtrr/generic.c   Fri Jun 11 11:31:13 2010 +0200
@@ -107,7 +107,10 @@ void __init mtrr_state_warn(void)
    worth it because the best error handling is to ignore it. */
 void mtrr_wrmsr(unsigned msr, unsigned a, unsigned b)
 {
-    if (wrmsr_safe(msr, a, b) < 0)
+    uint64_t msr_content;
+
+    msr_content = ((uint64_t)b << 32) | a;
+    if (wrmsr_safe(msr, msr_content) < 0)
         printk(KERN_ERR
             "MTRR: CPU %u: Writing MSR %x to %x:%x failed\n",
             smp_processor_id(), msr, a, b);
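Note on the amd.c hunk: the width change is exactly where a subtle bug is easy to introduce. With msr_content now 64 bits wide, a 32-bit mask such as ~(3u << 27) is complemented in 32 bits and then zero-extended, silently clearing bits 32-63 of the value written back; the masks above therefore use 3ULL. A minimal standalone sketch of the resulting read-modify-write pattern (illustrative fragment, not part of the patch):

    uint64_t msr_content;

    /* Read, test, and clear the two C1E enable bits (27 and 28) while
     * preserving the upper half of the MSR.  Writing ~(3u << 27) instead
     * of ~(3ULL << 27) would zero-extend and wipe bits 32-63. */
    if ((rdmsr_safe(MSR_K8_ENABLE_C1E, msr_content) == 0) &&
        (msr_content & (3ULL << 27)))
        wrmsr_safe(MSR_K8_ENABLE_C1E, msr_content & ~(3ULL << 27));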
diff -r 04e81a65d27e xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c    Thu Jun 10 22:39:52 2010 +0100
+++ b/xen/arch/x86/hvm/svm/svm.c    Fri Jun 11 11:31:13 2010 +0200
@@ -857,14 +857,14 @@ static void svm_init_erratum_383(struct
 static int svm_cpu_up(void)
 {
-    u32 eax, edx, phys_hsa_lo, phys_hsa_hi;
-    u64 phys_hsa;
+    u32 phys_hsa_lo, phys_hsa_hi;
+    uint64_t phys_hsa, msr_content;
     int rc, cpu = smp_processor_id();
     struct cpuinfo_x86 *c = &cpu_data[cpu];
 
     /* Check whether SVM feature is disabled in BIOS */
-    rdmsr(MSR_K8_VM_CR, eax, edx);
-    if ( eax & K8_VMCR_SVME_DISABLE )
+    rdmsrl(MSR_K8_VM_CR, msr_content);
+    if ( msr_content & K8_VMCR_SVME_DISABLE )
     {
         printk("CPU%d: AMD SVM Extension is disabled in BIOS.\n", cpu);
         return -EINVAL;
     }
@@ -892,15 +892,14 @@ static int svm_cpu_up(void)
      * Check whether EFER.LMSLE can be written.
      * Unfortunately there's no feature bit defined for this.
      */
-    eax = read_efer();
-    edx = read_efer() >> 32;
-    if ( wrmsr_safe(MSR_EFER, eax | EFER_LMSLE, edx) == 0 )
-        rdmsr(MSR_EFER, eax, edx);
-    if ( eax & EFER_LMSLE )
+    msr_content = read_efer();
+    if ( wrmsr_safe(MSR_EFER, msr_content | EFER_LMSLE) == 0 )
+        rdmsrl(MSR_EFER, msr_content);
+    if ( msr_content & EFER_LMSLE )
     {
         if ( c == &boot_cpu_data )
             cpu_has_lmsl = 1;
-        wrmsr(MSR_EFER, eax ^ EFER_LMSLE, edx);
+        wrmsrl(MSR_EFER, msr_content ^ EFER_LMSLE);
     }
     else
     {
@@ -1033,7 +1032,6 @@ static void svm_dr_access(struct vcpu *v
 
 static int svm_msr_read_intercept(unsigned int msr, uint64_t *msr_content)
 {
-    u32 eax, edx;
     struct vcpu *v = current;
     struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
 
@@ -1111,11 +1109,8 @@ static int svm_msr_read_intercept(unsign
             rdmsr_hypervisor_regs(msr, msr_content) )
             break;
 
-        if ( rdmsr_safe(msr, eax, edx) == 0 )
-        {
-            *msr_content = ((uint64_t)edx << 32) | eax;
+        if ( rdmsr_safe(msr, *msr_content) == 0 )
             break;
-        }
 
         goto gpf;
     }
diff -r 04e81a65d27e xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c    Thu Jun 10 22:39:52 2010 +0100
+++ b/xen/arch/x86/hvm/vmx/vmx.c    Fri Jun 11 11:31:13 2010 +0200
@@ -1809,8 +1809,6 @@ static int is_last_branch_msr(u32 ecx)
 
 static int vmx_msr_read_intercept(unsigned int msr, uint64_t *msr_content)
 {
-    u32 eax, edx;
-
     HVM_DBG_LOG(DBG_LEVEL_1, "ecx=%x", msr);
 
     switch ( msr )
@@ -1866,11 +1864,8 @@ static int vmx_msr_read_intercept(unsign
             rdmsr_hypervisor_regs(msr, msr_content) )
             break;
 
-        if ( rdmsr_safe(msr, eax, edx) == 0 )
-        {
-            *msr_content = ((uint64_t)edx << 32) | eax;
+        if ( rdmsr_safe(msr, *msr_content) == 0 )
             break;
-        }
 
         goto gp_fault;
     }
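In both svm_msr_read_intercept() and vmx_msr_read_intercept(), the default path benefits most: rdmsr_safe() now stores the combined 64-bit result straight through the msr_content out-parameter, so the temporary eax/edx pair and the manual recombination disappear. A sketch of the resulting shape of such a fallback path (read_msr_fallback is a hypothetical helper, shown for illustration only):

    /* Hypothetical fallback: try Xen-synthesised MSRs first, then the
     * hardware MSR; a nonzero return means the caller injects #GP. */
    static int read_msr_fallback(unsigned int msr, uint64_t *msr_content)
    {
        if ( rdmsr_hypervisor_regs(msr, msr_content) )
            return 0;                      /* handled by Xen */
        if ( rdmsr_safe(msr, *msr_content) == 0 )
            return 0;                      /* hardware value */
        return -1;                         /* faulted: inject #GP */
    }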
diff -r 04e81a65d27e xen/arch/x86/traps.c
--- a/xen/arch/x86/traps.c  Thu Jun 10 22:39:52 2010 +0100
+++ b/xen/arch/x86/traps.c  Fri Jun 11 11:31:13 2010 +0200
@@ -1716,8 +1716,7 @@ static int emulate_privileged_op(struct
     unsigned long code_base, code_limit;
     char io_emul_stub[32];
     void (*io_emul)(struct cpu_user_regs *) __attribute__((__regparm__(1)));
-    uint32_t l, h;
-    uint64_t val;
+    uint64_t val, msr_content;
 
     if ( !read_descriptor(regs->cs, v, regs,
                           &code_base, &code_limit, &ar,
@@ -2185,32 +2184,32 @@ static int emulate_privileged_op(struct
         break;
 
     case 0x30: /* WRMSR */ {
-        u32 eax = regs->eax;
-        u32 edx = regs->edx;
-        u64 val = ((u64)edx << 32) | eax;
+        uint32_t eax = regs->eax;
+        uint32_t edx = regs->edx;
+        msr_content = ((uint64_t)edx << 32) | eax;
         switch ( (u32)regs->ecx )
         {
 #ifdef CONFIG_X86_64
         case MSR_FS_BASE:
             if ( is_pv_32on64_vcpu(v) )
                 goto fail;
-            if ( wrmsr_safe(MSR_FS_BASE, eax, edx) )
+            if ( wrmsr_safe(MSR_FS_BASE, msr_content) )
                 goto fail;
-            v->arch.guest_context.fs_base = val;
+            v->arch.guest_context.fs_base = msr_content;
             break;
         case MSR_GS_BASE:
             if ( is_pv_32on64_vcpu(v) )
                 goto fail;
-            if ( wrmsr_safe(MSR_GS_BASE, eax, edx) )
+            if ( wrmsr_safe(MSR_GS_BASE, msr_content) )
                 goto fail;
-            v->arch.guest_context.gs_base_kernel = val;
+            v->arch.guest_context.gs_base_kernel = msr_content;
             break;
         case MSR_SHADOW_GS_BASE:
             if ( is_pv_32on64_vcpu(v) )
                 goto fail;
-            if ( wrmsr_safe(MSR_SHADOW_GS_BASE, eax, edx) )
+            if ( wrmsr_safe(MSR_SHADOW_GS_BASE, msr_content) )
                 goto fail;
-            v->arch.guest_context.gs_base_user = val;
+            v->arch.guest_context.gs_base_user = msr_content;
             break;
 #endif
         case MSR_K7_FID_VID_STATUS:
@@ -2230,7 +2229,7 @@ static int emulate_privileged_op(struct
                 goto fail;
             if ( !is_cpufreq_controller(v->domain) )
                 break;
-            if ( wrmsr_safe(regs->ecx, eax, edx) != 0 )
+            if ( wrmsr_safe(regs->ecx, msr_content) != 0 )
                 goto fail;
             break;
         case MSR_AMD64_NB_CFG:
@@ -2239,11 +2238,11 @@ static int emulate_privileged_op(struct
                 goto fail;
             if ( !IS_PRIV(v->domain) )
                 break;
-            if ( (rdmsr_safe(MSR_AMD64_NB_CFG, l, h) != 0) ||
-                 (eax != l) ||
-                 ((edx ^ h) & ~(1 << (AMD64_NB_CFG_CF8_EXT_ENABLE_BIT - 32))) )
+            if ( (rdmsr_safe(MSR_AMD64_NB_CFG, val) != 0) ||
+                 (eax != (uint32_t)val) ||
+                 ((edx ^ (val >> 32)) & ~(1 << (AMD64_NB_CFG_CF8_EXT_ENABLE_BIT - 32))) )
                 goto invalid;
-            if ( wrmsr_safe(MSR_AMD64_NB_CFG, eax, edx) != 0 )
+            if ( wrmsr_safe(MSR_AMD64_NB_CFG, msr_content) != 0 )
                 goto fail;
             break;
         case MSR_FAM10H_MMIO_CONF_BASE:
@@ -2252,15 +2251,15 @@ static int emulate_privileged_op(struct
                 goto fail;
             if ( !IS_PRIV(v->domain) )
                 break;
-            if ( (rdmsr_safe(MSR_FAM10H_MMIO_CONF_BASE, l, h) != 0) ||
-                 (((((u64)h << 32) | l) ^ val) &
+            if ( (rdmsr_safe(MSR_FAM10H_MMIO_CONF_BASE, val) != 0) ||
+                 ((val ^ msr_content) &
                   ~( FAM10H_MMIO_CONF_ENABLE |
                      (FAM10H_MMIO_CONF_BUSRANGE_MASK <<
                       FAM10H_MMIO_CONF_BUSRANGE_SHIFT) |
                      ((u64)FAM10H_MMIO_CONF_BASE_MASK <<
                       FAM10H_MMIO_CONF_BASE_SHIFT))) )
                 goto invalid;
-            if ( wrmsr_safe(MSR_FAM10H_MMIO_CONF_BASE, eax, edx) != 0 )
+            if ( wrmsr_safe(MSR_FAM10H_MMIO_CONF_BASE, msr_content) != 0 )
                 goto fail;
             break;
         case MSR_IA32_MPERF:
@@ -2270,7 +2269,7 @@ static int emulate_privileged_op(struct
                 goto fail;
             if ( !is_cpufreq_controller(v->domain) )
                 break;
-            if ( wrmsr_safe(regs->ecx, eax, edx) != 0 )
+            if ( wrmsr_safe(regs->ecx, msr_content) != 0 )
                 goto fail;
             break;
         case MSR_IA32_THERM_CONTROL:
@@ -2278,25 +2277,25 @@ static int emulate_privileged_op(struct
                 goto fail;
             if ( (v->domain->domain_id != 0) || !v->domain->is_pinned )
                 break;
-            if ( wrmsr_safe(regs->ecx, eax, edx) != 0 )
+            if ( wrmsr_safe(regs->ecx, msr_content) != 0 )
                 goto fail;
             break;
         default:
-            if ( wrmsr_hypervisor_regs(regs->ecx, val) )
+            if ( wrmsr_hypervisor_regs(regs->ecx, msr_content) )
                 break;
 
-            rc = vmce_wrmsr(regs->ecx, val);
+            rc = vmce_wrmsr(regs->ecx, msr_content);
             if ( rc < 0 )
                 goto fail;
             if ( rc )
                 break;
 
-            if ( (rdmsr_safe(regs->ecx, l, h) != 0) ||
-                 (eax != l) || (edx != h) )
+            if ( (rdmsr_safe(regs->ecx, val) != 0) ||
+                 (eax != (uint32_t)val) || (edx != (uint32_t)(val >> 32)) )
         invalid:
                 gdprintk(XENLOG_WARNING, "Domain attempted WRMSR %p from "
-                         "%08x:%08x to %08x:%08x.\n",
-                         _p(regs->ecx), h, l, edx, eax);
+                         "0x%016"PRIx64" to 0x%016"PRIx64".\n",
+                         _p(regs->ecx), val, msr_content);
             break;
         }
         break;
@@ -2355,12 +2354,16 @@ static int emulate_privileged_op(struct
             regs->eax = regs->edx = 0;
             break;
         }
-        if ( rdmsr_safe(regs->ecx, regs->eax, regs->edx) != 0 )
+        if ( rdmsr_safe(regs->ecx, msr_content) != 0 )
             goto fail;
+        regs->eax = (uint32_t)msr_content;
+        regs->edx = (uint32_t)(msr_content >> 32);
         break;
     case MSR_IA32_MISC_ENABLE:
-        if ( rdmsr_safe(regs->ecx, regs->eax, regs->edx) )
+        if ( rdmsr_safe(regs->ecx, msr_content) )
             goto fail;
+        regs->eax = (uint32_t)msr_content;
+        regs->edx = (uint32_t)(msr_content >> 32);
         regs->eax &= ~(MSR_IA32_MISC_ENABLE_PERF_AVAIL |
                        MSR_IA32_MISC_ENABLE_MONITOR_ENABLE);
         regs->eax |= MSR_IA32_MISC_ENABLE_BTS_UNAVAIL |
@@ -2387,8 +2390,10 @@ static int emulate_privileged_op(struct
         /* Everyone can read the MSR space. */
         /* gdprintk(XENLOG_WARNING,"Domain attempted RDMSR %p.\n",
                     _p(regs->ecx));*/
-        if ( rdmsr_safe(regs->ecx, regs->eax, regs->edx) )
+        if ( rdmsr_safe(regs->ecx, msr_content) )
             goto fail;
+        regs->eax = (uint32_t)msr_content;
+        regs->edx = (uint32_t)(msr_content >> 32);
         break;
     }
     break;
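Since msr_content already holds ((uint64_t)edx << 32) | eax, the read-back check in the default WRMSR case no longer has to compare the halves separately; the patch keeps the split form, but it is equivalent to a single 64-bit comparison. A sketch of the collapsed form (illustrative alternative, not what the hunk above does):

    /* val holds the value read back; msr_content holds what the guest
     * asked to write.  One 64-bit compare replaces the two 32-bit ones. */
    if ( (rdmsr_safe(regs->ecx, val) != 0) || (val != msr_content) )
        gdprintk(XENLOG_WARNING, "Domain attempted WRMSR %p from "
                 "0x%016"PRIx64" to 0x%016"PRIx64".\n",
                 _p(regs->ecx), val, msr_content);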
diff -r 04e81a65d27e xen/arch/x86/x86_64/mm.c
--- a/xen/arch/x86/x86_64/mm.c  Thu Jun 10 22:39:52 2010 +0100
+++ b/xen/arch/x86/x86_64/mm.c  Fri Jun 11 11:31:13 2010 +0200
@@ -1079,21 +1079,21 @@ long do_set_segment_base(unsigned int wh
     switch ( which )
     {
     case SEGBASE_FS:
-        if ( wrmsr_safe(MSR_FS_BASE, base, base>>32) )
+        if ( wrmsr_safe(MSR_FS_BASE, base) )
             ret = -EFAULT;
         else
             v->arch.guest_context.fs_base = base;
         break;
     case SEGBASE_GS_USER:
-        if ( wrmsr_safe(MSR_SHADOW_GS_BASE, base, base>>32) )
+        if ( wrmsr_safe(MSR_SHADOW_GS_BASE, base) )
             ret = -EFAULT;
         else
             v->arch.guest_context.gs_base_user = base;
         break;
     case SEGBASE_GS_KERNEL:
-        if ( wrmsr_safe(MSR_GS_BASE, base, base>>32) )
+        if ( wrmsr_safe(MSR_GS_BASE, base) )
             ret = -EFAULT;
         else
             v->arch.guest_context.gs_base_kernel = base;
         break;
     case SEGBASE_GS_USER_SEL:
diff -r 04e81a65d27e xen/arch/x86/x86_64/mmconfig-shared.c
--- a/xen/arch/x86/x86_64/mmconfig-shared.c Thu Jun 10 22:39:52 2010 +0100
+++ b/xen/arch/x86/x86_64/mmconfig-shared.c Fri Jun 11 11:31:13 2010 +0200
@@ -125,8 +125,8 @@ static const char __init *pci_mmcfg_inte
 
 static const char __init *pci_mmcfg_amd_fam10h(void)
 {
-    u32 low, high, address;
-    u64 base, msr;
+    uint32_t address;
+    uint64_t base, msr_content;
     int i;
     unsigned segnbits = 0, busnbits;
 
@@ -134,20 +134,16 @@ static const char __init *pci_mmcfg_amd_
         return NULL;
 
     address = MSR_FAM10H_MMIO_CONF_BASE;
-    if (rdmsr_safe(address, low, high))
+    if (rdmsr_safe(address, msr_content))
         return NULL;
 
-    msr = high;
-    msr <<= 32;
-    msr |= low;
-
     /* mmconfig is not enable */
-    if (!(msr & FAM10H_MMIO_CONF_ENABLE))
+    if (!(msr_content & FAM10H_MMIO_CONF_ENABLE))
         return NULL;
 
-    base = msr & (FAM10H_MMIO_CONF_BASE_MASK<<FAM10H_MMIO_CONF_BASE_SHIFT);
+    base = msr_content & (FAM10H_MMIO_CONF_BASE_MASK<<FAM10H_MMIO_CONF_BASE_SHIFT);
 
-    busnbits = (msr >> FAM10H_MMIO_CONF_BUSRANGE_SHIFT) &
+    busnbits = (msr_content >> FAM10H_MMIO_CONF_BUSRANGE_SHIFT) &
         FAM10H_MMIO_CONF_BUSRANGE_MASK;
 
     /*
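The pci_mmcfg_amd_fam10h() hunk shows the payoff on the read side: no manual high/low reassembly before the field extraction. For reference, the BusRange field decoded into busnbits gives log2 of the number of buses the MMCONFIG window covers, and each bus occupies 1 MiB of ECAM configuration space (4 KiB x 8 functions x 32 devices). A hypothetical fragment summarizing the decode, using the constants from this file (window_size is an illustrative name, not in the patch):

    uint64_t base, window_size;
    unsigned int busnbits;

    base = msr_content & (FAM10H_MMIO_CONF_BASE_MASK <<
                          FAM10H_MMIO_CONF_BASE_SHIFT);
    busnbits = (msr_content >> FAM10H_MMIO_CONF_BUSRANGE_SHIFT) &
               FAM10H_MMIO_CONF_BUSRANGE_MASK;
    window_size = (uint64_t)1 << (busnbits + 20);  /* 2^busnbits MiB */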
2b\n" + ".previous\n" + ".section __ex_table,\"a\"\n" + " "__FIXUP_ALIGN"\n" + " "__FIXUP_WORD" 1b,3b\n" + ".previous\n" + : "=&r" (_rc) + : "c" (msr), "a" (lo), "d" (hi), "0" (0), "i" (-EFAULT)); + return _rc; +} #define rdtsc(low,high) \ __asm__ __volatile__("rdtsc" : "=a" (low), "=d" (high))