diff -r 7994e7c5991e xen/arch/x86/acpi/cpufreq/cpufreq.c --- a/xen/arch/x86/acpi/cpufreq/cpufreq.c Wed May 26 10:11:50 2010 +0200 +++ b/xen/arch/x86/acpi/cpufreq/cpufreq.c Wed May 26 18:32:03 2010 +0200 @@ -137,13 +137,12 @@ struct drv_cmd { static void do_drv_read(void *drvcmd) { struct drv_cmd *cmd; - u32 h; cmd = (struct drv_cmd *)drvcmd; switch (cmd->type) { case SYSTEM_INTEL_MSR_CAPABLE: - rdmsr(cmd->addr.msr.reg, cmd->val, h); + cmd->val = rdmsr(cmd->addr.msr.reg); break; case SYSTEM_IO_CAPABLE: acpi_os_read_port((acpi_io_address)cmd->addr.io.port, @@ -157,15 +156,16 @@ static void do_drv_read(void *drvcmd) static void do_drv_write(void *drvcmd) { struct drv_cmd *cmd; - u32 lo, hi; + uint64_t msr_content; cmd = (struct drv_cmd *)drvcmd; switch (cmd->type) { case SYSTEM_INTEL_MSR_CAPABLE: - rdmsr(cmd->addr.msr.reg, lo, hi); - lo = (lo & ~INTEL_MSR_RANGE) | (cmd->val & INTEL_MSR_RANGE); - wrmsr(cmd->addr.msr.reg, lo, hi); + msr_content = rdmsr(cmd->addr.msr.reg); + msr_content = (msr_content & ~INTEL_MSR_RANGE) + | (cmd->val & INTEL_MSR_RANGE); + wrmsr(cmd->addr.msr.reg, msr_content); break; case SYSTEM_IO_CAPABLE: acpi_os_write_port((acpi_io_address)cmd->addr.io.port, @@ -252,8 +252,8 @@ static void read_measured_perf_ctrs(void { struct perf_pair *readin = _readin; - rdmsr(MSR_IA32_APERF, readin->aperf.split.lo, readin->aperf.split.hi); - rdmsr(MSR_IA32_MPERF, readin->mperf.split.lo, readin->mperf.split.hi); + readin->aperf.whole = rdmsr(MSR_IA32_APERF); + readin->mperf.whole = rdmsr(MSR_IA32_MPERF); } /* diff -r 7994e7c5991e xen/arch/x86/acpi/cpufreq/powernow.c --- a/xen/arch/x86/acpi/cpufreq/powernow.c Wed May 26 10:11:50 2010 +0200 +++ b/xen/arch/x86/acpi/cpufreq/powernow.c Wed May 26 18:32:03 2010 +0200 @@ -77,15 +77,15 @@ static void transition_pstate(void *drvc cmd = (struct drv_cmd *) drvcmd; if (cmd->turbo != CPUFREQ_TURBO_UNSUPPORTED) { - u32 lo, hi; - rdmsr(MSR_K8_HWCR, lo, hi); + uint64_t msr_content; + msr_content = rdmsr(MSR_K8_HWCR); if (cmd->turbo == CPUFREQ_TURBO_ENABLED) - lo &= ~MSR_HWCR_CPBDIS_MASK; + msr_content &= ~MSR_HWCR_CPBDIS_MASK; else - lo |= MSR_HWCR_CPBDIS_MASK; - wrmsr(MSR_K8_HWCR, lo, hi); + msr_content |= MSR_HWCR_CPBDIS_MASK; + wrmsr(MSR_K8_HWCR, msr_content); } - wrmsr(MSR_PSTATE_CTRL, cmd->val, 0); + wrmsr(MSR_PSTATE_CTRL, cmd->val); } static int powernow_cpufreq_target(struct cpufreq_policy *policy, @@ -194,7 +194,8 @@ static int powernow_cpufreq_cpu_init(str struct powernow_cpufreq_data *data; unsigned int result = 0; struct processor_performance *perf; - u32 max_hw_pstate, hi = 0, lo = 0; + u32 max_hw_pstate, hi; + uint64_t msr_content; struct cpuinfo_x86 *c = &cpu_data[policy->cpu]; data = xmalloc(struct powernow_cpufreq_data); @@ -226,7 +227,8 @@ static int powernow_cpufreq_cpu_init(str result = -ENODEV; goto err_unreg; } - rdmsr(MSR_PSTATE_CUR_LIMIT, hi, lo); + msr_content = rdmsr(MSR_PSTATE_CUR_LIMIT); + hi = (uint32_t)(msr_content >> 32); max_hw_pstate = (hi & HW_PSTATE_MAX_MASK) >> HW_PSTATE_MAX_SHIFT; if (perf->control_register.space_id != perf->status_register.space_id) { diff -r 7994e7c5991e xen/arch/x86/acpi/suspend.c --- a/xen/arch/x86/acpi/suspend.c Wed May 26 10:11:50 2010 +0200 +++ b/xen/arch/x86/acpi/suspend.c Wed May 26 18:32:03 2010 +0200 @@ -25,12 +25,12 @@ void save_rest_processor_state(void) unlazy_fpu(current); #if defined(CONFIG_X86_64) - rdmsrl(MSR_CSTAR, saved_cstar); - rdmsrl(MSR_LSTAR, saved_lstar); + saved_cstar = rdmsr(MSR_CSTAR); + saved_lstar = rdmsr(MSR_LSTAR); if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL ) { - 
rdmsrl(MSR_IA32_SYSENTER_ESP, saved_sysenter_esp); - rdmsrl(MSR_IA32_SYSENTER_EIP, saved_sysenter_eip); + saved_sysenter_esp = rdmsr(MSR_IA32_SYSENTER_ESP); + saved_sysenter_eip = rdmsr(MSR_IA32_SYSENTER_EIP); } #endif } @@ -43,24 +43,23 @@ void restore_rest_processor_state(void) #if defined(CONFIG_X86_64) /* Recover syscall MSRs */ - wrmsrl(MSR_LSTAR, saved_lstar); - wrmsrl(MSR_CSTAR, saved_cstar); - wrmsr(MSR_STAR, 0, (FLAT_RING3_CS32<<16) | __HYPERVISOR_CS); + wrmsr(MSR_LSTAR, saved_lstar); + wrmsr(MSR_CSTAR, saved_cstar); + wrmsr(MSR_STAR, ( (uint64_t)((FLAT_RING3_CS32<<16) | __HYPERVISOR_CS) << 32) ); wrmsr(MSR_SYSCALL_MASK, X86_EFLAGS_VM|X86_EFLAGS_RF|X86_EFLAGS_NT| - X86_EFLAGS_DF|X86_EFLAGS_IF|X86_EFLAGS_TF, - 0U); + X86_EFLAGS_DF|X86_EFLAGS_IF|X86_EFLAGS_TF); if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL ) { /* Recover sysenter MSRs */ - wrmsrl(MSR_IA32_SYSENTER_ESP, saved_sysenter_esp); - wrmsrl(MSR_IA32_SYSENTER_EIP, saved_sysenter_eip); - wrmsr(MSR_IA32_SYSENTER_CS, __HYPERVISOR_CS, 0); + wrmsr(MSR_IA32_SYSENTER_ESP, saved_sysenter_esp); + wrmsr(MSR_IA32_SYSENTER_EIP, saved_sysenter_eip); + wrmsr(MSR_IA32_SYSENTER_CS, __HYPERVISOR_CS); } #else /* !defined(CONFIG_X86_64) */ if ( supervisor_mode_kernel && cpu_has_sep ) - wrmsr(MSR_IA32_SYSENTER_ESP, &this_cpu(init_tss).esp1, 0); + wrmsr(MSR_IA32_SYSENTER_ESP, &this_cpu(init_tss).esp1); #endif /* Maybe load the debug registers. */ @@ -79,7 +78,7 @@ void restore_rest_processor_state(void) stts(); if (cpu_has_pat) - wrmsrl(MSR_IA32_CR_PAT, host_pat); + wrmsr(MSR_IA32_CR_PAT, host_pat); mtrr_bp_restore(); } diff -r 7994e7c5991e xen/arch/x86/apic.c --- a/xen/arch/x86/apic.c Wed May 26 10:11:50 2010 +0200 +++ b/xen/arch/x86/apic.c Wed May 26 18:32:03 2010 +0200 @@ -342,10 +342,10 @@ void disable_local_APIC(void) apic_write_around(APIC_SPIV, value); if (enabled_via_apicbase) { - unsigned int l, h; - rdmsr(MSR_IA32_APICBASE, l, h); - l &= ~MSR_IA32_APICBASE_ENABLE; - wrmsr(MSR_IA32_APICBASE, l, h); + uint64_t msr_content; + msr_content = rdmsr(MSR_IA32_APICBASE); + msr_content &= ~MSR_IA32_APICBASE_ENABLE; + wrmsr(MSR_IA32_APICBASE, msr_content); } } @@ -705,7 +705,7 @@ int lapic_suspend(void) int lapic_resume(void) { - unsigned int l, h; + uint64_t msr_content; unsigned long flags; int maxlvt; @@ -722,10 +722,10 @@ int lapic_resume(void) */ if ( !x2apic_enabled ) { - rdmsr(MSR_IA32_APICBASE, l, h); - l &= ~MSR_IA32_APICBASE_BASE; - l |= MSR_IA32_APICBASE_ENABLE | mp_lapic_addr; - wrmsr(MSR_IA32_APICBASE, l, h); + msr_content = rdmsr(MSR_IA32_APICBASE); + msr_content &= ~MSR_IA32_APICBASE_BASE; + msr_content |= MSR_IA32_APICBASE_ENABLE | mp_lapic_addr; + wrmsr(MSR_IA32_APICBASE, msr_content); } else enable_x2apic(); @@ -814,7 +814,8 @@ custom_param("apic_verbosity", apic_set_ static int __init detect_init_APIC (void) { - u32 h, l, features; + uint64_t msr_content; + u32 features; /* Disabled by kernel option? */ if (enable_local_apic < 0) @@ -851,12 +852,12 @@ static int __init detect_init_APIC (void * software for Intel P6 or later and AMD K7 * (Model > 1) or later. 
*/ - rdmsr(MSR_IA32_APICBASE, l, h); - if (!(l & MSR_IA32_APICBASE_ENABLE)) { + msr_content = rdmsr(MSR_IA32_APICBASE); + if (!(msr_content & MSR_IA32_APICBASE_ENABLE)) { printk("Local APIC disabled by BIOS -- reenabling.\n"); - l &= ~MSR_IA32_APICBASE_BASE; - l |= MSR_IA32_APICBASE_ENABLE | APIC_DEFAULT_PHYS_BASE; - wrmsr(MSR_IA32_APICBASE, l, h); + msr_content &= ~MSR_IA32_APICBASE_BASE; + msr_content |= MSR_IA32_APICBASE_ENABLE | APIC_DEFAULT_PHYS_BASE; + wrmsr(MSR_IA32_APICBASE, msr_content); enabled_via_apicbase = 1; } } @@ -874,9 +875,9 @@ static int __init detect_init_APIC (void mp_lapic_addr = APIC_DEFAULT_PHYS_BASE; /* The BIOS may have set up the APIC at some other address */ - rdmsr(MSR_IA32_APICBASE, l, h); - if (l & MSR_IA32_APICBASE_ENABLE) - mp_lapic_addr = l & MSR_IA32_APICBASE_BASE; + msr_content = rdmsr(MSR_IA32_APICBASE); + if (msr_content & MSR_IA32_APICBASE_ENABLE) + mp_lapic_addr = msr_content & MSR_IA32_APICBASE_BASE; if (nmi_watchdog != NMI_NONE) nmi_watchdog = NMI_LOCAL_APIC; @@ -894,7 +895,7 @@ no_apic: void enable_x2apic(void) { - u32 lo, hi; + uint64_t msr_content; if ( smp_processor_id() == 0 ) { @@ -922,11 +923,12 @@ void enable_x2apic(void) BUG_ON(!x2apic_enabled); /* APs only enable x2apic when BSP did so. */ } - rdmsr(MSR_IA32_APICBASE, lo, hi); - if ( !(lo & MSR_IA32_APICBASE_EXTD) ) + msr_content = rdmsr(MSR_IA32_APICBASE); + if ( !(msr_content & MSR_IA32_APICBASE_EXTD) ) { - lo |= MSR_IA32_APICBASE_ENABLE | MSR_IA32_APICBASE_EXTD; - wrmsr(MSR_IA32_APICBASE, lo, 0); + msr_content |= MSR_IA32_APICBASE_ENABLE | MSR_IA32_APICBASE_EXTD; + msr_content &= (uint32_t)msr_content; + wrmsr(MSR_IA32_APICBASE, msr_content); printk("x2APIC mode enabled.\n"); } else diff -r 7994e7c5991e xen/arch/x86/cpu/amd.c --- a/xen/arch/x86/cpu/amd.c Wed May 26 10:11:50 2010 +0200 +++ b/xen/arch/x86/cpu/amd.c Wed May 26 18:32:03 2010 +0200 @@ -61,6 +61,7 @@ static inline void wrmsr_amd(unsigned in */ static void __devinit set_cpuidmask(struct cpuinfo_x86 *c) { + uint64_t msr_content; static unsigned int feat_ecx, feat_edx; static unsigned int extfeat_ecx, extfeat_edx; static enum { not_parsed, no_mask, set_mask } status; @@ -146,8 +147,10 @@ static void __devinit set_cpuidmask(stru /* FIXME check if processor supports CPUID masking */ /* AMD processors prior to family 10h required a 32-bit password */ if (c->x86 >= 0x10) { - wrmsr(MSR_K8_FEATURE_MASK, feat_edx, feat_ecx); - wrmsr(MSR_K8_EXT_FEATURE_MASK, extfeat_edx, extfeat_ecx); + msr_content = (uint64_t)(feat_ecx) << 32 | feat_edx; + wrmsr(MSR_K8_FEATURE_MASK, msr_content); + msr_content = (uint64_t)(extfeat_ecx) << 32 | extfeat_edx; + wrmsr(MSR_K8_EXT_FEATURE_MASK, msr_content); } else if (c->x86 == 0x0f) { wrmsr_amd(MSR_K8_FEATURE_MASK, feat_edx, feat_ecx); wrmsr_amd(MSR_K8_EXT_FEATURE_MASK, extfeat_edx, extfeat_ecx); @@ -235,18 +238,18 @@ int force_mwait __cpuinitdata; static void disable_c1e(void *unused) { - u32 lo, hi; + uint64_t msr_content; /* * Disable C1E mode, as the APIC timer stops in that mode. * The MSR does not exist in all FamilyF CPUs (only Rev F and above), * but we safely catch the #GP in that case. 
*/ - if ((rdmsr_safe(MSR_K8_ENABLE_C1E, lo, hi) == 0) && - (lo & (3u << 27)) && - (wrmsr_safe(MSR_K8_ENABLE_C1E, lo & ~(3u << 27), hi) != 0)) - printk(KERN_ERR "Failed to disable C1E on CPU#%u (%08x)\n", - smp_processor_id(), lo); + if ((rdmsr_safe(MSR_K8_ENABLE_C1E, msr_content) == 0) && + (msr_content & (3u << 27)) && + (wrmsr_safe(MSR_K8_ENABLE_C1E, msr_content & ~(3u << 27)) != 0)) + printk(KERN_ERR "Failed to disable C1E on CPU#%u (%16"PRIx64")\n", + smp_processor_id(), msr_content); } static void check_disable_c1e(unsigned int port, u8 value) @@ -258,13 +261,12 @@ static void check_disable_c1e(unsigned i static void __devinit init_amd(struct cpuinfo_x86 *c) { - u32 l, h; + uint32_t l, h; + uint64_t msr_content; int mbytes = num_physpages >> (20-PAGE_SHIFT); int r; #ifdef CONFIG_SMP - unsigned long long value; - /* Disable TLB flush filter by setting HWCR.FFDIS on K8 * bit 6 of msr C001_0015 * @@ -272,9 +274,9 @@ static void __devinit init_amd(struct cp * Errata 122 for all steppings (F+ have it disabled by default) */ if (c->x86 == 15) { - rdmsrl(MSR_K7_HWCR, value); - value |= 1 << 6; - wrmsrl(MSR_K7_HWCR, value); + msr_content = rdmsr(MSR_K7_HWCR); + msr_content |= 1 << 6; + wrmsr(MSR_K7_HWCR, msr_content); } #endif @@ -353,13 +355,15 @@ static void __devinit init_amd(struct cp if(mbytes>508) mbytes=508; - rdmsr(MSR_K6_WHCR, l, h); - if ((l&0x0000FFFF)==0) { + msr_content = rdmsr(MSR_K6_WHCR); + if ((msr_content & 0x0000FFFF) == 0) { unsigned long flags; - l=(1<<0)|((mbytes/4)<<1); + l = (1 <<0) | ((mbytes/4) << 1); + h = (uint32_t)(msr_content >> 32); local_irq_save(flags); wbinvd(); - wrmsr(MSR_K6_WHCR, l, h); + msr_content = (uint64_t)(h) << 32 | l; + wrmsr(MSR_K6_WHCR, msr_content); local_irq_restore(flags); printk(KERN_INFO "Enabling old style K6 write allocation for %d Mb\n", mbytes); @@ -374,13 +378,15 @@ static void __devinit init_amd(struct cp if(mbytes>4092) mbytes=4092; - rdmsr(MSR_K6_WHCR, l, h); - if ((l&0xFFFF0000)==0) { + msr_content = rdmsr(MSR_K6_WHCR); + if ((msr_content & 0xFFFF0000) == 0) { unsigned long flags; l=((mbytes>>2)<<22)|(1<<16); + h = (uint32_t)(msr_content >> 32); local_irq_save(flags); wbinvd(); - wrmsr(MSR_K6_WHCR, l, h); + msr_content = (uint64_t)(h) << 32 | l; + wrmsr(MSR_K6_WHCR, msr_content); local_irq_restore(flags); printk(KERN_INFO "Enabling new style K6 write allocation for %d Mb\n", mbytes); @@ -408,9 +414,9 @@ static void __devinit init_amd(struct cp if (c->x86_model >= 6 && c->x86_model <= 10) { if (!cpu_has(c, X86_FEATURE_XMM)) { printk(KERN_INFO "Enabling disabled K7/SSE Support.\n"); - rdmsr(MSR_K7_HWCR, l, h); - l &= ~0x00008000; - wrmsr(MSR_K7_HWCR, l, h); + msr_content = rdmsr(MSR_K7_HWCR); + msr_content &= ~0x00008000; + wrmsr(MSR_K7_HWCR, msr_content); set_bit(X86_FEATURE_XMM, c->x86_capability); } } @@ -420,11 +426,14 @@ static void __devinit init_amd(struct cp * As per AMD technical note 27212 0.2 */ if ((c->x86_model == 8 && c->x86_mask>=1) || (c->x86_model > 8)) { - rdmsr(MSR_K7_CLK_CTL, l, h); - if ((l & 0xfff00000) != 0x20000000) { - printk ("CPU: CLK_CTL MSR was %x. Reprogramming to %x\n", l, - ((l & 0x000fffff)|0x20000000)); - wrmsr(MSR_K7_CLK_CTL, (l & 0x000fffff)|0x20000000, h); + msr_content = rdmsr(MSR_K7_CLK_CTL); + if ((msr_content & 0xfff00000) != 0x20000000) { + printk ("CPU: CLK_CTL MSR was %"PRIx64 + ". 
Reprogramming to %"PRIx64 + "\n", msr_content, + (msr_content & 0x000fffff)|0x20000000); + msr_content = (msr_content & 0x000fffff) | 0x20000000; + wrmsr(MSR_K7_CLK_CTL, msr_content); } } break; @@ -445,17 +454,18 @@ static void __devinit init_amd(struct cp } if (c->x86 == 15) { - rdmsr(MSR_K7_HWCR, l, h); + msr_content = rdmsr(MSR_K7_HWCR); printk(KERN_INFO "CPU%d: AMD Flush Filter %sabled", - smp_processor_id(), (l & (1<<6)) ? "dis" : "en"); - if ((flush_filter_force > 0) && (l & (1<<6))) { - l &= ~(1<<6); + smp_processor_id(), + (msr_content & (1<<6)) ? "dis" : "en"); + if ((flush_filter_force > 0) && (msr_content & (1<<6))) { + msr_content &= ~(1<<6); printk(" -> Forcibly enabled"); - } else if ((flush_filter_force < 0) && !(l & (1<<6))) { - l |= 1<<6; + } else if ((flush_filter_force < 0) && !(msr_content & (1<<6))) { + msr_content |= 1<<6; printk(" -> Forcibly disabled"); } - wrmsr(MSR_K7_HWCR, l, h); + wrmsr(MSR_K7_HWCR, msr_content); printk("\n"); } diff -r 7994e7c5991e xen/arch/x86/cpu/common.c --- a/xen/arch/x86/cpu/common.c Wed May 26 10:11:50 2010 +0200 +++ b/xen/arch/x86/cpu/common.c Wed May 26 18:32:03 2010 +0200 @@ -315,10 +315,10 @@ static void __cpuinit squash_the_stupid_ { if (cpu_has(c, X86_FEATURE_PN) && disable_x86_serial_nr ) { /* Disable processor serial number */ - unsigned long lo,hi; - rdmsr(MSR_IA32_BBL_CR_CTL,lo,hi); - lo |= 0x200000; - wrmsr(MSR_IA32_BBL_CR_CTL,lo,hi); + uint64_t msr_content; + msr_content = rdmsr(MSR_IA32_BBL_CR_CTL); + msr_content |= 0x200000; + wrmsr(MSR_IA32_BBL_CR_CTL,msr_content); printk(KERN_NOTICE "CPU serial number disabled.\n"); clear_bit(X86_FEATURE_PN, c->x86_capability); @@ -595,7 +595,7 @@ void __cpuinit cpu_init(void) printk("Initializing CPU#%d\n", cpu); if (cpu_has_pat) - wrmsrl(MSR_IA32_CR_PAT, host_pat); + wrmsr(MSR_IA32_CR_PAT, host_pat); /* Install correct page table. */ write_ptbase(current); diff -r 7994e7c5991e xen/arch/x86/cpu/intel.c --- a/xen/arch/x86/cpu/intel.c Wed May 26 10:11:50 2010 +0200 +++ b/xen/arch/x86/cpu/intel.c Wed May 26 18:32:03 2010 +0200 @@ -43,6 +43,7 @@ struct movsl_mask movsl_mask __read_most static void __devinit set_cpuidmask(void) { unsigned int eax, ebx, ecx, edx, model; + uint64_t msr_content; if (!(opt_cpuid_mask_ecx | opt_cpuid_mask_edx)) return; @@ -55,9 +56,9 @@ static void __devinit set_cpuidmask(void return; } - wrmsr(MSR_IA32_CPUID_FEATURE_MASK1, - opt_cpuid_mask_ecx ? : ~0u, - opt_cpuid_mask_edx ? : ~0u); + msr_content = (uint64_t)(opt_cpuid_mask_edx ? : ~0u) << 32; + msr_content |= opt_cpuid_mask_edx ? 
: ~0u; + wrmsr(MSR_IA32_CPUID_FEATURE_MASK1, msr_content); } void __devinit early_intel_workaround(struct cpuinfo_x86 *c) @@ -75,15 +76,15 @@ void __devinit early_intel_workaround(st */ static void __devinit Intel_errata_workarounds(struct cpuinfo_x86 *c) { - unsigned long lo, hi; + uint64_t msr_content; if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_mask == 1)) { - rdmsr (MSR_IA32_MISC_ENABLE, lo, hi); - if ((lo & (1<<9)) == 0) { + msr_content = rdmsr(MSR_IA32_MISC_ENABLE); + if ((msr_content & (1<<9)) == 0) { printk (KERN_INFO "CPU: C0 stepping P4 Xeon detected.\n"); printk (KERN_INFO "CPU: Disabling hardware prefetching (Errata 037)\n"); - lo |= (1<<9); /* Disable hw prefetching */ - wrmsr (MSR_IA32_MISC_ENABLE, lo, hi); + msr_content |= (1<<9); /* Disable hw prefetching */ + wrmsr (MSR_IA32_MISC_ENABLE, msr_content); } } } diff -r 7994e7c5991e xen/arch/x86/cpu/mcheck/amd_f10.c --- a/xen/arch/x86/cpu/mcheck/amd_f10.c Wed May 26 10:11:50 2010 +0200 +++ b/xen/arch/x86/cpu/mcheck/amd_f10.c Wed May 26 18:32:03 2010 +0200 @@ -74,9 +74,9 @@ amd_f10_handler(struct mc_info *mi, uint mc_ext.mc_msr[1].reg = MSR_F10_MC4_MISC2; mc_ext.mc_msr[2].reg = MSR_F10_MC4_MISC3; - mca_rdmsrl(MSR_F10_MC4_MISC1, mc_ext.mc_msr[0].value); - mca_rdmsrl(MSR_F10_MC4_MISC2, mc_ext.mc_msr[1].value); - mca_rdmsrl(MSR_F10_MC4_MISC3, mc_ext.mc_msr[2].value); + mc_ext.mc_msr[0].value = mca_rdmsr(MSR_F10_MC4_MISC1); + mc_ext.mc_msr[1].value = mca_rdmsr(MSR_F10_MC4_MISC2); + mc_ext.mc_msr[2].value = mca_rdmsr(MSR_F10_MC4_MISC3); x86_mcinfo_add(mi, &mc_ext); return MCA_EXTINFO_LOCAL; diff -r 7994e7c5991e xen/arch/x86/cpu/mcheck/amd_k8.c --- a/xen/arch/x86/cpu/mcheck/amd_k8.c Wed May 26 10:11:50 2010 +0200 +++ b/xen/arch/x86/cpu/mcheck/amd_k8.c Wed May 26 18:32:03 2010 +0200 @@ -95,8 +95,8 @@ enum mcheck_type amd_k8_mcheck_init(stru mcequirk_amd_apply(quirkflag); } else { /* Enable error reporting of all errors */ - wrmsrl(MSR_IA32_MC0_CTL + 4 * i, 0xffffffffffffffffULL); - wrmsrl(MSR_IA32_MC0_STATUS + 4 * i, 0x0ULL); + mca_wrmsr(MSR_IA32_MCx_CTL(i), 0xffffffffffffffffULL); + mca_wrmsr(MSR_IA32_MCx_STATUS(i), 0x0ULL); break; } } diff -r 7994e7c5991e xen/arch/x86/cpu/mcheck/amd_nonfatal.c --- a/xen/arch/x86/cpu/mcheck/amd_nonfatal.c Wed May 26 10:11:50 2010 +0200 +++ b/xen/arch/x86/cpu/mcheck/amd_nonfatal.c Wed May 26 18:32:03 2010 +0200 @@ -144,7 +144,7 @@ static void mce_amd_work_fn(void *data) uint64_t value; uint32_t counter; - mca_rdmsrl(MSR_IA32_MC4_MISC, value); + value = mca_rdmsr(MSR_IA32_MC4_MISC); /* Only the error counter field is of interest * Bit field is described in AMD K8 BKDG chapter 6.4.5.5 */ @@ -169,7 +169,7 @@ static void mce_amd_work_fn(void *data) value &= ~(0x60FFF00000000ULL); /* Counter enable */ value |= (1ULL << 51); - mca_wrmsrl(MSR_IA32_MC4_MISC, value); + mca_wrmsr(MSR_IA32_MC4_MISC, value); wmb(); } } @@ -212,7 +212,7 @@ void amd_nonfatal_mcheck_init(struct cpu /* hw threshold registers present */ hw_threshold = 1; - rdmsrl(MSR_IA32_MC4_MISC, value); + value = mca_rdmsr(MSR_IA32_MC4_MISC); if (value & (1ULL << 61)) { /* Locked bit */ /* Locked by BIOS. 
Not available for use */ @@ -233,7 +233,7 @@ void amd_nonfatal_mcheck_init(struct cpu value &= ~(0x60FFF00000000ULL); /* Counter enable */ value |= (1ULL << 51); - wrmsrl(MSR_IA32_MC4_MISC, value); + mca_wrmsr(MSR_IA32_MC4_MISC, value); /* serialize */ wmb(); printk(XENLOG_INFO "MCA: Use hw thresholding to adjust polling frequency\n"); diff -r 7994e7c5991e xen/arch/x86/cpu/mcheck/k7.c --- a/xen/arch/x86/cpu/mcheck/k7.c Wed May 26 10:11:50 2010 +0200 +++ b/xen/arch/x86/cpu/mcheck/k7.c Wed May 26 18:32:03 2010 +0200 @@ -20,37 +20,37 @@ static fastcall void k7_machine_check(struct cpu_user_regs * regs, long error_code) { int recover=1; - u32 alow, ahigh, high, low; - u32 mcgstl, mcgsth; + uint64_t msr_content; + uint64_t mcgst; int i; - rdmsr (MSR_IA32_MCG_STATUS, mcgstl, mcgsth); - if (mcgstl & (1<<0)) /* Recoverable ? */ + mcgst = mca_rdmsr(MSR_IA32_MCG_STATUS); + if (mcgst & (1ULL<<0)) /* Recoverable ? */ recover=0; - printk (KERN_EMERG "CPU %d: Machine Check Exception: %08x%08x\n", - smp_processor_id(), mcgsth, mcgstl); + printk (KERN_EMERG "CPU %d: Machine Check Exception: %16"PRIx64"\n", + smp_processor_id(), mcgst); for (i=1; imc_status); + mib->mc_status = mca_rdmsr(MSR_IA32_MCx_STATUS(bank)); mib->common.type = MC_TYPE_BANK; mib->common.size = sizeof (struct mcinfo_bank); @@ -150,11 +150,11 @@ static struct mcinfo_bank *mca_init_bank addr = misc = 0; if (mib->mc_status & MCi_STATUS_MISCV) - mca_rdmsrl(MSR_IA32_MC0_MISC + 4 * bank, mib->mc_misc); + mib->mc_misc = mca_rdmsr(MSR_IA32_MCx_MISC(bank)); if (mib->mc_status & MCi_STATUS_ADDRV) { - mca_rdmsrl(MSR_IA32_MC0_ADDR + 4 * bank, mib->mc_addr); + mib->mc_addr = mca_rdmsr(MSR_IA32_MCx_ADDR(bank)); if (mfn_valid(paddr_to_pfn(mib->mc_addr))) { struct domain *d; @@ -167,7 +167,7 @@ static struct mcinfo_bank *mca_init_bank } if (who == MCA_CMCI_HANDLER) { - mca_rdmsrl(MSR_IA32_MC0_CTL2 + bank, mib->mc_ctrl2); + mib->mc_ctrl2 = mca_rdmsr(MSR_IA32_MC0_CTL2 + bank); rdtscll(mib->mc_tsc); } @@ -185,7 +185,7 @@ static int mca_init_global(uint32_t flag memset(mig, 0, sizeof (struct mcinfo_global)); mig->common.type = MC_TYPE_GLOBAL; mig->common.size = sizeof (struct mcinfo_global); - mca_rdmsrl(MSR_IA32_MCG_STATUS, status); + status = mca_rdmsr(MSR_IA32_MCG_STATUS); mig->mc_gstatus = status; mig->mc_domid = mig->mc_vcpuid = -1; mig->mc_flags = flags; @@ -231,7 +231,7 @@ mctelem_cookie_t mcheck_mca_logout(enum int i; enum mca_extinfo cbret = MCA_EXTINFO_IGNORED; - mca_rdmsrl(MSR_IA32_MCG_STATUS, gstatus); + gstatus = mca_rdmsr(MSR_IA32_MCG_STATUS); switch (who) { case MCA_MCE_HANDLER: case MCA_MCE_SCAN: @@ -266,7 +266,7 @@ mctelem_cookie_t mcheck_mca_logout(enum if (!test_bit(i, bankmask)) continue; - mca_rdmsrl(MSR_IA32_MC0_STATUS + i * 4, status); + status = mca_rdmsr(MSR_IA32_MCx_STATUS(i)); if (!(status & MCi_STATUS_VAL)) continue; /* this bank has no valid telemetry */ @@ -316,7 +316,7 @@ mctelem_cookie_t mcheck_mca_logout(enum /* By default, need_clear = 1 */ if (who != MCA_MCE_SCAN && need_clear) /* Clear status */ - mca_wrmsrl(MSR_IA32_MC0_STATUS + 4 * i, 0x0ULL); + mca_wrmsr(MSR_IA32_MCx_STATUS(i), 0x0ULL); else if ( who == MCA_MCE_SCAN && need_clear) set_bit(i, clear_bank); @@ -378,7 +378,7 @@ void mcheck_cmn_handler(struct cpu_user_ /* Read global status; if it does not indicate machine check * in progress then bail as long as we have a valid ip to return to. 
*/ - mca_rdmsrl(MSR_IA32_MCG_STATUS, gstatus); + gstatus = mca_rdmsr(MSR_IA32_MCG_STATUS); ripv = ((gstatus & MCG_STATUS_RIPV) != 0); if (!(gstatus & MCG_STATUS_MCIP) && ripv) { add_taint(TAINT_MACHINE_CHECK); /* questionable */ @@ -397,7 +397,7 @@ void mcheck_cmn_handler(struct cpu_user_ /* Clear MCIP or another #MC will enter shutdown state */ gstatus &= ~MCG_STATUS_MCIP; - mca_wrmsrl(MSR_IA32_MCG_STATUS, gstatus); + mca_wrmsr(MSR_IA32_MCG_STATUS, gstatus); wmb(); /* If no valid errors and our stack is intact, we're done */ @@ -576,10 +576,10 @@ void mcheck_mca_clearbanks(cpu_banks_t b for (i = 0; i < 32 && i < nr_mce_banks; i++) { if (!test_bit(i, bankmask)) continue; - mca_rdmsrl(MSR_IA32_MC0_STATUS + i * 4, status); + status = mca_rdmsr(MSR_IA32_MCx_STATUS(i)); if (!(status & MCi_STATUS_VAL)) continue; - mca_wrmsrl(MSR_IA32_MC0_STATUS + 4 * i, 0x0ULL); + mca_wrmsr(MSR_IA32_MCx_STATUS(i), 0x0ULL); } } @@ -711,10 +711,10 @@ void mcheck_init(struct cpuinfo_x86 *c) /* Don't care banks before firstbank */ memset(h_mci_ctrl, 0xff, sizeof(h_mci_ctrl)); for (i = firstbank; i < nr_mce_banks; i++) - rdmsrl(MSR_IA32_MC0_CTL + 4*i, h_mci_ctrl[i]); + h_mci_ctrl[i] = rdmsr(MSR_IA32_MCx_CTL(i)); } if (g_mcg_cap & MCG_CTL_P) - rdmsrl(MSR_IA32_MCG_CTL, h_mcg_ctl); + h_mcg_ctl = rdmsr(MSR_IA32_MCG_CTL); set_poll_bankmask(c); if (inited != g_type) { @@ -751,25 +751,23 @@ void mcheck_init(struct cpuinfo_x86 *c) u64 mce_cap_init(void) { - u32 l, h; - u64 value; + uint64_t msr_content; - rdmsr(MSR_IA32_MCG_CAP, l, h); - value = ((u64)h << 32) | l; + msr_content = rdmsr(MSR_IA32_MCG_CAP); /* For Guest vMCE usage */ - g_mcg_cap = value & ~MCG_CMCI_P; + g_mcg_cap = msr_content & ~MCG_CMCI_P; - if (l & MCG_CTL_P) /* Control register present ? */ - wrmsr(MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff); + if (msr_content & MCG_CTL_P) /* Control register present ? 
*/ + wrmsr(MSR_IA32_MCG_CTL, 0xffffffffffffffffULL); - nr_mce_banks = l & MCG_CAP_COUNT; + nr_mce_banks = msr_content & MCG_CAP_COUNT; if ( nr_mce_banks > MAX_NR_BANKS ) { printk(KERN_WARNING "MCE: exceed max mce banks\n"); g_mcg_cap = (g_mcg_cap & ~MCG_CAP_COUNT) | MAX_NR_BANKS; } - return value; + return msr_content; } static void mcinfo_clear(struct mc_info *mi) @@ -920,7 +918,7 @@ static void do_mc_get_cpu_info(void *v) */ xcp->mc_nmsrvals = __MC_NMSRS; xcp->mc_msrvalues[0].reg = MSR_IA32_MCG_CAP; - rdmsrl(MSR_IA32_MCG_CAP, xcp->mc_msrvalues[0].value); + xcp->mc_msrvalues[0].value = rdmsr(MSR_IA32_MCG_CAP); if (c->cpuid_level >= 1) { cpuid(1, &junk, &ebx, &junk, &junk); @@ -1121,11 +1119,11 @@ static uint64_t x86_mc_hwcr_wren(void) { uint64_t old; - rdmsrl(MSR_K8_HWCR, old); + old = rdmsr(MSR_K8_HWCR); if (!(old & K8_HWCR_MCi_STATUS_WREN)) { uint64_t new = old | K8_HWCR_MCi_STATUS_WREN; - wrmsrl(MSR_K8_HWCR, new); + wrmsr(MSR_K8_HWCR, new); } return old; @@ -1134,7 +1132,7 @@ static uint64_t x86_mc_hwcr_wren(void) static void x86_mc_hwcr_wren_restore(uint64_t hwcr) { if (!(hwcr & K8_HWCR_MCi_STATUS_WREN)) - wrmsrl(MSR_K8_HWCR, hwcr); + wrmsr(MSR_K8_HWCR, hwcr); } static void x86_mc_msrinject(void *data) @@ -1165,7 +1163,7 @@ static void x86_mc_msrinject(void *data) if (intpose) intpose_add(mci->mcinj_cpunr, msr->reg, msr->value); else - wrmsrl(msr->reg, msr->value); + wrmsr(msr->reg, msr->value); } if (mci->mcinj_flags & _MC_MSRINJ_F_REQ_HWCR_WREN) diff -r 7994e7c5991e xen/arch/x86/cpu/mcheck/mce.h --- a/xen/arch/x86/cpu/mcheck/mce.h Wed May 26 10:11:50 2010 +0200 +++ b/xen/arch/x86/cpu/mcheck/mce.h Wed May 26 18:32:03 2010 +0200 @@ -83,15 +83,18 @@ extern struct intpose_ent *intpose_looku uint64_t *); extern void intpose_inval(unsigned int, uint64_t); -#define mca_rdmsrl(msr, var) do { \ - if (intpose_lookup(smp_processor_id(), msr, &var) == NULL) \ - rdmsrl(msr, var); \ -} while (0) +static inline uint64_t mca_rdmsr(unsigned int msr) +{ + uint64_t val; + if (intpose_lookup(smp_processor_id(), msr, &val) == NULL) + return rdmsr(msr); + return val; +} /* Write an MSR, invalidating any interposed value */ -#define mca_wrmsrl(msr, val) do { \ +#define mca_wrmsr(msr, val) do { \ intpose_inval(smp_processor_id(), msr); \ - wrmsrl(msr, val); \ + wrmsr(msr, val); \ } while (0) diff -r 7994e7c5991e xen/arch/x86/cpu/mcheck/mce_amd_quirks.c --- a/xen/arch/x86/cpu/mcheck/mce_amd_quirks.c Wed May 26 10:11:50 2010 +0200 +++ b/xen/arch/x86/cpu/mcheck/mce_amd_quirks.c Wed May 26 18:32:03 2010 +0200 @@ -17,8 +17,9 @@ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ -#include -#include +#include +#include +#include #include "mce_quirks.h" @@ -64,8 +65,8 @@ int mcequirk_amd_apply(enum mcequirk_amd * TBL walk error reporting, which trips off incorrectly * with AGP GART & 3ware & Cerberus. 
*/ - wrmsrl(MSR_IA32_MC4_CTL, ~(1ULL << 10)); - wrmsrl(MSR_IA32_MC4_STATUS, 0ULL); + wrmsr(MSR_IA32_MC4_CTL, ~(1ULL << 10)); + wrmsr(MSR_IA32_MC4_STATUS, 0ULL); break; } diff -r 7994e7c5991e xen/arch/x86/cpu/mcheck/mce_intel.c --- a/xen/arch/x86/cpu/mcheck/mce_intel.c Wed May 26 10:11:50 2010 +0200 +++ b/xen/arch/x86/cpu/mcheck/mce_intel.c Wed May 26 18:32:03 2010 +0200 @@ -58,7 +58,7 @@ static void unexpected_thermal_interrupt /* P4/Xeon Thermal transition interrupt handler */ static void intel_thermal_interrupt(struct cpu_user_regs *regs) { - u32 l, h; + uint64_t msr_content; unsigned int cpu = smp_processor_id(); static s_time_t next[NR_CPUS]; @@ -67,8 +67,8 @@ static void intel_thermal_interrupt(stru return; next[cpu] = NOW() + MILLISECS(5000); - rdmsr(MSR_IA32_THERM_STATUS, l, h); - if (l & 0x1) { + msr_content = rdmsr(MSR_IA32_THERM_STATUS); + if (msr_content & 0x1) { printk(KERN_EMERG "CPU%d: Temperature above threshold\n", cpu); printk(KERN_EMERG "CPU%d: Running in modulated clock mode\n", cpu); @@ -94,7 +94,8 @@ fastcall void smp_thermal_interrupt(stru /* P4/Xeon Thermal regulation detect and init */ static void intel_init_thermal(struct cpuinfo_x86 *c) { - u32 l, h; + uint64_t msr_content; + uint32_t val; int tm2 = 0; unsigned int cpu = smp_processor_id(); @@ -110,39 +111,39 @@ static void intel_init_thermal(struct cp * be some SMM goo which handles it, so we can't even put a handler * since it might be delivered via SMI already -zwanem. */ - rdmsr (MSR_IA32_MISC_ENABLE, l, h); - h = apic_read(APIC_LVTTHMR); - if ((l & (1<<3)) && (h & APIC_DM_SMI)) { + msr_content = rdmsr (MSR_IA32_MISC_ENABLE); + val = apic_read(APIC_LVTTHMR); + if ((msr_content & (1ULL<<3)) && (val & APIC_DM_SMI)) { printk(KERN_DEBUG "CPU%d: Thermal monitoring handled by SMI\n",cpu); return; /* -EBUSY */ } - if (cpu_has(c, X86_FEATURE_TM2) && (l & (1 << 13))) + if (cpu_has(c, X86_FEATURE_TM2) && (msr_content & (1ULL << 13))) tm2 = 1; /* check whether a vector already exists, temporarily masked? */ - if (h & APIC_VECTOR_MASK) { + if (val & APIC_VECTOR_MASK) { printk(KERN_DEBUG "CPU%d: Thermal LVT vector (%#x) already installed\n", - cpu, (h & APIC_VECTOR_MASK)); + cpu, (val & APIC_VECTOR_MASK)); return; /* -EBUSY */ } /* The temperature transition interrupt handler setup */ - h = THERMAL_APIC_VECTOR; /* our delivery vector */ - h |= (APIC_DM_FIXED | APIC_LVT_MASKED); /* we'll mask till we're ready */ - apic_write_around(APIC_LVTTHMR, h); + val = THERMAL_APIC_VECTOR; /* our delivery vector */ + val |= (APIC_DM_FIXED | APIC_LVT_MASKED); /* we'll mask till we're ready */ + apic_write_around(APIC_LVTTHMR, val); - rdmsr (MSR_IA32_THERM_INTERRUPT, l, h); - wrmsr (MSR_IA32_THERM_INTERRUPT, l | 0x03 , h); + msr_content = rdmsr(MSR_IA32_THERM_INTERRUPT); + wrmsr(MSR_IA32_THERM_INTERRUPT, msr_content | 0x03); /* ok we're good to go... */ vendor_thermal_interrupt = intel_thermal_interrupt; - rdmsr (MSR_IA32_MISC_ENABLE, l, h); - wrmsr (MSR_IA32_MISC_ENABLE, l | (1<<3), h); + msr_content = rdmsr (MSR_IA32_MISC_ENABLE); + wrmsr (MSR_IA32_MISC_ENABLE, msr_content | (1ULL<<3)); - l = apic_read (APIC_LVTTHMR); - apic_write_around (APIC_LVTTHMR, l & ~APIC_LVT_MASKED); + val = apic_read (APIC_LVTTHMR); + apic_write_around (APIC_LVTTHMR, val & ~APIC_LVT_MASKED); if (opt_cpu_info) printk(KERN_INFO "CPU%u: Thermal monitoring enabled (%s)\n", cpu, tm2 ? 
"TM2" : "TM1"); @@ -155,7 +156,7 @@ static inline void intel_get_extended_ms if ( ext->mc_msrs < ARRAY_SIZE(ext->mc_msr) && msr < MSR_IA32_MCG_EAX + nr_intel_ext_msrs ) { ext->mc_msr[ext->mc_msrs].reg = msr; - mca_rdmsrl(msr, ext->mc_msr[ext->mc_msrs].value); + ext->mc_msr[ext->mc_msrs].value = mca_rdmsr(msr); ++ext->mc_msrs; } } @@ -596,10 +597,10 @@ static void intel_machine_check(struct c "to clear error finding flag\n "); atomic_set(&found_error, 0); } - mca_rdmsrl(MSR_IA32_MCG_STATUS, gstatus); + gstatus = mca_rdmsr(MSR_IA32_MCG_STATUS); if ((gstatus & MCG_STATUS_MCIP) != 0) { mce_printk(MCE_CRITICAL, "MCE: Clear MCIP@ last step"); - mca_wrmsrl(MSR_IA32_MCG_STATUS, gstatus & ~MCG_STATUS_MCIP); + mca_wrmsr(MSR_IA32_MCG_STATUS, gstatus & ~MCG_STATUS_MCIP); } mce_barrier_exit(&mce_trap_bar); @@ -700,7 +701,7 @@ static int do_cmci_discover(int i) unsigned msr = MSR_IA32_MC0_CTL2 + i; u64 val; - rdmsrl(msr, val); + val = rdmsr(msr); /* Some other CPU already owns this bank. */ if (val & CMCI_EN) { clear_bit(i, __get_cpu_var(mce_banks_owned)); @@ -708,8 +709,8 @@ static int do_cmci_discover(int i) } val &= ~CMCI_THRESHOLD_MASK; - wrmsrl(msr, val | CMCI_EN | CMCI_THRESHOLD); - rdmsrl(msr, val); + wrmsr(msr, val | CMCI_EN | CMCI_THRESHOLD); + val = rdmsr(msr); if (!(val & CMCI_EN)) { /* This bank does not support CMCI. Polling timer has to handle it. */ @@ -810,9 +811,9 @@ static void clear_cmci(void) u64 val; if (!test_bit(i, __get_cpu_var(mce_banks_owned))) continue; - rdmsrl(msr, val); + val = rdmsr(msr); if (val & (CMCI_EN|CMCI_THRESHOLD_MASK)) - wrmsrl(msr, val & ~(CMCI_EN|CMCI_THRESHOLD_MASK)); + wrmsr(msr, val & ~(CMCI_EN|CMCI_THRESHOLD_MASK)); clear_bit(i, __get_cpu_var(mce_banks_owned)); } } @@ -911,7 +912,7 @@ static void _mce_cap_init(struct cpuinfo static void mce_init(void) { - u32 l, h; + uint64_t msr_content; int i; mctelem_cookie_t mctc; struct mca_summary bs; @@ -940,17 +941,17 @@ static void mce_init(void) { /* Some banks are shared across cores, use MCi_CTRL to judge whether * this bank has been initialized by other cores already. 
*/ - rdmsr(MSR_IA32_MC0_CTL + 4*i, l, h); - if (!(l | h)) + msr_content = rdmsr(MSR_IA32_MCx_CTL(i)); + if (!msr_content) { /* if ctl is 0, this bank is never initialized */ mce_printk(MCE_VERBOSE, "mce_init: init bank%d\n", i); - wrmsr (MSR_IA32_MC0_CTL + 4*i, 0xffffffff, 0xffffffff); - wrmsr (MSR_IA32_MC0_STATUS + 4*i, 0x0, 0x0); + wrmsr(MSR_IA32_MCx_CTL(i), 0xffffffffffffffffULL); + wrmsr(MSR_IA32_MCx_STATUS(i), 0x0); } } if (firstbank) /* if cmci enabled, firstbank = 0 */ - wrmsr (MSR_IA32_MC0_STATUS, 0x0, 0x0); + wrmsr(MSR_IA32_MC0_STATUS, 0x0); } /* p4/p6 family have similar MCA initialization process */ diff -r 7994e7c5991e xen/arch/x86/cpu/mtrr/generic.c --- a/xen/arch/x86/cpu/mtrr/generic.c Wed May 26 10:11:50 2010 +0200 +++ b/xen/arch/x86/cpu/mtrr/generic.c Wed May 26 18:32:03 2010 +0200 @@ -30,22 +30,31 @@ struct mtrr_state mtrr_state = {}; static void get_mtrr_var_range(unsigned int index, struct mtrr_var_range *vr) { - rdmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi); - rdmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi); + vr->base = rdmsr(MTRRphysBase_MSR(index)); + vr->mask = rdmsr(MTRRphysMask_MSR(index)); } static void get_fixed_ranges(mtrr_type * frs) { + uint64_t msr_content; unsigned int *p = (unsigned int *) frs; int i; - rdmsr(MTRRfix64K_00000_MSR, p[0], p[1]); + msr_content = rdmsr(MTRRfix64K_00000_MSR); + p[0] = (uint32_t)msr_content; + p[1] = (uint32_t)(msr_content >> 32); - for (i = 0; i < 2; i++) - rdmsr(MTRRfix16K_80000_MSR + i, p[2 + i * 2], p[3 + i * 2]); - for (i = 0; i < 8; i++) - rdmsr(MTRRfix4K_C0000_MSR + i, p[6 + i * 2], p[7 + i * 2]); + for (i = 0; i < 2; i++) { + msr_content = rdmsr(MTRRfix16K_80000_MSR + i); + p[2 + i * 2] = (uint32_t)msr_content; + p[3 + i * 2] = (uint32_t)(msr_content >> 32); + } + for (i = 0; i < 8; i++) { + msr_content = rdmsr(MTRRfix4K_C0000_MSR + i); + p[6 + i * 2] = (uint32_t)msr_content; + p[7 + i * 2] = (uint32_t)(msr_content >> 32); + } } void mtrr_save_fixed_ranges(void *info) @@ -59,7 +68,7 @@ void __init get_mtrr_state(void) { unsigned int i; struct mtrr_var_range *vrs; - unsigned lo, dummy; + uint64_t msr_content; if (!mtrr_state.var_ranges) { mtrr_state.var_ranges = xmalloc_array(struct mtrr_var_range, @@ -69,20 +78,20 @@ void __init get_mtrr_state(void) } vrs = mtrr_state.var_ranges; - rdmsr(MTRRcap_MSR, lo, dummy); - mtrr_state.have_fixed = (lo >> 8) & 1; + msr_content = rdmsr(MTRRcap_MSR); + mtrr_state.have_fixed = (msr_content >> 8) & 1; for (i = 0; i < num_var_ranges; i++) get_mtrr_var_range(i, &vrs[i]); if (mtrr_state.have_fixed) get_fixed_ranges(mtrr_state.fixed_ranges); - rdmsr(MTRRdefType_MSR, lo, dummy); - mtrr_state.def_type = (lo & 0xff); - mtrr_state.enabled = (lo & 0xc00) >> 10; + msr_content = rdmsr(MTRRdefType_MSR); + mtrr_state.def_type = (msr_content & 0xff); + mtrr_state.enabled = (msr_content & 0xc00) >> 10; /* Store mtrr_cap for HVM MTRR virtualisation. */ - rdmsrl(MTRRcap_MSR, mtrr_state.mtrr_cap); + mtrr_state.mtrr_cap = rdmsr(MTRRcap_MSR); } /* Some BIOS's are fucked and don't set all MTRRs the same! */ @@ -105,12 +114,12 @@ void __init mtrr_state_warn(void) /* Doesn't attempt to pass an error out to MTRR users because it's quite complicated in some cases and probably not worth it because the best error handling is to ignore it. 
*/ -void mtrr_wrmsr(unsigned msr, unsigned a, unsigned b) +void mtrr_wrmsr(unsigned int msr, uint64_t msr_content) { - if (wrmsr_safe(msr, a, b) < 0) + if (wrmsr_safe(msr, msr_content) < 0) printk(KERN_ERR - "MTRR: CPU %u: Writing MSR %x to %x:%x failed\n", - smp_processor_id(), msr, a, b); + "MTRR: CPU %u: Writing MSR %x to %"PRIx64" failed\n", + smp_processor_id(), msr, msr_content); /* Cache overlap status for efficient HVM MTRR virtualisation. */ mtrr_state.overlapped = is_var_mtrr_overlapped(&mtrr_state); } @@ -121,12 +130,12 @@ void mtrr_wrmsr(unsigned msr, unsigned a */ static inline void k8_enable_fixed_iorrs(void) { - unsigned lo, hi; + uint64_t msr_content; - rdmsr(MSR_K8_SYSCFG, lo, hi); - mtrr_wrmsr(MSR_K8_SYSCFG, lo + msr_content = rdmsr(MSR_K8_SYSCFG); + mtrr_wrmsr(MSR_K8_SYSCFG, msr_content | K8_MTRRFIXRANGE_DRAM_ENABLE - | K8_MTRRFIXRANGE_DRAM_MODIFY, hi); + | K8_MTRRFIXRANGE_DRAM_MODIFY); } /** @@ -139,16 +148,18 @@ static inline void k8_enable_fixed_iorrs */ static void set_fixed_range(int msr, int * changed, unsigned int * msrwords) { - unsigned lo, hi; + uint64_t msr_content; + uint64_t val; - rdmsr(msr, lo, hi); + msr_content = rdmsr(msr); + val = (uint64_t)(msrwords[1]) << 32 | msrwords[0]; - if (lo != msrwords[0] || hi != msrwords[1]) { + if (msr_content != val) { if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD && boot_cpu_data.x86 == 15 && - ((msrwords[0] | msrwords[1]) & K8_MTRR_RDMEM_WRMEM_MASK)) + (val & K8_MTRR_RDMEM_WRMEM_MASK)) k8_enable_fixed_iorrs(); - mtrr_wrmsr(msr, msrwords[0], msrwords[1]); + mtrr_wrmsr(msr, val); *changed = TRUE; } } @@ -178,10 +189,11 @@ int generic_get_free_region(unsigned lon static void generic_get_mtrr(unsigned int reg, unsigned long *base, unsigned long *size, mtrr_type *type) { - unsigned int mask_lo, mask_hi, base_lo, base_hi; + uint64_t _mask, _base; + uint32_t mask_lo, mask_hi, base_lo, base_hi; - rdmsr(MTRRphysMask_MSR(reg), mask_lo, mask_hi); - if ((mask_lo & 0x800) == 0) { + _mask = rdmsr(MTRRphysMask_MSR(reg)); + if ((_mask & 0x800) == 0) { /* Invalid (i.e. free) range */ *base = 0; *size = 0; @@ -189,12 +201,16 @@ static void generic_get_mtrr(unsigned in return; } - rdmsr(MTRRphysBase_MSR(reg), base_lo, base_hi); + _base = rdmsr(MTRRphysBase_MSR(reg)); + mask_lo = (uint32_t)_mask; + mask_hi = (uint32_t)(_mask >> 32); /* Work out the shifted address mask. */ mask_lo = size_or_mask | mask_hi << (32 - PAGE_SHIFT) | mask_lo >> PAGE_SHIFT; + base_hi = (uint32_t)_base; + base_lo = (uint32_t)(_base >> 32); /* This works correctly if size is a power of two, i.e. a contiguous range. 
*/ *size = -mask_lo; @@ -224,29 +240,39 @@ static int set_fixed_ranges(mtrr_type * changes are made */ static int set_mtrr_var_ranges(unsigned int index, struct mtrr_var_range *vr) { - unsigned int lo, hi; + uint64_t msr_content; int changed = FALSE; + uint32_t lo, hi, base_lo, base_hi, mask_lo, mask_hi; - rdmsr(MTRRphysBase_MSR(index), lo, hi); - if ((vr->base_lo & 0xfffff0ffUL) != (lo & 0xfffff0ffUL) - || (vr->base_hi & (size_and_mask >> (32 - PAGE_SHIFT))) != + msr_content = rdmsr(MTRRphysBase_MSR(index)); + lo = (uint32_t)msr_content; + hi = (uint32_t)(msr_content >> 32); + base_lo = (uint32_t)vr->base; + base_hi = (uint32_t)(vr->base >> 32); + + if ((base_lo & 0xfffff0ffUL) != (lo & 0xfffff0ffUL) + || (base_hi & (size_and_mask >> (32 - PAGE_SHIFT))) != (hi & (size_and_mask >> (32 - PAGE_SHIFT)))) { - mtrr_wrmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi); + mtrr_wrmsr(MTRRphysBase_MSR(index), vr->base); changed = TRUE; } - rdmsr(MTRRphysMask_MSR(index), lo, hi); + msr_content = rdmsr(MTRRphysMask_MSR(index)); + lo = (uint32_t)msr_content; + hi = (uint32_t)(msr_content >> 32); + mask_lo = (uint32_t)vr->mask; + mask_hi = (uint32_t)(vr->mask >> 32); - if ((vr->mask_lo & 0xfffff800UL) != (lo & 0xfffff800UL) - || (vr->mask_hi & (size_and_mask >> (32 - PAGE_SHIFT))) != + if ((mask_lo & 0xfffff800UL) != (lo & 0xfffff800UL) + || (mask_hi & (size_and_mask >> (32 - PAGE_SHIFT))) != (hi & (size_and_mask >> (32 - PAGE_SHIFT)))) { - mtrr_wrmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi); + mtrr_wrmsr(MTRRphysMask_MSR(index), vr->mask); changed = TRUE; } return changed; } -static u32 deftype_lo, deftype_hi; +static uint64_t deftype; static unsigned long set_mtrr_state(void) /* [SUMMARY] Set the MTRR state for this CPU. @@ -268,9 +294,9 @@ static unsigned long set_mtrr_state(void /* Set_mtrr_restore restores the old value of MTRRdefType, so to set it we fiddle with the saved value */ - if ((deftype_lo & 0xff) != mtrr_state.def_type - || ((deftype_lo & 0xc00) >> 10) != mtrr_state.enabled) { - deftype_lo = (deftype_lo & ~0xcff) | mtrr_state.def_type | (mtrr_state.enabled << 10); + if ((deftype & 0xff) != mtrr_state.def_type + || ((deftype & 0xc00) >> 10) != mtrr_state.enabled) { + deftype = (deftype & ~0xcff) | mtrr_state.def_type | (mtrr_state.enabled << 10); change_mask |= MTRR_CHANGE_MASK_DEFTYPE; } @@ -313,10 +339,10 @@ static void prepare_set(void) flush_tlb_local(); /* Save MTRR state */ - rdmsr(MTRRdefType_MSR, deftype_lo, deftype_hi); + deftype = rdmsr(MTRRdefType_MSR); /* Disable MTRRs, and set the default type to uncached */ - mtrr_wrmsr(MTRRdefType_MSR, deftype_lo & ~0xcff, deftype_hi); + mtrr_wrmsr(MTRRdefType_MSR, deftype & ~0xcff); } static void post_set(void) @@ -325,7 +351,7 @@ static void post_set(void) flush_tlb_local(); /* Intel (P6) standard MTRRs */ - mtrr_wrmsr(MTRRdefType_MSR, deftype_lo, deftype_hi); + mtrr_wrmsr(MTRRdefType_MSR, deftype); /* Enable caches */ write_cr0(read_cr0() & 0xbfffffff); @@ -382,16 +408,20 @@ static void generic_set_mtrr(unsigned in if (size == 0) { /* The invalid bit is kept in the mask, so we simply clear the relevant mask register to disable a range. 
*/ - mtrr_wrmsr(MTRRphysMask_MSR(reg), 0, 0); + mtrr_wrmsr(MTRRphysMask_MSR(reg), 0); memset(vr, 0, sizeof(struct mtrr_var_range)); } else { - vr->base_lo = base << PAGE_SHIFT | type; - vr->base_hi = (base & size_and_mask) >> (32 - PAGE_SHIFT); - vr->mask_lo = -size << PAGE_SHIFT | 0x800; - vr->mask_hi = (-size & size_and_mask) >> (32 - PAGE_SHIFT); + uint32_t base_lo, base_hi, mask_lo, mask_hi; - mtrr_wrmsr(MTRRphysBase_MSR(reg), vr->base_lo, vr->base_hi); - mtrr_wrmsr(MTRRphysMask_MSR(reg), vr->mask_lo, vr->mask_hi); + base_lo = base << PAGE_SHIFT | type; + base_hi = (base & size_and_mask) >> (32 - PAGE_SHIFT); + mask_lo = -size << PAGE_SHIFT | 0x800; + mask_hi = (-size & size_and_mask) >> (32 - PAGE_SHIFT); + vr->base = (uint64_t)(base_hi) << 32 | base_lo; + vr->mask = (uint64_t)(mask_hi) << 32 | mask_lo; + + mtrr_wrmsr(MTRRphysBase_MSR(reg), vr->base); + mtrr_wrmsr(MTRRphysMask_MSR(reg), vr->mask); } post_set(); @@ -435,9 +465,7 @@ int generic_validate_add_page(unsigned l static int generic_have_wrcomb(void) { - unsigned long config, dummy; - rdmsr(MTRRcap_MSR, config, dummy); - return (config & (1 << 10)); + return (int)(rdmsr(MTRRcap_MSR) & (1ULL << 10)); } int positive_have_wrcomb(void) diff -r 7994e7c5991e xen/arch/x86/cpu/mtrr/main.c --- a/xen/arch/x86/cpu/mtrr/main.c Wed May 26 10:11:50 2010 +0200 +++ b/xen/arch/x86/cpu/mtrr/main.c Wed May 26 18:32:03 2010 +0200 @@ -101,10 +101,10 @@ static int have_wrcomb(void) /* This function returns the number of variable MTRRs */ static void __init set_num_var_ranges(void) { - unsigned long config = 0, dummy; + unsigned long config = 0; if (use_intel()) { - rdmsr(MTRRcap_MSR, config, dummy); + config = rdmsr(MTRRcap_MSR); } else if (is_cpu(AMD)) config = 2; else if (is_cpu(CYRIX) || is_cpu(CENTAUR)) diff -r 7994e7c5991e xen/arch/x86/cpu/mtrr/mtrr.h --- a/xen/arch/x86/cpu/mtrr/mtrr.h Wed May 26 10:11:50 2010 +0200 +++ b/xen/arch/x86/cpu/mtrr/mtrr.h Wed May 26 18:32:03 2010 +0200 @@ -60,8 +60,7 @@ extern int positive_have_wrcomb(void); struct set_mtrr_context { unsigned long flags; unsigned long cr4val; - u32 deftype_lo; - u32 deftype_hi; + uint64_t deftype; u32 ccr3; }; @@ -83,5 +82,5 @@ extern unsigned int num_var_ranges; void mtrr_state_warn(void); const char *mtrr_attrib_to_str(int x); -void mtrr_wrmsr(unsigned, unsigned, unsigned); +void mtrr_wrmsr(unsigned int msr, uint64_t msr_content); diff -r 7994e7c5991e xen/arch/x86/cpu/mtrr/state.c --- a/xen/arch/x86/cpu/mtrr/state.c Wed May 26 10:11:50 2010 +0200 +++ b/xen/arch/x86/cpu/mtrr/state.c Wed May 26 18:32:03 2010 +0200 @@ -31,7 +31,7 @@ void set_mtrr_prepare_save(struct set_mt if (use_intel()) /* Save MTRR state */ - rdmsr(MTRRdefType_MSR, ctxt->deftype_lo, ctxt->deftype_hi); + ctxt->deftype = rdmsr(MTRRdefType_MSR); else /* Cyrix ARRs - everything else were excluded at the top */ ctxt->ccr3 = getCx86(CX86_CCR3); @@ -42,8 +42,7 @@ void set_mtrr_cache_disable(struct set_m { if (use_intel()) /* Disable MTRRs, and set the default type to uncached */ - mtrr_wrmsr(MTRRdefType_MSR, ctxt->deftype_lo & 0xf300UL, - ctxt->deftype_hi); + mtrr_wrmsr(MTRRdefType_MSR, ctxt->deftype & 0xf300UL); else if (is_cpu(CYRIX)) /* Cyrix ARRs - everything else were excluded at the top */ setCx86(CX86_CCR3, (ctxt->ccr3 & 0x0f) | 0x10); @@ -60,7 +59,7 @@ void set_mtrr_done(struct set_mtrr_conte /* Restore MTRRdefType */ if (use_intel()) /* Intel (P6) standard MTRRs */ - mtrr_wrmsr(MTRRdefType_MSR, ctxt->deftype_lo, ctxt->deftype_hi); + mtrr_wrmsr(MTRRdefType_MSR, ctxt->deftype); else /* Cyrix ARRs - everything 
else was excluded at the top */ setCx86(CX86_CCR3, ctxt->ccr3); diff -r 7994e7c5991e xen/arch/x86/domain.c --- a/xen/arch/x86/domain.c Wed May 26 10:11:50 2010 +0200 +++ b/xen/arch/x86/domain.c Wed May 26 18:32:03 2010 +0200 @@ -1089,21 +1089,15 @@ static void load_segments(struct vcpu *n { /* This can only be non-zero if selector is NULL. */ if ( nctxt->fs_base ) - wrmsr(MSR_FS_BASE, - nctxt->fs_base, - nctxt->fs_base>>32); + wrmsr(MSR_FS_BASE, nctxt->fs_base); /* Most kernels have non-zero GS base, so don't bother testing. */ /* (This is also a serialising instruction, avoiding AMD erratum #88.) */ - wrmsr(MSR_SHADOW_GS_BASE, - nctxt->gs_base_kernel, - nctxt->gs_base_kernel>>32); + wrmsr(MSR_SHADOW_GS_BASE, nctxt->gs_base_kernel); /* This can only be non-zero if selector is NULL. */ if ( nctxt->gs_base_user ) - wrmsr(MSR_GS_BASE, - nctxt->gs_base_user, - nctxt->gs_base_user>>32); + wrmsr(MSR_GS_BASE, nctxt->gs_base_user); /* If in kernel mode then switch the GS bases around. */ if ( (n->arch.flags & TF_kernel_mode) ) diff -r 7994e7c5991e xen/arch/x86/e820.c --- a/xen/arch/x86/e820.c Wed May 26 10:11:50 2010 +0200 +++ b/xen/arch/x86/e820.c Wed May 26 18:32:03 2010 +0200 @@ -457,8 +457,8 @@ static uint64_t mtrr_top_of_ram(void) } addr_mask = ((1ull << phys_bits) - 1) & ~((1ull << 12) - 1); - rdmsrl(MSR_MTRRcap, mtrr_cap); - rdmsrl(MSR_MTRRdefType, mtrr_def); + mtrr_cap = rdmsr(MSR_MTRRcap); + mtrr_def = rdmsr(MSR_MTRRdefType); if ( e820_verbose ) printk(" MTRR cap: %"PRIx64" type: %"PRIx64"\n", mtrr_cap, mtrr_def); @@ -474,8 +474,8 @@ static uint64_t mtrr_top_of_ram(void) top = 0; for ( i = 0; i < (uint8_t)mtrr_cap; i++ ) { - rdmsrl(MSR_MTRRphysBase(i), base); - rdmsrl(MSR_MTRRphysMask(i), mask); + base = rdmsr(MSR_MTRRphysBase(i)); + mask = rdmsr(MSR_MTRRphysMask(i) ); if ( e820_verbose ) printk(" MTRR[%d]: base %"PRIx64" mask %"PRIx64"\n", diff -r 7994e7c5991e xen/arch/x86/genapic/x2apic.c --- a/xen/arch/x86/genapic/x2apic.c Wed May 26 10:11:50 2010 +0200 +++ b/xen/arch/x86/genapic/x2apic.c Wed May 26 18:32:03 2010 +0200 @@ -90,6 +90,7 @@ void send_IPI_mask_x2apic_phys(const cpu { unsigned int cpu, cfg; unsigned long flags; + uint64_t msr_content; /* * Ensure that any synchronisation data written in program order by this @@ -107,8 +108,10 @@ void send_IPI_mask_x2apic_phys(const cpu cfg = APIC_DM_FIXED | 0 /* no shorthand */ | APIC_DEST_PHYSICAL | vector; for_each_cpu_mask ( cpu, *cpumask ) - if ( cpu != smp_processor_id() ) - apic_wrmsr(APIC_ICR, cfg, cpu_physical_id(cpu)); + if ( cpu != smp_processor_id() ) { + msr_content = cfg | ((uint64_t)(cpu_physical_id(cpu)) << 32); + apic_wrmsr(APIC_ICR, msr_content); + } local_irq_restore(flags); } @@ -117,6 +120,7 @@ void send_IPI_mask_x2apic_cluster(const { unsigned int cpu, cfg; unsigned long flags; + uint64_t msr_content; mb(); /* see the comment in send_IPI_mask_x2apic_phys() */ @@ -124,8 +128,10 @@ void send_IPI_mask_x2apic_cluster(const cfg = APIC_DM_FIXED | 0 /* no shorthand */ | APIC_DEST_LOGICAL | vector; for_each_cpu_mask ( cpu, *cpumask ) - if ( cpu != smp_processor_id() ) - apic_wrmsr(APIC_ICR, cfg, cpu_2_logical_apicid[cpu]); + if ( cpu != smp_processor_id() ) { + msr_content = cfg | ((uint64_t)(cpu_2_logical_apicid[cpu]) << 32); + apic_wrmsr(APIC_ICR, msr_content); + } local_irq_restore(flags); } diff -r 7994e7c5991e xen/arch/x86/hvm/emulate.c --- a/xen/arch/x86/hvm/emulate.c Wed May 26 10:11:50 2010 +0200 +++ b/xen/arch/x86/hvm/emulate.c Wed May 26 18:32:03 2010 +0200 @@ -825,16 +825,7 @@ static int hvmemul_read_msr( uint64_t 
*val, struct x86_emulate_ctxt *ctxt) { - struct cpu_user_regs _regs; - int rc; - - _regs.ecx = (uint32_t)reg; - - if ( (rc = hvm_msr_read_intercept(&_regs)) != X86EMUL_OKAY ) - return rc; - - *val = ((uint64_t)(uint32_t)_regs.edx << 32) | (uint32_t)_regs.eax; - return X86EMUL_OKAY; + return hvm_msr_read_intercept(reg, val); } static int hvmemul_write_msr( @@ -842,13 +833,7 @@ static int hvmemul_write_msr( uint64_t val, struct x86_emulate_ctxt *ctxt) { - struct cpu_user_regs _regs; - - _regs.edx = (uint32_t)(val >> 32); - _regs.eax = (uint32_t)val; - _regs.ecx = (uint32_t)reg; - - return hvm_msr_write_intercept(&_regs); + return hvm_msr_write_intercept(reg, val); } static int hvmemul_wbinvd( diff -r 7994e7c5991e xen/arch/x86/hvm/hvm.c --- a/xen/arch/x86/hvm/hvm.c Wed May 26 10:11:50 2010 +0200 +++ b/xen/arch/x86/hvm/hvm.c Wed May 26 18:32:03 2010 +0200 @@ -2010,9 +2010,8 @@ void hvm_rdtsc_intercept(struct cpu_user regs->edx = (uint32_t)(tsc >> 32); } -int hvm_msr_read_intercept(struct cpu_user_regs *regs) +int hvm_msr_read_intercept(unsigned int msr, uint64_t *msr_val) { - uint32_t ecx = regs->ecx; uint64_t msr_content = 0; struct vcpu *v = current; uint64_t *var_range_base, *fixed_range_base; @@ -2026,7 +2025,7 @@ int hvm_msr_read_intercept(struct cpu_us hvm_cpuid(1, &cpuid[0], &cpuid[1], &cpuid[2], &cpuid[3]); mtrr = !!(cpuid[3] & bitmaskof(X86_FEATURE_MTRR)); - switch ( ecx ) + switch ( msr ) { case MSR_IA32_TSC: msr_content = hvm_get_guest_tsc(v); @@ -2064,19 +2063,19 @@ int hvm_msr_read_intercept(struct cpu_us case MSR_MTRRfix16K_A0000: if ( !mtrr ) goto gp_fault; - index = regs->ecx - MSR_MTRRfix16K_80000; + index = msr - MSR_MTRRfix16K_80000; msr_content = fixed_range_base[index + 1]; break; case MSR_MTRRfix4K_C0000...MSR_MTRRfix4K_F8000: if ( !mtrr ) goto gp_fault; - index = regs->ecx - MSR_MTRRfix4K_C0000; + index = msr - MSR_MTRRfix4K_C0000; msr_content = fixed_range_base[index + 3]; break; case MSR_IA32_MTRR_PHYSBASE0...MSR_IA32_MTRR_PHYSMASK7: if ( !mtrr ) goto gp_fault; - index = regs->ecx - MSR_IA32_MTRR_PHYSBASE0; + index = msr - MSR_IA32_MTRR_PHYSBASE0; msr_content = var_range_base[index]; break; @@ -2091,18 +2090,17 @@ int hvm_msr_read_intercept(struct cpu_us break; default: - ret = vmce_rdmsr(ecx, &msr_content); + ret = vmce_rdmsr(msr, &msr_content); if ( ret < 0 ) goto gp_fault; else if ( ret ) break; /* ret == 0, This is not an MCE MSR, see other MSRs */ else if (!ret) - return hvm_funcs.msr_read_intercept(regs); + return hvm_funcs.msr_read_intercept(msr, &msr_content); } - regs->eax = (uint32_t)msr_content; - regs->edx = (uint32_t)(msr_content >> 32); + *msr_val = msr_content; return X86EMUL_OKAY; gp_fault: @@ -2110,10 +2108,8 @@ gp_fault: return X86EMUL_EXCEPTION; } -int hvm_msr_write_intercept(struct cpu_user_regs *regs) +int hvm_msr_write_intercept(unsigned int msr, uint64_t msr_content) { - uint32_t ecx = regs->ecx; - uint64_t msr_content = (uint32_t)regs->eax | ((uint64_t)regs->edx << 32); struct vcpu *v = current; int index, mtrr; uint32_t cpuid[4]; @@ -2122,7 +2118,7 @@ int hvm_msr_write_intercept(struct cpu_u hvm_cpuid(1, &cpuid[0], &cpuid[1], &cpuid[2], &cpuid[3]); mtrr = !!(cpuid[3] & bitmaskof(X86_FEATURE_MTRR)); - switch ( ecx ) + switch ( msr ) { case MSR_IA32_TSC: hvm_set_guest_tsc(v, msr_content); @@ -2132,7 +2128,7 @@ int hvm_msr_write_intercept(struct cpu_u v->arch.hvm_vcpu.msr_tsc_aux = (uint32_t)msr_content; if ( cpu_has_rdtscp && (v->domain->arch.tsc_mode != TSC_MODE_PVRDTSCP) ) - wrmsrl(MSR_TSC_AUX, (uint32_t)msr_content); + wrmsr(MSR_TSC_AUX, 
(uint32_t)msr_content); break; case MSR_IA32_APICBASE: @@ -2164,7 +2160,7 @@ int hvm_msr_write_intercept(struct cpu_u case MSR_MTRRfix16K_A0000: if ( !mtrr ) goto gp_fault; - index = regs->ecx - MSR_MTRRfix16K_80000 + 1; + index = msr - MSR_MTRRfix16K_80000 + 1; if ( !mtrr_fix_range_msr_set(&v->arch.hvm_vcpu.mtrr, index, msr_content) ) goto gp_fault; @@ -2172,7 +2168,7 @@ int hvm_msr_write_intercept(struct cpu_u case MSR_MTRRfix4K_C0000...MSR_MTRRfix4K_F8000: if ( !mtrr ) goto gp_fault; - index = regs->ecx - MSR_MTRRfix4K_C0000 + 3; + index = msr - MSR_MTRRfix4K_C0000 + 3; if ( !mtrr_fix_range_msr_set(&v->arch.hvm_vcpu.mtrr, index, msr_content) ) goto gp_fault; @@ -2181,7 +2177,7 @@ int hvm_msr_write_intercept(struct cpu_u if ( !mtrr ) goto gp_fault; if ( !mtrr_var_range_msr_set(&v->arch.hvm_vcpu.mtrr, - regs->ecx, msr_content) ) + msr, msr_content) ) goto gp_fault; break; @@ -2190,13 +2186,13 @@ int hvm_msr_write_intercept(struct cpu_u break; default: - ret = vmce_wrmsr(ecx, msr_content); + ret = vmce_wrmsr(msr, msr_content); if ( ret < 0 ) goto gp_fault; else if ( ret ) break; else if (!ret) - return hvm_funcs.msr_write_intercept(regs); + return hvm_funcs.msr_write_intercept(msr, msr_content); } return X86EMUL_OKAY; diff -r 7994e7c5991e xen/arch/x86/hvm/svm/svm.c --- a/xen/arch/x86/hvm/svm/svm.c Wed May 26 10:11:50 2010 +0200 +++ b/xen/arch/x86/hvm/svm/svm.c Wed May 26 18:32:03 2010 +0200 @@ -106,15 +106,13 @@ static void svm_cpu_down(void) write_efer(read_efer() & ~EFER_SVME); } -static enum handler_return long_mode_do_msr_write(struct cpu_user_regs *regs) +static enum handler_return +long_mode_do_msr_write(unsigned int msr, uint64_t msr_content) { - u64 msr_content = (u32)regs->eax | ((u64)regs->edx << 32); - u32 ecx = regs->ecx; + HVM_DBG_LOG(DBG_LEVEL_0, "msr %x msr_content %"PRIx64, + msr, msr_content); - HVM_DBG_LOG(DBG_LEVEL_0, "msr %x msr_content %"PRIx64, - ecx, msr_content); - - switch ( ecx ) + switch ( msr ) { case MSR_EFER: if ( hvm_set_efer(msr_content) ) @@ -686,7 +684,7 @@ static void svm_ctxt_switch_to(struct vc vpmu_load(v); if ( cpu_has_rdtscp ) - wrmsrl(MSR_TSC_AUX, hvm_msr_tsc_aux(v)); + wrmsr(MSR_TSC_AUX, hvm_msr_tsc_aux(v)); } static void svm_do_resume(struct vcpu *v) @@ -851,22 +849,22 @@ static void svm_init_erratum_383(struct if ( c->x86 != 0x10 ) return; - rdmsrl(MSR_AMD64_DC_CFG, msr_content); - wrmsrl(MSR_AMD64_DC_CFG, msr_content | (1ULL << 47)); + msr_content = rdmsr(MSR_AMD64_DC_CFG); + wrmsr(MSR_AMD64_DC_CFG, msr_content | (1ULL << 47)); amd_erratum383_found = 1; } static int svm_cpu_up(void) { - u32 eax, edx, phys_hsa_lo, phys_hsa_hi; + uint64_t msr_content; u64 phys_hsa; int rc, cpu = smp_processor_id(); struct cpuinfo_x86 *c = &cpu_data[cpu]; /* Check whether SVM feature is disabled in BIOS */ - rdmsr(MSR_K8_VM_CR, eax, edx); - if ( eax & K8_VMCR_SVME_DISABLE ) + msr_content = rdmsr(MSR_K8_VM_CR); + if ( msr_content & K8_VMCR_SVME_DISABLE ) { printk("CPU%d: AMD SVM Extension is disabled in BIOS.\n", cpu); return -EINVAL; @@ -879,9 +877,7 @@ static int svm_cpu_up(void) /* Initialize the HSA for this core. */ phys_hsa = (u64)virt_to_maddr(hsa[cpu]); - phys_hsa_lo = (u32)phys_hsa; - phys_hsa_hi = (u32)(phys_hsa >> 32); - wrmsr(MSR_K8_VM_HSAVE_PA, phys_hsa_lo, phys_hsa_hi); + wrmsr(MSR_K8_VM_HSAVE_PA, phys_hsa); /* check for erratum 383 */ svm_init_erratum_383(c); @@ -894,15 +890,14 @@ static int svm_cpu_up(void) * Check whether EFER.LMSLE can be written. * Unfortunately there's no feature bit defined for this. 
*/ - eax = read_efer(); - edx = read_efer() >> 32; - if ( wrmsr_safe(MSR_EFER, eax | EFER_LMSLE, edx) == 0 ) - rdmsr(MSR_EFER, eax, edx); - if ( eax & EFER_LMSLE ) + msr_content = read_efer(); + if ( wrmsr_safe(MSR_EFER, msr_content | EFER_LMSLE) == 0 ) + msr_content = rdmsr(MSR_EFER); + if ( msr_content & EFER_LMSLE ) { if ( c == &boot_cpu_data ) cpu_has_lmsl = 1; - wrmsr(MSR_EFER, eax ^ EFER_LMSLE, edx); + wrmsr(MSR_EFER, msr_content ^ EFER_LMSLE); } else { @@ -1032,14 +1027,13 @@ static void svm_dr_access(struct vcpu *v __restore_debug_registers(v); } -static int svm_msr_read_intercept(struct cpu_user_regs *regs) +static int svm_msr_read_intercept(unsigned int msr, uint64_t *msr_val) { u64 msr_content = 0; - u32 ecx = regs->ecx, eax, edx; struct vcpu *v = current; struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb; - switch ( ecx ) + switch ( msr ) { case MSR_EFER: msr_content = v->arch.hvm_vcpu.guest_efer; @@ -1104,31 +1098,28 @@ static int svm_msr_read_intercept(struct case MSR_K7_EVNTSEL1: case MSR_K7_EVNTSEL2: case MSR_K7_EVNTSEL3: - vpmu_do_rdmsr(regs); + vpmu_do_rdmsr(msr, &msr_content); goto done; default: - if ( rdmsr_viridian_regs(ecx, &msr_content) || - rdmsr_hypervisor_regs(ecx, &msr_content) ) + if ( rdmsr_viridian_regs(msr, &msr_content) || + rdmsr_hypervisor_regs(msr, &msr_content) ) break; - if ( rdmsr_safe(ecx, eax, edx) == 0 ) - { - msr_content = ((uint64_t)edx << 32) | eax; + if ( rdmsr_safe(msr, msr_content) == 0 ) break; - } goto gpf; } - regs->eax = (uint32_t)msr_content; - regs->edx = (uint32_t)(msr_content >> 32); + *msr_val = msr_content; done: - HVMTRACE_3D (MSR_READ, ecx, regs->eax, regs->edx); - HVM_DBG_LOG(DBG_LEVEL_1, "returns: ecx=%x, eax=%lx, edx=%lx", - ecx, (unsigned long)regs->eax, (unsigned long)regs->edx); + HVMTRACE_3D (MSR_READ, msr, + (uint32_t)msr_content, (uint32_t)(msr_content>>32)); + HVM_DBG_LOG(DBG_LEVEL_1, "returns: ecx=%x, msr_value=%"PRIx64, + msr, msr_content); return X86EMUL_OKAY; gpf: @@ -1136,18 +1127,15 @@ done: return X86EMUL_EXCEPTION; } -static int svm_msr_write_intercept(struct cpu_user_regs *regs) +static int svm_msr_write_intercept(unsigned int msr, uint64_t msr_content) { - u64 msr_content = 0; - u32 ecx = regs->ecx; struct vcpu *v = current; struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb; - msr_content = (u32)regs->eax | ((u64)regs->edx << 32); + HVMTRACE_3D(MSR_WRITE, msr, + (uint32_t)msr_content, (uint32_t)(msr_content >> 32)); - HVMTRACE_3D (MSR_WRITE, ecx, regs->eax, regs->edx); - - switch ( ecx ) + switch ( msr ) { case MSR_K8_VM_HSAVE_PA: goto gpf; @@ -1198,17 +1186,17 @@ static int svm_msr_write_intercept(struc case MSR_K7_EVNTSEL1: case MSR_K7_EVNTSEL2: case MSR_K7_EVNTSEL3: - vpmu_do_wrmsr(regs); + vpmu_do_wrmsr(msr, msr_content); goto done; default: - if ( wrmsr_viridian_regs(ecx, msr_content) ) + if ( wrmsr_viridian_regs(msr, msr_content) ) break; - switch ( long_mode_do_msr_write(regs) ) + switch ( long_mode_do_msr_write(msr, msr_content) ) { case HNDL_unhandled: - wrmsr_hypervisor_regs(ecx, msr_content); + wrmsr_hypervisor_regs(msr, msr_content); break; case HNDL_exception_raised: return X86EMUL_EXCEPTION; @@ -1230,18 +1218,22 @@ static void svm_do_msr_access(struct cpu int rc, inst_len; struct vcpu *v = current; struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb; + uint64_t msr_content; if ( vmcb->exitinfo1 == 0 ) { if ( (inst_len = __get_instruction_length(v, INSTR_RDMSR)) == 0 ) return; - rc = hvm_msr_read_intercept(regs); + rc = hvm_msr_read_intercept(regs->ecx, &msr_content); + regs->eax = (uint32_t)msr_content; + 
regs->edx = (uint32_t)(msr_content >> 32); } else { if ( (inst_len = __get_instruction_length(v, INSTR_WRMSR)) == 0 ) return; - rc = hvm_msr_write_intercept(regs); + msr_content = (uint64_t)(regs->edx) << 32 | regs->eax; + rc = hvm_msr_write_intercept(regs->ecx, msr_content); } if ( rc == X86EMUL_OKAY ) @@ -1306,7 +1298,7 @@ static int svm_is_erratum_383(struct cpu if ( !amd_erratum383_found ) return 0; - rdmsrl(MSR_IA32_MC0_STATUS, msr_content); + msr_content = rdmsr(MSR_IA32_MC0_STATUS); /* Bit 62 may or may not be set for this mce */ msr_content &= ~(1ULL << 62); @@ -1315,10 +1307,10 @@ static int svm_is_erratum_383(struct cpu /* Clear MCi_STATUS registers */ for (i = 0; i < nr_mce_banks; i++) - wrmsrl(MSR_IA32_MCx_STATUS(i), 0ULL); + wrmsr(MSR_IA32_MCx_STATUS(i), 0ULL); - rdmsrl(MSR_IA32_MCG_STATUS, msr_content); - wrmsrl(MSR_IA32_MCG_STATUS, msr_content & ~(1ULL << 2)); + msr_content = rdmsr(MSR_IA32_MCG_STATUS); + wrmsr(MSR_IA32_MCG_STATUS, msr_content & ~(1ULL << 2)); /* flush TLB */ flush_tlb_mask(&v->domain->domain_dirty_cpumask); diff -r 7994e7c5991e xen/arch/x86/hvm/svm/vpmu.c --- a/xen/arch/x86/hvm/svm/vpmu.c Wed May 26 10:11:50 2010 +0200 +++ b/xen/arch/x86/hvm/svm/vpmu.c Wed May 26 18:32:03 2010 +0200 @@ -110,11 +110,11 @@ static inline void context_restore(struc struct amd_vpmu_context *ctxt = vpmu->context; for ( i = 0; i < NUM_COUNTERS; i++ ) - wrmsrl(AMD_F10H_CTRLS[i], ctxt->ctrls[i]); + wrmsr(AMD_F10H_CTRLS[i], ctxt->ctrls[i]); for ( i = 0; i < NUM_COUNTERS; i++ ) { - wrmsrl(AMD_F10H_COUNTERS[i], ctxt->counters[i]); + wrmsr(AMD_F10H_COUNTERS[i], ctxt->counters[i]); /* Force an interrupt to allow guest reset the counter, if the value is positive */ @@ -147,10 +147,10 @@ static inline void context_save(struct v struct amd_vpmu_context *ctxt = vpmu->context; for ( i = 0; i < NUM_COUNTERS; i++ ) - rdmsrl(AMD_F10H_COUNTERS[i], ctxt->counters[i]); + ctxt->counters[i] = rdmsr(AMD_F10H_COUNTERS[i]); for ( i = 0; i < NUM_COUNTERS; i++ ) - rdmsrl(AMD_F10H_CTRLS[i], ctxt->ctrls[i]); + ctxt->ctrls[i] = rdmsr(AMD_F10H_CTRLS[i]); } static void amd_vpmu_save(struct vcpu *v) @@ -167,42 +167,38 @@ static void amd_vpmu_save(struct vcpu *v apic_write(APIC_LVTPC, ctx->hw_lapic_lvtpc | APIC_LVT_MASKED); } -static void context_update(struct cpu_user_regs *regs, u64 msr_content) +static void context_update(unsigned int msr, u64 msr_content) { int i; - u32 addr = regs->ecx; struct vcpu *v = current; struct vpmu_struct *vpmu = vcpu_vpmu(v); struct amd_vpmu_context *ctxt = vpmu->context; for ( i = 0; i < NUM_COUNTERS; i++ ) - if ( addr == AMD_F10H_COUNTERS[i] ) + if ( msr == AMD_F10H_COUNTERS[i] ) ctxt->counters[i] = msr_content; for ( i = 0; i < NUM_COUNTERS; i++ ) - if ( addr == AMD_F10H_CTRLS[i] ) + if ( msr == AMD_F10H_CTRLS[i] ) ctxt->ctrls[i] = msr_content; ctxt->hw_lapic_lvtpc = apic_read(APIC_LVTPC); } -static int amd_vpmu_do_wrmsr(struct cpu_user_regs *regs) +static int amd_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content) { - u64 msr_content = 0; struct vcpu *v = current; struct vpmu_struct *vpmu = vcpu_vpmu(v); - msr_content = (u32)regs->eax | ((u64)regs->edx << 32); - /* For all counters, enable guest only mode for HVM guest */ - if ( (get_pmu_reg_type(regs->ecx) == MSR_TYPE_CTRL) && + if ( (get_pmu_reg_type(msr) == MSR_TYPE_CTRL) && !(is_guest_mode(msr_content)) ) { set_guest_mode(msr_content); } /* check if the first counter is enabled */ - if ( (get_pmu_reg_type(regs->ecx) == MSR_TYPE_CTRL) && + if ( (get_pmu_reg_type(msr) == MSR_TYPE_CTRL) && is_pmu_enabled(msr_content) && 
!(vpmu->flags & VPMU_RUNNING) ) { if ( !acquire_pmu_ownership(PMU_OWNER_HVM) ) @@ -212,7 +208,7 @@ static int amd_vpmu_do_wrmsr(struct cpu_ } /* stop saving & restore if guest stops first counter */ - if ( (get_pmu_reg_type(regs->ecx) == MSR_TYPE_CTRL) && + if ( (get_pmu_reg_type(msr) == MSR_TYPE_CTRL) && (is_pmu_enabled(msr_content) == 0) && (vpmu->flags & VPMU_RUNNING) ) { apic_write(APIC_LVTPC, PMU_APIC_VECTOR | APIC_LVT_MASKED); @@ -221,20 +217,16 @@ static int amd_vpmu_do_wrmsr(struct cpu_ } /* Update vpmu context immediately */ - context_update(regs, msr_content); + context_update(msr, msr_content); /* Write to hw counters */ - wrmsrl(regs->ecx, msr_content); + wrmsr(msr, msr_content); return 1; } -static int amd_vpmu_do_rdmsr(struct cpu_user_regs *regs) +static int amd_vpmu_do_rdmsr(unsigned int msr, uint64_t *msr_val) { - u64 msr_content = 0; - - rdmsrl(regs->ecx, msr_content); - regs->eax = msr_content & 0xFFFFFFFF; - regs->edx = msr_content >> 32; + *msr_val = rdmsr(msr); return 1; } diff -r 7994e7c5991e xen/arch/x86/hvm/vmx/vmcs.c --- a/xen/arch/x86/hvm/vmx/vmcs.c Wed May 26 10:11:50 2010 +0200 +++ b/xen/arch/x86/hvm/vmx/vmcs.c Wed May 26 18:32:03 2010 +0200 @@ -101,8 +101,11 @@ static u32 adjust_vmx_controls( const char *name, u32 ctl_min, u32 ctl_opt, u32 msr, bool_t *mismatch) { u32 vmx_msr_low, vmx_msr_high, ctl = ctl_min | ctl_opt; + uint64_t vmx_msr; - rdmsr(msr, vmx_msr_low, vmx_msr_high); + vmx_msr = rdmsr(msr); + vmx_msr_low = (uint32_t)vmx_msr; + vmx_msr_high = (uint32_t)(vmx_msr >> 32); ctl &= vmx_msr_high; /* bit == 0 in high word ==> must be zero */ ctl |= vmx_msr_low; /* bit == 1 in low word ==> must be one */ @@ -128,6 +131,7 @@ static bool_t cap_check(const char *name static int vmx_init_vmcs_config(void) { u32 vmx_basic_msr_low, vmx_basic_msr_high, min, opt; + uint64_t vmx_basic_msr; u32 _vmx_pin_based_exec_control; u32 _vmx_cpu_based_exec_control; u32 _vmx_secondary_exec_control = 0; @@ -136,7 +140,9 @@ static int vmx_init_vmcs_config(void) u32 _vmx_vmentry_control; bool_t mismatch = 0; - rdmsr(MSR_IA32_VMX_BASIC, vmx_basic_msr_low, vmx_basic_msr_high); + vmx_basic_msr = rdmsr(MSR_IA32_VMX_BASIC); + vmx_basic_msr_low = (uint32_t)vmx_basic_msr; + vmx_basic_msr_high = (uint32_t)(vmx_basic_msr >> 32); min = (PIN_BASED_EXT_INTR_MASK | PIN_BASED_NMI_EXITING); @@ -198,9 +204,12 @@ static int vmx_init_vmcs_config(void) * We check VMX_BASIC_MSR[55] to correctly handle default1 controls. */ uint32_t must_be_one, must_be_zero, msr = MSR_IA32_VMX_PROCBASED_CTLS; + uint64_t msr_must_be; if ( vmx_basic_msr_high & (1u << 23) ) msr = MSR_IA32_VMX_TRUE_PROCBASED_CTLS; - rdmsr(msr, must_be_one, must_be_zero); + msr_must_be = rdmsr(msr); + must_be_one = (uint32_t)msr_must_be; + must_be_zero = (uint32_t)(msr_must_be >> 32); if ( must_be_one & (CPU_BASED_INVLPG_EXITING | CPU_BASED_CR3_LOAD_EXITING | CPU_BASED_CR3_STORE_EXITING) ) @@ -210,7 +219,7 @@ static int vmx_init_vmcs_config(void) if ( _vmx_secondary_exec_control & SECONDARY_EXEC_ENABLE_EPT ) { uint64_t cap; - rdmsrl(MSR_IA32_VMX_EPT_VPID_CAP, cap); + cap = rdmsr(MSR_IA32_VMX_EPT_VPID_CAP); if ( cap & VMX_EPT_SUPER_PAGE_1G ) ept_super_page_level_limit = 2; else if ( cap & VMX_EPT_SUPER_PAGE_2M ) @@ -425,7 +434,7 @@ void vmx_cpu_dead(unsigned int cpu) int vmx_cpu_up(void) { - u32 eax, edx; + uint64_t msr_content; int rc, bios_locked, cpu = smp_processor_id(); u64 cr0, vmx_cr0_fixed0, vmx_cr0_fixed1; @@ -436,8 +445,8 @@ int vmx_cpu_up(void) * the requred CRO fixed bits in VMX operation. 
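/*
 * For reference, a minimal sketch of the 64-bit MSR accessors that the
 * call sites throughout this patch are converted to.  The shapes below are
 * assumptions inferred from the converted callers; the authoritative
 * definitions are the msr.h changes accompanying this patch, which are not
 * shown in this hunk.
 */
static inline uint64_t rdmsr(unsigned int msr)
{
    uint32_t lo, hi;

    asm volatile ( "rdmsr" : "=a" (lo), "=d" (hi) : "c" (msr) );
    return lo | ((uint64_t)hi << 32);
}

static inline void wrmsr(unsigned int msr, uint64_t val)
{
    asm volatile ( "wrmsr" : : "c" (msr),
                   "a" ((uint32_t)val), "d" ((uint32_t)(val >> 32)) );
}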
*/ cr0 = read_cr0(); - rdmsrl(MSR_IA32_VMX_CR0_FIXED0, vmx_cr0_fixed0); - rdmsrl(MSR_IA32_VMX_CR0_FIXED1, vmx_cr0_fixed1); + vmx_cr0_fixed0 = rdmsr(MSR_IA32_VMX_CR0_FIXED0); + vmx_cr0_fixed1 = rdmsr(MSR_IA32_VMX_CR0_FIXED1); if ( (~cr0 & vmx_cr0_fixed0) || (cr0 & ~vmx_cr0_fixed1) ) { printk("CPU%d: some settings of host CR0 are " @@ -445,12 +454,12 @@ int vmx_cpu_up(void) return -EINVAL; } - rdmsr(IA32_FEATURE_CONTROL_MSR, eax, edx); + msr_content = rdmsr(IA32_FEATURE_CONTROL_MSR); - bios_locked = !!(eax & IA32_FEATURE_CONTROL_MSR_LOCK); + bios_locked = !!(msr_content & IA32_FEATURE_CONTROL_MSR_LOCK); if ( bios_locked ) { - if ( !(eax & (tboot_in_measured_env() + if ( !(msr_content & (tboot_in_measured_env() ? IA32_FEATURE_CONTROL_MSR_ENABLE_VMXON_INSIDE_SMX : IA32_FEATURE_CONTROL_MSR_ENABLE_VMXON_OUTSIDE_SMX)) ) { @@ -460,11 +469,11 @@ int vmx_cpu_up(void) } else { - eax = IA32_FEATURE_CONTROL_MSR_LOCK; - eax |= IA32_FEATURE_CONTROL_MSR_ENABLE_VMXON_OUTSIDE_SMX; + msr_content = IA32_FEATURE_CONTROL_MSR_LOCK; + msr_content |= IA32_FEATURE_CONTROL_MSR_ENABLE_VMXON_OUTSIDE_SMX; if ( test_bit(X86_FEATURE_SMXE, &boot_cpu_data.x86_capability) ) - eax |= IA32_FEATURE_CONTROL_MSR_ENABLE_VMXON_INSIDE_SMX; - wrmsr(IA32_FEATURE_CONTROL_MSR, eax, 0); + msr_content |= IA32_FEATURE_CONTROL_MSR_ENABLE_VMXON_INSIDE_SMX; + wrmsr(IA32_FEATURE_CONTROL_MSR, msr_content); } if ( (rc = vmx_init_vmcs_config()) != 0 ) @@ -480,8 +489,8 @@ int vmx_cpu_up(void) case -2: /* #UD or #GP */ if ( bios_locked && test_bit(X86_FEATURE_SMXE, &boot_cpu_data.x86_capability) && - (!(eax & IA32_FEATURE_CONTROL_MSR_ENABLE_VMXON_OUTSIDE_SMX) || - !(eax & IA32_FEATURE_CONTROL_MSR_ENABLE_VMXON_INSIDE_SMX)) ) + (!(msr_content & IA32_FEATURE_CONTROL_MSR_ENABLE_VMXON_OUTSIDE_SMX) || + !(msr_content & IA32_FEATURE_CONTROL_MSR_ENABLE_VMXON_INSIDE_SMX)) ) { printk("CPU%d: VMXON failed: perhaps because of TXT settings " "in your BIOS configuration?\n", cpu); @@ -743,9 +752,9 @@ static int construct_vmcs(struct vcpu *v __vmwrite(HOST_RIP, (unsigned long)vmx_asm_vmexit_handler); /* Host SYSENTER CS:RIP. */ - rdmsrl(MSR_IA32_SYSENTER_CS, sysenter_cs); + sysenter_cs = rdmsr(MSR_IA32_SYSENTER_CS); __vmwrite(HOST_SYSENTER_CS, sysenter_cs); - rdmsrl(MSR_IA32_SYSENTER_EIP, sysenter_eip); + sysenter_eip = rdmsr(MSR_IA32_SYSENTER_EIP); __vmwrite(HOST_SYSENTER_EIP, sysenter_eip); /* MSR intercepts. 
*/ @@ -849,7 +858,7 @@ static int construct_vmcs(struct vcpu *v { u64 host_pat, guest_pat; - rdmsrl(MSR_IA32_CR_PAT, host_pat); + host_pat = rdmsr(MSR_IA32_CR_PAT); guest_pat = MSR_IA32_CR_PAT_RESET; __vmwrite(HOST_PAT, host_pat); @@ -960,7 +969,7 @@ int vmx_add_host_load_msr(u32 msr) msr_area[msr_count].index = msr; msr_area[msr_count].mbz = 0; - rdmsrl(msr, msr_area[msr_count].data); + msr_area[msr_count].data = rdmsr(msr); curr->arch.hvm_vmx.host_msr_count = ++msr_count; __vmwrite(VM_EXIT_MSR_LOAD_COUNT, msr_count); diff -r 7994e7c5991e xen/arch/x86/hvm/vmx/vmx.c --- a/xen/arch/x86/hvm/vmx/vmx.c Wed May 26 10:11:50 2010 +0200 +++ b/xen/arch/x86/hvm/vmx/vmx.c Wed May 26 18:32:03 2010 +0200 @@ -69,8 +69,8 @@ static void vmx_cpuid_intercept( unsigned int *ecx, unsigned int *edx); static void vmx_wbinvd_intercept(void); static void vmx_fpu_dirty_intercept(void); -static int vmx_msr_read_intercept(struct cpu_user_regs *regs); -static int vmx_msr_write_intercept(struct cpu_user_regs *regs); +static int vmx_msr_read_intercept(unsigned int msr, uint64_t *msr_val); +static int vmx_msr_write_intercept(unsigned int msr, uint64_t msr_content); static void vmx_invlpg_intercept(unsigned long vaddr); static void __ept_sync_domain(void *info); @@ -150,24 +150,24 @@ static void vmx_save_host_msrs(void) int i; for ( i = 0; i < MSR_INDEX_SIZE; i++ ) - rdmsrl(msr_index[i], host_msr_state->msrs[i]); + host_msr_state->msrs[i] = rdmsr(msr_index[i]); } #define WRITE_MSR(address) \ guest_msr_state->msrs[VMX_INDEX_MSR_ ## address] = msr_content; \ set_bit(VMX_INDEX_MSR_ ## address, &guest_msr_state->flags); \ - wrmsrl(MSR_ ## address, msr_content); \ + wrmsr(MSR_ ## address, msr_content); \ set_bit(VMX_INDEX_MSR_ ## address, &host_msr_state->flags); \ break -static enum handler_return long_mode_do_msr_read(struct cpu_user_regs *regs) +static enum handler_return +long_mode_do_msr_read(unsigned int msr, uint64_t *msr_val) { u64 msr_content = 0; - u32 ecx = regs->ecx; struct vcpu *v = current; struct vmx_msr_state *guest_msr_state = &v->arch.hvm_vmx.msr_state; - switch ( ecx ) + switch ( msr ) { case MSR_EFER: msr_content = v->arch.hvm_vcpu.guest_efer; @@ -182,7 +182,7 @@ static enum handler_return long_mode_do_ break; case MSR_SHADOW_GS_BASE: - rdmsrl(MSR_SHADOW_GS_BASE, msr_content); + msr_content = rdmsr(MSR_SHADOW_GS_BASE); break; case MSR_STAR: @@ -205,25 +205,23 @@ static enum handler_return long_mode_do_ return HNDL_unhandled; } - HVM_DBG_LOG(DBG_LEVEL_0, "msr 0x%x content 0x%"PRIx64, ecx, msr_content); - - regs->eax = (u32)(msr_content >> 0); - regs->edx = (u32)(msr_content >> 32); + HVM_DBG_LOG(DBG_LEVEL_0, "msr 0x%x content 0x%"PRIx64, msr, msr_content); + + *msr_val = msr_content; return HNDL_done; } -static enum handler_return long_mode_do_msr_write(struct cpu_user_regs *regs) +static enum handler_return +long_mode_do_msr_write(unsigned int msr, uint64_t msr_content) { - u64 msr_content = (u32)regs->eax | ((u64)regs->edx << 32); - u32 ecx = regs->ecx; struct vcpu *v = current; struct vmx_msr_state *guest_msr_state = &v->arch.hvm_vmx.msr_state; struct vmx_msr_state *host_msr_state = &this_cpu(host_msr_state); - HVM_DBG_LOG(DBG_LEVEL_0, "msr 0x%x content 0x%"PRIx64, ecx, msr_content); - - switch ( ecx ) + HVM_DBG_LOG(DBG_LEVEL_0, "msr 0x%x content 0x%"PRIx64, msr, msr_content); + + switch ( msr ) { case MSR_EFER: if ( hvm_set_efer(msr_content) ) @@ -236,12 +234,12 @@ static enum handler_return long_mode_do_ if ( !is_canonical_address(msr_content) ) goto uncanonical_address; - if ( ecx == MSR_FS_BASE ) + 
if ( msr == MSR_FS_BASE ) __vmwrite(GUEST_FS_BASE, msr_content); - else if ( ecx == MSR_GS_BASE ) + else if ( msr == MSR_GS_BASE ) __vmwrite(GUEST_GS_BASE, msr_content); else - wrmsrl(MSR_SHADOW_GS_BASE, msr_content); + wrmsr(MSR_SHADOW_GS_BASE, msr_content); break; @@ -269,7 +267,7 @@ static enum handler_return long_mode_do_ return HNDL_done; uncanonical_address: - HVM_DBG_LOG(DBG_LEVEL_0, "Not cano address of msr write %x", ecx); + HVM_DBG_LOG(DBG_LEVEL_0, "Not cano address of msr write %x", msr); vmx_inject_hw_exception(TRAP_gp_fault, 0); exception_raised: return HNDL_exception_raised; @@ -289,7 +287,7 @@ static void vmx_restore_host_msrs(void) while ( host_msr_state->flags ) { i = find_first_set_bit(host_msr_state->flags); - wrmsrl(msr_index[i], host_msr_state->msrs[i]); + wrmsr(msr_index[i], host_msr_state->msrs[i]); clear_bit(i, &host_msr_state->flags); } } @@ -300,7 +298,7 @@ static void vmx_save_guest_msrs(struct v * We cannot cache SHADOW_GS_BASE while the VCPU runs, as it can * be updated at any time via SWAPGS, which we cannot trap. */ - rdmsrl(MSR_SHADOW_GS_BASE, v->arch.hvm_vmx.shadow_gs); + v->arch.hvm_vmx.shadow_gs = rdmsr(MSR_SHADOW_GS_BASE); } static void vmx_restore_guest_msrs(struct vcpu *v) @@ -312,7 +310,7 @@ static void vmx_restore_guest_msrs(struc guest_msr_state = &v->arch.hvm_vmx.msr_state; host_msr_state = &this_cpu(host_msr_state); - wrmsrl(MSR_SHADOW_GS_BASE, v->arch.hvm_vmx.shadow_gs); + wrmsr(MSR_SHADOW_GS_BASE, v->arch.hvm_vmx.shadow_gs); guest_flags = guest_msr_state->flags; @@ -324,7 +322,7 @@ static void vmx_restore_guest_msrs(struc "restore guest's index %d msr %x with value %lx", i, msr_index[i], guest_msr_state->msrs[i]); set_bit(i, &host_msr_state->flags); - wrmsrl(msr_index[i], guest_msr_state->msrs[i]); + wrmsr(msr_index[i], guest_msr_state->msrs[i]); clear_bit(i, &guest_flags); } @@ -338,7 +336,7 @@ static void vmx_restore_guest_msrs(struc } if ( cpu_has_rdtscp ) - wrmsrl(MSR_TSC_AUX, hvm_msr_tsc_aux(v)); + wrmsr(MSR_TSC_AUX, hvm_msr_tsc_aux(v)); } #else /* __i386__ */ @@ -370,11 +368,10 @@ static enum handler_return long_mode_do_ return HNDL_done; } -static enum handler_return long_mode_do_msr_write(struct cpu_user_regs *regs) +static enum handler_return +long_mode_do_msr_write(unsigned int msr, uint64_t msr_content) { - u64 msr_content = regs->eax | ((u64)regs->edx << 32); - - switch ( regs->ecx ) + switch ( msr ) { case MSR_EFER: if ( hvm_set_efer(msr_content) ) @@ -1809,14 +1806,13 @@ static int is_last_branch_msr(u32 ecx) return 0; } -static int vmx_msr_read_intercept(struct cpu_user_regs *regs) +static int vmx_msr_read_intercept(unsigned int msr, uint64_t *msr_val) { u64 msr_content = 0; - u32 ecx = regs->ecx, eax, edx; - - HVM_DBG_LOG(DBG_LEVEL_1, "ecx=%x", ecx); - - switch ( ecx ) + + HVM_DBG_LOG(DBG_LEVEL_1, "ecx=%x", msr); + + switch ( msr ) { case MSR_IA32_SYSENTER_CS: msr_content = (u32)__vmread(GUEST_SYSENTER_CS); @@ -1836,17 +1832,17 @@ static int vmx_msr_read_intercept(struct case MSR_IA32_VMX_BASIC...MSR_IA32_VMX_PROCBASED_CTLS2: goto gp_fault; case MSR_IA32_MISC_ENABLE: - rdmsrl(MSR_IA32_MISC_ENABLE, msr_content); + msr_content = rdmsr(MSR_IA32_MISC_ENABLE); /* Debug Trace Store is not supported. 
*/ msr_content |= MSR_IA32_MISC_ENABLE_BTS_UNAVAIL | MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL; break; default: - if ( vpmu_do_rdmsr(regs) ) + if ( vpmu_do_rdmsr(msr, &msr_content) ) goto done; - if ( passive_domain_do_rdmsr(regs) ) + if ( passive_domain_do_rdmsr(msr, &msr_content) ) goto done; - switch ( long_mode_do_msr_read(regs) ) + switch ( long_mode_do_msr_read(msr, &msr_content) ) { case HNDL_unhandled: break; @@ -1856,36 +1852,32 @@ static int vmx_msr_read_intercept(struct goto done; } - if ( vmx_read_guest_msr(ecx, &msr_content) == 0 ) + if ( vmx_read_guest_msr(msr, &msr_content) == 0 ) break; - if ( is_last_branch_msr(ecx) ) + if ( is_last_branch_msr(msr) ) { msr_content = 0; break; } - if ( rdmsr_viridian_regs(ecx, &msr_content) || - rdmsr_hypervisor_regs(ecx, &msr_content) ) + if ( rdmsr_viridian_regs(msr, &msr_content) || + rdmsr_hypervisor_regs(msr, &msr_content) ) break; - if ( rdmsr_safe(ecx, eax, edx) == 0 ) - { - msr_content = ((uint64_t)edx << 32) | eax; + if ( rdmsr_safe(msr, msr_content) == 0 ) break; - } goto gp_fault; } - regs->eax = (uint32_t)msr_content; - regs->edx = (uint32_t)(msr_content >> 32); + *msr_val = msr_content; done: - HVMTRACE_3D (MSR_READ, ecx, regs->eax, regs->edx); - HVM_DBG_LOG(DBG_LEVEL_1, "returns: ecx=%x, eax=%lx, edx=%lx", - ecx, (unsigned long)regs->eax, - (unsigned long)regs->edx); + HVMTRACE_3D (MSR_READ, msr, + (uint32_t)msr_content, (uint32_t)(msr_content >> 32)); + HVM_DBG_LOG(DBG_LEVEL_1, "returns: ecx=%x, msr_value=0x%"PRIx64, + msr, msr_content); return X86EMUL_OKAY; gp_fault: @@ -1953,20 +1945,17 @@ void vmx_vlapic_msr_changed(struct vcpu vmx_vmcs_exit(v); } -static int vmx_msr_write_intercept(struct cpu_user_regs *regs) +static int vmx_msr_write_intercept(unsigned int msr, uint64_t msr_content) { - u32 ecx = regs->ecx; - u64 msr_content; struct vcpu *v = current; - HVM_DBG_LOG(DBG_LEVEL_1, "ecx=%x, eax=%x, edx=%x", - ecx, (u32)regs->eax, (u32)regs->edx); - - msr_content = (u32)regs->eax | ((u64)regs->edx << 32); - - HVMTRACE_3D (MSR_WRITE, ecx, regs->eax, regs->edx); - - switch ( ecx ) + HVM_DBG_LOG(DBG_LEVEL_1, "ecx=%x, msr_value=0x%"PRIx64, + msr, msr_content); + + HVMTRACE_3D (MSR_WRITE, msr, + (uint32_t)msr_content, (uint32_t)(msr_content >> 32)); + + switch ( msr ) { case MSR_IA32_SYSENTER_CS: __vmwrite(GUEST_SYSENTER_CS, msr_content); @@ -1996,7 +1985,7 @@ static int vmx_msr_write_intercept(struc } if ( (rc < 0) || - (vmx_add_host_load_msr(ecx) < 0) ) + (vmx_add_host_load_msr(msr) < 0) ) vmx_inject_hw_exception(TRAP_machine_check, 0); else { @@ -2011,20 +2000,20 @@ static int vmx_msr_write_intercept(struc case MSR_IA32_VMX_BASIC...MSR_IA32_VMX_PROCBASED_CTLS2: goto gp_fault; default: - if ( vpmu_do_wrmsr(regs) ) + if ( vpmu_do_wrmsr(msr, msr_content) ) return X86EMUL_OKAY; - if ( passive_domain_do_wrmsr(regs) ) + if ( passive_domain_do_wrmsr(msr, msr_content) ) return X86EMUL_OKAY; - if ( wrmsr_viridian_regs(ecx, msr_content) ) + if ( wrmsr_viridian_regs(msr, msr_content) ) break; - switch ( long_mode_do_msr_write(regs) ) + switch ( long_mode_do_msr_write(msr, msr_content) ) { case HNDL_unhandled: - if ( (vmx_write_guest_msr(ecx, msr_content) != 0) && - !is_last_branch_msr(ecx) ) - wrmsr_hypervisor_regs(ecx, msr_content); + if ( (vmx_write_guest_msr(msr, msr_content) != 0) && + !is_last_branch_msr(msr) ) + wrmsr_hypervisor_regs(msr, msr_content); break; case HNDL_exception_raised: return X86EMUL_EXCEPTION; @@ -2568,16 +2557,25 @@ asmlinkage void vmx_vmexit_handler(struc vmx_dr_access(exit_qualification, regs); break; case 
EXIT_REASON_MSR_READ: + { + uint64_t msr_content; inst_len = __get_instruction_length(); /* Safe: RDMSR */ - if ( hvm_msr_read_intercept(regs) == X86EMUL_OKAY ) + if ( hvm_msr_read_intercept(regs->ecx, &msr_content) == X86EMUL_OKAY ) { + regs->eax = (uint32_t)msr_content; + regs->edx = (uint32_t)(msr_content >> 32); + __update_guest_eip(inst_len); + } + break; + } + case EXIT_REASON_MSR_WRITE: + { + uint64_t msr_content; + inst_len = __get_instruction_length(); /* Safe: WRMSR */ + msr_content = (uint64_t)(regs->edx) << 32 | regs->eax; + if ( hvm_msr_write_intercept(regs->ecx, msr_content) == X86EMUL_OKAY ) __update_guest_eip(inst_len); break; - case EXIT_REASON_MSR_WRITE: - inst_len = __get_instruction_length(); /* Safe: WRMSR */ - if ( hvm_msr_write_intercept(regs) == X86EMUL_OKAY ) - __update_guest_eip(inst_len); - break; - + } case EXIT_REASON_MWAIT_INSTRUCTION: case EXIT_REASON_MONITOR_INSTRUCTION: case EXIT_REASON_VMCLEAR: diff -r 7994e7c5991e xen/arch/x86/hvm/vmx/vpmu_core2.c --- a/xen/arch/x86/hvm/vmx/vpmu_core2.c Wed May 26 10:11:50 2010 +0200 +++ b/xen/arch/x86/hvm/vmx/vpmu_core2.c Wed May 26 18:32:03 2010 +0200 @@ -189,9 +189,9 @@ static inline void __core2_vpmu_save(str struct core2_vpmu_context *core2_vpmu_cxt = vcpu_vpmu(v)->context; for ( i = 0; i < core2_counters.num; i++ ) - rdmsrl(core2_counters.msr[i], core2_vpmu_cxt->counters[i]); + core2_vpmu_cxt->counters[i] = rdmsr(core2_counters.msr[i]); for ( i = 0; i < core2_get_pmc_count(); i++ ) - rdmsrl(MSR_IA32_PERFCTR0+i, core2_vpmu_cxt->arch_msr_pair[i].counter); + core2_vpmu_cxt->arch_msr_pair[i].counter = rdmsr(MSR_IA32_PERFCTR0+i); core2_vpmu_cxt->hw_lapic_lvtpc = apic_read(APIC_LVTPC); apic_write(APIC_LVTPC, PMU_APIC_VECTOR | APIC_LVT_MASKED); } @@ -220,14 +220,14 @@ static inline void __core2_vpmu_load(str struct core2_vpmu_context *core2_vpmu_cxt = vcpu_vpmu(v)->context; for ( i = 0; i < core2_counters.num; i++ ) - wrmsrl(core2_counters.msr[i], core2_vpmu_cxt->counters[i]); + wrmsr(core2_counters.msr[i], core2_vpmu_cxt->counters[i]); for ( i = 0; i < core2_get_pmc_count(); i++ ) - wrmsrl(MSR_IA32_PERFCTR0+i, core2_vpmu_cxt->arch_msr_pair[i].counter); + wrmsr(MSR_IA32_PERFCTR0+i, core2_vpmu_cxt->arch_msr_pair[i].counter); for ( i = 0; i < core2_ctrls.num; i++ ) - wrmsrl(core2_ctrls.msr[i], core2_vpmu_cxt->ctrls[i]); + wrmsr(core2_ctrls.msr[i], core2_vpmu_cxt->ctrls[i]); for ( i = 0; i < core2_get_pmc_count(); i++ ) - wrmsrl(MSR_P6_EVNTSEL0+i, core2_vpmu_cxt->arch_msr_pair[i].control); + wrmsr(MSR_P6_EVNTSEL0+i, core2_vpmu_cxt->arch_msr_pair[i].control); apic_write_around(APIC_LVTPC, core2_vpmu_cxt->hw_lapic_lvtpc); } @@ -253,7 +253,7 @@ static int core2_vpmu_alloc_resource(str if ( !acquire_pmu_ownership(PMU_OWNER_HVM) ) return 0; - wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0); + wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0); if ( vmx_add_host_load_msr(MSR_CORE_PERF_GLOBAL_CTRL) ) return 0; @@ -328,10 +328,9 @@ static int core2_vpmu_msr_common_check(u return 1; } -static int core2_vpmu_do_wrmsr(struct cpu_user_regs *regs) +static int core2_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content) { - u32 ecx = regs->ecx; - u64 msr_content, global_ctrl, non_global_ctrl; + u64 global_ctrl, non_global_ctrl; char pmu_enable = 0; int i, tmp; int type = -1, index = -1; @@ -339,12 +338,11 @@ static int core2_vpmu_do_wrmsr(struct cp struct vpmu_struct *vpmu = vcpu_vpmu(v); struct core2_vpmu_context *core2_vpmu_cxt = NULL; - if ( !core2_vpmu_msr_common_check(ecx, &type, &index) ) + if ( !core2_vpmu_msr_common_check(msr, &type, &index) ) return 0; - 
msr_content = (u32)regs->eax | ((u64)regs->edx << 32); core2_vpmu_cxt = vpmu->context; - switch ( ecx ) + switch ( msr ) { case MSR_CORE_PERF_GLOBAL_OVF_CTRL: core2_vpmu_cxt->global_ovf_status &= ~msr_content; @@ -366,13 +364,13 @@ static int core2_vpmu_do_wrmsr(struct cp global_ctrl = msr_content; for ( i = 0; i < core2_get_pmc_count(); i++ ) { - rdmsrl(MSR_P6_EVNTSEL0+i, non_global_ctrl); + non_global_ctrl = rdmsr(MSR_P6_EVNTSEL0+i); core2_vpmu_cxt->pmu_enable->arch_pmc_enable[i] = global_ctrl & (non_global_ctrl >> 22) & 1; global_ctrl >>= 1; } - rdmsrl(MSR_CORE_PERF_FIXED_CTR_CTRL, non_global_ctrl); + non_global_ctrl = rdmsr(MSR_CORE_PERF_FIXED_CTR_CTRL); global_ctrl = msr_content >> 32; for ( i = 0; i < 3; i++ ) { @@ -395,7 +393,7 @@ static int core2_vpmu_do_wrmsr(struct cp } break; default: - tmp = ecx - MSR_P6_EVNTSEL0; + tmp = msr - MSR_P6_EVNTSEL0; vmx_read_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL, &global_ctrl); if ( tmp >= 0 && tmp < core2_get_pmc_count() ) core2_vpmu_cxt->pmu_enable->arch_pmc_enable[tmp] = @@ -445,7 +443,7 @@ static int core2_vpmu_do_wrmsr(struct cp if (inject_gp) vmx_inject_hw_exception(TRAP_gp_fault, 0); else - wrmsrl(ecx, msr_content); + wrmsr(msr, msr_content); } else vmx_write_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL, msr_content); @@ -453,7 +451,7 @@ static int core2_vpmu_do_wrmsr(struct cp return 1; } -static int core2_vpmu_do_rdmsr(struct cpu_user_regs *regs) +static int core2_vpmu_do_rdmsr(unsigned int msr, uint64_t *msr_val) { u64 msr_content = 0; int type = -1, index = -1; @@ -461,11 +459,11 @@ static int core2_vpmu_do_rdmsr(struct cp struct vpmu_struct *vpmu = vcpu_vpmu(v); struct core2_vpmu_context *core2_vpmu_cxt = NULL; - if ( !core2_vpmu_msr_common_check(regs->ecx, &type, &index) ) + if ( !core2_vpmu_msr_common_check(msr, &type, &index) ) return 0; core2_vpmu_cxt = vpmu->context; - switch ( regs->ecx ) + switch ( msr ) { case MSR_CORE_PERF_GLOBAL_OVF_CTRL: break; @@ -476,11 +474,10 @@ static int core2_vpmu_do_rdmsr(struct cp vmx_read_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL, &msr_content); break; default: - rdmsrl(regs->ecx, msr_content); + msr_content = rdmsr(msr); } - regs->eax = msr_content & 0xFFFFFFFF; - regs->edx = msr_content >> 32; + *msr_val = msr_content; return 1; } @@ -494,12 +491,12 @@ static int core2_vpmu_do_interrupt(struc struct core2_vpmu_context *core2_vpmu_cxt = vpmu->context; struct vlapic *vlapic = vcpu_vlapic(v); - rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, msr_content); + msr_content = rdmsr(MSR_CORE_PERF_GLOBAL_STATUS); if ( !msr_content ) return 0; core2_vpmu_cxt->global_ovf_status |= msr_content; msr_content = 0xC000000700000000 | ((1 << core2_get_pmc_count()) - 1); - wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, msr_content); + wrmsr(MSR_CORE_PERF_GLOBAL_OVF_CTRL, msr_content); apic_write_around(APIC_LVTPC, apic_read(APIC_LVTPC) & ~APIC_LVT_MASKED); diff -r 7994e7c5991e xen/arch/x86/hvm/vpmu.c --- a/xen/arch/x86/hvm/vpmu.c Wed May 26 10:11:50 2010 +0200 +++ b/xen/arch/x86/hvm/vpmu.c Wed May 26 18:32:03 2010 +0200 @@ -36,21 +36,21 @@ static int __read_mostly opt_vpmu_enabled; boolean_param("vpmu", opt_vpmu_enabled); -int vpmu_do_wrmsr(struct cpu_user_regs *regs) +int vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content) { struct vpmu_struct *vpmu = vcpu_vpmu(current); if ( vpmu->arch_vpmu_ops ) - return vpmu->arch_vpmu_ops->do_wrmsr(regs); + return vpmu->arch_vpmu_ops->do_wrmsr(msr, msr_content); return 0; } -int vpmu_do_rdmsr(struct cpu_user_regs *regs) +int vpmu_do_rdmsr(unsigned int msr, uint64_t *msr_val) { struct vpmu_struct *vpmu = 
vcpu_vpmu(current); if ( vpmu->arch_vpmu_ops ) - return vpmu->arch_vpmu_ops->do_rdmsr(regs); + return vpmu->arch_vpmu_ops->do_rdmsr(msr, msr_val); return 0; } diff -r 7994e7c5991e xen/arch/x86/microcode_amd.c --- a/xen/arch/x86/microcode_amd.c Wed May 26 10:11:50 2010 +0200 +++ b/xen/arch/x86/microcode_amd.c Wed May 26 18:32:03 2010 +0200 @@ -47,7 +47,6 @@ struct equiv_cpu_entry *equiv_cpu_table; static int collect_cpu_info(int cpu, struct cpu_signature *csig) { struct cpuinfo_x86 *c = &cpu_data[cpu]; - uint32_t dummy; memset(csig, 0, sizeof(*csig)); @@ -58,7 +57,7 @@ static int collect_cpu_info(int cpu, str return -EINVAL; } - rdmsr(MSR_AMD_PATCHLEVEL, csig->rev, dummy); + csig->rev = rdmsr(MSR_AMD_PATCHLEVEL); printk(KERN_INFO "microcode: collect_cpu_info: patch_id=0x%x\n", csig->rev); @@ -126,7 +125,7 @@ static int apply_microcode(int cpu) { unsigned long flags; struct ucode_cpu_info *uci = &per_cpu(ucode_cpu_info, cpu); - uint32_t rev, dummy; + uint32_t rev; struct microcode_amd *mc_amd = uci->mc.mc_amd; /* We should bind the task to the CPU */ @@ -137,10 +136,10 @@ static int apply_microcode(int cpu) spin_lock_irqsave(µcode_update_lock, flags); - wrmsrl(MSR_AMD_PATCHLOADER, (unsigned long)&mc_amd->hdr.data_code); + wrmsr(MSR_AMD_PATCHLOADER, (unsigned long)&mc_amd->hdr.data_code); /* get patch id after patching */ - rdmsr(MSR_AMD_PATCHLEVEL, rev, dummy); + rev = rdmsr(MSR_AMD_PATCHLEVEL); spin_unlock_irqrestore(µcode_update_lock, flags); diff -r 7994e7c5991e xen/arch/x86/microcode_intel.c --- a/xen/arch/x86/microcode_intel.c Wed May 26 10:11:50 2010 +0200 +++ b/xen/arch/x86/microcode_intel.c Wed May 26 18:32:03 2010 +0200 @@ -62,6 +62,7 @@ static DEFINE_SPINLOCK(microcode_update_ static int collect_cpu_info(int cpu_num, struct cpu_signature *csig) { struct cpuinfo_x86 *c = &cpu_data[cpu_num]; + uint64_t msr_content; unsigned int val[2]; BUG_ON(cpu_num != smp_processor_id()); @@ -81,15 +82,17 @@ static int collect_cpu_info(int cpu_num, if ( (c->x86_model >= 5) || (c->x86 > 6) ) { /* get processor flags from MSR 0x17 */ - rdmsr(MSR_IA32_PLATFORM_ID, val[0], val[1]); + msr_content = rdmsr(MSR_IA32_PLATFORM_ID); + val[1] = msr_content >> 32; csig->pf = 1 << ((val[1] >> 18) & 7); } - wrmsr(MSR_IA32_UCODE_REV, 0, 0); + wrmsr(MSR_IA32_UCODE_REV, 0); /* see notes above for revision 1.07. Apparent chip bug */ sync_core(); /* get the current revision from MSR 0x8B */ - rdmsr(MSR_IA32_UCODE_REV, val[0], csig->rev); + msr_content = rdmsr(MSR_IA32_UCODE_REV); + csig->rev = (uint32_t)msr_content; pr_debug("microcode: collect_cpu_info : sig=0x%x, pf=0x%x, rev=0x%x\n", csig->sig, csig->pf, csig->rev); @@ -250,6 +253,7 @@ static int apply_microcode(int cpu) { unsigned long flags; unsigned int val[2]; + uint64_t msr_content; int cpu_num = raw_smp_processor_id(); struct ucode_cpu_info *uci = &per_cpu(ucode_cpu_info, cpu_num); @@ -263,16 +267,15 @@ static int apply_microcode(int cpu) spin_lock_irqsave(µcode_update_lock, flags); /* write microcode via MSR 0x79 */ - wrmsr(MSR_IA32_UCODE_WRITE, - (unsigned long) uci->mc.mc_intel->bits, - (unsigned long) uci->mc.mc_intel->bits >> 16 >> 16); - wrmsr(MSR_IA32_UCODE_REV, 0, 0); + wrmsr(MSR_IA32_UCODE_WRITE, (unsigned long) uci->mc.mc_intel->bits); + wrmsr(MSR_IA32_UCODE_REV, 0); /* see notes above for revision 1.07. 
Apparent chip bug */ sync_core(); /* get the current revision from MSR 0x8B */ - rdmsr(MSR_IA32_UCODE_REV, val[0], val[1]); + msr_content = rdmsr(MSR_IA32_UCODE_REV); + val[1] = msr_content >> 32; spin_unlock_irqrestore(µcode_update_lock, flags); if ( val[1] != uci->mc.mc_intel->hdr.rev ) diff -r 7994e7c5991e xen/arch/x86/nmi.c --- a/xen/arch/x86/nmi.c Wed May 26 10:11:50 2010 +0200 +++ b/xen/arch/x86/nmi.c Wed May 26 18:32:03 2010 +0200 @@ -147,7 +147,7 @@ static void disable_lapic_nmi_watchdog(v return; switch (boot_cpu_data.x86_vendor) { case X86_VENDOR_AMD: - wrmsr(MSR_K7_EVNTSEL0, 0, 0); + wrmsr(MSR_K7_EVNTSEL0, 0); break; case X86_VENDOR_INTEL: switch (boot_cpu_data.x86) { @@ -155,14 +155,14 @@ static void disable_lapic_nmi_watchdog(v if (boot_cpu_data.x86_model > 0xd) break; - wrmsr(MSR_P6_EVNTSEL0, 0, 0); + wrmsr(MSR_P6_EVNTSEL0, 0); break; case 15: if (boot_cpu_data.x86_model > 0x4) break; - wrmsr(MSR_P4_IQ_CCCR0, 0, 0); - wrmsr(MSR_P4_CRU_ESCR0, 0, 0); + wrmsr(MSR_P4_IQ_CCCR0, 0); + wrmsr(MSR_P4_CRU_ESCR0, 0); break; } break; @@ -219,7 +219,7 @@ static void __pminit clear_msr_range(uns unsigned int i; for (i = 0; i < n; i++) - wrmsr(base+i, 0, 0); + wrmsr(base+i, 0); } static inline void write_watchdog_counter(const char *descr) @@ -229,7 +229,7 @@ static inline void write_watchdog_counte do_div(count, nmi_hz); if(descr) Dprintk("setting %s to -0x%"PRIx64"\n", descr, count); - wrmsrl(nmi_perfctr_msr, 0 - count); + wrmsr(nmi_perfctr_msr, 0 - count); } static void __pminit setup_k7_watchdog(void) @@ -246,11 +246,11 @@ static void __pminit setup_k7_watchdog(v | K7_EVNTSEL_USR | K7_NMI_EVENT; - wrmsr(MSR_K7_EVNTSEL0, evntsel, 0); + wrmsr(MSR_K7_EVNTSEL0, evntsel); write_watchdog_counter("K7_PERFCTR0"); apic_write(APIC_LVTPC, APIC_DM_NMI); evntsel |= K7_EVNTSEL_ENABLE; - wrmsr(MSR_K7_EVNTSEL0, evntsel, 0); + wrmsr(MSR_K7_EVNTSEL0, evntsel); } static void __pminit setup_p6_watchdog(unsigned counter) @@ -267,18 +267,18 @@ static void __pminit setup_p6_watchdog(u | P6_EVNTSEL_USR | counter; - wrmsr(MSR_P6_EVNTSEL0, evntsel, 0); + wrmsr(MSR_P6_EVNTSEL0, evntsel); write_watchdog_counter("P6_PERFCTR0"); apic_write(APIC_LVTPC, APIC_DM_NMI); evntsel |= P6_EVNTSEL0_ENABLE; - wrmsr(MSR_P6_EVNTSEL0, evntsel, 0); + wrmsr(MSR_P6_EVNTSEL0, evntsel); } static int __pminit setup_p4_watchdog(void) { - unsigned int misc_enable, dummy; + uint64_t misc_enable; - rdmsr(MSR_IA32_MISC_ENABLE, misc_enable, dummy); + misc_enable = rdmsr(MSR_IA32_MISC_ENABLE); if (!(misc_enable & MSR_IA32_MISC_ENABLE_PERF_AVAIL)) return 0; @@ -304,11 +304,11 @@ static int __pminit setup_p4_watchdog(vo clear_msr_range(MSR_P4_BPU_CCCR0, 18); clear_msr_range(MSR_P4_BPU_PERFCTR0, 18); - wrmsr(MSR_P4_CRU_ESCR0, P4_NMI_CRU_ESCR0, 0); - wrmsr(MSR_P4_IQ_CCCR0, P4_NMI_IQ_CCCR0 & ~P4_CCCR_ENABLE, 0); + wrmsr(MSR_P4_CRU_ESCR0, P4_NMI_CRU_ESCR0); + wrmsr(MSR_P4_IQ_CCCR0, P4_NMI_IQ_CCCR0 & ~P4_CCCR_ENABLE); write_watchdog_counter("P4_IQ_COUNTER0"); apic_write(APIC_LVTPC, APIC_DM_NMI); - wrmsr(MSR_P4_IQ_CCCR0, nmi_p4_cccr_val, 0); + wrmsr(MSR_P4_IQ_CCCR0, nmi_p4_cccr_val); return 1; } @@ -442,7 +442,7 @@ void nmi_watchdog_tick(struct cpu_user_r * - LVTPC is masked on interrupt and must be * unmasked by the LVTPC handler. 
*/ - wrmsr(MSR_P4_IQ_CCCR0, nmi_p4_cccr_val, 0); + wrmsr(MSR_P4_IQ_CCCR0, nmi_p4_cccr_val); apic_write(APIC_LVTPC, APIC_DM_NMI); } else if ( nmi_perfctr_msr == MSR_P6_PERFCTR0 ) diff -r 7994e7c5991e xen/arch/x86/oprofile/nmi_int.c --- a/xen/arch/x86/oprofile/nmi_int.c Wed May 26 10:11:50 2010 +0200 +++ b/xen/arch/x86/oprofile/nmi_int.c Wed May 26 18:32:03 2010 +0200 @@ -35,14 +35,14 @@ static unsigned long saved_lvtpc[NR_CPUS static char *cpu_type; -static int passive_domain_msr_op_checks(struct cpu_user_regs *regs ,int *typep, int *indexp) +static int passive_domain_msr_op_checks(unsigned int msr,int *typep, int *indexp) { struct vpmu_struct *vpmu = vcpu_vpmu(current); if ( model == NULL ) return 0; if ( model->is_arch_pmu_msr == NULL ) return 0; - if ( !model->is_arch_pmu_msr((u64)regs->ecx, typep, indexp) ) + if ( !model->is_arch_pmu_msr(msr, typep, indexp) ) return 0; if ( !(vpmu->flags & PASSIVE_DOMAIN_ALLOCATED) ) @@ -51,29 +51,26 @@ static int passive_domain_msr_op_checks( return 1; } -int passive_domain_do_rdmsr(struct cpu_user_regs *regs) +int passive_domain_do_rdmsr(unsigned int msr, uint64_t *msr_val) { u64 msr_content; int type, index; - if ( !passive_domain_msr_op_checks(regs, &type, &index)) + if ( !passive_domain_msr_op_checks(msr, &type, &index)) return 0; model->load_msr(current, type, index, &msr_content); - regs->eax = msr_content & 0xFFFFFFFF; - regs->edx = msr_content >> 32; + *msr_val = msr_content; return 1; } -int passive_domain_do_wrmsr(struct cpu_user_regs *regs) +int passive_domain_do_wrmsr(unsigned int msr, uint64_t msr_content) { - u64 msr_content; int type, index; - if ( !passive_domain_msr_op_checks(regs, &type, &index)) + if ( !passive_domain_msr_op_checks(msr, &type, &index)) return 0; - msr_content = (u32)regs->eax | ((u64)regs->edx << 32); model->save_msr(current, type, index, msr_content); return 1; } @@ -109,15 +106,11 @@ static void nmi_cpu_save_registers(struc unsigned int i; for (i = 0; i < nr_ctrs; ++i) { - rdmsr(counters[i].addr, - counters[i].saved.low, - counters[i].saved.high); + counters[i].value = rdmsr(counters[i].addr); } for (i = 0; i < nr_ctrls; ++i) { - rdmsr(controls[i].addr, - controls[i].saved.low, - controls[i].saved.high); + controls[i].value = rdmsr(controls[i].addr); } } @@ -227,15 +220,11 @@ static void nmi_restore_registers(struct unsigned int i; for (i = 0; i < nr_ctrls; ++i) { - wrmsr(controls[i].addr, - controls[i].saved.low, - controls[i].saved.high); + wrmsr(controls[i].addr, controls[i].value); } for (i = 0; i < nr_ctrs; ++i) { - wrmsr(counters[i].addr, - counters[i].saved.low, - counters[i].saved.high); + wrmsr(counters[i].addr, counters[i].value); } } diff -r 7994e7c5991e xen/arch/x86/oprofile/op_model_athlon.c --- a/xen/arch/x86/oprofile/op_model_athlon.c Wed May 26 10:11:50 2010 +0200 +++ b/xen/arch/x86/oprofile/op_model_athlon.c Wed May 26 18:32:03 2010 +0200 @@ -26,15 +26,15 @@ #define NUM_COUNTERS 4 #define NUM_CONTROLS 4 -#define CTR_READ(l,h,msrs,c) do {rdmsr(msrs->counters[(c)].addr, (l), (h));} while (0) -#define CTR_WRITE(l,msrs,c) do {wrmsr(msrs->counters[(c)].addr, -(unsigned int)(l), -1);} while (0) +#define CTR_READ(msr_content,msrs,c) (msr_content) = rdmsr(msrs->counters[(c)].addr) +#define CTR_WRITE(msr_content,msrs,c) do {wrmsr(msrs->counters[(c)].addr, -(msr_content));} while (0) #define CTR_OVERFLOWED(n) (!((n) & (1U<<31))) -#define CTRL_READ(l,h,msrs,c) do {rdmsr(msrs->controls[(c)].addr, (l), (h));} while (0) -#define CTRL_WRITE(l,h,msrs,c) do {wrmsr(msrs->controls[(c)].addr, (l), (h));} while (0) 
+#define CTRL_READ(msr_content,msrs,c) (msr_content) = rdmsr(msrs->controls[(c)].addr) +#define CTRL_WRITE(msr_content,msrs,c) do {wrmsr(msrs->controls[(c)].addr, (msr_content));} while (0) #define CTRL_SET_ACTIVE(n) (n |= (1<<22)) #define CTRL_SET_INACTIVE(n) (n &= ~(1<<22)) -#define CTRL_CLEAR(lo, hi) (lo &= (1<<21), hi = 0) +#define CTRL_CLEAR(val) (val &= (1<<21)) #define CTRL_SET_ENABLE(val) (val |= 1<<20) #define CTRL_SET_USR(val,u) (val |= ((u & 1) << 16)) #define CTRL_SET_KERN(val,k) (val |= ((k & 1) << 17)) @@ -64,14 +64,15 @@ static void athlon_fill_in_addresses(str static void athlon_setup_ctrs(struct op_msrs const * const msrs) { - unsigned int low, high; + uint64_t msr_content; + uint32_t high; int i; /* clear all counters */ for (i = 0 ; i < NUM_CONTROLS; ++i) { - CTRL_READ(low, high, msrs, i); - CTRL_CLEAR(low, high); - CTRL_WRITE(low, high, msrs, i); + CTRL_READ(msr_content, msrs, i); + CTRL_CLEAR(msr_content); + CTRL_WRITE(msr_content, msrs, i); } /* avoid a false detection of ctr overflows in NMI handler */ @@ -86,17 +87,19 @@ static void athlon_setup_ctrs(struct op_ CTR_WRITE(counter_config[i].count, msrs, i); - CTRL_READ(low, high, msrs, i); - CTRL_CLEAR(low, high); - CTRL_SET_ENABLE(low); - CTRL_SET_USR(low, counter_config[i].user); - CTRL_SET_KERN(low, counter_config[i].kernel); - CTRL_SET_UM(low, counter_config[i].unit_mask); - CTRL_SET_EVENT_LOW(low, counter_config[i].event); + CTRL_READ(msr_content, msrs, i); + CTRL_CLEAR(msr_content); + CTRL_SET_ENABLE(msr_content); + CTRL_SET_USR(msr_content, counter_config[i].user); + CTRL_SET_KERN(msr_content, counter_config[i].kernel); + CTRL_SET_UM(msr_content, counter_config[i].unit_mask); + CTRL_SET_EVENT_LOW(msr_content, counter_config[i].event); + high = (uint32_t)(msr_content >> 32); CTRL_SET_EVENT_HIGH(high, counter_config[i].event); CTRL_SET_HOST_ONLY(high, 0); CTRL_SET_GUEST_ONLY(high, 0); - CTRL_WRITE(low, high, msrs, i); + msr_content = (uint32_t)msr_content | ((uint64_t)(high) << 32); + CTRL_WRITE(msr_content, msrs, i); } else { reset_value[i] = 0; } @@ -108,7 +111,7 @@ static int athlon_check_ctrs(unsigned in struct cpu_user_regs * const regs) { - unsigned int low, high; + uint64_t msr_content; int i; int ovf = 0; unsigned long eip = regs->eip; @@ -128,8 +131,8 @@ static int athlon_check_ctrs(unsigned in } for (i = 0 ; i < NUM_COUNTERS; ++i) { - CTR_READ(low, high, msrs, i); - if (CTR_OVERFLOWED(low)) { + CTR_READ(msr_content, msrs, i); + if (CTR_OVERFLOWED(msr_content)) { xenoprof_log_event(current, regs, eip, mode, i); CTR_WRITE(reset_value[i], msrs, i); ovf = 1; @@ -143,13 +146,13 @@ static int athlon_check_ctrs(unsigned in static void athlon_start(struct op_msrs const * const msrs) { - unsigned int low, high; + uint64_t msr_content; int i; for (i = 0 ; i < NUM_COUNTERS ; ++i) { if (reset_value[i]) { - CTRL_READ(low, high, msrs, i); - CTRL_SET_ACTIVE(low); - CTRL_WRITE(low, high, msrs, i); + CTRL_READ(msr_content, msrs, i); + CTRL_SET_ACTIVE(msr_content); + CTRL_WRITE(msr_content, msrs, i); } } } @@ -157,15 +160,15 @@ static void athlon_start(struct op_msrs static void athlon_stop(struct op_msrs const * const msrs) { - unsigned int low,high; + uint64_t msr_content; int i; /* Subtle: stop on all counters to avoid race with * setting our pm callback */ for (i = 0 ; i < NUM_COUNTERS ; ++i) { - CTRL_READ(low, high, msrs, i); - CTRL_SET_INACTIVE(low); - CTRL_WRITE(low, high, msrs, i); + CTRL_READ(msr_content, msrs, i); + CTRL_SET_INACTIVE(msr_content); + CTRL_WRITE(msr_content, msrs, i); } } diff -r 7994e7c5991e 
xen/arch/x86/oprofile/op_model_p4.c --- a/xen/arch/x86/oprofile/op_model_p4.c Wed May 26 10:11:50 2010 +0200 +++ b/xen/arch/x86/oprofile/op_model_p4.c Wed May 26 18:32:03 2010 +0200 @@ -357,8 +357,8 @@ static const struct p4_event_binding p4_ #define ESCR_SET_OS_1(escr, os) ((escr) |= (((os) & 1) << 1)) #define ESCR_SET_EVENT_SELECT(escr, sel) ((escr) |= (((sel) & 0x3f) << 25)) #define ESCR_SET_EVENT_MASK(escr, mask) ((escr) |= (((mask) & 0xffff) << 9)) -#define ESCR_READ(escr,high,ev,i) do {rdmsr(ev->bindings[(i)].escr_address, (escr), (high));} while (0) -#define ESCR_WRITE(escr,high,ev,i) do {wrmsr(ev->bindings[(i)].escr_address, (escr), (high));} while (0) +#define ESCR_READ(msr_content,ev,i) (msr_content) = rdmsr(ev->bindings[(i)].escr_address) +#define ESCR_WRITE(msr_content,ev,i) do {wrmsr(ev->bindings[(i)].escr_address, msr_content);} while (0) #define CCCR_RESERVED_BITS 0x38030FFF #define CCCR_CLEAR(cccr) ((cccr) &= CCCR_RESERVED_BITS) @@ -368,13 +368,13 @@ static const struct p4_event_binding p4_ #define CCCR_SET_PMI_OVF_1(cccr) ((cccr) |= (1<<27)) #define CCCR_SET_ENABLE(cccr) ((cccr) |= (1<<12)) #define CCCR_SET_DISABLE(cccr) ((cccr) &= ~(1<<12)) -#define CCCR_READ(low, high, i) do {rdmsr(p4_counters[(i)].cccr_address, (low), (high));} while (0) -#define CCCR_WRITE(low, high, i) do {wrmsr(p4_counters[(i)].cccr_address, (low), (high));} while (0) +#define CCCR_READ(msr_content, i) (msr_content) = rdmsr(p4_counters[(i)].cccr_address) +#define CCCR_WRITE(msr_content, i) do {wrmsr(p4_counters[(i)].cccr_address, (msr_content));} while (0) #define CCCR_OVF_P(cccr) ((cccr) & (1U<<31)) #define CCCR_CLEAR_OVF(cccr) ((cccr) &= (~(1U<<31))) -#define CTR_READ(l,h,i) do {rdmsr(p4_counters[(i)].counter_address, (l), (h));} while (0) -#define CTR_WRITE(l,i) do {wrmsr(p4_counters[(i)].counter_address, -(u32)(l), -1);} while (0) +#define CTR_READ(msr_content,i) (msr_content) = rdmsr(p4_counters[(i)].counter_address) +#define CTR_WRITE(l,i) do {wrmsr(p4_counters[(i)].counter_address, -(u64)(l));} while (0) #define CTR_OVERFLOW_P(ctr) (!((ctr) & 0x80000000)) @@ -481,9 +481,8 @@ static void pmc_setup_one_p4_counter(uns { int i; int const maxbind = 2; - unsigned int cccr = 0; - unsigned int escr = 0; - unsigned int high = 0; + uint64_t cccr = 0; + uint64_t escr = 0; unsigned int counter_bit; const struct p4_event_binding *ev = NULL; unsigned int stag; @@ -507,7 +506,7 @@ static void pmc_setup_one_p4_counter(uns if (ev->bindings[i].virt_counter & counter_bit) { /* modify ESCR */ - ESCR_READ(escr, high, ev, i); + ESCR_READ(escr, ev, i); ESCR_CLEAR(escr); if (stag == 0) { ESCR_SET_USR_0(escr, counter_config[ctr].user); @@ -518,10 +517,10 @@ static void pmc_setup_one_p4_counter(uns } ESCR_SET_EVENT_SELECT(escr, ev->event_select); ESCR_SET_EVENT_MASK(escr, counter_config[ctr].unit_mask); - ESCR_WRITE(escr, high, ev, i); + ESCR_WRITE(escr, ev, i); /* modify CCCR */ - CCCR_READ(cccr, high, VIRT_CTR(stag, ctr)); + CCCR_READ(cccr, VIRT_CTR(stag, ctr)); CCCR_CLEAR(cccr); CCCR_SET_REQUIRED_BITS(cccr); CCCR_SET_ESCR_SELECT(cccr, ev->escr_select); @@ -530,7 +529,7 @@ static void pmc_setup_one_p4_counter(uns } else { CCCR_SET_PMI_OVF_1(cccr); } - CCCR_WRITE(cccr, high, VIRT_CTR(stag, ctr)); + CCCR_WRITE(cccr, VIRT_CTR(stag, ctr)); return; } } @@ -544,68 +543,68 @@ static void pmc_setup_one_p4_counter(uns static void p4_setup_ctrs(struct op_msrs const * const msrs) { unsigned int i; - unsigned int low, high; + uint64_t msr_content; unsigned int addr; unsigned int stag; stag = get_stagger(); - 
rdmsr(MSR_IA32_MISC_ENABLE, low, high); - if (! MISC_PMC_ENABLED_P(low)) { + msr_content = rdmsr(MSR_IA32_MISC_ENABLE); + if (! MISC_PMC_ENABLED_P(msr_content)) { printk(KERN_ERR "oprofile: P4 PMC not available\n"); return; } /* clear the cccrs we will use */ for (i = 0 ; i < num_counters ; i++) { - rdmsr(p4_counters[VIRT_CTR(stag, i)].cccr_address, low, high); - CCCR_CLEAR(low); - CCCR_SET_REQUIRED_BITS(low); - wrmsr(p4_counters[VIRT_CTR(stag, i)].cccr_address, low, high); + msr_content = rdmsr(p4_counters[VIRT_CTR(stag, i)].cccr_address); + CCCR_CLEAR(msr_content); + CCCR_SET_REQUIRED_BITS(msr_content); + wrmsr(p4_counters[VIRT_CTR(stag, i)].cccr_address, msr_content); } /* clear cccrs outside our concern */ for (i = stag ; i < NUM_UNUSED_CCCRS ; i += addr_increment()) { - rdmsr(p4_unused_cccr[i], low, high); - CCCR_CLEAR(low); - CCCR_SET_REQUIRED_BITS(low); - wrmsr(p4_unused_cccr[i], low, high); + msr_content = rdmsr(p4_unused_cccr[i]); + CCCR_CLEAR(msr_content); + CCCR_SET_REQUIRED_BITS(msr_content); + wrmsr(p4_unused_cccr[i], msr_content); } /* clear all escrs (including those outside our concern) */ for (addr = MSR_P4_BSU_ESCR0 + stag; addr < MSR_P4_IQ_ESCR0; addr += addr_increment()) { - wrmsr(addr, 0, 0); + wrmsr(addr, 0); } /* On older models clear also MSR_P4_IQ_ESCR0/1 */ if (boot_cpu_data.x86_model < 0x3) { - wrmsr(MSR_P4_IQ_ESCR0, 0, 0); - wrmsr(MSR_P4_IQ_ESCR1, 0, 0); + wrmsr(MSR_P4_IQ_ESCR0, 0); + wrmsr(MSR_P4_IQ_ESCR1, 0); } for (addr = MSR_P4_RAT_ESCR0 + stag; addr <= MSR_P4_SSU_ESCR0; ++i, addr += addr_increment()) { - wrmsr(addr, 0, 0); + wrmsr(addr, 0); } for (addr = MSR_P4_MS_ESCR0 + stag; addr <= MSR_P4_TC_ESCR1; addr += addr_increment()){ - wrmsr(addr, 0, 0); + wrmsr(addr, 0); } for (addr = MSR_P4_IX_ESCR0 + stag; addr <= MSR_P4_CRU_ESCR3; addr += addr_increment()){ - wrmsr(addr, 0, 0); + wrmsr(addr, 0); } if (num_counters == NUM_COUNTERS_NON_HT) { - wrmsr(MSR_P4_CRU_ESCR4, 0, 0); - wrmsr(MSR_P4_CRU_ESCR5, 0, 0); + wrmsr(MSR_P4_CRU_ESCR4, 0); + wrmsr(MSR_P4_CRU_ESCR5, 0); } else if (stag == 0) { - wrmsr(MSR_P4_CRU_ESCR4, 0, 0); + wrmsr(MSR_P4_CRU_ESCR4, 0); } else { - wrmsr(MSR_P4_CRU_ESCR5, 0, 0); + wrmsr(MSR_P4_CRU_ESCR5, 0); } /* setup all counters */ @@ -624,7 +623,8 @@ static int p4_check_ctrs(unsigned int co struct op_msrs const * const msrs, struct cpu_user_regs * const regs) { - unsigned long ctr, low, high, stag, real; + unsigned long ctr, stag, real; + uint64_t msr_content; int i; int ovf = 0; unsigned long eip = regs->eip; @@ -656,13 +656,13 @@ static int p4_check_ctrs(unsigned int co real = VIRT_CTR(stag, i); - CCCR_READ(low, high, real); - CTR_READ(ctr, high, real); - if (CCCR_OVF_P(low) || CTR_OVERFLOW_P(ctr)) { + CCCR_READ(msr_content, real); + CTR_READ(ctr, real); + if (CCCR_OVF_P(msr_content) || CTR_OVERFLOW_P(ctr)) { xenoprof_log_event(current, regs, eip, mode, i); CTR_WRITE(reset_value[i], real); - CCCR_CLEAR_OVF(low); - CCCR_WRITE(low, high, real); + CCCR_CLEAR_OVF(msr_content); + CCCR_WRITE(msr_content, real); CTR_WRITE(reset_value[i], real); ovf = 1; } @@ -677,7 +677,8 @@ static int p4_check_ctrs(unsigned int co static void p4_start(struct op_msrs const * const msrs) { - unsigned int low, high, stag; + unsigned int stag; + uint64_t msr_content; int i; stag = get_stagger(); @@ -685,24 +686,25 @@ static void p4_start(struct op_msrs cons for (i = 0; i < num_counters; ++i) { if (!reset_value[i]) continue; - CCCR_READ(low, high, VIRT_CTR(stag, i)); - CCCR_SET_ENABLE(low); - CCCR_WRITE(low, high, VIRT_CTR(stag, i)); + CCCR_READ(msr_content, 
VIRT_CTR(stag, i)); + CCCR_SET_ENABLE(msr_content); + CCCR_WRITE(msr_content, VIRT_CTR(stag, i)); } } static void p4_stop(struct op_msrs const * const msrs) { - unsigned int low, high, stag; + unsigned int stag; + uint64_t msr_content; int i; stag = get_stagger(); for (i = 0; i < num_counters; ++i) { - CCCR_READ(low, high, VIRT_CTR(stag, i)); - CCCR_SET_DISABLE(low); - CCCR_WRITE(low, high, VIRT_CTR(stag, i)); + CCCR_READ(msr_content, VIRT_CTR(stag, i)); + CCCR_SET_DISABLE(msr_content); + CCCR_WRITE(msr_content, VIRT_CTR(stag, i)); } } diff -r 7994e7c5991e xen/arch/x86/oprofile/op_model_ppro.c --- a/xen/arch/x86/oprofile/op_model_ppro.c Wed May 26 10:11:50 2010 +0200 +++ b/xen/arch/x86/oprofile/op_model_ppro.c Wed May 26 18:32:03 2010 +0200 @@ -43,8 +43,8 @@ static int counter_width = 32; #define CTR_OVERFLOWED(n) (!((n) & (1ULL<<(counter_width-1)))) -#define CTRL_READ(l,h,msrs,c) do {rdmsr((msrs->controls[(c)].addr), (l), (h));} while (0) -#define CTRL_WRITE(l,h,msrs,c) do {wrmsr((msrs->controls[(c)].addr), (l), (h));} while (0) +#define CTRL_READ(msr_content,msrs,c) (msr_content) = rdmsr((msrs->controls[(c)].addr)) +#define CTRL_WRITE(msr_content,msrs,c) do {wrmsr((msrs->controls[(c)].addr), (msr_content));} while (0) #define CTRL_SET_ACTIVE(n) (n |= (1<<22)) #define CTRL_SET_INACTIVE(n) (n &= ~(1<<22)) #define CTRL_CLEAR(x) (x &= (1<<21)) @@ -71,7 +71,7 @@ static void ppro_fill_in_addresses(struc static void ppro_setup_ctrs(struct op_msrs const * const msrs) { - unsigned int low, high; + uint64_t msr_content; int i; if (cpu_has_arch_perfmon) { @@ -93,30 +93,30 @@ static void ppro_setup_ctrs(struct op_ms /* clear all counters */ for (i = 0 ; i < num_counters; ++i) { - CTRL_READ(low, high, msrs, i); - CTRL_CLEAR(low); - CTRL_WRITE(low, high, msrs, i); + CTRL_READ(msr_content, msrs, i); + CTRL_CLEAR(msr_content); + CTRL_WRITE(msr_content, msrs, i); } /* avoid a false detection of ctr overflows in NMI handler */ for (i = 0; i < num_counters; ++i) - wrmsrl(msrs->counters[i].addr, -1LL); + wrmsr(msrs->counters[i].addr, -1LL); /* enable active counters */ for (i = 0; i < num_counters; ++i) { if (counter_config[i].enabled) { reset_value[i] = counter_config[i].count; - wrmsrl(msrs->counters[i].addr, -reset_value[i]); + wrmsr(msrs->counters[i].addr, -reset_value[i]); - CTRL_READ(low, high, msrs, i); - CTRL_CLEAR(low); - CTRL_SET_ENABLE(low); - CTRL_SET_USR(low, counter_config[i].user); - CTRL_SET_KERN(low, counter_config[i].kernel); - CTRL_SET_UM(low, counter_config[i].unit_mask); - CTRL_SET_EVENT(low, counter_config[i].event); - CTRL_WRITE(low, high, msrs, i); + CTRL_READ(msr_content, msrs, i); + CTRL_CLEAR(msr_content); + CTRL_SET_ENABLE(msr_content); + CTRL_SET_USR(msr_content, counter_config[i].user); + CTRL_SET_KERN(msr_content, counter_config[i].kernel); + CTRL_SET_UM(msr_content, counter_config[i].unit_mask); + CTRL_SET_EVENT(msr_content, counter_config[i].event); + CTRL_WRITE(msr_content, msrs, i); } else { reset_value[i] = 0; } @@ -137,10 +137,10 @@ static int ppro_check_ctrs(unsigned int for (i = 0 ; i < num_counters; ++i) { if (!reset_value[i]) continue; - rdmsrl(msrs->counters[i].addr, val); + val = rdmsr(msrs->counters[i].addr); if (CTR_OVERFLOWED(val)) { xenoprof_log_event(current, regs, eip, mode, i); - wrmsrl(msrs->counters[i].addr, -reset_value[i]); + wrmsr(msrs->counters[i].addr, -reset_value[i]); if ( is_passive(current->domain) && (mode != 2) && (vcpu_vpmu(current)->flags & PASSIVE_DOMAIN_ALLOCATED) ) { @@ -166,38 +166,38 @@ static int ppro_check_ctrs(unsigned int static void 
ppro_start(struct op_msrs const * const msrs) { - unsigned int low,high; + uint64_t msr_content; int i; for (i = 0; i < num_counters; ++i) { if (reset_value[i]) { - CTRL_READ(low, high, msrs, i); - CTRL_SET_ACTIVE(low); - CTRL_WRITE(low, high, msrs, i); + CTRL_READ(msr_content, msrs, i); + CTRL_SET_ACTIVE(msr_content); + CTRL_WRITE(msr_content, msrs, i); } } /* Global Control MSR is enabled by default when system power on. * However, this may not hold true when xenoprof starts to run. */ if ( ppro_has_global_ctrl ) - wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, (1<cs, v, regs, &code_base, &code_limit, &ar, @@ -2201,32 +2200,32 @@ static int emulate_privileged_op(struct break; case 0x30: /* WRMSR */ { - u32 eax = regs->eax; - u32 edx = regs->edx; - u64 val = ((u64)edx << 32) | eax; + uint32_t eax = regs->eax; + uint32_t edx = regs->edx; + msr_content = ((uint64_t)regs->edx << 32) | regs->eax; switch ( (u32)regs->ecx ) { #ifdef CONFIG_X86_64 case MSR_FS_BASE: if ( is_pv_32on64_vcpu(v) ) goto fail; - if ( wrmsr_safe(MSR_FS_BASE, eax, edx) ) + if ( wrmsr_safe(MSR_FS_BASE, msr_content) ) goto fail; - v->arch.guest_context.fs_base = val; + v->arch.guest_context.fs_base = msr_content; break; case MSR_GS_BASE: if ( is_pv_32on64_vcpu(v) ) goto fail; - if ( wrmsr_safe(MSR_GS_BASE, eax, edx) ) + if ( wrmsr_safe(MSR_GS_BASE, msr_content) ) goto fail; - v->arch.guest_context.gs_base_kernel = val; + v->arch.guest_context.gs_base_kernel = msr_content; break; case MSR_SHADOW_GS_BASE: if ( is_pv_32on64_vcpu(v) ) goto fail; - if ( wrmsr_safe(MSR_SHADOW_GS_BASE, eax, edx) ) + if ( wrmsr_safe(MSR_SHADOW_GS_BASE, msr_content) ) goto fail; - v->arch.guest_context.gs_base_user = val; + v->arch.guest_context.gs_base_user = msr_content; break; #endif case MSR_K7_FID_VID_STATUS: @@ -2246,7 +2245,7 @@ static int emulate_privileged_op(struct goto fail; if ( !is_cpufreq_controller(v->domain) ) break; - if ( wrmsr_safe(regs->ecx, eax, edx) != 0 ) + if ( wrmsr_safe(regs->ecx, msr_content) != 0 ) goto fail; break; case MSR_AMD64_NB_CFG: @@ -2255,11 +2254,11 @@ static int emulate_privileged_op(struct goto fail; if ( !IS_PRIV(v->domain) ) break; - if ( (rdmsr_safe(MSR_AMD64_NB_CFG, l, h) != 0) || - (eax != l) || - ((edx ^ h) & ~(1 << (AMD64_NB_CFG_CF8_EXT_ENABLE_BIT - 32))) ) + if ( (rdmsr_safe(MSR_AMD64_NB_CFG, val) != 0) || + (eax != (uint32_t)val) || + ((edx ^ (val >> 32)) & ~(1 << (AMD64_NB_CFG_CF8_EXT_ENABLE_BIT - 32))) ) goto invalid; - if ( wrmsr_safe(MSR_AMD64_NB_CFG, eax, edx) != 0 ) + if ( wrmsr_safe(MSR_AMD64_NB_CFG, msr_content) != 0 ) goto fail; break; case MSR_FAM10H_MMIO_CONF_BASE: @@ -2268,15 +2267,15 @@ static int emulate_privileged_op(struct goto fail; if ( !IS_PRIV(v->domain) ) break; - if ( (rdmsr_safe(MSR_FAM10H_MMIO_CONF_BASE, l, h) != 0) || - (((((u64)h << 32) | l) ^ val) & + if ( (rdmsr_safe(MSR_FAM10H_MMIO_CONF_BASE, val) != 0) || + ((val ^ msr_content) & ~( FAM10H_MMIO_CONF_ENABLE | (FAM10H_MMIO_CONF_BUSRANGE_MASK << FAM10H_MMIO_CONF_BUSRANGE_SHIFT) | ((u64)FAM10H_MMIO_CONF_BASE_MASK << FAM10H_MMIO_CONF_BASE_SHIFT))) ) goto invalid; - if ( wrmsr_safe(MSR_FAM10H_MMIO_CONF_BASE, eax, edx) != 0 ) + if ( wrmsr_safe(MSR_FAM10H_MMIO_CONF_BASE, msr_content) != 0 ) goto fail; break; case MSR_IA32_MPERF: @@ -2286,7 +2285,7 @@ static int emulate_privileged_op(struct goto fail; if ( !is_cpufreq_controller(v->domain) ) break; - if ( wrmsr_safe(regs->ecx, eax, edx) != 0 ) + if ( wrmsr_safe(regs->ecx, msr_content) != 0 ) goto fail; break; case MSR_IA32_THERM_CONTROL: @@ -2294,25 +2293,25 @@ static int 
emulate_privileged_op(struct goto fail; if ( (v->domain->domain_id != 0) || !v->domain->is_pinned ) break; - if ( wrmsr_safe(regs->ecx, eax, edx) != 0 ) + if ( wrmsr_safe(regs->ecx, msr_content) != 0 ) goto fail; break; default: - if ( wrmsr_hypervisor_regs(regs->ecx, val) ) + if ( wrmsr_hypervisor_regs(regs->ecx, msr_content) ) break; - rc = vmce_wrmsr(regs->ecx, val); + rc = vmce_wrmsr(regs->ecx, msr_content); if ( rc < 0 ) goto fail; if ( rc ) break; - if ( (rdmsr_safe(regs->ecx, l, h) != 0) || - (eax != l) || (edx != h) ) + if ( (rdmsr_safe(regs->ecx, val) != 0) || + (eax != (uint32_t)val) || (edx != (val>>32)) ) invalid: gdprintk(XENLOG_WARNING, "Domain attempted WRMSR %p from " - "%08x:%08x to %08x:%08x.\n", - _p(regs->ecx), h, l, edx, eax); + "%16"PRIx64" to %16"PRIx64".\n", + _p(regs->ecx), val, msr_content); break; } break; @@ -2371,12 +2370,16 @@ static int emulate_privileged_op(struct regs->eax = regs->edx = 0; break; } - if ( rdmsr_safe(regs->ecx, regs->eax, regs->edx) != 0 ) + if ( rdmsr_safe(regs->ecx, msr_content) != 0 ) goto fail; + regs->eax = (uint32_t)msr_content; + regs->edx = (uint32_t)(msr_content >> 32); break; case MSR_IA32_MISC_ENABLE: - if ( rdmsr_safe(regs->ecx, regs->eax, regs->edx) ) + if ( rdmsr_safe(regs->ecx, msr_content) != 0 ) goto fail; + regs->eax = (uint32_t)msr_content; + regs->edx = (uint32_t)(msr_content >> 32); regs->eax &= ~(MSR_IA32_MISC_ENABLE_PERF_AVAIL | MSR_IA32_MISC_ENABLE_MONITOR_ENABLE); regs->eax |= MSR_IA32_MISC_ENABLE_BTS_UNAVAIL | @@ -2403,8 +2406,10 @@ static int emulate_privileged_op(struct /* Everyone can read the MSR space. */ /* gdprintk(XENLOG_WARNING,"Domain attempted RDMSR %p.\n", _p(regs->ecx));*/ - if ( rdmsr_safe(regs->ecx, regs->eax, regs->edx) ) + if ( rdmsr_safe(regs->ecx, msr_content) != 0 ) goto fail; + regs->eax = (uint32_t)msr_content; + regs->edx = (uint32_t)(msr_content >> 32); break; } break; diff -r 7994e7c5991e xen/arch/x86/x86_32/traps.c --- a/xen/arch/x86/x86_32/traps.c Wed May 26 10:11:50 2010 +0200 +++ b/xen/arch/x86/x86_32/traps.c Wed May 26 18:32:03 2010 +0200 @@ -124,10 +124,11 @@ void show_registers(struct cpu_user_regs if ( this_cpu(ler_msr) && !guest_mode(regs) ) { - u32 from, to, hi; - rdmsr(this_cpu(ler_msr), from, hi); - rdmsr(this_cpu(ler_msr) + 1, to, hi); - printk("ler: %08x -> %08x\n", from, to); + uint64_t msr_content_from, msr_content_to; + msr_content_from = rdmsr(this_cpu(ler_msr)); + msr_content_to = rdmsr(this_cpu(ler_msr) + 1); + printk("ler: %16"PRIx64" -> %16"PRIx64"\n", + msr_content_from, msr_content_to); } } @@ -403,8 +404,8 @@ static void do_update_sysenter(void *inf { xen_callback_t *address = info; - wrmsr(MSR_IA32_SYSENTER_CS, address->cs, 0); - wrmsr(MSR_IA32_SYSENTER_EIP, address->eip, 0); + wrmsr(MSR_IA32_SYSENTER_CS, address->cs); + wrmsr(MSR_IA32_SYSENTER_EIP, address->eip); } #endif diff -r 7994e7c5991e xen/arch/x86/x86_64/mm.c --- a/xen/arch/x86/x86_64/mm.c Wed May 26 10:11:50 2010 +0200 +++ b/xen/arch/x86/x86_64/mm.c Wed May 26 18:32:03 2010 +0200 @@ -1079,21 +1079,21 @@ long do_set_segment_base(unsigned int wh switch ( which ) { case SEGBASE_FS: - if ( wrmsr_safe(MSR_FS_BASE, base, base>>32) ) + if ( wrmsr_safe(MSR_FS_BASE, base) ) ret = -EFAULT; else v->arch.guest_context.fs_base = base; break; case SEGBASE_GS_USER: - if ( wrmsr_safe(MSR_SHADOW_GS_BASE, base, base>>32) ) + if ( wrmsr_safe(MSR_SHADOW_GS_BASE, base) ) ret = -EFAULT; else v->arch.guest_context.gs_base_user = base; break; case SEGBASE_GS_KERNEL: - if ( wrmsr_safe(MSR_GS_BASE, base, base>>32) ) + if ( 
             ret = -EFAULT;
         else
             v->arch.guest_context.gs_base_kernel = base;

diff -r 7994e7c5991e xen/arch/x86/x86_64/mmconfig-shared.c
--- a/xen/arch/x86/x86_64/mmconfig-shared.c Wed May 26 10:11:50 2010 +0200
+++ b/xen/arch/x86/x86_64/mmconfig-shared.c Wed May 26 18:32:03 2010 +0200
@@ -125,8 +125,8 @@ static const char __init *pci_mmcfg_inte

 static const char __init *pci_mmcfg_amd_fam10h(void)
 {
-    u32 low, high, address;
-    u64 base, msr;
+    u32 address;
+    uint64_t base, msr_content;
     int i;
     unsigned segnbits = 0, busnbits;

@@ -134,20 +134,17 @@ static const char __init *pci_mmcfg_amd_
         return NULL;

     address = MSR_FAM10H_MMIO_CONF_BASE;
-    if (rdmsr_safe(address, low, high))
+    if (rdmsr_safe(address, msr_content))
         return NULL;

-    msr = high;
-    msr <<= 32;
-    msr |= low;
-
     /* mmconfig is not enable */
-    if (!(msr & FAM10H_MMIO_CONF_ENABLE))
+    if (!(msr_content & FAM10H_MMIO_CONF_ENABLE))
         return NULL;

-    base = msr & (FAM10H_MMIO_CONF_BASE_MASK<> FAM10H_MMIO_CONF_BUSRANGE_SHIFT) &
+    busnbits = (msr_content >> FAM10H_MMIO_CONF_BUSRANGE_SHIFT) &
               FAM10H_MMIO_CONF_BUSRANGE_MASK;

     /*
diff -r 7994e7c5991e xen/arch/x86/x86_64/traps.c
--- a/xen/arch/x86/x86_64/traps.c Wed May 26 10:11:50 2010 +0200
+++ b/xen/arch/x86/x86_64/traps.c Wed May 26 18:32:03 2010 +0200
@@ -138,8 +138,8 @@ void show_registers(struct cpu_user_regs
     if ( this_cpu(ler_msr) && !guest_mode(regs) )
     {
         u64 from, to;
-        rdmsrl(this_cpu(ler_msr), from);
-        rdmsrl(this_cpu(ler_msr) + 1, to);
+        from = rdmsr(this_cpu(ler_msr));
+        to = rdmsr(this_cpu(ler_msr) + 1);
         printk("ler: %016lx -> %016lx\n", from, to);
     }
 }

@@ -395,28 +395,27 @@ void __devinit subarch_percpu_traps_init
     /* Trampoline for SYSCALL entry from long mode. */
     stack = &stack[IST_MAX * PAGE_SIZE]; /* Skip the IST stacks. */
-    wrmsrl(MSR_LSTAR, (unsigned long)stack);
+    wrmsr(MSR_LSTAR, (unsigned long)stack);
     stack += write_stack_trampoline(stack, stack_bottom, FLAT_KERNEL_CS64);

     if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL )
     {
         /* SYSENTER entry. */
-        wrmsrl(MSR_IA32_SYSENTER_ESP, (unsigned long)stack_bottom);
-        wrmsrl(MSR_IA32_SYSENTER_EIP, (unsigned long)sysenter_entry);
-        wrmsr(MSR_IA32_SYSENTER_CS, __HYPERVISOR_CS, 0);
+        wrmsr(MSR_IA32_SYSENTER_ESP, (unsigned long)stack_bottom);
+        wrmsr(MSR_IA32_SYSENTER_EIP, (unsigned long)sysenter_entry);
+        wrmsr(MSR_IA32_SYSENTER_CS, __HYPERVISOR_CS);
     }

     /* Trampoline for SYSCALL entry from compatibility mode. */
     stack = (char *)L1_CACHE_ALIGN((unsigned long)stack);
-    wrmsrl(MSR_CSTAR, (unsigned long)stack);
+    wrmsr(MSR_CSTAR, (unsigned long)stack);
     stack += write_stack_trampoline(stack, stack_bottom, FLAT_USER_CS32);

     /* Common SYSCALL parameters. */
-    wrmsr(MSR_STAR, 0, (FLAT_RING3_CS32<<16) | __HYPERVISOR_CS);
+    wrmsr(MSR_STAR, ((uint64_t)(FLAT_RING3_CS32<<16) | __HYPERVISOR_CS) << 32);
     wrmsr(MSR_SYSCALL_MASK, X86_EFLAGS_VM|X86_EFLAGS_RF|X86_EFLAGS_NT|
-          X86_EFLAGS_DF|X86_EFLAGS_IF|X86_EFLAGS_TF,
-          0U);
+          X86_EFLAGS_DF|X86_EFLAGS_IF|X86_EFLAGS_TF);
 }

 void init_int80_direct_trap(struct vcpu *v)

diff -r 7994e7c5991e xen/include/asm-x86/apic.h
--- a/xen/include/asm-x86/apic.h Wed May 26 10:11:50 2010 +0200
+++ b/xen/include/asm-x86/apic.h Wed May 26 18:32:03 2010 +0200
@@ -4,7 +4,7 @@
 #include
 #include
 #include
-#include
+#include

 #define Dprintk(x...)

@@ -76,55 +76,44 @@ static __inline u32 apic_mem_read(unsign
  * access the 64-bit ICR register.
  */
-static __inline void apic_wrmsr(unsigned long reg, u32 low, u32 high)
+static __inline void apic_wrmsr(unsigned long reg, uint64_t msr_content)
 {
     if (reg == APIC_DFR || reg == APIC_ID || reg == APIC_LDR ||
         reg == APIC_LVR)
         return;

-    __asm__ __volatile__("wrmsr"
-            : /* no outputs */
-            : "c" (APIC_MSR_BASE + (reg >> 4)), "a" (low), "d" (high));
+    wrmsr(APIC_MSR_BASE + (reg >> 4), msr_content);
 }

-static __inline void apic_rdmsr(unsigned long reg, u32 *low, u32 *high)
+static __inline uint64_t apic_rdmsr(unsigned long reg)
 {
     if (reg == APIC_DFR)
-    {
-        *low = *high = -1u;
-        return;
-    }
-    __asm__ __volatile__("rdmsr"
-            : "=a" (*low), "=d" (*high)
-            : "c" (APIC_MSR_BASE + (reg >> 4)));
+        return -1u;
+    return rdmsr(APIC_MSR_BASE + (reg >> 4));
 }

-static __inline void apic_write(unsigned long reg, u32 v)
+static __inline void apic_write(unsigned long reg, uint32_t v)
 {
-    if ( x2apic_enabled )
-        apic_wrmsr(reg, v, 0);
+        apic_wrmsr(reg, v);
     else
         apic_mem_write(reg, v);
 }

-static __inline void apic_write_atomic(unsigned long reg, u32 v)
+static __inline void apic_write_atomic(unsigned long reg, uint32_t v)
 {
     if ( x2apic_enabled )
-        apic_wrmsr(reg, v, 0);
+        apic_wrmsr(reg, v);
     else
         apic_mem_write_atomic(reg, v);
 }

-static __inline u32 apic_read(unsigned long reg)
+static __inline uint32_t apic_read(unsigned long reg)
 {
-    u32 lo, hi;
-    if ( x2apic_enabled )
-        apic_rdmsr(reg, &lo, &hi);
+        return apic_rdmsr(reg);
     else
-        lo = apic_mem_read(reg);
-    return lo;
+        return apic_mem_read(reg);
 }

 static __inline u64 apic_icr_read(void)
@@ -132,7 +121,7 @@ static __inline u64 apic_icr_read(void)
     u32 lo, hi;

     if ( x2apic_enabled )
-        apic_rdmsr(APIC_ICR, &lo, &hi);
+        return apic_rdmsr(APIC_ICR);
     else
     {
         lo = apic_mem_read(APIC_ICR);
@@ -145,7 +134,7 @@ static __inline void apic_icr_write(u32 low, u32 dest)
 {
     if ( x2apic_enabled )
-        apic_wrmsr(APIC_ICR, low, dest);
+        apic_wrmsr(APIC_ICR, low | ((uint64_t)(dest) << 32));
     else
     {
         apic_mem_write(APIC_ICR2, dest << 24);

diff -r 7994e7c5991e xen/include/asm-x86/hvm/hvm.h
--- a/xen/include/asm-x86/hvm/hvm.h Wed May 26 10:11:50 2010 +0200
+++ b/xen/include/asm-x86/hvm/hvm.h Wed May 26 18:32:03 2010 +0200
@@ -130,8 +130,8 @@ struct hvm_function_table {
                           unsigned int *ecx, unsigned int *edx);
     void (*wbinvd_intercept)(void);
     void (*fpu_dirty_intercept)(void);
-    int (*msr_read_intercept)(struct cpu_user_regs *regs);
-    int (*msr_write_intercept)(struct cpu_user_regs *regs);
+    int (*msr_read_intercept)(unsigned int msr, uint64_t *msr_val);
+    int (*msr_write_intercept)(unsigned int msr, uint64_t msr_content);
     void (*invlpg_intercept)(unsigned long vaddr);
     void (*set_uc_mode)(struct vcpu *v);
     void (*set_info_guest)(struct vcpu *v);

diff -r 7994e7c5991e xen/include/asm-x86/hvm/support.h
--- a/xen/include/asm-x86/hvm/support.h Wed May 26 10:11:50 2010 +0200
+++ b/xen/include/asm-x86/hvm/support.h Wed May 26 18:32:03 2010 +0200
@@ -133,7 +133,7 @@ int hvm_set_efer(uint64_t value);
 int hvm_set_cr0(unsigned long value);
 int hvm_set_cr3(unsigned long value);
 int hvm_set_cr4(unsigned long value);
-int hvm_msr_read_intercept(struct cpu_user_regs *regs);
-int hvm_msr_write_intercept(struct cpu_user_regs *regs);
+int hvm_msr_read_intercept(unsigned int msr, uint64_t *msr_val);
+int hvm_msr_write_intercept(unsigned int msr, uint64_t msr_content);

 #endif /* __ASM_X86_HVM_SUPPORT_H__ */

diff -r 7994e7c5991e xen/include/asm-x86/hvm/vpmu.h
--- a/xen/include/asm-x86/hvm/vpmu.h Wed May 26 10:11:50 2010 +0200
+++ b/xen/include/asm-x86/hvm/vpmu.h Wed May 26 18:32:03 2010 +0200
@@ -47,8 +47,8 @@ struct msr_load_store_entry {

 /* Arch specific operations shared by all vpmus */
 struct arch_vpmu_ops {
-    int (*do_wrmsr)(struct cpu_user_regs *regs);
-    int (*do_rdmsr)(struct cpu_user_regs *regs);
+    int (*do_wrmsr)(unsigned int msr, uint64_t msr_content);
+    int (*do_rdmsr)(unsigned int msr, uint64_t *msr_val);
     int (*do_interrupt)(struct cpu_user_regs *regs);
     void (*arch_vpmu_initialise)(struct vcpu *v);
     void (*arch_vpmu_destroy)(struct vcpu *v);
@@ -66,8 +66,8 @@ struct vpmu_struct {
 #define VPMU_CONTEXT_LOADED         0x2
 #define VPMU_RUNNING                0x4
 #define PASSIVE_DOMAIN_ALLOCATED    0x8
-int vpmu_do_wrmsr(struct cpu_user_regs *regs);
-int vpmu_do_rdmsr(struct cpu_user_regs *regs);
+int vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content);
+int vpmu_do_rdmsr(unsigned int msr, uint64_t *msr_val);
 int vpmu_do_interrupt(struct cpu_user_regs *regs);
 void vpmu_initialise(struct vcpu *v);
 void vpmu_destroy(struct vcpu *v);

diff -r 7994e7c5991e xen/include/asm-x86/msr.h
--- a/xen/include/asm-x86/msr.h Wed May 26 10:11:50 2010 +0200
+++ b/xen/include/asm-x86/msr.h Wed May 26 18:32:03 2010 +0200
@@ -5,37 +5,34 @@

 #ifndef __ASSEMBLY__

-#include
+#include
 #include
+#include

-#define rdmsr(msr,val1,val2) \
-     __asm__ __volatile__("rdmsr" \
-                          : "=a" (val1), "=d" (val2) \
-                          : "c" (msr))
+static inline uint64_t rdmsr(unsigned int msr)
+{
+    unsigned long a__, b__;

-#define rdmsrl(msr,val) do { unsigned long a__,b__; \
-       __asm__ __volatile__("rdmsr" \
-                            : "=a" (a__), "=d" (b__) \
-                            : "c" (msr)); \
-       val = a__ | ((u64)b__<<32); \
-} while(0);
+    __asm__ __volatile__("rdmsr"
+                         : "=a" (a__), "=d" (b__)
+                         : "c" (msr));
+    return a__ | ((uint64_t)b__<<32);
+}

-#define wrmsr(msr,val1,val2) \
-     __asm__ __volatile__("wrmsr" \
+static inline void wrmsr(unsigned int msr, uint64_t val)
+{
+    uint32_t lo, hi;
+    lo = (uint32_t)val;
+    hi = (uint32_t)(val >> 32);
+    __asm__ __volatile__("wrmsr" \
                           : /* no outputs */ \
-                          : "c" (msr), "a" (val1), "d" (val2))
-
-static inline void wrmsrl(unsigned int msr, __u64 val)
-{
-    __u32 lo, hi;
-    lo = (__u32)val;
-    hi = (__u32)(val >> 32);
-    wrmsr(msr, lo, hi);
+                          : "c" (msr), "a" (lo), "d" (hi));
 }

 /* rdmsr with exception handling */
-#define rdmsr_safe(msr,val1,val2) ({\
+#define rdmsr_safe(msr,val) ({\
     int _rc; \
+    uint32_t val1, val2; \
     __asm__ __volatile__( \
         "1: rdmsr\n2:\n" \
         ".section .fixup,\"ax\"\n" \
         "3: movl %5,%0\n; jmp 2b\n" \
         ".previous\n" \
         ".section __ex_table,\"a\"\n" \
         "   "__FIXUP_ALIGN"\n" \
         "   "__FIXUP_WORD" 1b,3b\n" \
@@ -47,23 +44,30 @@ static inline void wrmsrl(unsigned int m
         ".previous\n" \
         : "=a" (val1), "=d" (val2), "=&r" (_rc) \
         : "c" (msr), "2" (0), "i" (-EFAULT)); \
+    val = val1 | ((uint64_t)val2 << 32); \
     _rc; })

 /* wrmsr with exception handling */
-#define wrmsr_safe(msr,val1,val2) ({\
-    int _rc; \
-    __asm__ __volatile__( \
-        "1: wrmsr\n2:\n" \
-        ".section .fixup,\"ax\"\n" \
-        "3: movl %5,%0\n; jmp 2b\n" \
-        ".previous\n" \
-        ".section __ex_table,\"a\"\n" \
-        "   "__FIXUP_ALIGN"\n" \
-        "   "__FIXUP_WORD" 1b,3b\n" \
-        ".previous\n" \
-        : "=&r" (_rc) \
-        : "c" (msr), "a" (val1), "d" (val2), "0" (0), "i" (-EFAULT)); \
-    _rc; })
+static inline int wrmsr_safe(unsigned int msr, uint64_t val)
+{
+    int _rc;
+    uint32_t lo, hi;
+    lo = (uint32_t)val;
+    hi = (uint32_t)(val >> 32);
+
+    __asm__ __volatile__(
+        "1: wrmsr\n2:\n"
+        ".section .fixup,\"ax\"\n"
+        "3: movl %5,%0\n; jmp 2b\n"
+        ".previous\n"
+        ".section __ex_table,\"a\"\n"
+        "   "__FIXUP_ALIGN"\n"
+        "   "__FIXUP_WORD" 1b,3b\n"
+        ".previous\n"
+        : "=&r" (_rc)
+        : "c" (msr), "a" (lo), "d" (hi), "0" (0), "i" (-EFAULT));
+    return _rc;
+}

 #define rdtsc(low,high) \
      __asm__ __volatile__("rdtsc" : "=a" (low), "=d" (high))

@@ -82,9 +86,9 @@ static inline void wrmsrl(unsigned int m
 } while(0)
 #endif

-#define write_tsc(val) wrmsrl(MSR_IA32_TSC, val)
+#define write_tsc(val) wrmsr(MSR_IA32_TSC, val)

-#define write_rdtscp_aux(val) wrmsr(MSR_TSC_AUX, (val), 0)
+#define write_rdtscp_aux(val) wrmsr(MSR_TSC_AUX, (val))

 #define rdpmc(counter,low,high) \
      __asm__ __volatile__("rdpmc" \
@@ -94,29 +98,20 @@ static inline void wrmsrl(unsigned int m

 DECLARE_PER_CPU(u64, efer);

-static inline u64 read_efer(void)
-{
-    return this_cpu(efer);
-}
+#define read_efer() this_cpu(efer)

-static inline void write_efer(u64 val)
-{
-    this_cpu(efer) = val;
-    wrmsrl(MSR_EFER, val);
-}
+#define write_efer(val) do { \
+    this_cpu(efer) = (val); \
+    wrmsr(MSR_EFER, (val)); \
+} while(0)

 DECLARE_PER_CPU(u32, ler_msr);

-static inline void ler_enable(void)
-{
-    u64 debugctl;
-
-    if ( !this_cpu(ler_msr) )
-        return;
-
-    rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
-    wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl | 1);
-}
+#define ler_enable() do { \
+    if ( !this_cpu(ler_msr) ) \
+        return; \
+    wrmsr(MSR_IA32_DEBUGCTLMSR, rdmsr(MSR_IA32_DEBUGCTLMSR) | 1); \
+} while(0)

 #endif /* !__ASSEMBLY__ */

diff -r 7994e7c5991e xen/include/asm-x86/mtrr.h
--- a/xen/include/asm-x86/mtrr.h Wed May 26 10:11:50 2010 +0200
+++ b/xen/include/asm-x86/mtrr.h Wed May 26 18:32:03 2010 +0200
@@ -35,10 +35,8 @@ enum {
 typedef u8 mtrr_type;

 struct mtrr_var_range {
-    u32 base_lo;
-    u32 base_hi;
-    u32 mask_lo;
-    u32 mask_hi;
+    uint64_t base;
+    uint64_t mask;
 };

 #define NUM_FIXED_RANGES 88

diff -r 7994e7c5991e xen/include/asm-x86/xenoprof.h
--- a/xen/include/asm-x86/xenoprof.h Wed May 26 10:11:50 2010 +0200
+++ b/xen/include/asm-x86/xenoprof.h Wed May 26 18:32:03 2010 +0200
@@ -64,8 +64,8 @@ void xenoprof_backtrace(
                 "xenoprof/x86 with autotranslated mode enabled" \
                 "isn't supported yet\n"); \
     } while (0)
-int passive_domain_do_rdmsr(struct cpu_user_regs *regs);
-int passive_domain_do_wrmsr(struct cpu_user_regs *regs);
+int passive_domain_do_rdmsr(unsigned int msr, uint64_t *msr_val);
+int passive_domain_do_wrmsr(unsigned int msr, uint64_t msr_content);

 void passive_domain_destroy(struct vcpu *v);

 #endif /* __ASM_X86_XENOPROF_H__ */

diff -r 7994e7c5991e xen/include/xen/cpuidle.h
--- a/xen/include/xen/cpuidle.h Wed May 26 10:11:50 2010 +0200
+++ b/xen/include/xen/cpuidle.h Wed May 26 18:32:03 2010 +0200
@@ -27,6 +27,8 @@
 #ifndef _XEN_CPUIDLE_H
 #define _XEN_CPUIDLE_H

+#include
+
 #define ACPI_PROCESSOR_MAX_POWER 8
 #define CPUIDLE_NAME_LEN         16
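
Illustration (not part of the patch): with the accessors now declared in xen/include/asm-x86/msr.h, callers stop juggling separate low/high halves and pass one 64-bit value. A minimal sketch of the new calling convention, assuming only the rdmsr()/wrmsr_safe() signatures introduced above:

    /* Read-modify-write of a whole MSR through the 64-bit interface. */
    uint64_t msr_content = rdmsr(MSR_IA32_DEBUGCTLMSR);   /* full 64-bit value */

    msr_content |= 1;                                      /* set bit 0 (LBR), as ler_enable() does */

    if ( wrmsr_safe(MSR_IA32_DEBUGCTLMSR, msr_content) != 0 )
        /* the write faulted; handle the error */;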