x86/MSR: introduce MSR access split/fold helpers

This is in preparation for eliminating the mis-naming of 64-bit fields
with 32-bit register names (eflags instead of rflags etc.). Use the
guaranteed 32-bit underscore-prefixed names for now where appropriate.

Signed-off-by: Jan Beulich

--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -3695,12 +3695,9 @@ static uint64_t _hvm_rdtsc_intercept(voi
 
 void hvm_rdtsc_intercept(struct cpu_user_regs *regs)
 {
-    uint64_t tsc = _hvm_rdtsc_intercept();
+    msr_split(regs, _hvm_rdtsc_intercept());
 
-    regs->eax = (uint32_t)tsc;
-    regs->edx = (uint32_t)(tsc >> 32);
-
-    HVMTRACE_2D(RDTSC, regs->eax, regs->edx);
+    HVMTRACE_2D(RDTSC, regs->_eax, regs->_edx);
 }
 
 int hvm_msr_read_intercept(unsigned int msr, uint64_t *msr_content)
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -1936,14 +1936,10 @@ static void svm_do_msr_access(struct cpu
 
         rc = hvm_msr_read_intercept(regs->_ecx, &msr_content);
         if ( rc == X86EMUL_OKAY )
-        {
-            regs->rax = (uint32_t)msr_content;
-            regs->rdx = (uint32_t)(msr_content >> 32);
-        }
+            msr_split(regs, msr_content);
     }
     else
-        rc = hvm_msr_write_intercept(regs->_ecx,
-                                     (regs->rdx << 32) | regs->_eax, 1);
+        rc = hvm_msr_write_intercept(regs->_ecx, msr_fold(regs), 1);
 
     if ( rc == X86EMUL_OKAY )
         __update_guest_eip(regs, inst_len);
@@ -2618,8 +2614,7 @@ void svm_vmexit_handler(struct cpu_user_
         if ( vmcb_get_cpl(vmcb) )
             hvm_inject_hw_exception(TRAP_gp_fault, 0);
         else if ( (inst_len = __get_instruction_length(v, INSTR_XSETBV)) &&
-                  hvm_handle_xsetbv(regs->ecx,
-                                    (regs->rdx << 32) | regs->_eax) == 0 )
+                  hvm_handle_xsetbv(regs->_ecx, msr_fold(regs)) == 0 )
             __update_guest_eip(regs, inst_len);
         break;
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -3626,22 +3626,18 @@ void vmx_vmexit_handler(struct cpu_user_
     case EXIT_REASON_MSR_READ:
     {
         uint64_t msr_content;
-        if ( hvm_msr_read_intercept(regs->ecx, &msr_content) == X86EMUL_OKAY )
+        if ( hvm_msr_read_intercept(regs->_ecx, &msr_content) == X86EMUL_OKAY )
         {
-            regs->eax = (uint32_t)msr_content;
-            regs->edx = (uint32_t)(msr_content >> 32);
+            msr_split(regs, msr_content);
             update_guest_eip(); /* Safe: RDMSR */
         }
         break;
     }
+
     case EXIT_REASON_MSR_WRITE:
-    {
-        uint64_t msr_content;
-        msr_content = ((uint64_t)regs->edx << 32) | (uint32_t)regs->eax;
-        if ( hvm_msr_write_intercept(regs->ecx, msr_content, 1) == X86EMUL_OKAY )
+        if ( hvm_msr_write_intercept(regs->_ecx, msr_fold(regs), 1) == X86EMUL_OKAY )
             update_guest_eip(); /* Safe: WRMSR */
         break;
-    }
 
     case EXIT_REASON_VMXOFF:
         if ( nvmx_handle_vmxoff(regs) == X86EMUL_OKAY )
@@ -3802,8 +3798,7 @@ void vmx_vmexit_handler(struct cpu_user_
         break;
 
     case EXIT_REASON_XSETBV:
-        if ( hvm_handle_xsetbv(regs->ecx,
-                               (regs->rdx << 32) | regs->_eax) == 0 )
+        if ( hvm_handle_xsetbv(regs->_ecx, msr_fold(regs)) == 0 )
             update_guest_eip(); /* Safe: XSETBV */
         break;
--- a/xen/arch/x86/hvm/vmx/vvmx.c
+++ b/xen/arch/x86/hvm/vmx/vvmx.c
@@ -2322,15 +2322,11 @@ int nvmx_n2_vmexit_handler(struct cpu_us
             nvcpu->nv_vmexit_pending = 1;
         else
         {
-            uint64_t tsc;
-
             /*
              * special handler is needed if L1 doesn't intercept rdtsc,
              * avoiding changing guest_tsc and messing up timekeeping in L1
              */
-            tsc = hvm_get_guest_tsc(v) + get_vvmcs(v, TSC_OFFSET);
-            regs->eax = (uint32_t)tsc;
-            regs->edx = (uint32_t)(tsc >> 32);
+            msr_split(regs, hvm_get_guest_tsc(v) + get_vvmcs(v, TSC_OFFSET));
 
             update_guest_eip();
             return 1;
--- a/xen/arch/x86/time.c
+++ b/xen/arch/x86/time.c
@@ -1918,13 +1918,10 @@ void pv_soft_rdtsc(struct vcpu *v, struc
 
     spin_unlock(&d->arch.vtsc_lock);
 
-    now = gtime_to_gtsc(d, now);
-
-    regs->eax = (uint32_t)now;
-    regs->edx = (uint32_t)(now >> 32);
+    msr_split(regs, gtime_to_gtsc(d, now));
 
     if ( rdtscp )
-        regs->ecx =
+        regs->rcx =
             (d->arch.tsc_mode == TSC_MODE_PVRDTSCP) ? d->arch.incarnation : 0;
 }
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -3401,12 +3401,7 @@ if(rc) printk("%pv: %02x @ %08lx -> %d\n
         else if ( currd->arch.vtsc )
             pv_soft_rdtsc(curr, regs, 0);
         else
-        {
-            uint64_t val = rdtsc();
-
-            regs->eax = (uint32_t)val;
-            regs->edx = (uint32_t)(val >> 32);
-        }
+            msr_split(regs, rdtsc());
     }
 
     if ( ctxt.ctxt.retire.singlestep )
--- a/xen/include/asm-x86/msr.h
+++ b/xen/include/asm-x86/msr.h
@@ -71,6 +71,17 @@ static inline int wrmsr_safe(unsigned in
     return _rc;
 }
 
+static inline uint64_t msr_fold(const struct cpu_user_regs *regs)
+{
+    return (regs->rdx << 32) | regs->_eax;
+}
+
+static inline void msr_split(struct cpu_user_regs *regs, uint64_t val)
+{
+    regs->rdx = val >> 32;
+    regs->rax = (uint32_t)val;
+}
+
 static inline uint64_t rdtsc(void)
 {
     uint32_t low, high;
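
As an aside for readers new to the convention: the two helpers encapsulate
the x86 EDX:EAX split used by RDMSR/WRMSR, RDTSC(P), and XSETBV. Below is a
minimal, standalone sketch of their semantics (illustrative only; mock_regs
and the mock_* functions are stand-ins invented for this illustration, not
Xen code):

/* Illustrative sketch: mock_regs stands in for Xen's struct cpu_user_regs;
 * mock_msr_fold()/mock_msr_split() mirror the semantics of the msr_fold()/
 * msr_split() helpers added by the patch above. */
#include <stdint.h>
#include <stdio.h>

struct mock_regs {
    uint64_t rax, rdx;
};

/* Combine EDX:EAX into one 64-bit value, as WRMSR/XSETBV consume it.
 * The shift discards any stale upper half of rdx. */
static uint64_t mock_msr_fold(const struct mock_regs *regs)
{
    return (regs->rdx << 32) | (uint32_t)regs->rax;
}

/* Scatter a 64-bit value into EDX:EAX, as RDMSR/RDTSC produce it.
 * Writing the full 64-bit fields clears the upper halves, matching the
 * architectural behavior of these insns in 64-bit mode. */
static void mock_msr_split(struct mock_regs *regs, uint64_t val)
{
    regs->rdx = val >> 32;
    regs->rax = (uint32_t)val;
}

int main(void)
{
    struct mock_regs regs;

    mock_msr_split(&regs, 0x0123456789abcdefULL);
    printf("rdx=%08llx rax=%08llx fold=%016llx\n",
           (unsigned long long)regs.rdx,
           (unsigned long long)regs.rax,
           (unsigned long long)mock_msr_fold(&regs));
    return 0;
}

Note the deliberate asymmetry in the real helpers: msr_fold() reads the
32-bit _eax field (WRMSR ignores the upper halves of RAX/RDX), while
msr_split() writes the full 64-bit rax/rdx fields, zero-extending just as
RDMSR and RDTSC do in long mode. The same reasoning is behind the
pv_soft_rdtsc() change from ecx to rcx, since RDTSCP zero-extends its
IA32_TSC_AUX result into RCX.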