[Xen-devel] [PATCH 3/4] x86/msr: Handle MSR_TSC_AUX consistently for PV and HVM guests
With PVRDTSCP mode removed, handling of MSR_TSC_AUX can move into the common
code. Move its storage into struct vcpu_msrs (dropping the HVM-specific
msr_tsc_aux), and add an RDPID feature check as this bit also enumerates the
presence of the MSR.
Drop hvm_msr_tsc_aux() entirely, and use v->arch.msrs->tsc_aux directly.
Update hvm_load_cpu_ctxt() to check that the incoming ctxt.msr_tsc_aux isn't
out of range. In practice, no previous version of Xen ever wrote an
out-of-range value. Add MSR_TSC_AUX to the list of MSRs migrated for PV
guests.
Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
CC: Jan Beulich <JBeulich@xxxxxxxx>
CC: Wei Liu <wei.liu2@xxxxxxxxxx>
CC: Roger Pau Monné <roger.pau@xxxxxxxxxx>
CC: Konrad Rzeszutek Wilk <konrad.wilk@xxxxxxxxxx>
CC: Jun Nakajima <jun.nakajima@xxxxxxxxx>
CC: Kevin Tian <kevin.tian@xxxxxxxxx>
CC: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>
CC: Suravee Suthikulpanit <suravee.suthikulpanit@xxxxxxx>
CC: Brian Woods <brian.woods@xxxxxxx>
---
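Illustrative note for reviewers (not part of the patch): the reason the RDPID
feature flag also enumerates MSR_TSC_AUX is that either instruction exposes the
MSR's value directly to guest userspace.  A minimal sketch of the two read
paths, assuming GCC/Clang inline assembly and an assembler which knows the
rdpid mnemonic:

  #include <stdint.h>

  /* RDTSCP returns the TSC in EDX:EAX and MSR_TSC_AUX in ECX. */
  static inline uint32_t tsc_aux_from_rdtscp(void)
  {
      uint32_t lo, hi, aux;

      asm volatile ( "rdtscp" : "=a" (lo), "=d" (hi), "=c" (aux) );
      (void)lo; (void)hi;          /* Only TSC_AUX is wanted here. */

      return aux;
  }

  /* RDPID reads MSR_TSC_AUX alone, without also reading the TSC. */
  static inline unsigned long tsc_aux_from_rdpid(void)
  {
      unsigned long aux;

      asm volatile ( "rdpid %0" : "=r" (aux) );

      return aux;
  }

i.e. a guest whose policy has rdpid but not rdtscp can still observe the MSR,
which is why guest_{rd,wr}msr() accept it when either bit is set.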
xen/arch/x86/domain.c | 3 +--
xen/arch/x86/domctl.c | 2 ++
xen/arch/x86/hvm/hvm.c | 18 +++++-------------
xen/arch/x86/hvm/svm/svm.c | 4 ++--
xen/arch/x86/hvm/vmx/vmx.c | 4 ++--
xen/arch/x86/msr.c | 18 ++++++++++++++++++
xen/arch/x86/pv/emul-priv-op.c | 4 ----
xen/include/asm-x86/hvm/hvm.h | 6 ------
xen/include/asm-x86/hvm/vcpu.h | 1 -
xen/include/asm-x86/msr.h | 8 ++++++++
10 files changed, 38 insertions(+), 30 deletions(-)
diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index 295b10c..2067a0c 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -1593,8 +1593,7 @@ void paravirt_ctxt_switch_to(struct vcpu *v)
         activate_debugregs(v);
 
     if ( cpu_has_rdtscp )
-        wrmsr_tsc_aux(v->domain->arch.tsc_mode == TSC_MODE_PVRDTSCP
-                      ? v->domain->arch.incarnation : 0);
+        wrmsr_tsc_aux(v->arch.msrs->tsc_aux);
 }
 
 /* Update per-VCPU guest runstate shared memory area (if registered). */
diff --git a/xen/arch/x86/domctl.c b/xen/arch/x86/domctl.c
index 97ea5d8..b8d0796 100644
--- a/xen/arch/x86/domctl.c
+++ b/xen/arch/x86/domctl.c
@@ -1275,6 +1275,7 @@ long arch_do_domctl(
         static const uint32_t msrs_to_send[] = {
             MSR_SPEC_CTRL,
             MSR_INTEL_MISC_FEATURES_ENABLES,
+            MSR_TSC_AUX,
         };
         uint32_t nr_msrs = ARRAY_SIZE(msrs_to_send);
 
@@ -1399,6 +1400,7 @@ long arch_do_domctl(
                 {
                 case MSR_SPEC_CTRL:
                 case MSR_INTEL_MISC_FEATURES_ENABLES:
+                case MSR_TSC_AUX:
                     if ( guest_wrmsr(v, msr.index, msr.value) != X86EMUL_OKAY )
                         break;
                     continue;
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 0bc676c..1e4fc7d 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -774,7 +774,7 @@ static int hvm_save_cpu_ctxt(struct vcpu *v, hvm_domain_context_t *h)
     struct segment_register seg;
     struct hvm_hw_cpu ctxt = {
         .tsc = hvm_get_guest_tsc_fixed(v, v->domain->arch.hvm.sync_tsc),
-        .msr_tsc_aux = hvm_msr_tsc_aux(v),
+        .msr_tsc_aux = v->arch.msrs->tsc_aux,
         .rax = v->arch.user_regs.rax,
         .rbx = v->arch.user_regs.rbx,
         .rcx = v->arch.user_regs.rcx,
@@ -1040,7 +1040,10 @@ static int hvm_load_cpu_ctxt(struct domain *d, hvm_domain_context_t *h)
     if ( hvm_funcs.tsc_scaling.setup )
         hvm_funcs.tsc_scaling.setup(v);
 
-    v->arch.hvm.msr_tsc_aux = ctxt.msr_tsc_aux;
+    if ( ctxt.msr_tsc_aux != (uint32_t)ctxt.msr_tsc_aux )
+        return -EINVAL;
+
+    v->arch.msrs->tsc_aux = ctxt.msr_tsc_aux;
 
     hvm_set_guest_tsc_fixed(v, ctxt.tsc, d->arch.hvm.sync_tsc);
 
@@ -3400,10 +3403,6 @@ int hvm_msr_read_intercept(unsigned int msr, uint64_t *msr_content)
         *msr_content = v->arch.hvm.msr_tsc_adjust;
         break;
 
-    case MSR_TSC_AUX:
-        *msr_content = hvm_msr_tsc_aux(v);
-        break;
-
     case MSR_IA32_APICBASE:
         *msr_content = vcpu_vlapic(v)->hw.apic_base_msr;
         break;
@@ -3556,13 +3555,6 @@ int hvm_msr_write_intercept(unsigned int msr, uint64_t msr_content,
         hvm_set_guest_tsc_adjust(v, msr_content);
         break;
 
-    case MSR_TSC_AUX:
-        v->arch.hvm.msr_tsc_aux = (uint32_t)msr_content;
-        if ( cpu_has_rdtscp
-             && (v->domain->arch.tsc_mode != TSC_MODE_PVRDTSCP) )
-            wrmsr_tsc_aux(msr_content);
-        break;
-
     case MSR_IA32_APICBASE:
         if ( !vlapic_msr_set(vcpu_vlapic(v), msr_content) )
             goto gp_fault;
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index 396ee4a..e42e152 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -1136,7 +1136,7 @@ static void svm_ctxt_switch_to(struct vcpu *v)
     svm_tsc_ratio_load(v);
 
     if ( cpu_has_rdtscp )
-        wrmsr_tsc_aux(hvm_msr_tsc_aux(v));
+        wrmsr_tsc_aux(v->arch.msrs->tsc_aux);
 }
 
 static void noreturn svm_do_resume(struct vcpu *v)
@@ -3063,7 +3063,7 @@ void svm_vmexit_handler(struct cpu_user_regs *regs)
         break;
 
     case VMEXIT_RDTSCP:
-        regs->rcx = hvm_msr_tsc_aux(v);
+        regs->rcx = v->arch.msrs->tsc_aux;
         /* fall through */
     case VMEXIT_RDTSC:
        svm_vmexit_do_rdtsc(regs);
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 365eeb2..ea9694a 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -512,7 +512,7 @@ static void vmx_restore_guest_msrs(struct vcpu *v)
     wrmsrl(MSR_SYSCALL_MASK, v->arch.hvm.vmx.sfmask);
 
     if ( cpu_has_rdtscp )
-        wrmsr_tsc_aux(hvm_msr_tsc_aux(v));
+        wrmsr_tsc_aux(v->arch.msrs->tsc_aux);
 }
 
 void vmx_update_cpu_exec_control(struct vcpu *v)
@@ -3956,7 +3956,7 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs)
         vmx_invlpg_intercept(exit_qualification);
         break;
     case EXIT_REASON_RDTSCP:
-        regs->rcx = hvm_msr_tsc_aux(v);
+        regs->rcx = v->arch.msrs->tsc_aux;
         /* fall through */
     case EXIT_REASON_RDTSC:
         update_guest_eip(); /* Safe: RDTSC, RDTSCP */
diff --git a/xen/arch/x86/msr.c b/xen/arch/x86/msr.c
index c9e87b1..ba1ce29 100644
--- a/xen/arch/x86/msr.c
+++ b/xen/arch/x86/msr.c
@@ -159,6 +159,13 @@ int guest_rdmsr(const struct vcpu *v, uint32_t msr, uint64_t *val)
         ret = guest_rdmsr_xen(v, msr, val);
         break;
 
+    case MSR_TSC_AUX:
+        if ( !cp->extd.rdtscp && !cp->feat.rdpid )
+            goto gp_fault;
+
+        *val = msrs->tsc_aux;
+        break;
+
     default:
         return X86EMUL_UNHANDLEABLE;
     }
@@ -285,6 +292,17 @@ int guest_wrmsr(struct vcpu *v, uint32_t msr, uint64_t val)
         ret = guest_wrmsr_xen(v, msr, val);
         break;
 
+    case MSR_TSC_AUX:
+        if ( !cp->extd.rdtscp && !cp->feat.rdpid )
+            goto gp_fault;
+        if ( val != (uint32_t)val )
+            goto gp_fault;
+
+        msrs->tsc_aux = val;
+        if ( v == curr )
+            wrmsr_tsc_aux(val);
+        break;
+
     default:
         return X86EMUL_UNHANDLEABLE;
     }
diff --git a/xen/arch/x86/pv/emul-priv-op.c b/xen/arch/x86/pv/emul-priv-op.c
index 3641d31..7c2b635 100644
--- a/xen/arch/x86/pv/emul-priv-op.c
+++ b/xen/arch/x86/pv/emul-priv-op.c
@@ -847,10 +847,6 @@ static int read_msr(unsigned int reg, uint64_t *val,
         *val = currd->arch.vtsc ? pv_soft_rdtsc(curr, ctxt->regs) : rdtsc();
         return X86EMUL_OKAY;
 
-    case MSR_TSC_AUX:
-        *val = 0;
-        return X86EMUL_OKAY;
-
     case MSR_EFER:
         /* Hide unknown bits, and unconditionally hide SVME from guests. */
         *val = read_efer() & EFER_KNOWN_MASK & ~EFER_SVME;
diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
index 3d3250d..3a92bb3 100644
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -563,12 +563,6 @@ static inline void hvm_invalidate_regs_fields(struct cpu_user_regs *regs)
 #endif
 }
 
-#define hvm_msr_tsc_aux(v) ({ \
-    struct domain *__d = (v)->domain; \
-    (__d->arch.tsc_mode == TSC_MODE_PVRDTSCP) \
-        ? (u32)__d->arch.incarnation : (u32)(v)->arch.hvm.msr_tsc_aux; \
-})
-
 /*
  * Nested HVM
  */
diff --git a/xen/include/asm-x86/hvm/vcpu.h b/xen/include/asm-x86/hvm/vcpu.h
index c663155..1d2f407 100644
--- a/xen/include/asm-x86/hvm/vcpu.h
+++ b/xen/include/asm-x86/hvm/vcpu.h
@@ -171,7 +171,6 @@ struct hvm_vcpu {
     struct hvm_vcpu_asid n1asid;
 
-    u32 msr_tsc_aux;
     u64 msr_tsc_adjust;
     u64 msr_xss;
diff --git a/xen/include/asm-x86/msr.h b/xen/include/asm-x86/msr.h
index c1cb38f..9d0d52b 100644
--- a/xen/include/asm-x86/msr.h
+++ b/xen/include/asm-x86/msr.h
@@ -289,6 +289,14 @@ struct vcpu_msrs
     } misc_features_enables;
 
     /*
+     * 0xc0000103 - MSR_TSC_AUX
+     *
+     * Value is guest chosen, and eagerly loaded in guest context. The value
+     * is accessible to userspace with the RDTSCP and RDPID instructions.
+     */
+    uint32_t tsc_aux;
+
+    /*
      * 0xc00110{27,19-1b} MSR_AMD64_DR{0-3}_ADDRESS_MASK
      * TODO: Not yet handled by guest_{rd,wr}msr() infrastructure.
      */
--
2.1.4