[RFC PATCH 07/11] x86: Migrate MSR handler vendor checks to x86_vendor_is()
Many handlers are vendor-specific and are currently gated on runtime
checks. Migrating those checks to x86_vendor_is() effectively allows the
elision of handling code for CPU vendors that are not compiled in.
Not a functional change.
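For illustration only (not part of this patch): the elision works because
x86_vendor_is() can narrow its vendor mask to a build-time constant. Below
is a minimal sketch of one possible shape, assuming hypothetical CONFIG_*
vendor options and reusing the existing X86_VENDOR_* bit flags:

    /*
     * Hypothetical sketch: mask off vendors whose support is not built
     * in, so IS_ENABLED() lets the compiler fold the check to a constant
     * and drop the guarded handling code as dead.  The CONFIG_* names
     * here are placeholders, not the real Kconfig symbols.
     */
    #define X86_VENDOR_BUILTIN_MASK                                     \
        ((IS_ENABLED(CONFIG_INTEL)    ? X86_VENDOR_INTEL    : 0) |      \
         (IS_ENABLED(CONFIG_AMD)      ? X86_VENDOR_AMD      : 0) |      \
         (IS_ENABLED(CONFIG_HYGON)    ? X86_VENDOR_HYGON    : 0) |      \
         (IS_ENABLED(CONFIG_CENTAUR)  ? X86_VENDOR_CENTAUR  : 0) |      \
         (IS_ENABLED(CONFIG_SHANGHAI) ? X86_VENDOR_SHANGHAI : 0))

    static inline bool x86_vendor_is(unsigned int vendor, unsigned int mask)
    {
        /* X86_VENDOR_* are single-bit flags, so '&' tests membership. */
        return vendor & mask & X86_VENDOR_BUILTIN_MASK;
    }

With a helper of that shape, !x86_vendor_is(cp->x86_vendor, X86_VENDOR_INTEL)
is constant-true in a build without Intel support, so the #GP path is always
taken and the MSR handling below the check is dropped as unreachable.
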
Signed-off-by: Alejandro Vallejo <alejandro.garciavallejo@xxxxxxx>
---
xen/arch/x86/msr.c | 41 +++++++++++++++++++++++------------------
1 file changed, 23 insertions(+), 18 deletions(-)
diff --git a/xen/arch/x86/msr.c b/xen/arch/x86/msr.c
index ad75a2e108..f022332971 100644
--- a/xen/arch/x86/msr.c
+++ b/xen/arch/x86/msr.c
@@ -157,8 +157,9 @@ int guest_rdmsr(struct vcpu *v, uint32_t msr, uint64_t *val)
* The MSR has existed on all Intel parts since before the 64bit days,
* and is implemented by other vendors.
*/
- if ( !(cp->x86_vendor & (X86_VENDOR_INTEL | X86_VENDOR_CENTAUR |
- X86_VENDOR_SHANGHAI)) )
+ if ( !x86_vendor_is(cp->x86_vendor, X86_VENDOR_INTEL |
+ X86_VENDOR_CENTAUR |
+ X86_VENDOR_SHANGHAI) )
goto gp_fault;
*val = IA32_FEATURE_CONTROL_LOCK;
@@ -169,8 +170,8 @@ int guest_rdmsr(struct vcpu *v, uint32_t msr, uint64_t *val)
break;
case MSR_IA32_PLATFORM_ID:
- if ( !(cp->x86_vendor & X86_VENDOR_INTEL) ||
- !(boot_cpu_data.x86_vendor & X86_VENDOR_INTEL) )
+ if ( !x86_vendor_is(cp->x86_vendor, X86_VENDOR_INTEL) ||
+ !x86_vendor_is(boot_cpu_data.x86_vendor, X86_VENDOR_INTEL) )
goto gp_fault;
rdmsrl(MSR_IA32_PLATFORM_ID, *val);
break;
@@ -189,9 +190,10 @@ int guest_rdmsr(struct vcpu *v, uint32_t msr, uint64_t *val)
* from Xen's last microcode load, which can be forwarded straight to
* the guest.
*/
- if ( !(cp->x86_vendor & (X86_VENDOR_INTEL | X86_VENDOR_AMD)) ||
- !(boot_cpu_data.x86_vendor &
- (X86_VENDOR_INTEL | X86_VENDOR_AMD)) ||
+ if ( !x86_vendor_is(cp->x86_vendor,
+ X86_VENDOR_INTEL | X86_VENDOR_AMD) ||
+ !x86_vendor_is(boot_cpu_data.x86_vendor,
+ X86_VENDOR_INTEL | X86_VENDOR_AMD) ||
rdmsr_safe(MSR_AMD_PATCHLEVEL, val) )
goto gp_fault;
break;
@@ -236,7 +238,8 @@ int guest_rdmsr(struct vcpu *v, uint32_t msr, uint64_t *val)
*/
case MSR_IA32_PERF_STATUS:
case MSR_IA32_PERF_CTL:
- if ( !(cp->x86_vendor & (X86_VENDOR_INTEL | X86_VENDOR_CENTAUR)) )
+ if ( !x86_vendor_is(cp->x86_vendor,
+ X86_VENDOR_INTEL | X86_VENDOR_CENTAUR) )
goto gp_fault;
*val = 0;
@@ -245,7 +248,7 @@ int guest_rdmsr(struct vcpu *v, uint32_t msr, uint64_t *val)
goto gp_fault;
case MSR_IA32_THERM_STATUS:
- if ( cp->x86_vendor != X86_VENDOR_INTEL )
+ if ( !x86_vendor_is(cp->x86_vendor, X86_VENDOR_INTEL) )
goto gp_fault;
*val = 0;
break;
@@ -302,7 +305,7 @@ int guest_rdmsr(struct vcpu *v, uint32_t msr, uint64_t *val)
case MSR_K8_IORR_MASK1:
case MSR_K8_TSEG_BASE:
case MSR_K8_TSEG_MASK:
- if ( !(cp->x86_vendor & (X86_VENDOR_AMD | X86_VENDOR_HYGON)) )
+ if ( !x86_vendor_is(cp->x86_vendor, X86_VENDOR_AMD | X86_VENDOR_HYGON) )
goto gp_fault;
if ( !is_hardware_domain(d) )
return X86EMUL_UNHANDLEABLE;
@@ -314,14 +317,15 @@ int guest_rdmsr(struct vcpu *v, uint32_t msr, uint64_t *val)
break;
case MSR_K8_HWCR:
- if ( !(cp->x86_vendor & (X86_VENDOR_AMD | X86_VENDOR_HYGON)) )
+ if ( !x86_vendor_is(cp->x86_vendor, X86_VENDOR_AMD | X86_VENDOR_HYGON) )
goto gp_fault;
*val = 0;
break;
case MSR_FAM10H_MMIO_CONF_BASE:
if ( !is_hardware_domain(d) ||
- !(cp->x86_vendor & (X86_VENDOR_AMD | X86_VENDOR_HYGON)) ||
+ !x86_vendor_is(cp->x86_vendor,
+ X86_VENDOR_AMD | X86_VENDOR_HYGON) ||
rdmsr_safe(msr, val) )
goto gp_fault;
@@ -338,7 +342,7 @@ int guest_rdmsr(struct vcpu *v, uint32_t msr, uint64_t *val)
break;
case MSR_AMD64_DE_CFG:
- if ( !(cp->x86_vendor & (X86_VENDOR_AMD | X86_VENDOR_HYGON)) )
+ if ( !x86_vendor_is(cp->x86_vendor, X86_VENDOR_AMD | X86_VENDOR_HYGON) )
goto gp_fault;
*val = AMD64_DE_CFG_LFENCE_SERIALISE;
break;
@@ -461,7 +465,7 @@ int guest_wrmsr(struct vcpu *v, uint32_t msr, uint64_t val)
* for backwards compatiblity, the OS should write 0 to it before
* trying to access the current microcode version.
*/
- if ( cp->x86_vendor != X86_VENDOR_INTEL || val != 0 )
+ if ( !x86_vendor_is(cp->x86_vendor, X86_VENDOR_INTEL) || val != 0 )
goto gp_fault;
break;
@@ -471,7 +475,7 @@ int guest_wrmsr(struct vcpu *v, uint32_t msr, uint64_t val)
* to AMD CPUs as well (at least the architectural/CPUID part does).
*/
if ( is_pv_domain(d) ||
- cp->x86_vendor != X86_VENDOR_AMD )
+ !x86_vendor_is(cp->x86_vendor, X86_VENDOR_AMD) )
goto gp_fault;
break;
@@ -483,7 +487,7 @@ int guest_wrmsr(struct vcpu *v, uint32_t msr, uint64_t val)
* by any CPUID bit.
*/
if ( is_pv_domain(d) ||
- cp->x86_vendor != X86_VENDOR_INTEL )
+ !x86_vendor_is(cp->x86_vendor, X86_VENDOR_INTEL) )
goto gp_fault;
break;
@@ -553,7 +557,8 @@ int guest_wrmsr(struct vcpu *v, uint32_t msr, uint64_t val)
* a cpufreq controller dom0 which has full access.
*/
case MSR_IA32_PERF_CTL:
- if ( !(cp->x86_vendor & (X86_VENDOR_INTEL | X86_VENDOR_CENTAUR)) )
+ if ( !x86_vendor_is(cp->x86_vendor,
+ X86_VENDOR_INTEL | X86_VENDOR_CENTAUR) )
goto gp_fault;
if ( likely(!is_cpufreq_controller(d)) || wrmsr_safe(msr, val) == 0 )
@@ -663,7 +668,7 @@ int guest_wrmsr(struct vcpu *v, uint32_t msr, uint64_t val)
* OpenBSD 6.7 will panic if writing to DE_CFG triggers a #GP:
* https://www.illumos.org/issues/12998 - drop writes.
*/
- if ( !(cp->x86_vendor & (X86_VENDOR_AMD | X86_VENDOR_HYGON)) )
+ if ( !x86_vendor_is(cp->x86_vendor, X86_VENDOR_AMD | X86_VENDOR_HYGON) )
goto gp_fault;
break;
--
2.43.0