[RFC PATCH 10/11] x86: Migrate everything under cpu/ to use x86_vendor_is()
There's nothing special about this directory, but converting it ahead of time
reduces the size of the following patch.
No functional change.
Signed-off-by: Alejandro Vallejo <alejandro.garciavallejo@xxxxxxx>
---
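Note for reviewers looking at this patch in isolation: x86_vendor_is() is
introduced earlier in this series and is not shown here. As a minimal sketch of
what these conversions assume: Xen's X86_VENDOR_* constants are single-bit
values, so they combine into a mask (see the X86_VENDOR_AMD | X86_VENDOR_HYGON
callers below), and the IS_ENABLED(CONFIG_INTEL) removal in mce.c suggests the
real helper also folds in the per-vendor Kconfig checks. Illustrative only, not
the actual implementation:

    /* Sketch only -- the real helper lands earlier in this series. */
    static inline bool x86_vendor_is(unsigned int vendor, unsigned int mask)
    {
        /* X86_VENDOR_* are single-bit values: test membership in the mask. */
        return vendor & mask;
    }

With that reading, x86_vendor_is(c->x86_vendor, X86_VENDOR_AMD | X86_VENDOR_HYGON)
says "this CPU is AMD or Hygon".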
xen/arch/x86/cpu/amd.c | 6 +++---
xen/arch/x86/cpu/common.c | 2 +-
xen/arch/x86/cpu/intel_cacheinfo.c | 5 ++---
xen/arch/x86/cpu/mcheck/amd_nonfatal.c | 2 +-
xen/arch/x86/cpu/mcheck/mcaction.c | 3 ++-
xen/arch/x86/cpu/mcheck/mce.c | 14 +++++++-------
xen/arch/x86/cpu/mcheck/mce_amd.c | 6 +++---
xen/arch/x86/cpu/mcheck/mce_intel.c | 6 +++---
xen/arch/x86/cpu/mcheck/vmce.c | 4 ++--
xen/arch/x86/cpu/microcode/amd.c | 2 +-
xen/arch/x86/cpu/microcode/core.c | 2 +-
xen/arch/x86/cpu/mtrr/generic.c | 4 ++--
xen/arch/x86/cpu/mwait-idle.c | 4 ++--
13 files changed, 30 insertions(+), 30 deletions(-)
diff --git a/xen/arch/x86/cpu/amd.c b/xen/arch/x86/cpu/amd.c
index 9b02e1ba67..1205253e13 100644
--- a/xen/arch/x86/cpu/amd.c
+++ b/xen/arch/x86/cpu/amd.c
@@ -428,7 +428,7 @@ int cpu_has_amd_erratum(const struct cpuinfo_x86 *cpu, int osvw_id, ...)
u32 range;
u32 ms;
- if (cpu->x86_vendor != X86_VENDOR_AMD)
+ if (!x86_vendor_is(cpu->x86_vendor, X86_VENDOR_AMD))
return 0;
if (osvw_id >= 0 && cpu_has(cpu, X86_FEATURE_OSVW)) {
@@ -519,8 +519,8 @@ static void check_syscfg_dram_mod_en(void)
{
uint64_t syscfg;
- if (!((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) &&
- (boot_cpu_data.x86 >= 0x0f)))
+ if (!(x86_vendor_is(boot_cpu_data.x86_vendor, X86_VENDOR_AMD) &&
+ (boot_cpu_data.x86 >= 0x0f)))
return;
rdmsrl(MSR_K8_SYSCFG, syscfg);
diff --git a/xen/arch/x86/cpu/common.c b/xen/arch/x86/cpu/common.c
index c0c3606dd2..df81ef9136 100644
--- a/xen/arch/x86/cpu/common.c
+++ b/xen/arch/x86/cpu/common.c
@@ -463,7 +463,7 @@ void __init early_cpu_init(bool verbose)
paddr_bits -= (ebx >> 6) & 0x3f;
}
- if (!(c->x86_vendor & (X86_VENDOR_AMD | X86_VENDOR_HYGON)))
+ if (!x86_vendor_is(c->x86_vendor, X86_VENDOR_AMD | X86_VENDOR_HYGON))
park_offline_cpus = opt_mce;
initialize_cpu_data(0);
diff --git a/xen/arch/x86/cpu/intel_cacheinfo.c b/xen/arch/x86/cpu/intel_cacheinfo.c
index e88faa7545..7dc778cd55 100644
--- a/xen/arch/x86/cpu/intel_cacheinfo.c
+++ b/xen/arch/x86/cpu/intel_cacheinfo.c
@@ -168,9 +168,8 @@ void init_intel_cacheinfo(struct cpuinfo_x86 *c)
* Don't use cpuid2 if cpuid4 is supported. For P4, we use cpuid2 for
* trace cache
*/
- if ((num_cache_leaves == 0 || c->x86 == 15) && c->cpuid_level > 1 &&
- c->x86_vendor != X86_VENDOR_SHANGHAI)
- {
+ if (((num_cache_leaves == 0 || c->x86 == 15) && c->cpuid_level > 1) &&
+ !x86_vendor_is(c->x86_vendor, X86_VENDOR_SHANGHAI)) {
/* supports eax=2 call */
unsigned int i, j, n, regs[4];
unsigned char *dp = (unsigned char *)regs;
diff --git a/xen/arch/x86/cpu/mcheck/amd_nonfatal.c b/xen/arch/x86/cpu/mcheck/amd_nonfatal.c
index 7d48c9ab5f..c2c829a397 100644
--- a/xen/arch/x86/cpu/mcheck/amd_nonfatal.c
+++ b/xen/arch/x86/cpu/mcheck/amd_nonfatal.c
@@ -191,7 +191,7 @@ static void cf_check mce_amd_work_fn(void *data)
void __init amd_nonfatal_mcheck_init(struct cpuinfo_x86 *c)
{
- if (!(c->x86_vendor & (X86_VENDOR_AMD | X86_VENDOR_HYGON)))
+ if (!x86_vendor_is(c->x86_vendor, X86_VENDOR_AMD | X86_VENDOR_HYGON))
return;
/* Assume we are on K8 or newer AMD or Hygon CPU here */
diff --git a/xen/arch/x86/cpu/mcheck/mcaction.c b/xen/arch/x86/cpu/mcheck/mcaction.c
index bf7a0de965..a43e3240c3 100644
--- a/xen/arch/x86/cpu/mcheck/mcaction.c
+++ b/xen/arch/x86/cpu/mcheck/mcaction.c
@@ -101,7 +101,8 @@ mc_memerr_dhandler(struct mca_binfo *binfo,
* not always precise. In that case, fallback to broadcast.
*/
global->mc_domid != bank->mc_domid ||
- (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
+ (x86_vendor_is(boot_cpu_data.x86_vendor,
+ X86_VENDOR_INTEL) &&
(!(global->mc_gstatus & MCG_STATUS_LMCE) ||
!(d->vcpu[mc_vcpuid]->arch.vmce.mcg_ext_ctl &
MCG_EXT_CTL_LMCE_EN))) )
diff --git a/xen/arch/x86/cpu/mcheck/mce.c b/xen/arch/x86/cpu/mcheck/mce.c
index 9bef1da385..40c8c10df9 100644
--- a/xen/arch/x86/cpu/mcheck/mce.c
+++ b/xen/arch/x86/cpu/mcheck/mce.c
@@ -333,8 +333,7 @@ mcheck_mca_logout(enum mca_source who, struct mca_banks *bankmask,
ASSERT(mig);
mca_init_global(mc_flags, mig);
/* A hook here to get global extended msrs */
- if ( IS_ENABLED(CONFIG_INTEL) &&
- boot_cpu_data.x86_vendor == X86_VENDOR_INTEL )
+ if ( x86_vendor_is(boot_cpu_data.x86_vendor, X86_VENDOR_INTEL) )
intel_get_extended_msrs(mig, mci);
}
}
@@ -564,8 +563,8 @@ bool mce_available(const struct cpuinfo_x86 *c)
*/
unsigned int mce_firstbank(struct cpuinfo_x86 *c)
{
- return c->x86 == 6 &&
- c->x86_vendor == X86_VENDOR_INTEL && c->x86_model < 0x1a;
+ return x86_vendor_is(c->x86_vendor, X86_VENDOR_INTEL) &&
+ c->x86 == 6 && c->x86_model < 0x1a;
}
static int show_mca_info(int inited, struct cpuinfo_x86 *c)
@@ -1107,7 +1106,7 @@ static bool __maybe_unused x86_mc_msrinject_verify(struct xen_mc_msrinject *mci)
if ( IS_MCA_BANKREG(reg, mci->mcinj_cpunr) )
{
- if ( c->x86_vendor == X86_VENDOR_AMD )
+ if ( x86_vendor_is(c->x86_vendor, X86_VENDOR_AMD) )
{
/*
* On AMD we can set MCi_STATUS_WREN in the
@@ -1142,7 +1141,7 @@ static bool __maybe_unused x86_mc_msrinject_verify(struct xen_mc_msrinject *mci)
case MSR_F10_MC4_MISC1:
case MSR_F10_MC4_MISC2:
case MSR_F10_MC4_MISC3:
- if ( c->x86_vendor != X86_VENDOR_AMD )
+ if ( !x86_vendor_is(c->x86_vendor, X86_VENDOR_AMD) )
reason = "only supported on AMD";
else if ( c->x86 < 0x10 )
reason = "only supported on AMD Fam10h+";
@@ -1150,7 +1149,8 @@ static bool __maybe_unused x86_mc_msrinject_verify(struct xen_mc_msrinject *mci)
/* MSRs that the HV will take care of */
case MSR_K8_HWCR:
- if ( c->x86_vendor & (X86_VENDOR_AMD | X86_VENDOR_HYGON) )
+ if ( x86_vendor_is(c->x86_vendor,
+ X86_VENDOR_AMD | X86_VENDOR_HYGON) )
reason = "HV will operate HWCR";
else
reason = "only supported on AMD or Hygon";
diff --git a/xen/arch/x86/cpu/mcheck/mce_amd.c b/xen/arch/x86/cpu/mcheck/mce_amd.c
index 25c29eb3d2..e664bf0443 100644
--- a/xen/arch/x86/cpu/mcheck/mce_amd.c
+++ b/xen/arch/x86/cpu/mcheck/mce_amd.c
@@ -160,7 +160,7 @@ mcequirk_lookup_amd_quirkdata(const struct cpuinfo_x86 *c)
{
unsigned int i;
- BUG_ON(c->x86_vendor != X86_VENDOR_AMD);
+ BUG_ON(!x86_vendor_is(c->x86_vendor, X86_VENDOR_AMD));
for ( i = 0; i < ARRAY_SIZE(mce_amd_quirks); i++ )
{
@@ -291,7 +291,7 @@ amd_mcheck_init(const struct cpuinfo_x86 *c, bool bsp)
uint32_t i;
enum mcequirk_amd_flags quirkflag = 0;
- if ( c->x86_vendor != X86_VENDOR_HYGON )
+ if ( !x86_vendor_is(c->x86_vendor, X86_VENDOR_HYGON) )
quirkflag = mcequirk_lookup_amd_quirkdata(c);
/* Assume that machine check support is available.
@@ -337,6 +337,6 @@ amd_mcheck_init(const struct cpuinfo_x86 *c, bool bsp)
ppin_msr = MSR_AMD_PPIN;
}
- return c->x86_vendor == X86_VENDOR_HYGON ?
+ return x86_vendor_is(c->x86_vendor, X86_VENDOR_HYGON) ?
mcheck_hygon : mcheck_amd_famXX;
}
diff --git a/xen/arch/x86/cpu/mcheck/mce_intel.c b/xen/arch/x86/cpu/mcheck/mce_intel.c
index 839a0e5ba9..9f3ae4277b 100644
--- a/xen/arch/x86/cpu/mcheck/mce_intel.c
+++ b/xen/arch/x86/cpu/mcheck/mce_intel.c
@@ -711,8 +711,8 @@ static bool mce_is_broadcast(struct cpuinfo_x86 *c)
* DisplayFamily_DisplayModel encoding of 06H_EH and above,
* a MCA signal is broadcast to all logical processors in the system
*/
- if ( c->x86_vendor == X86_VENDOR_INTEL && c->x86 == 6 &&
- c->x86_model >= 0xe )
+ if ( x86_vendor_is(c->x86_vendor, X86_VENDOR_INTEL) &&
+ c->x86 == 6 && c->x86_model >= 0xe )
return true;
return false;
}
@@ -1036,7 +1036,7 @@ int vmce_intel_rdmsr(const struct vcpu *v, uint32_t msr, uint64_t *val)
return 1;
}
- if ( !(cp->x86_vendor & X86_VENDOR_INTEL) )
+ if ( !x86_vendor_is(cp->x86_vendor, X86_VENDOR_INTEL) )
return 0;
if ( bank < GUEST_MC_BANK_NUM )
diff --git a/xen/arch/x86/cpu/mcheck/vmce.c b/xen/arch/x86/cpu/mcheck/vmce.c
index dd1ccecfe5..7f0c413412 100644
--- a/xen/arch/x86/cpu/mcheck/vmce.c
+++ b/xen/arch/x86/cpu/mcheck/vmce.c
@@ -45,7 +45,7 @@ void vmce_init_vcpu(struct vcpu *v)
int i;
/* global MCA MSRs init */
- if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL )
+ if ( x86_vendor_is(boot_cpu_data.x86_vendor, X86_VENDOR_INTEL) )
v->arch.vmce.mcg_cap = INTEL_GUEST_MCG_CAP;
else
v->arch.vmce.mcg_cap = AMD_GUEST_MCG_CAP;
@@ -63,7 +63,7 @@ int vmce_restore_vcpu(struct vcpu *v, const struct hvm_vmce_vcpu *ctxt)
{
unsigned long guest_mcg_cap;
- if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL )
+ if ( x86_vendor_is(boot_cpu_data.x86_vendor, X86_VENDOR_INTEL) )
guest_mcg_cap = INTEL_GUEST_MCG_CAP | MCG_LMCE_P;
else
guest_mcg_cap = AMD_GUEST_MCG_CAP;
diff --git a/xen/arch/x86/cpu/microcode/amd.c b/xen/arch/x86/cpu/microcode/amd.c
index 4a7573c885..da5573445e 100644
--- a/xen/arch/x86/cpu/microcode/amd.c
+++ b/xen/arch/x86/cpu/microcode/amd.c
@@ -616,7 +616,7 @@ void __init amd_check_entrysign(void)
unsigned int curr_rev;
uint8_t fixed_rev;
- if ( boot_cpu_data.vendor != X86_VENDOR_AMD ||
+ if ( !x86_vendor_is(boot_cpu_data.vendor, X86_VENDOR_AMD) ||
boot_cpu_data.family < 0x17 ||
boot_cpu_data.family > 0x1a )
return;
diff --git a/xen/arch/x86/cpu/microcode/core.c b/xen/arch/x86/cpu/microcode/core.c
index fe47c3a6c1..e931bca95e 100644
--- a/xen/arch/x86/cpu/microcode/core.c
+++ b/xen/arch/x86/cpu/microcode/core.c
@@ -199,7 +199,7 @@ static struct microcode_patch *parse_blob(const char *buf, size_t len)
/* Returns true if ucode should be loaded on a given cpu */
static bool is_cpu_primary(unsigned int cpu)
{
- if ( boot_cpu_data.vendor & (X86_VENDOR_AMD | X86_VENDOR_HYGON) )
+ if ( x86_vendor_is(boot_cpu_data.vendor, X86_VENDOR_AMD | X86_VENDOR_HYGON) )
/* Load ucode on every logical thread/core */
return true;
diff --git a/xen/arch/x86/cpu/mtrr/generic.c b/xen/arch/x86/cpu/mtrr/generic.c
index c587e9140e..88cf6a5e8e 100644
--- a/xen/arch/x86/cpu/mtrr/generic.c
+++ b/xen/arch/x86/cpu/mtrr/generic.c
@@ -218,9 +218,9 @@ static void __init print_mtrr_state(const char *level)
printk("%s %u disabled\n", level, i);
}
- if ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
+ if ((x86_vendor_is(boot_cpu_data.x86_vendor, X86_VENDOR_AMD) &&
boot_cpu_data.x86 >= 0xf) ||
- boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
+ x86_vendor_is(boot_cpu_data.x86_vendor, X86_VENDOR_HYGON)) {
uint64_t syscfg, tom2;
rdmsrl(MSR_K8_SYSCFG, syscfg);
diff --git a/xen/arch/x86/cpu/mwait-idle.c b/xen/arch/x86/cpu/mwait-idle.c
index f47fdfb569..c284375b24 100644
--- a/xen/arch/x86/cpu/mwait-idle.c
+++ b/xen/arch/x86/cpu/mwait-idle.c
@@ -1598,7 +1598,7 @@ static int __init mwait_idle_probe(void)
const struct x86_cpu_id *id;
const char *str;
- if (boot_cpu_data.vendor != X86_VENDOR_INTEL)
+ if (!x86_vendor_is(boot_cpu_data.vendor, X86_VENDOR_INTEL))
return -ENODEV;
id = x86_match_cpu(intel_idle_ids);
@@ -1816,7 +1816,7 @@ bool __init mwait_pc10_supported(void)
{
unsigned int ecx, edx, dummy;
- if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL ||
+ if (!x86_vendor_is(boot_cpu_data.x86_vendor, X86_VENDOR_INTEL) ||
!cpu_has_monitor ||
boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF)
return false;
--
2.43.0