
[PATCH 06/16] x86/cpu: Rework the vendor early_init() hooks to be __init



All actions inside the hooks are now conditional on c == &boot_cpu_data, so
hoist that check into the caller, identify_cpu(), and drop the hook parameter.

This allows early_init_$VENDOR() to become __init, which in turn allows
$VENDOR_init_levelling() to cease being noinline.
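
As a rough sketch (illustrative only; "vendor" stands in for amd/intel and the
bodies are elided), the noinline annotations were presumably there to stop the
compiler from inlining the __init levelling code into its then non-__init
caller, which would have left that code resident in .text after init memory is
released:

  /* Before: non-__init caller, so the __init callee must stay out of line. */
  static void __init noinline vendor_init_levelling(void) { /* ... */ }

  void cf_check early_init_vendor(struct cpuinfo_x86 *c)
  {
      if (c == &boot_cpu_data)
          vendor_init_levelling();
  }

  /* After: the whole call chain is __init, so any inlining stays within
   * .init.text and noinline can go. */
  static void __init vendor_init_levelling(void) { /* ... */ }

  void __init cf_check early_init_vendor(void)
  {
      vendor_init_levelling();
  }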

Reposition early_init_intel() simply to make the diff legible.

No functional change.

Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
CC: Jan Beulich <JBeulich@xxxxxxxx>
CC: Roger Pau Monné <roger.pau@xxxxxxxxxx>
CC: Julian Vetter <julian.vetter@xxxxxxxxxx>
CC: Teddy Astie <teddy.astie@xxxxxxxxxx>
---
 xen/arch/x86/cpu/amd.c    |  7 +++----
 xen/arch/x86/cpu/common.c |  4 ++--
 xen/arch/x86/cpu/cpu.h    |  4 ++--
 xen/arch/x86/cpu/intel.c  | 42 +++++++++++++++++----------------------
 4 files changed, 25 insertions(+), 32 deletions(-)

diff --git a/xen/arch/x86/cpu/amd.c b/xen/arch/x86/cpu/amd.c
index 970cb42e9e0b..36fea2e0a299 100644
--- a/xen/arch/x86/cpu/amd.c
+++ b/xen/arch/x86/cpu/amd.c
@@ -224,7 +224,7 @@ static const typeof(ctxt_switch_masking) __initconst_cf_clobber __used csm =
  * avoid this, as the accidentally-advertised features will not actually
  * function.
  */
-static void __init noinline amd_init_levelling(void)
+static void __init amd_init_levelling(void)
 {
        /*
         * If there's support for CpuidUserDis or CPUID faulting then
@@ -617,10 +617,9 @@ void amd_process_freq(const struct cpuinfo_x86 *c,
                *low_mhz = amd_parse_freq(c->x86, lo);
 }
 
-void cf_check early_init_amd(struct cpuinfo_x86 *c)
+void __init cf_check early_init_amd(void)
 {
-       if (c == &boot_cpu_data)
-               amd_init_levelling();
+    amd_init_levelling();
 }
 
 void amd_log_freq(const struct cpuinfo_x86 *c)
diff --git a/xen/arch/x86/cpu/common.c b/xen/arch/x86/cpu/common.c
index 89b58e6182b9..39e64f3a5f88 100644
--- a/xen/arch/x86/cpu/common.c
+++ b/xen/arch/x86/cpu/common.c
@@ -503,8 +503,8 @@ void identify_cpu(struct cpuinfo_x86 *c)
        if (c->extended_cpuid_level >= 0x80000021)
                c->x86_capability[FEATURESET_e21a] = cpuid_eax(0x80000021);
 
-       if (actual_cpu.c_early_init)
-               alternative_vcall(actual_cpu.c_early_init, c);
+       if (c == &boot_cpu_data && actual_cpu.c_early_init)
+               alternative_vcall(actual_cpu.c_early_init);
 
        /* AMD-defined flags: level 0x80000001 */
        if (c->extended_cpuid_level >= 0x80000001)
diff --git a/xen/arch/x86/cpu/cpu.h b/xen/arch/x86/cpu/cpu.h
index bbede57ab00d..0fc6370edb13 100644
--- a/xen/arch/x86/cpu/cpu.h
+++ b/xen/arch/x86/cpu/cpu.h
@@ -4,7 +4,7 @@
 #define X86_CPU_H
 
 struct cpu_dev {
-       void            (*c_early_init)(struct cpuinfo_x86 *c);
+       void            (*c_early_init)(void);
        void            (*c_init)(struct cpuinfo_x86 * c);
 };
 
@@ -19,7 +19,7 @@ extern void display_cacheinfo(struct cpuinfo_x86 *c);
 extern void detect_ht(struct cpuinfo_x86 *c);
 extern bool detect_extended_topology(struct cpuinfo_x86 *c);
 
-void cf_check early_init_amd(struct cpuinfo_x86 *c);
+void cf_check early_init_amd(void);
 void amd_log_freq(const struct cpuinfo_x86 *c);
 void amd_init_de_cfg(const struct cpuinfo_x86 *c);
 void amd_init_lfence_dispatch(void);
diff --git a/xen/arch/x86/cpu/intel.c b/xen/arch/x86/cpu/intel.c
index 141dc2368143..2aeeb2f5bf55 100644
--- a/xen/arch/x86/cpu/intel.c
+++ b/xen/arch/x86/cpu/intel.c
@@ -251,7 +251,7 @@ static const typeof(ctxt_switch_masking) __initconst_cf_clobber __used csm =
     intel_ctxt_switch_masking;
 #endif
 
-static void __init noinline intel_init_levelling(void)
+static void __init intel_init_levelling(void)
 {
        uint32_t eax, ecx, edx, tmp;
 
@@ -325,29 +325,6 @@ void __init intel_unlock_cpuid_leaves(struct cpuinfo_x86 *c)
        }
 }
 
-static void cf_check early_init_intel(struct cpuinfo_x86 *c)
-{
-       if (c == &boot_cpu_data &&
-           bootsym(trampoline_misc_enable_off) & MSR_IA32_MISC_ENABLE_XD_DISABLE)
-               printk(KERN_INFO "re-enabled NX (Execute Disable) protection\n");
-
-       if (c == &boot_cpu_data) {
-               uint64_t misc_enable;
-
-               check_memory_type_self_snoop_errata();
-
-               /*
-                * If fast string is not enabled in IA32_MISC_ENABLE for any reason,
-                * clear the enhanced fast string CPU capability.
-                */
-               rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
-               if (!(misc_enable & MSR_IA32_MISC_ENABLE_FAST_STRING))
-                       setup_clear_cpu_cap(X86_FEATURE_ERMS);
-
-               intel_init_levelling();
-       }
-}
-
 /*
  * Errata BA80, AAK120, AAM108, AAO67, BD59, AAY54: Rapid Core C3/C6 Transition
  * May Cause Unpredictable System Behavior
@@ -413,6 +390,23 @@ static void __init probe_mwait_errata(void)
     }
 }
 
+static void __init cf_check early_init_intel(void)
+{
+    if ( bootsym(trampoline_misc_enable_off) & MSR_IA32_MISC_ENABLE_XD_DISABLE )
+        printk(KERN_INFO "re-enabled NX (Execute Disable) protection\n");
+
+    check_memory_type_self_snoop_errata();
+
+    /*
+     * If fast string is not enabled in IA32_MISC_ENABLE for any reason,
+     * clear the enhanced fast string CPU capability.
+     */
+    if ( !(rdmsr(MSR_IA32_MISC_ENABLE) & MSR_IA32_MISC_ENABLE_FAST_STRING) )
+        setup_clear_cpu_cap(X86_FEATURE_ERMS);
+
+    intel_init_levelling();
+}
+
 /*
  * P4 Xeon errata 037 workaround.
  * Hardware prefetcher may cause stale data to be loaded into the cache.
-- 
2.39.5
