|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [PATCH v3 38/41] x86/paravirt: kvmclock: Setup kvmclock early iff it's sched_clock
Rework the seemingly generic x86_cpuinit_ops.early_percpu_clock_init hook
into a dedicated PV sched_clock hook, as the only reason the hook exists
is to allow kvmclock to enable its PV clock on secondary CPUs before the
kernel tries to reference sched_clock, e.g. when grabbing a timestamp for
printk.
Rearranging the hook doesn't exactly reduce complexity; arguably it does
the opposite. But as-is, it's practically impossible to understand *why*
kvmclock needs to do early configuration.
Signed-off-by: Sean Christopherson <seanjc@xxxxxxxxxx>
---
arch/x86/include/asm/timer.h | 8 ++++++--
arch/x86/include/asm/x86_init.h | 2 --
arch/x86/kernel/kvmclock.c | 13 ++++++-------
arch/x86/kernel/smpboot.c | 3 ++-
arch/x86/kernel/tsc.c | 16 +++++++++++++++-
arch/x86/kernel/x86_init.c | 1 -
6 files changed, 29 insertions(+), 14 deletions(-)
diff --git a/arch/x86/include/asm/timer.h b/arch/x86/include/asm/timer.h
index ca5c95d48c03..ab1271bd9c3b 100644
--- a/arch/x86/include/asm/timer.h
+++ b/arch/x86/include/asm/timer.h
@@ -15,14 +15,18 @@ extern bool using_native_sched_clock(void);
#ifdef CONFIG_PARAVIRT
int __init __paravirt_set_sched_clock(u64 (*func)(void), bool stable,
- void (*save)(void), void (*restore)(void));
+ void (*save)(void), void (*restore)(void),
+ void (*start_secondary)(void));
static __always_inline void paravirt_set_sched_clock(u64 (*func)(void),
void (*save)(void),
void (*restore)(void))
{
- (void)__paravirt_set_sched_clock(func, true, save, restore);
+ (void)__paravirt_set_sched_clock(func, true, save, restore, NULL);
}
+void paravirt_sched_clock_start_secondary(void);
+#else
+static inline void paravirt_sched_clock_start_secondary(void) { }
#endif
/*
diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
index 6c8a6ead84f6..d1b3f18ea41f 100644
--- a/arch/x86/include/asm/x86_init.h
+++ b/arch/x86/include/asm/x86_init.h
@@ -187,13 +187,11 @@ struct x86_init_ops {
/**
* struct x86_cpuinit_ops - platform specific cpu hotplug setups
* @setup_percpu_clockev: set up the per cpu clock event device
- * @early_percpu_clock_init: early init of the per cpu clock event device
* @fixup_cpu_id: fixup function for cpuinfo_x86::topo.pkg_id
* @parallel_bringup: Parallel bringup control
*/
struct x86_cpuinit_ops {
void (*setup_percpu_clockev)(void);
- void (*early_percpu_clock_init)(void);
void (*fixup_cpu_id)(struct cpuinfo_x86 *c, int node);
bool parallel_bringup;
};
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
index 0578bc448b1b..62c8ea2e6769 100644
--- a/arch/x86/kernel/kvmclock.c
+++ b/arch/x86/kernel/kvmclock.c
@@ -127,12 +127,13 @@ static void kvm_save_sched_clock_state(void)
kvmclock_disable();
}
-#ifdef CONFIG_SMP
-static void kvm_setup_secondary_clock(void)
+static void kvm_setup_secondary_sched_clock(void)
{
+ if (WARN_ON_ONCE(!IS_ENABLED(CONFIG_SMP)))
+ return;
+
kvm_register_clock("secondary cpu, sched_clock setup");
}
-#endif
static void kvm_restore_sched_clock_state(void)
{
@@ -352,7 +353,8 @@ static __init void kvm_sched_clock_init(bool stable)
{
if (__paravirt_set_sched_clock(kvm_sched_clock_read, stable,
kvm_save_sched_clock_state,
- kvm_restore_sched_clock_state))
+ kvm_restore_sched_clock_state,
+ kvm_setup_secondary_sched_clock))
return;
kvm_sched_clock_offset = kvm_clock_read();
@@ -437,9 +439,6 @@ void __init kvmclock_init(void)
x86_platform.get_wallclock = kvm_get_wallclock;
x86_platform.set_wallclock = kvm_set_wallclock;
-#ifdef CONFIG_SMP
- x86_cpuinit.early_percpu_clock_init = kvm_setup_secondary_clock;
-#endif
kvm_get_preset_lpj();
clocksource_register_hz(&kvm_clock, NSEC_PER_SEC);
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 294a8ea60298..318ae70e5e7b 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -78,6 +78,7 @@
#include <asm/io_apic.h>
#include <asm/fpu/api.h>
#include <asm/setup.h>
+#include <asm/timer.h>
#include <asm/uv/uv.h>
#include <asm/microcode.h>
#include <asm/i8259.h>
@@ -275,7 +276,7 @@ static void notrace __noendbr start_secondary(void *unused)
cpu_init();
fpu__init_cpu();
rcutree_report_cpu_starting(raw_smp_processor_id());
- x86_cpuinit.early_percpu_clock_init();
+ paravirt_sched_clock_start_secondary();
ap_starting();
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 7a261214fa3e..f78e86494dec 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -280,8 +280,19 @@ bool using_native_sched_clock(void)
return static_call_query(pv_sched_clock) == native_sched_clock;
}
+#ifdef CONFIG_SMP
+static void (*pv_sched_clock_start_secondary)(void) __ro_after_init;
+
+void paravirt_sched_clock_start_secondary(void)
+{
+ if (pv_sched_clock_start_secondary)
+ pv_sched_clock_start_secondary();
+}
+#endif
+
int __init __paravirt_set_sched_clock(u64 (*func)(void), bool stable,
- void (*save)(void), void (*restore)(void))
+ void (*save)(void), void (*restore)(void),
+ void (*start_secondary)(void))
{
/*
* Don't replace TSC with a PV clock when running as a CoCo guest and
@@ -298,6 +309,9 @@ int __init __paravirt_set_sched_clock(u64 (*func)(void), bool stable,
static_call_update(pv_sched_clock, func);
x86_platform.save_sched_clock_state = save;
x86_platform.restore_sched_clock_state = restore;
+#ifdef CONFIG_SMP
+ pv_sched_clock_start_secondary = start_secondary;
+#endif
return 0;
}
#else
diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
index ebefb77c37bb..cbb5ee613ed5 100644
--- a/arch/x86/kernel/x86_init.c
+++ b/arch/x86/kernel/x86_init.c
@@ -128,7 +128,6 @@ struct x86_init_ops x86_init __initdata = {
};
struct x86_cpuinit_ops x86_cpuinit = {
- .early_percpu_clock_init = x86_init_noop,
.setup_percpu_clockev = setup_secondary_APIC_clock,
.parallel_bringup = true,
};
--
2.54.0.563.g4f69b47b94-goog
|
![]() |
Lists.xenproject.org is hosted with RackSpace, monitoring our |