[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[PATCH v3 18/41] x86/paravirt: Pass sched_clock save/restore helpers during registration



Pass in a PV clock's save/restore helpers when configuring sched_clock
instead of relying on each PV clock to manually set the save/restore hooks.
In addition to bringing sanity to the code, this will allow gracefully
"rejecting" a PV sched_clock, e.g. when running as a CoCo guest that has
access to a "secure" TSC.

No functional change intended.

Signed-off-by: Sean Christopherson <seanjc@xxxxxxxxxx>
---
 arch/x86/include/asm/timer.h       | 9 ++++++---
 arch/x86/kernel/cpu/vmware.c       | 7 ++-----
 arch/x86/kernel/kvmclock.c         | 6 +++---
 arch/x86/kernel/tsc.c              | 5 ++++-
 arch/x86/xen/time.c                | 5 ++---
 drivers/clocksource/hyperv_timer.c | 6 ++----
 6 files changed, 19 insertions(+), 19 deletions(-)

diff --git a/arch/x86/include/asm/timer.h b/arch/x86/include/asm/timer.h
index fe41d40a9ae6..e97cd1ae03d1 100644
--- a/arch/x86/include/asm/timer.h
+++ b/arch/x86/include/asm/timer.h
@@ -14,11 +14,14 @@ extern int no_timer_check;
 extern bool using_native_sched_clock(void);
 
 #ifdef CONFIG_PARAVIRT
-void __paravirt_set_sched_clock(u64 (*func)(void), bool stable);
+void __paravirt_set_sched_clock(u64 (*func)(void), bool stable,
+                               void (*save)(void), void (*restore)(void));
 
-static inline void paravirt_set_sched_clock(u64 (*func)(void))
+static inline void paravirt_set_sched_clock(u64 (*func)(void),
+                                           void (*save)(void),
+                                           void (*restore)(void))
 {
-       __paravirt_set_sched_clock(func, true);
+       __paravirt_set_sched_clock(func, true, save, restore);
 }
 #endif
 
diff --git a/arch/x86/kernel/cpu/vmware.c b/arch/x86/kernel/cpu/vmware.c
index b5cb66ca022b..968de002975f 100644
--- a/arch/x86/kernel/cpu/vmware.c
+++ b/arch/x86/kernel/cpu/vmware.c
@@ -347,11 +347,8 @@ static void __init vmware_paravirt_ops_setup(void)
 
        vmware_cyc2ns_setup();
 
-       if (vmw_sched_clock) {
-               paravirt_set_sched_clock(vmware_sched_clock);
-               x86_platform.save_sched_clock_state = NULL;
-               x86_platform.restore_sched_clock_state = NULL;
-       }
+       if (vmw_sched_clock)
+               paravirt_set_sched_clock(vmware_sched_clock, NULL, NULL);
 
        if (vmware_is_stealclock_available()) {
                has_steal_clock = true;
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
index 800c3d65f0af..962b6fcb5c60 100644
--- a/arch/x86/kernel/kvmclock.c
+++ b/arch/x86/kernel/kvmclock.c
@@ -137,7 +137,9 @@ static void kvm_restore_sched_clock_state(void)
 static inline void kvm_sched_clock_init(bool stable)
 {
        kvm_sched_clock_offset = kvm_clock_read();
-       __paravirt_set_sched_clock(kvm_sched_clock_read, stable);
+       __paravirt_set_sched_clock(kvm_sched_clock_read, stable,
+                                  kvm_save_sched_clock_state,
+                                  kvm_restore_sched_clock_state);
 
        pr_info("kvm-clock: using sched offset of %llu cycles",
                kvm_sched_clock_offset);
@@ -344,8 +346,6 @@ void __init kvmclock_init(void)
 #ifdef CONFIG_SMP
        x86_cpuinit.early_percpu_clock_init = kvm_setup_secondary_clock;
 #endif
-       x86_platform.save_sched_clock_state = kvm_save_sched_clock_state;
-       x86_platform.restore_sched_clock_state = kvm_restore_sched_clock_state;
        kvm_get_preset_lpj();
 
        /*
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index b14c4ada89a3..0114c63dfdd9 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -280,12 +280,15 @@ bool using_native_sched_clock(void)
        return static_call_query(pv_sched_clock) == native_sched_clock;
 }
 
-void __paravirt_set_sched_clock(u64 (*func)(void), bool stable)
+void __paravirt_set_sched_clock(u64 (*func)(void), bool stable,
+                               void (*save)(void), void (*restore)(void))
 {
        if (!stable)
                clear_sched_clock_stable();
 
        static_call_update(pv_sched_clock, func);
+       x86_platform.save_sched_clock_state = save;
+       x86_platform.restore_sched_clock_state = restore;
 }
 #else
 u64 sched_clock_noinstr(void) __attribute__((alias("native_sched_clock")));
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
index 21d366d01985..ee7095febfd1 100644
--- a/arch/x86/xen/time.c
+++ b/arch/x86/xen/time.c
@@ -567,13 +567,12 @@ static void __init xen_init_time_common(void)
 {
        xen_sched_clock_offset = xen_clocksource_read();
        static_call_update(pv_steal_clock, xen_steal_clock);
-       paravirt_set_sched_clock(xen_sched_clock);
+
        /*
         * Xen has paravirtualized suspend/resume and so doesn't use the common
         * x86 sched_clock save/restore hooks.
         */
-       x86_platform.save_sched_clock_state = NULL;
-       x86_platform.restore_sched_clock_state = NULL;
+       paravirt_set_sched_clock(xen_sched_clock, NULL, NULL);
 
        tsc_register_calibration_routines(xen_tsc_khz, NULL);
        x86_platform.get_wallclock = xen_get_wallclock;
diff --git a/drivers/clocksource/hyperv_timer.c b/drivers/clocksource/hyperv_timer.c
index ac1d9f9c381c..dee59ce61c29 100644
--- a/drivers/clocksource/hyperv_timer.c
+++ b/drivers/clocksource/hyperv_timer.c
@@ -553,10 +553,8 @@ static void hv_restore_sched_clock_state(void)
 static __always_inline void hv_setup_sched_clock(void *sched_clock)
 {
        /* We're on x86/x64 *and* using PV ops */
-       paravirt_set_sched_clock(sched_clock);
-
-       x86_platform.save_sched_clock_state = hv_save_sched_clock_state;
-       x86_platform.restore_sched_clock_state = hv_restore_sched_clock_state;
+       paravirt_set_sched_clock(sched_clock, hv_save_sched_clock_state,
+                                hv_restore_sched_clock_state);
 }
 #else /* !CONFIG_GENERIC_SCHED_CLOCK && !CONFIG_PARAVIRT */
 static __always_inline void hv_setup_sched_clock(void *sched_clock) {}
-- 
2.54.0.563.g4f69b47b94-goog




 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.