[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[PATCH v3 29/41] x86/paravirt: Plumb a return code into __paravirt_set_sched_clock()



Add a return code to __paravirt_set_sched_clock() so that the kernel can
reject attempts to use a PV sched_clock without breaking the caller.  E.g.
when running as a CoCo VM with a secure TSC, using a PV clock is generally
undesirable.

Note, kvmclock is the only PV clock that does anything "extra" beyond
simply registering itself as sched_clock, i.e. is the only caller that
needs to check the new return value.

Signed-off-by: Sean Christopherson <seanjc@xxxxxxxxxx>
---
 arch/x86/include/asm/timer.h | 6 +++---
 arch/x86/kernel/kvmclock.c   | 8 +++++---
 arch/x86/kernel/tsc.c        | 5 +++--
 3 files changed, 11 insertions(+), 8 deletions(-)

diff --git a/arch/x86/include/asm/timer.h b/arch/x86/include/asm/timer.h
index 96ae7feac47c..ca5c95d48c03 100644
--- a/arch/x86/include/asm/timer.h
+++ b/arch/x86/include/asm/timer.h
@@ -14,14 +14,14 @@ extern int no_timer_check;
 extern bool using_native_sched_clock(void);
 
 #ifdef CONFIG_PARAVIRT
-void __init __paravirt_set_sched_clock(u64 (*func)(void), bool stable,
-                                      void (*save)(void), void (*restore)(void));
+int __init __paravirt_set_sched_clock(u64 (*func)(void), bool stable,
+                                     void (*save)(void), void (*restore)(void));
 
 static __always_inline void paravirt_set_sched_clock(u64 (*func)(void),
                                                     void (*save)(void),
                                                     void (*restore)(void))
 {
-       __paravirt_set_sched_clock(func, true, save, restore);
+       (void)__paravirt_set_sched_clock(func, true, save, restore);
 }
 #endif
 
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
index d3bb281c0805..9b3d1ed1a96d 100644
--- a/arch/x86/kernel/kvmclock.c
+++ b/arch/x86/kernel/kvmclock.c
@@ -338,10 +338,12 @@ static int kvmclock_setup_percpu(unsigned int cpu)
 
 static __init void kvm_sched_clock_init(bool stable)
 {
+       if (__paravirt_set_sched_clock(kvm_sched_clock_read, stable,
+                                      kvm_save_sched_clock_state,
+                                      kvm_restore_sched_clock_state))
+               return;
+
        kvm_sched_clock_offset = kvm_clock_read();
-       __paravirt_set_sched_clock(kvm_sched_clock_read, stable,
-                                  kvm_save_sched_clock_state,
-                                  kvm_restore_sched_clock_state);
        kvmclock_is_sched_clock = true;
 
        /*
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 4a48b8ba5bea..3c15fc10e501 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -280,8 +280,8 @@ bool using_native_sched_clock(void)
        return static_call_query(pv_sched_clock) == native_sched_clock;
 }
 
-void __init __paravirt_set_sched_clock(u64 (*func)(void), bool stable,
-                                      void (*save)(void), void (*restore)(void))
+int __init __paravirt_set_sched_clock(u64 (*func)(void), bool stable,
+                                     void (*save)(void), void (*restore)(void))
 {
        if (!stable)
                clear_sched_clock_stable();
@@ -289,6 +289,7 @@ void __init __paravirt_set_sched_clock(u64 (*func)(void), bool stable,
        static_call_update(pv_sched_clock, func);
        x86_platform.save_sched_clock_state = save;
        x86_platform.restore_sched_clock_state = restore;
+       return 0;
 }
 #else
 u64 sched_clock_noinstr(void) __attribute__((alias("native_sched_clock")));
-- 
2.54.0.563.g4f69b47b94-goog




 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.