
[Xen-devel] [PATCH] trust new architecturally-defined TSC Invariant bit on Intel systems



Trust the new architecturally-defined TSC Invariant bit (on
Intel systems only for now; AMD is TBD).  When the bit is present and
not overridden by the new "tsc_broken" boot option, boot-time TSC
synchronization and the TSC-sync loop in the calibration rendezvous
are skipped.

Signed-off-by: Dan Magenheimer <dan.magenheimer@xxxxxxxxxx>
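
For context (not part of the patch): the architecturally-defined
Invariant TSC bit is advertised in CPUID leaf 0x80000007, EDX bit 8,
which is presumably what X86_FEATURE_TSC_RELIABLE reflects here.  A
minimal user-space sketch of probing that bit directly, assuming a
GCC/Clang toolchain that provides <cpuid.h>, might look like:

    #include <stdio.h>
    #include <cpuid.h>   /* __get_cpuid() builtin wrapper */

    static int cpu_has_invariant_tsc(void)
    {
        unsigned int eax, ebx, ecx, edx;

        /* Make sure extended leaf 0x80000007 exists before querying it. */
        if (!__get_cpuid(0x80000000, &eax, &ebx, &ecx, &edx) ||
            eax < 0x80000007)
            return 0;

        __get_cpuid(0x80000007, &eax, &ebx, &ecx, &edx);
        return (edx >> 8) & 1;   /* EDX bit 8: Invariant TSC */
    }

    int main(void)
    {
        printf("Invariant TSC: %s\n",
               cpu_has_invariant_tsc() ? "yes" : "no");
        return 0;
    }

The hypervisor-side check below goes through the cached feature flag
rather than issuing CPUID on every call.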

diff -r 1d7221667204 xen/arch/x86/smpboot.c
--- a/xen/arch/x86/smpboot.c    Thu Oct 08 09:24:32 2009 +0100
+++ b/xen/arch/x86/smpboot.c    Thu Oct 08 11:48:15 2009 -0600
@@ -187,6 +187,11 @@ static void __init synchronize_tsc_bp (v
        unsigned int one_usec;
        int buggy = 0;
 
+       if (tsc_is_reliable()) {
+               printk("TSC is reliable, synchronization unnecessary\n");
+               return;
+       }
+
        printk("checking TSC synchronization across %u CPUs: ", 
num_booting_cpus());
 
        /* convert from kcyc/sec to cyc/usec */
@@ -278,6 +283,9 @@ static void __init synchronize_tsc_ap (v
 static void __init synchronize_tsc_ap (void)
 {
        int i;
+
+       if (tsc_is_reliable())
+               return;
 
        /*
         * Not every cpu is online at the time
diff -r 1d7221667204 xen/arch/x86/time.c
--- a/xen/arch/x86/time.c       Thu Oct 08 09:24:32 2009 +0100
+++ b/xen/arch/x86/time.c       Thu Oct 08 11:48:15 2009 -0600
@@ -43,6 +43,12 @@ string_param("clocksource", opt_clocksou
  */
 static int opt_consistent_tscs;
 boolean_param("consistent_tscs", opt_consistent_tscs);
+
+/*
+ * opt_tsc_broken: Override all tests and force TSC to be assumed unreliable
+ */
+static int opt_tsc_broken;
+boolean_param("tsc_broken", opt_tsc_broken);
 
 unsigned long cpu_khz;  /* CPU clock frequency in kHz. */
 DEFINE_SPINLOCK(rtc_lock);
@@ -692,6 +698,11 @@ static void __init init_platform_timer(v
            freq_string(pts->frequency), pts->name);
 }
 
+int tsc_is_reliable(void)
+{
+    return boot_cpu_has(X86_FEATURE_TSC_RELIABLE) && !opt_tsc_broken;
+}
+
 void cstate_restore_tsc(void)
 {
     struct cpu_time *t = &this_cpu(cpu_time);
@@ -699,7 +710,7 @@ void cstate_restore_tsc(void)
     s_time_t stime_delta;
     u64 new_tsc;
 
-    if ( boot_cpu_has(X86_FEATURE_NONSTOP_TSC) )
+    if ( boot_cpu_has(X86_FEATURE_NONSTOP_TSC) && !opt_tsc_broken )
         return;
 
     stime_delta = read_platform_stime() - t->stime_master_stamp;
@@ -1117,6 +1128,9 @@ static void time_calibration_tsc_rendezv
     struct calibration_rendezvous *r = _r;
     unsigned int total_cpus = cpus_weight(r->cpu_calibration_map);
 
+    if ( tsc_is_reliable() )
+        goto skip_tsc_sync;
+
     /* Loop to get rid of cache effects on TSC skew. */
     for ( i = 4; i >= 0; i-- )
     {
@@ -1153,6 +1167,8 @@ static void time_calibration_tsc_rendezv
                 mb();
         }
     }
+
+skip_tsc_sync:
 
     rdtscll(c->local_tsc_stamp);
     c->stime_local_stamp = get_s_time();
diff -r 1d7221667204 xen/include/asm-x86/time.h
--- a/xen/include/asm-x86/time.h        Thu Oct 08 09:24:32 2009 +0100
+++ b/xen/include/asm-x86/time.h        Thu Oct 08 11:48:15 2009 -0600
@@ -43,4 +43,6 @@ uint64_t ns_to_acpi_pm_tick(uint64_t ns)
 
 void pv_soft_rdtsc(struct vcpu *v, struct cpu_user_regs *regs);
 
+int tsc_is_reliable(void);
+
 #endif /* __X86_TIME_H__ */
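
For reference, the new override is a plain Xen boolean parameter, so
(assuming the usual boolean_param() command-line handling) it can be
forced on by appending it to the hypervisor line of the bootloader
entry, e.g. with GRUB legacy:

    kernel /boot/xen.gz <other xen options> tsc_broken

With that option present, tsc_is_reliable() returns 0 and the existing
TSC synchronization paths are exercised exactly as before the patch.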

Attachment: tsc-reliab3.patch
Description: Binary data
