
[Xen-devel] RE: [PATCH] CPUIDLE: revise tsc-save/restore to avoid big tsc skew between cpus



> I tried extrapolating from t->stime_local_stamp, cpu_khz, and
> t->local_tsc_stamp before I got into the current solution. It would still
> bring accumulating skew, but at a slower rate. I would like to try it
> again with t->tsc_scale instead of cpu_khz. If it works, it would really
> be simpler. Allow me some time.

The patch below should be what you expected. It still shows continuously
increasing TSC skew, and if I pin all domains' vcpus on one pcpu the skew
grows faster.
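
One observation from my side (analysis only, not part of the patch): every
conversion here truncates, so going from ticks to nanoseconds and back, which
is in effect what a deep C-state sleep now does to the TSC, can lose a few
ticks each time. A standalone sketch with made-up numbers (the time_scale
value and the delta are assumptions, and the two helpers just repeat the
shift >= 0 arithmetic of scale_delta()/scale_time()):

#include <stdint.h>
#include <stdio.h>

struct time_scale {
    int shift;
    uint32_t mul_frac;
};

/* TSC ticks -> ns, same math as Xen's scale_delta() for shift >= 0
 * (simplified: the real code keeps a wider intermediate product). */
static uint64_t my_scale_delta(uint64_t delta, const struct time_scale *s)
{
    return ((delta << s->shift) * s->mul_frac) >> 32;
}

/* ns -> TSC ticks, the inverse, same math as the patch's scale_time()
 * for shift >= 0. */
static uint64_t my_scale_time(uint32_t ns, const struct time_scale *s)
{
    return (((uint64_t)ns << 32) / s->mul_frac) >> s->shift;
}

int main(void)
{
    /* Made-up scale for a ~2.4GHz TSC: ns = tsc * mul_frac / 2^32. */
    struct time_scale s = { .shift = 0, .mul_frac = 0x6AAAAAAAu };
    uint64_t tsc_delta = 1000003;                    /* ticks spent idle */
    uint64_t ns = my_scale_delta(tsc_delta, &s);     /* elapsed time, ns */
    uint64_t restored = my_scale_time((uint32_t)ns, &s);

    printf("slept %llu ticks, restored %llu ticks, lost %llu ticks\n",
           (unsigned long long)tsc_delta, (unsigned long long)restored,
           (unsigned long long)(tsc_delta - restored));
    return 0;
}

With these numbers the reconstructed delta comes back 3 ticks short; losses
this small may be one contributor to the slowly growing skew I am seeing.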

diff -r 1b173394f815 xen/arch/x86/acpi/cpu_idle.c
--- a/xen/arch/x86/acpi/cpu_idle.c      Thu Dec 04 16:36:43 2008 +0000
+++ b/xen/arch/x86/acpi/cpu_idle.c      Fri Dec 05 19:06:06 2008 +0800
@@ -317,8 +317,6 @@ static void acpi_processor_idle(void)
          * stopped by H/W. Without carefully handling of TSC/APIC stop issues,
          * deep C state can't work correctly.
          */
-        /* preparing TSC stop */
-        cstate_save_tsc();
         /* preparing APIC stop */
         lapic_timer_off();
 
diff -r 1b173394f815 xen/arch/x86/time.c
--- a/xen/arch/x86/time.c       Thu Dec 04 16:36:43 2008 +0000
+++ b/xen/arch/x86/time.c       Fri Dec 05 19:06:06 2008 +0800
@@ -48,11 +48,9 @@ struct time_scale {
 
 struct cpu_time {
     u64 local_tsc_stamp;
-    u64 cstate_tsc_stamp;
     s_time_t stime_local_stamp;
     s_time_t stime_master_stamp;
     struct time_scale tsc_scale;
-    u64 cstate_plt_count_stamp;
 };
 
 struct platform_timesource {
@@ -149,6 +147,32 @@ static inline u64 scale_delta(u64 delta,
 #endif
 
     return product;
+}
+
+/*
+ * Scale a 32-bit time delta by dividing by a 32-bit fraction and shifting,
+ * yielding a 64-bit result (the inverse of scale_delta()).
+ */
+static inline u64 scale_time(u32 time_delta, struct time_scale *scale)
+{
+    u64 td64 = time_delta;
+    u64 quotient, remainder;
+
+    td64 <<= 32;
+
+    quotient = td64 / scale->mul_frac;
+    remainder = td64 % scale->mul_frac;
+
+    if ( scale->shift < 0 )
+    {
+        quotient <<= -scale->shift;
+        remainder <<= -scale->shift;
+        quotient += remainder / scale->mul_frac;
+    }
+    else
+        quotient >>= scale->shift;
+
+    return quotient;
 }
 
 /*
@@ -644,29 +668,19 @@ static void init_platform_timer(void)
            freq_string(pts->frequency), pts->name);
 }
 
-void cstate_save_tsc(void)
+void cstate_restore_tsc(void)
 {
     struct cpu_time *t = &this_cpu(cpu_time);
+    u64 tsc_delta;
+    s_time_t stime_delta;
 
     if ( tsc_invariant )
         return;
 
-    t->cstate_plt_count_stamp = plt_src.read_counter();
-    rdtscll(t->cstate_tsc_stamp);
-}
-
-void cstate_restore_tsc(void)
-{
-    struct cpu_time *t = &this_cpu(cpu_time);
-    u64 plt_count_delta, tsc_delta;
-
-    if ( tsc_invariant )
-        return;
-
-    plt_count_delta = (plt_src.read_counter() -
-                       t->cstate_plt_count_stamp) & plt_mask;
-    tsc_delta = scale_delta(plt_count_delta, &plt_scale) * cpu_khz/1000000UL;
-    wrmsrl(MSR_IA32_TSC, t->cstate_tsc_stamp + tsc_delta);
+    stime_delta = read_platform_stime() - t->stime_local_stamp;
+    ASSERT(stime_delta < 0x100000000UL);
+    tsc_delta = scale_time((u32)stime_delta, &t->tsc_scale);
+    wrmsrl(MSR_IA32_TSC, t->local_tsc_stamp + tsc_delta);
 }
 
 /***************************************************************************
diff -r 1b173394f815 xen/include/xen/time.h
--- a/xen/include/xen/time.h    Thu Dec 04 16:36:43 2008 +0000
+++ b/xen/include/xen/time.h    Fri Dec 05 19:06:06 2008 +0800
@@ -13,7 +13,6 @@
 #include <asm/time.h>
 
 extern int init_xen_time(void);
-extern void cstate_save_tsc(void);
 extern void cstate_restore_tsc(void);
 
 extern unsigned long cpu_khz;
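
To make the new restore path concrete, here is a standalone worked example
with made-up stamps (the numbers and the my_scale_time() helper, which just
repeats the patch's shift >= 0 arithmetic, are assumptions, not real
calibration data):

#include <stdint.h>
#include <stdio.h>

struct time_scale {
    int shift;
    uint32_t mul_frac;
};

/* ns -> TSC ticks, same math as the patch's scale_time() (shift >= 0 case). */
static uint64_t my_scale_time(uint32_t ns, const struct time_scale *s)
{
    return (((uint64_t)ns << 32) / s->mul_frac) >> s->shift;
}

int main(void)
{
    /* Made-up per-CPU calibration state for a ~2.4GHz TSC. */
    struct time_scale tsc_scale = { .shift = 0, .mul_frac = 0x6AAAAAAAu };
    uint64_t local_tsc_stamp   = 5000000000ULL; /* TSC at last calibration   */
    uint64_t stime_local_stamp = 7000000000ULL; /* stime (ns) at calibration */
    uint64_t now               = 7000416667ULL; /* platform stime at wakeup  */

    uint64_t stime_delta = now - stime_local_stamp;          /* ~416.7us */
    uint64_t new_tsc = local_tsc_stamp +
                       my_scale_time((uint32_t)stime_delta, &tsc_scale);

    printf("write TSC = %llu (%llu ticks past the stamp)\n",
           (unsigned long long)new_tsc,
           (unsigned long long)(new_tsc - local_tsc_stamp));
    return 0;
}

The ASSERT in the patch requires stime_delta to stay below 2^32 ns, i.e.
roughly 4.29 seconds since the last local calibration stamp.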

Jimmy
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel


 

