To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] Revert 21339:804304d4e05d "x86: TSC handling cleanups"
From: Xen patchbot-unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Fri, 21 May 2010 08:25:17 -0700
Delivery-date: Fri, 21 May 2010 08:27:33 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1274455299 -3600
# Node ID d0420ab973455eff500fef63243b0a49f5640527
# Parent  3480444bdf376979f3e566cf8cc400de8fe6b791
Revert 21339:804304d4e05d "x86: TSC handling cleanups"

It very much breaks PV domU boot.

Signed-off-by: Keir Fraser <keir.fraser@xxxxxxxxxx>
---
 xen/arch/x86/hvm/hvm.c           |   30 ++++++++++++++++++++++++++++--
 xen/arch/x86/hvm/save.c          |    6 +++---
 xen/arch/x86/hvm/vpt.c           |    3 +++
 xen/arch/x86/time.c              |   31 ++++++++-----------------------
 xen/common/kernel.c              |    2 --
 xen/include/asm-x86/hvm/domain.h |    2 ++
 xen/include/asm-x86/hvm/hvm.h    |    1 +
 xen/include/asm-x86/time.h       |    1 -
 xen/include/public/features.h    |    3 ---
 9 files changed, 45 insertions(+), 34 deletions(-)

diff -r 3480444bdf37 -r d0420ab97345 xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c    Fri May 21 15:25:10 2010 +0100
+++ b/xen/arch/x86/hvm/hvm.c    Fri May 21 16:21:39 2010 +0100
@@ -205,6 +205,32 @@ void hvm_set_rdtsc_exiting(struct domain
         hvm_funcs.set_rdtsc_exiting(v, enable);
 }
 
+int hvm_gtsc_need_scale(struct domain *d)
+{
+    uint32_t gtsc_mhz, htsc_mhz;
+
+    if ( d->arch.vtsc )
+        return 0;
+
+    gtsc_mhz = d->arch.hvm_domain.gtsc_khz / 1000;
+    htsc_mhz = (uint32_t)cpu_khz / 1000;
+
+    d->arch.hvm_domain.tsc_scaled = (gtsc_mhz && (gtsc_mhz != htsc_mhz));
+    return d->arch.hvm_domain.tsc_scaled;
+}
+
+static u64 hvm_h2g_scale_tsc(struct vcpu *v, u64 host_tsc)
+{
+    uint32_t gtsc_khz, htsc_khz;
+
+    if ( !v->domain->arch.hvm_domain.tsc_scaled )
+        return host_tsc;
+
+    htsc_khz = cpu_khz;
+    gtsc_khz = v->domain->arch.hvm_domain.gtsc_khz;
+    return muldiv64(host_tsc, gtsc_khz, htsc_khz);
+}
+
 void hvm_set_guest_tsc(struct vcpu *v, u64 guest_tsc)
 {
     uint64_t tsc;
@@ -212,11 +238,11 @@ void hvm_set_guest_tsc(struct vcpu *v, u
     if ( v->domain->arch.vtsc )
     {
         tsc = hvm_get_guest_time(v);
-        tsc = gtime_to_gtsc(v->domain, tsc);
     }
     else
     {
         rdtscll(tsc);
+        tsc = hvm_h2g_scale_tsc(v, tsc);
     }
 
     v->arch.hvm_vcpu.cache_tsc_offset = guest_tsc - tsc;
@@ -230,12 +256,12 @@ u64 hvm_get_guest_tsc(struct vcpu *v)
     if ( v->domain->arch.vtsc )
     {
         tsc = hvm_get_guest_time(v);
-        tsc = gtime_to_gtsc(v->domain, tsc);
         v->domain->arch.vtsc_kerncount++;
     }
     else
     {
         rdtscll(tsc);
+        tsc = hvm_h2g_scale_tsc(v, tsc);
     }
 
     return tsc + v->arch.hvm_vcpu.cache_tsc_offset;
diff -r 3480444bdf37 -r d0420ab97345 xen/arch/x86/hvm/save.c
--- a/xen/arch/x86/hvm/save.c   Fri May 21 15:25:10 2010 +0100
+++ b/xen/arch/x86/hvm/save.c   Fri May 21 16:21:39 2010 +0100
@@ -33,7 +33,7 @@ void arch_hvm_save(struct domain *d, str
     hdr->cpuid = eax;
 
     /* Save guest's preferred TSC. */
-    hdr->gtsc_khz = d->arch.tsc_khz;
+    hdr->gtsc_khz = d->arch.hvm_domain.gtsc_khz;
 }
 
 int arch_hvm_load(struct domain *d, struct hvm_save_header *hdr)
@@ -62,8 +62,8 @@ int arch_hvm_load(struct domain *d, stru
 
     /* Restore guest's preferred TSC frequency. */
     if ( hdr->gtsc_khz )
-        d->arch.tsc_khz = hdr->gtsc_khz;
-    if ( d->arch.vtsc )
+        d->arch.hvm_domain.gtsc_khz = hdr->gtsc_khz;
+    if ( hvm_gtsc_need_scale(d) )
     {
         hvm_set_rdtsc_exiting(d, 1);
         gdprintk(XENLOG_WARNING, "Domain %d expects freq %uMHz "
diff -r 3480444bdf37 -r d0420ab97345 xen/arch/x86/hvm/vpt.c
--- a/xen/arch/x86/hvm/vpt.c    Fri May 21 15:25:10 2010 +0100
+++ b/xen/arch/x86/hvm/vpt.c    Fri May 21 16:21:39 2010 +0100
@@ -32,6 +32,9 @@ void hvm_init_guest_time(struct domain *
     spin_lock_init(&pl->pl_time_lock);
     pl->stime_offset = -(u64)get_s_time();
     pl->last_guest_time = 0;
+
+    d->arch.hvm_domain.gtsc_khz = cpu_khz;
+    d->arch.hvm_domain.tsc_scaled = 0;
 }
 
 u64 hvm_get_guest_time(struct vcpu *v)
diff -r 3480444bdf37 -r d0420ab97345 xen/arch/x86/time.c
--- a/xen/arch/x86/time.c       Fri May 21 15:25:10 2010 +0100
+++ b/xen/arch/x86/time.c       Fri May 21 16:21:39 2010 +0100
@@ -804,13 +804,8 @@ static void __update_vcpu_system_time(st
 
     if ( d->arch.vtsc )
     {
-        u64 stime = t->stime_local_stamp;
-        if ( is_hvm_domain(d) )
-        {
-            struct pl_time *pl = &v->domain->arch.hvm_domain.pl_time;
-            stime += pl->stime_offset + v->arch.hvm_vcpu.stime_offset;
-        }
-        tsc_stamp = gtime_to_gtsc(d, stime);
+        u64 delta = max_t(s64, t->stime_local_stamp - d->arch.vtsc_offset, 0);
+        tsc_stamp = scale_delta(delta, &d->arch.ns_to_vtsc);
     }
     else
     {
@@ -833,8 +828,6 @@ static void __update_vcpu_system_time(st
         _u.tsc_to_system_mul = t->tsc_scale.mul_frac;
         _u.tsc_shift         = (s8)t->tsc_scale.shift;
     }
-    if ( is_hvm_domain(d) )
-        _u.tsc_timestamp += v->arch.hvm_vcpu.cache_tsc_offset;
 
     /* Don't bother unless timestamp record has changed or we are forced. */
     _u.version = u->version; /* make versions match for memcmp test */
@@ -1598,18 +1591,11 @@ struct tm wallclock_time(void)
  * PV SoftTSC Emulation.
  */
 
-u64 gtime_to_gtsc(struct domain *d, u64 tsc)
-{
-    if ( !is_hvm_domain(d) )
-        tsc = max_t(s64, tsc - d->arch.vtsc_offset, 0);
-    return scale_delta(tsc, &d->arch.ns_to_vtsc);
-}
-
 void pv_soft_rdtsc(struct vcpu *v, struct cpu_user_regs *regs, int rdtscp)
 {
     s_time_t now = get_s_time();
     struct domain *d = v->domain;
-    u64 tsc;
+    u64 delta;
 
     spin_lock(&d->arch.vtsc_lock);
 
@@ -1625,7 +1611,8 @@ void pv_soft_rdtsc(struct vcpu *v, struc
 
     spin_unlock(&d->arch.vtsc_lock);
 
-    tsc = gtime_to_gtsc(d, now);
+    delta = max_t(s64, now - d->arch.vtsc_offset, 0);
+    now = scale_delta(delta, &d->arch.ns_to_vtsc);
 
     regs->eax = (uint32_t)now;
     regs->edx = (uint32_t)(now >> 32);
@@ -1766,10 +1753,8 @@ void tsc_set_info(struct domain *d,
         d->arch.vtsc_offset = get_s_time() - elapsed_nsec;
         d->arch.tsc_khz = gtsc_khz ? gtsc_khz : cpu_khz;
         set_time_scale(&d->arch.vtsc_to_ns, d->arch.tsc_khz * 1000 );
-        /* use native TSC if initial host has safe TSC, has not migrated
-         * yet and tsc_khz == cpu_khz */
-        if ( host_tsc_is_safe() && incarnation == 0 &&
-                d->arch.tsc_khz == cpu_khz )
+        /* use native TSC if initial host has safe TSC and not migrated yet */
+        if ( host_tsc_is_safe() && incarnation == 0 )
             d->arch.vtsc = 0;
         else 
             d->arch.ns_to_vtsc = scale_reciprocal(d->arch.vtsc_to_ns);
@@ -1794,7 +1779,7 @@ void tsc_set_info(struct domain *d,
     }
     d->arch.incarnation = incarnation + 1;
     if ( is_hvm_domain(d) )
-        hvm_set_rdtsc_exiting(d, d->arch.vtsc);
+        hvm_set_rdtsc_exiting(d, d->arch.vtsc || hvm_gtsc_need_scale(d));
 }
 
 /* vtsc may incur measurable performance degradation, diagnose with this */
diff -r 3480444bdf37 -r d0420ab97345 xen/common/kernel.c
--- a/xen/common/kernel.c       Fri May 21 15:25:10 2010 +0100
+++ b/xen/common/kernel.c       Fri May 21 16:21:39 2010 +0100
@@ -259,8 +259,6 @@ DO(xen_version)(int cmd, XEN_GUEST_HANDL
                 fi.submap |= (1U << XENFEAT_mmu_pt_update_preserve_ad) |
                              (1U << XENFEAT_highmem_assist) |
                              (1U << XENFEAT_gnttab_map_avail_bits);
-            else
-                fi.submap |= (1U << XENFEAT_hvm_safe_pvclock);
 #endif
             break;
         default:
diff -r 3480444bdf37 -r d0420ab97345 xen/include/asm-x86/hvm/domain.h
--- a/xen/include/asm-x86/hvm/domain.h  Fri May 21 15:25:10 2010 +0100
+++ b/xen/include/asm-x86/hvm/domain.h  Fri May 21 16:21:39 2010 +0100
@@ -45,6 +45,8 @@ struct hvm_domain {
     struct hvm_ioreq_page  ioreq;
     struct hvm_ioreq_page  buf_ioreq;
 
+    uint32_t               gtsc_khz; /* kHz */
+    bool_t                 tsc_scaled;
     struct pl_time         pl_time;
 
     struct hvm_io_handler  io_handler;
diff -r 3480444bdf37 -r d0420ab97345 xen/include/asm-x86/hvm/hvm.h
--- a/xen/include/asm-x86/hvm/hvm.h     Fri May 21 15:25:10 2010 +0100
+++ b/xen/include/asm-x86/hvm/hvm.h     Fri May 21 16:21:39 2010 +0100
@@ -296,6 +296,7 @@ uint8_t hvm_combine_hw_exceptions(uint8_
 uint8_t hvm_combine_hw_exceptions(uint8_t vec1, uint8_t vec2);
 
 void hvm_set_rdtsc_exiting(struct domain *d, bool_t enable);
+int hvm_gtsc_need_scale(struct domain *d);
 
 static inline int hvm_cpu_up(void)
 {
diff -r 3480444bdf37 -r d0420ab97345 xen/include/asm-x86/time.h
--- a/xen/include/asm-x86/time.h        Fri May 21 15:25:10 2010 +0100
+++ b/xen/include/asm-x86/time.h        Fri May 21 16:21:39 2010 +0100
@@ -57,7 +57,6 @@ uint64_t ns_to_acpi_pm_tick(uint64_t ns)
 uint64_t ns_to_acpi_pm_tick(uint64_t ns);
 
 void pv_soft_rdtsc(struct vcpu *v, struct cpu_user_regs *regs, int rdtscp);
-u64 gtime_to_gtsc(struct domain *d, u64 tsc);
 
 void tsc_set_info(struct domain *d, uint32_t tsc_mode, uint64_t elapsed_nsec,
                   uint32_t gtsc_khz, uint32_t incarnation);
diff -r 3480444bdf37 -r d0420ab97345 xen/include/public/features.h
--- a/xen/include/public/features.h     Fri May 21 15:25:10 2010 +0100
+++ b/xen/include/public/features.h     Fri May 21 16:21:39 2010 +0100
@@ -68,9 +68,6 @@
  */
 #define XENFEAT_gnttab_map_avail_bits      7
 
-/* x86: pvclock algorithm is safe to use on HVM */
-#define XENFEAT_hvm_safe_pvclock           9
-
 #define XENFEAT_NR_SUBMAPS 1
 
 #endif /* __XEN_PUBLIC_FEATURES_H__ */
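
As a rough illustration of the host-to-guest TSC rescaling that this revert restores (hvm_h2g_scale_tsc() guarded by hvm_gtsc_need_scale() in the hvm.c hunks above), here is a minimal standalone sketch. scale_u64() is a hypothetical stand-in for Xen's muldiv64(), the unsigned __int128 arithmetic is a GCC/Clang shortcut rather than the in-tree implementation, and the TSC value and frequencies are invented example numbers.

/* Sketch only: rescale a host TSC reading to the guest's preferred
 * TSC frequency, in the spirit of the reverted-to hvm_h2g_scale_tsc(). */
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for Xen's muldiv64(): (value * mul) / div
 * without overflowing 64 bits.  Uses a GCC/Clang 128-bit extension
 * for brevity; the real helper is implemented differently. */
static uint64_t scale_u64(uint64_t value, uint32_t mul, uint32_t div)
{
    return (uint64_t)(((unsigned __int128)value * mul) / div);
}

int main(void)
{
    uint64_t host_tsc = 123456789012345ULL;  /* pretend rdtsc result */
    uint32_t htsc_khz = 2400000;             /* host TSC at 2.4 GHz */
    uint32_t gtsc_khz = 2000000;             /* guest expects 2.0 GHz */

    /* Mirror hvm_gtsc_need_scale(): only scale when the MHz values differ. */
    int need_scale = (gtsc_khz / 1000) && (gtsc_khz / 1000 != htsc_khz / 1000);

    uint64_t guest_tsc = need_scale ? scale_u64(host_tsc, gtsc_khz, htsc_khz)
                                    : host_tsc;

    printf("host TSC %llu -> guest TSC %llu\n",
           (unsigned long long)host_tsc, (unsigned long long)guest_tsc);
    return 0;
}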

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
