WARNING - OLD ARCHIVES

This is an archived copy of the Xen.org mailing list, which we have preserved to ensure that existing links to archives are not broken. The live archive, which contains the latest emails, can be found at http://lists.xen.org/
   
 
 
Xen 
 
Home Products Support Community News
 
   
 

xen-changelog

[Xen-changelog] [xen-unstable] x86: TSC handling cleanups (version 2)

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] x86: TSC handling cleanups (version 2)
From: Xen patchbot-unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Fri, 21 May 2010 23:20:14 -0700
Delivery-date: Fri, 21 May 2010 23:20:41 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1274506307 -3600
# Node ID c1ed00d495342334980be576bbb8a2b1037b89c9
# Parent  d0420ab973455eff500fef63243b0a49f5640527
x86: TSC handling cleanups (version 2)

"I am removing the tsc_scaled variable that is never actually used
because when tsc needs to be scaled vtsc is 1.  I am also making this
more explicit in tsc_set_info.  I am also removing hvm_domain.gtsc_khz
that is a duplicate of d->arch.tsc_khz.  I am using scale_delta(delta,
&d->arch.ns_to_vtsc) to scale the tsc value before returning it to the
guest like in the pv case.  I added a feature flag to specify that the
pvclock algorithm is safe to be used in an HVM guest so that the guest
can now use it without hanging."

Version 2 fixes a bug which breaks PV domU time.

Signed-off-by: Stefano Stabellini <stefano.stabellini@xxxxxxxxxxxxx>
---
 xen/arch/x86/hvm/hvm.c           |   30 ++----------------------------
 xen/arch/x86/hvm/save.c          |    6 +++---
 xen/arch/x86/hvm/vpt.c           |    3 ---
 xen/arch/x86/time.c              |   30 ++++++++++++++++++++++--------
 xen/common/kernel.c              |    2 ++
 xen/include/asm-x86/hvm/domain.h |    2 --
 xen/include/asm-x86/hvm/hvm.h    |    1 -
 xen/include/asm-x86/time.h       |    1 +
 xen/include/public/features.h    |    3 +++
 9 files changed, 33 insertions(+), 45 deletions(-)

diff -r d0420ab97345 -r c1ed00d49534 xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c    Fri May 21 16:21:39 2010 +0100
+++ b/xen/arch/x86/hvm/hvm.c    Sat May 22 06:31:47 2010 +0100
@@ -205,32 +205,6 @@ void hvm_set_rdtsc_exiting(struct domain
         hvm_funcs.set_rdtsc_exiting(v, enable);
 }
 
-int hvm_gtsc_need_scale(struct domain *d)
-{
-    uint32_t gtsc_mhz, htsc_mhz;
-
-    if ( d->arch.vtsc )
-        return 0;
-
-    gtsc_mhz = d->arch.hvm_domain.gtsc_khz / 1000;
-    htsc_mhz = (uint32_t)cpu_khz / 1000;
-
-    d->arch.hvm_domain.tsc_scaled = (gtsc_mhz && (gtsc_mhz != htsc_mhz));
-    return d->arch.hvm_domain.tsc_scaled;
-}
-
-static u64 hvm_h2g_scale_tsc(struct vcpu *v, u64 host_tsc)
-{
-    uint32_t gtsc_khz, htsc_khz;
-
-    if ( !v->domain->arch.hvm_domain.tsc_scaled )
-        return host_tsc;
-
-    htsc_khz = cpu_khz;
-    gtsc_khz = v->domain->arch.hvm_domain.gtsc_khz;
-    return muldiv64(host_tsc, gtsc_khz, htsc_khz);
-}
-
 void hvm_set_guest_tsc(struct vcpu *v, u64 guest_tsc)
 {
     uint64_t tsc;
@@ -238,11 +212,11 @@ void hvm_set_guest_tsc(struct vcpu *v, u
     if ( v->domain->arch.vtsc )
     {
         tsc = hvm_get_guest_time(v);
+        tsc = gtime_to_gtsc(v->domain, tsc);
     }
     else
     {
         rdtscll(tsc);
-        tsc = hvm_h2g_scale_tsc(v, tsc);
     }
 
     v->arch.hvm_vcpu.cache_tsc_offset = guest_tsc - tsc;
@@ -256,12 +230,12 @@ u64 hvm_get_guest_tsc(struct vcpu *v)
     if ( v->domain->arch.vtsc )
     {
         tsc = hvm_get_guest_time(v);
+        tsc = gtime_to_gtsc(v->domain, tsc);
         v->domain->arch.vtsc_kerncount++;
     }
     else
     {
         rdtscll(tsc);
-        tsc = hvm_h2g_scale_tsc(v, tsc);
     }
 
     return tsc + v->arch.hvm_vcpu.cache_tsc_offset;
diff -r d0420ab97345 -r c1ed00d49534 xen/arch/x86/hvm/save.c
--- a/xen/arch/x86/hvm/save.c   Fri May 21 16:21:39 2010 +0100
+++ b/xen/arch/x86/hvm/save.c   Sat May 22 06:31:47 2010 +0100
@@ -33,7 +33,7 @@ void arch_hvm_save(struct domain *d, str
     hdr->cpuid = eax;
 
     /* Save guest's preferred TSC. */
-    hdr->gtsc_khz = d->arch.hvm_domain.gtsc_khz;
+    hdr->gtsc_khz = d->arch.tsc_khz;
 }
 
 int arch_hvm_load(struct domain *d, struct hvm_save_header *hdr)
@@ -62,8 +62,8 @@ int arch_hvm_load(struct domain *d, stru
 
     /* Restore guest's preferred TSC frequency. */
     if ( hdr->gtsc_khz )
-        d->arch.hvm_domain.gtsc_khz = hdr->gtsc_khz;
-    if ( hvm_gtsc_need_scale(d) )
+        d->arch.tsc_khz = hdr->gtsc_khz;
+    if ( d->arch.vtsc )
     {
         hvm_set_rdtsc_exiting(d, 1);
         gdprintk(XENLOG_WARNING, "Domain %d expects freq %uMHz "
diff -r d0420ab97345 -r c1ed00d49534 xen/arch/x86/hvm/vpt.c
--- a/xen/arch/x86/hvm/vpt.c    Fri May 21 16:21:39 2010 +0100
+++ b/xen/arch/x86/hvm/vpt.c    Sat May 22 06:31:47 2010 +0100
@@ -32,9 +32,6 @@ void hvm_init_guest_time(struct domain *
     spin_lock_init(&pl->pl_time_lock);
     pl->stime_offset = -(u64)get_s_time();
     pl->last_guest_time = 0;
-
-    d->arch.hvm_domain.gtsc_khz = cpu_khz;
-    d->arch.hvm_domain.tsc_scaled = 0;
 }
 
 u64 hvm_get_guest_time(struct vcpu *v)
diff -r d0420ab97345 -r c1ed00d49534 xen/arch/x86/time.c
--- a/xen/arch/x86/time.c       Fri May 21 16:21:39 2010 +0100
+++ b/xen/arch/x86/time.c       Sat May 22 06:31:47 2010 +0100
@@ -804,8 +804,13 @@ static void __update_vcpu_system_time(st
 
     if ( d->arch.vtsc )
     {
-        u64 delta = max_t(s64, t->stime_local_stamp - d->arch.vtsc_offset, 0);
-        tsc_stamp = scale_delta(delta, &d->arch.ns_to_vtsc);
+        u64 stime = t->stime_local_stamp;
+        if ( is_hvm_domain(d) )
+        {
+            struct pl_time *pl = &v->domain->arch.hvm_domain.pl_time;
+            stime += pl->stime_offset + v->arch.hvm_vcpu.stime_offset;
+        }
+        tsc_stamp = gtime_to_gtsc(d, stime);
     }
     else
     {
@@ -828,6 +833,8 @@ static void __update_vcpu_system_time(st
         _u.tsc_to_system_mul = t->tsc_scale.mul_frac;
         _u.tsc_shift         = (s8)t->tsc_scale.shift;
     }
+    if ( is_hvm_domain(d) )
+        _u.tsc_timestamp += v->arch.hvm_vcpu.cache_tsc_offset;
 
     /* Don't bother unless timestamp record has changed or we are forced. */
     _u.version = u->version; /* make versions match for memcmp test */
@@ -1591,11 +1598,17 @@ struct tm wallclock_time(void)
  * PV SoftTSC Emulation.
  */
 
+u64 gtime_to_gtsc(struct domain *d, u64 tsc)
+{
+    if ( !is_hvm_domain(d) )
+        tsc = max_t(s64, tsc - d->arch.vtsc_offset, 0);
+    return scale_delta(tsc, &d->arch.ns_to_vtsc);
+}
+
 void pv_soft_rdtsc(struct vcpu *v, struct cpu_user_regs *regs, int rdtscp)
 {
     s_time_t now = get_s_time();
     struct domain *d = v->domain;
-    u64 delta;
 
     spin_lock(&d->arch.vtsc_lock);
 
@@ -1611,8 +1624,7 @@ void pv_soft_rdtsc(struct vcpu *v, struc
 
     spin_unlock(&d->arch.vtsc_lock);
 
-    delta = max_t(s64, now - d->arch.vtsc_offset, 0);
-    now = scale_delta(delta, &d->arch.ns_to_vtsc);
+    now = gtime_to_gtsc(d, now);
 
     regs->eax = (uint32_t)now;
     regs->edx = (uint32_t)(now >> 32);
@@ -1753,8 +1765,10 @@ void tsc_set_info(struct domain *d,
         d->arch.vtsc_offset = get_s_time() - elapsed_nsec;
         d->arch.tsc_khz = gtsc_khz ? gtsc_khz : cpu_khz;
         set_time_scale(&d->arch.vtsc_to_ns, d->arch.tsc_khz * 1000 );
-        /* use native TSC if initial host has safe TSC and not migrated yet */
-        if ( host_tsc_is_safe() && incarnation == 0 )
+        /* use native TSC if initial host has safe TSC, has not migrated
+         * yet and tsc_khz == cpu_khz */
+        if ( host_tsc_is_safe() && incarnation == 0 &&
+                d->arch.tsc_khz == cpu_khz )
             d->arch.vtsc = 0;
         else 
             d->arch.ns_to_vtsc = scale_reciprocal(d->arch.vtsc_to_ns);
@@ -1779,7 +1793,7 @@ void tsc_set_info(struct domain *d,
     }
     d->arch.incarnation = incarnation + 1;
     if ( is_hvm_domain(d) )
-        hvm_set_rdtsc_exiting(d, d->arch.vtsc || hvm_gtsc_need_scale(d));
+        hvm_set_rdtsc_exiting(d, d->arch.vtsc);
 }
 
 /* vtsc may incur measurable performance degradation, diagnose with this */
diff -r d0420ab97345 -r c1ed00d49534 xen/common/kernel.c
--- a/xen/common/kernel.c       Fri May 21 16:21:39 2010 +0100
+++ b/xen/common/kernel.c       Sat May 22 06:31:47 2010 +0100
@@ -259,6 +259,8 @@ DO(xen_version)(int cmd, XEN_GUEST_HANDL
                 fi.submap |= (1U << XENFEAT_mmu_pt_update_preserve_ad) |
                              (1U << XENFEAT_highmem_assist) |
                              (1U << XENFEAT_gnttab_map_avail_bits);
+            else
+                fi.submap |= (1U << XENFEAT_hvm_safe_pvclock);
 #endif
             break;
         default:
diff -r d0420ab97345 -r c1ed00d49534 xen/include/asm-x86/hvm/domain.h
--- a/xen/include/asm-x86/hvm/domain.h  Fri May 21 16:21:39 2010 +0100
+++ b/xen/include/asm-x86/hvm/domain.h  Sat May 22 06:31:47 2010 +0100
@@ -45,8 +45,6 @@ struct hvm_domain {
     struct hvm_ioreq_page  ioreq;
     struct hvm_ioreq_page  buf_ioreq;
 
-    uint32_t               gtsc_khz; /* kHz */
-    bool_t                 tsc_scaled;
     struct pl_time         pl_time;
 
     struct hvm_io_handler  io_handler;
diff -r d0420ab97345 -r c1ed00d49534 xen/include/asm-x86/hvm/hvm.h
--- a/xen/include/asm-x86/hvm/hvm.h     Fri May 21 16:21:39 2010 +0100
+++ b/xen/include/asm-x86/hvm/hvm.h     Sat May 22 06:31:47 2010 +0100
@@ -296,7 +296,6 @@ uint8_t hvm_combine_hw_exceptions(uint8_
 uint8_t hvm_combine_hw_exceptions(uint8_t vec1, uint8_t vec2);
 
 void hvm_set_rdtsc_exiting(struct domain *d, bool_t enable);
-int hvm_gtsc_need_scale(struct domain *d);
 
 static inline int hvm_cpu_up(void)
 {
diff -r d0420ab97345 -r c1ed00d49534 xen/include/asm-x86/time.h
--- a/xen/include/asm-x86/time.h        Fri May 21 16:21:39 2010 +0100
+++ b/xen/include/asm-x86/time.h        Sat May 22 06:31:47 2010 +0100
@@ -57,6 +57,7 @@ uint64_t ns_to_acpi_pm_tick(uint64_t ns)
 uint64_t ns_to_acpi_pm_tick(uint64_t ns);
 
 void pv_soft_rdtsc(struct vcpu *v, struct cpu_user_regs *regs, int rdtscp);
+u64 gtime_to_gtsc(struct domain *d, u64 tsc);
 
 void tsc_set_info(struct domain *d, uint32_t tsc_mode, uint64_t elapsed_nsec,
                   uint32_t gtsc_khz, uint32_t incarnation);
diff -r d0420ab97345 -r c1ed00d49534 xen/include/public/features.h
--- a/xen/include/public/features.h     Fri May 21 16:21:39 2010 +0100
+++ b/xen/include/public/features.h     Sat May 22 06:31:47 2010 +0100
@@ -68,6 +68,9 @@
  */
 #define XENFEAT_gnttab_map_avail_bits      7
 
+/* x86: pvclock algorithm is safe to use on HVM */
+#define XENFEAT_hvm_safe_pvclock           9
+
 #define XENFEAT_NR_SUBMAPS 1
 
 #endif /* __XEN_PUBLIC_FEATURES_H__ */

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog

[Prev in Thread] Current Thread [Next in Thread]
  • [Xen-changelog] [xen-unstable] x86: TSC handling cleanups (version 2), Xen patchbot-unstable <=