WARNING - OLD ARCHIVES

This is an archived copy of the Xen.org mailing list, which we have preserved to ensure that existing links to archives are not broken. The live archive, which contains the latest emails, can be found at http://lists.xen.org/
   
 
 
Xen 
 
Home Products Support Community News
 
   
 

xen-changelog

[Xen-changelog] [xen-unstable] Cpufreq: prevent negative px resident time

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] Cpufreq: prevent negative px resident time, add spinlock to avoid race
From: Xen patchbot-unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Thu, 08 Jan 2009 06:57:34 -0800
Delivery-date: Thu, 08 Jan 2009 07:01:32 -0800
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1231154172 0
# Node ID fd59c117994a3d4ad1e4cc51dc51e59e8d87e18c
# Parent  42108955f52e127a91ca928818b45a6cb29f5d54
Cpufreq: prevent negative px resident time, add spinlock to avoid race

Because the NOW() value may drift between different CPUs, we add
protection to prevent negative Px resident time.
Because both the cpufreq logic and xenpm may race when accessing
cpufreq_statistic_data, we add a spinlock to avoid the race.

Signed-off-by: Liu, Jinsong <jinsong.liu@xxxxxxxxx>
---
 xen/drivers/acpi/pmstat.c                 |   27 ++++---
 xen/drivers/cpufreq/utility.c             |  104 ++++++++++++++++++++++++------
 xen/include/acpi/cpufreq/cpufreq.h        |    2 
 xen/include/acpi/cpufreq/processor_perf.h |    1 
 4 files changed, 102 insertions(+), 32 deletions(-)

diff -r 42108955f52e -r fd59c117994a xen/drivers/acpi/pmstat.c
--- a/xen/drivers/acpi/pmstat.c Mon Jan 05 11:15:40 2009 +0000
+++ b/xen/drivers/acpi/pmstat.c Mon Jan 05 11:16:12 2009 +0000
@@ -87,33 +87,34 @@ int do_get_pm_info(struct xen_sysctl_get
 
     case PMSTAT_get_pxstat:
     {
-        uint64_t now, ct;
-        uint64_t total_idle_ns;
-        uint64_t tmp_idle_ns;
+        uint32_t ct;
         struct pm_px *pxpt = cpufreq_statistic_data[op->cpuid];
+        spinlock_t *cpufreq_statistic_lock = 
+                   &per_cpu(cpufreq_statistic_lock, op->cpuid);
+
+        spin_lock_irq(cpufreq_statistic_lock);
 
         if ( !pxpt || !pxpt->u.pt || !pxpt->u.trans_pt )
+        {
+            spin_unlock_irq(cpufreq_statistic_lock);
             return -ENODATA;
-
-        total_idle_ns = get_cpu_idle_time(op->cpuid);
-        tmp_idle_ns = total_idle_ns - pxpt->prev_idle_wall;
-
-        now = NOW();
+        }
+
         pxpt->u.usable = pmpt->perf.state_count - pmpt->perf.platform_limit;
-        pxpt->u.pt[pxpt->u.cur].residency += now - pxpt->prev_state_wall;
-        pxpt->u.pt[pxpt->u.cur].residency -= tmp_idle_ns;
-        pxpt->prev_state_wall = now;
-        pxpt->prev_idle_wall = total_idle_ns;
+
+        cpufreq_residency_update(op->cpuid, pxpt->u.cur);
 
         ct = pmpt->perf.state_count;
         if ( copy_to_guest(op->u.getpx.trans_pt, pxpt->u.trans_pt, ct*ct) )
         {
+            spin_unlock_irq(cpufreq_statistic_lock);
             ret = -EFAULT;
             break;
         }
 
         if ( copy_to_guest(op->u.getpx.pt, pxpt->u.pt, ct) )
         {
+            spin_unlock_irq(cpufreq_statistic_lock);
             ret = -EFAULT;
             break;
         }
@@ -122,6 +123,8 @@ int do_get_pm_info(struct xen_sysctl_get
         op->u.getpx.usable = pxpt->u.usable;
         op->u.getpx.last = pxpt->u.last;
         op->u.getpx.cur = pxpt->u.cur;
+
+        spin_unlock_irq(cpufreq_statistic_lock);
 
         break;
     }
diff -r 42108955f52e -r fd59c117994a xen/drivers/cpufreq/utility.c
--- a/xen/drivers/cpufreq/utility.c     Mon Jan 05 11:15:40 2009 +0000
+++ b/xen/drivers/cpufreq/utility.c     Mon Jan 05 11:16:12 2009 +0000
@@ -36,35 +36,54 @@ struct processor_pminfo *__read_mostly p
 struct processor_pminfo *__read_mostly processor_pminfo[NR_CPUS];
 struct cpufreq_policy   *__read_mostly cpufreq_cpu_policy[NR_CPUS];
 
+DEFINE_PER_CPU(spinlock_t, cpufreq_statistic_lock);
+
 /*********************************************************************
  *                    Px STATISTIC INFO                              *
  *********************************************************************/
 
+void cpufreq_residency_update(unsigned int cpu, uint8_t state)
+{
+    uint64_t now, total_idle_ns;
+    int64_t delta;
+    struct pm_px *pxpt = cpufreq_statistic_data[cpu];
+
+    total_idle_ns = get_cpu_idle_time(cpu);
+    now = NOW();
+
+    delta = (now - pxpt->prev_state_wall) - 
+            (total_idle_ns - pxpt->prev_idle_wall);
+
+    if ( likely(delta >= 0) )
+        pxpt->u.pt[state].residency += delta;
+
+    pxpt->prev_state_wall = now;
+    pxpt->prev_idle_wall = total_idle_ns;
+}
+
 void cpufreq_statistic_update(unsigned int cpu, uint8_t from, uint8_t to)
 {
-    uint64_t now;
     struct pm_px *pxpt = cpufreq_statistic_data[cpu];
     struct processor_pminfo *pmpt = processor_pminfo[cpu];
-    uint64_t total_idle_ns;
-    uint64_t tmp_idle_ns;
-
-    if ( !pxpt || !pmpt )
+    spinlock_t *cpufreq_statistic_lock = 
+               &per_cpu(cpufreq_statistic_lock, cpu);
+
+    spin_lock_irq(cpufreq_statistic_lock);
+
+    if ( !pxpt || !pmpt ) {
+        spin_unlock_irq(cpufreq_statistic_lock);
         return;
-
-    now = NOW();
-    total_idle_ns = get_cpu_idle_time(cpu);
-    tmp_idle_ns = total_idle_ns - pxpt->prev_idle_wall;
+    }
 
     pxpt->u.last = from;
     pxpt->u.cur = to;
     pxpt->u.pt[to].count++;
-    pxpt->u.pt[from].residency += now - pxpt->prev_state_wall;
-    pxpt->u.pt[from].residency -= tmp_idle_ns;
+
+    cpufreq_residency_update(cpu, from);
 
     (*(pxpt->u.trans_pt + from * pmpt->perf.state_count + to))++;
 
-    pxpt->prev_state_wall = now;
-    pxpt->prev_idle_wall = total_idle_ns;
+    spin_unlock_irq(cpufreq_statistic_lock);
 }
 
 int cpufreq_statistic_init(unsigned int cpuid)
@@ -72,24 +91,33 @@ int cpufreq_statistic_init(unsigned int 
     uint32_t i, count;
     struct pm_px *pxpt = cpufreq_statistic_data[cpuid];
     const struct processor_pminfo *pmpt = processor_pminfo[cpuid];
+    spinlock_t *cpufreq_statistic_lock = 
+                          &per_cpu(cpufreq_statistic_lock, cpuid);
 
     if ( !pmpt )
         return -EINVAL;
 
-    if ( pxpt )
+    spin_lock_irq(cpufreq_statistic_lock);
+
+    if ( pxpt ) {
+        spin_unlock_irq(cpufreq_statistic_lock);
         return 0;
+    }
 
     count = pmpt->perf.state_count;
 
     pxpt = xmalloc(struct pm_px);
-    if ( !pxpt )
+    if ( !pxpt ) {
+        spin_unlock_irq(cpufreq_statistic_lock);
         return -ENOMEM;
+    }
     memset(pxpt, 0, sizeof(*pxpt));
     cpufreq_statistic_data[cpuid] = pxpt;
 
     pxpt->u.trans_pt = xmalloc_array(uint64_t, count * count);
     if (!pxpt->u.trans_pt) {
         xfree(pxpt);
+        spin_unlock_irq(cpufreq_statistic_lock);
         return -ENOMEM;
     }
 
@@ -97,6 +125,7 @@ int cpufreq_statistic_init(unsigned int 
     if (!pxpt->u.pt) {
         xfree(pxpt->u.trans_pt);
         xfree(pxpt);
+        spin_unlock_irq(cpufreq_statistic_lock);
         return -ENOMEM;
     }
 
@@ -112,19 +141,30 @@ int cpufreq_statistic_init(unsigned int 
     pxpt->prev_state_wall = NOW();
     pxpt->prev_idle_wall = get_cpu_idle_time(cpuid);
 
+    spin_unlock_irq(cpufreq_statistic_lock);
+
     return 0;
 }
 
 void cpufreq_statistic_exit(unsigned int cpuid)
 {
     struct pm_px *pxpt = cpufreq_statistic_data[cpuid];
-
-    if (!pxpt)
+    spinlock_t *cpufreq_statistic_lock = 
+               &per_cpu(cpufreq_statistic_lock, cpuid);
+
+    spin_lock_irq(cpufreq_statistic_lock);
+
+    if (!pxpt) {
+        spin_unlock_irq(cpufreq_statistic_lock);
         return;
+    }
+
     xfree(pxpt->u.trans_pt);
     xfree(pxpt->u.pt);
     xfree(pxpt);
     cpufreq_statistic_data[cpuid] = NULL;
+
+    spin_unlock_irq(cpufreq_statistic_lock);
 }
 
 void cpufreq_statistic_reset(unsigned int cpuid)
@@ -132,9 +172,15 @@ void cpufreq_statistic_reset(unsigned in
     uint32_t i, j, count;
     struct pm_px *pxpt = cpufreq_statistic_data[cpuid];
     const struct processor_pminfo *pmpt = processor_pminfo[cpuid];
-
-    if ( !pmpt || !pxpt || !pxpt->u.pt || !pxpt->u.trans_pt )
+    spinlock_t *cpufreq_statistic_lock = 
+               &per_cpu(cpufreq_statistic_lock, cpuid);
+
+    spin_lock_irq(cpufreq_statistic_lock);
+
+    if ( !pmpt || !pxpt || !pxpt->u.pt || !pxpt->u.trans_pt ) {
+        spin_unlock_irq(cpufreq_statistic_lock);
         return;
+    }
 
     count = pmpt->perf.state_count;
 
@@ -148,7 +194,25 @@ void cpufreq_statistic_reset(unsigned in
 
     pxpt->prev_state_wall = NOW();
     pxpt->prev_idle_wall = get_cpu_idle_time(cpuid);
-}
+
+    spin_unlock_irq(cpufreq_statistic_lock);
+}
+
+static int __init cpufreq_statistic_lock_init(void)
+{
+    unsigned int cpu;
+    spinlock_t *cpufreq_statistic_lock; 
+
+    for (cpu=0; cpu<NR_CPUS; cpu++) {
+        cpufreq_statistic_lock = 
+                &per_cpu(cpufreq_statistic_lock, cpu);
+
+        spin_lock_init(cpufreq_statistic_lock);
+    }
+
+    return 0;
+}
+__initcall(cpufreq_statistic_lock_init);
 
 
 /*********************************************************************
diff -r 42108955f52e -r fd59c117994a xen/include/acpi/cpufreq/cpufreq.h
--- a/xen/include/acpi/cpufreq/cpufreq.h        Mon Jan 05 11:15:40 2009 +0000
+++ b/xen/include/acpi/cpufreq/cpufreq.h        Mon Jan 05 11:16:12 2009 +0000
@@ -19,6 +19,8 @@
 #include <xen/cpumask.h>
 
 #include "processor_perf.h"
+
+DECLARE_PER_CPU(spinlock_t, cpufreq_statistic_lock);
 
 struct cpufreq_governor;
 
diff -r 42108955f52e -r fd59c117994a xen/include/acpi/cpufreq/processor_perf.h
--- a/xen/include/acpi/cpufreq/processor_perf.h Mon Jan 05 11:15:40 2009 +0000
+++ b/xen/include/acpi/cpufreq/processor_perf.h Mon Jan 05 11:16:12 2009 +0000
@@ -9,6 +9,7 @@ int get_cpu_id(u8);
 int get_cpu_id(u8);
 int powernow_cpufreq_init(void);
 
+void cpufreq_residency_update(unsigned int, uint8_t);
 void cpufreq_statistic_update(unsigned int, uint8_t, uint8_t);
 int  cpufreq_statistic_init(unsigned int);
 void cpufreq_statistic_exit(unsigned int);

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog

<Prev in Thread] Current Thread [Next in Thread>
  • [Xen-changelog] [xen-unstable] Cpufreq: prevent negative px resident time, add spinlock to avoid race, Xen patchbot-unstable <=