[Xen-devel] RE: [PATCH] CPUIDLE: shorten hpet spin_lock holding time

To: Keir Fraser <keir.fraser@xxxxxxxxxxxxx>, "xen-devel@xxxxxxxxxxxxxxxxxxx" <xen-devel@xxxxxxxxxxxxxxxxxxx>
Subject: [Xen-devel] RE: [PATCH] CPUIDLE: shorten hpet spin_lock holding time
From: "Wei, Gang" <gang.wei@xxxxxxxxx>
Date: Wed, 21 Apr 2010 00:05:44 +0800
In-reply-to: <C7F37708.11E5E%keir.fraser@xxxxxxxxxxxxx>
References: <F26D193E20BBDC42A43B611D1BDEDE710270AE3EBC@xxxxxxxxxxxxxxxxxxxxxxxxxxxx> <C7F37708.11E5E%keir.fraser@xxxxxxxxxxxxx>
Resend.

CPUIDLE: shorten hpet spin_lock holding time

Reduce spin_lock holding time on the deep C-state entry/exit paths. This
benefits systems with many CPUs that rely on the HPET broadcast mechanism to
wake up from deep C-states.

Signed-off-by: Wei Gang <gang.wei@xxxxxxxxx>
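
[Illustration, not part of the patch] The core idea is to stop holding the
channel lock across the whole broadcast scan: drop it up front, then re-take
it briefly per CPU and re-check membership, since a CPU may leave the
broadcast set while the lock is not held. A minimal userland sketch of that
pattern (a pthread mutex stands in for the Xen spinlock; struct channel and
its field names are hypothetical):

/* Sketch only: per-element lock/re-check instead of one long lock hold. */
#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>

#define NCPUS 64

struct channel {
    pthread_mutex_t lock;
    bool member[NCPUS];         /* stands in for ch->cpumask */
    uint64_t deadline[NCPUS];   /* stands in for timer_deadline_start */
};

/* Record which member CPUs have a deadline that expired by 'now'. */
static int scan_expired(struct channel *ch, uint64_t now, bool expired[NCPUS])
{
    int n = 0;

    for ( int cpu = 0; cpu < NCPUS; cpu++ )
    {
        /* Hold the lock only for this one element... */
        pthread_mutex_lock(&ch->lock);
        /* ...and re-check membership: the CPU may have left the
           broadcast set while the lock was dropped. */
        expired[cpu] = ch->member[cpu] && ch->deadline[cpu] <= now;
        pthread_mutex_unlock(&ch->lock);

        if ( expired[cpu] )
            n++;
    }

    return n;
}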

diff -r dbf0fd95180f xen/arch/x86/hpet.c
--- a/xen/arch/x86/hpet.c       Tue Apr 20 14:32:53 2010 +0100
+++ b/xen/arch/x86/hpet.c       Tue Apr 20 23:48:19 2010 +0800
@@ -186,6 +186,9 @@ static void handle_hpet_broadcast(struct
 
 again:
     ch->next_event = STIME_MAX;
+
+    spin_unlock_irq(&ch->lock);
+
     next_event = STIME_MAX;
     mask = (cpumask_t)CPU_MASK_NONE;
     now = NOW();
@@ -193,10 +196,17 @@ again:
     /* find all expired events */
     for_each_cpu_mask(cpu, ch->cpumask)
     {
-        if ( per_cpu(timer_deadline_start, cpu) <= now )
-            cpu_set(cpu, mask);
-        else if ( per_cpu(timer_deadline_end, cpu) < next_event )
-            next_event = per_cpu(timer_deadline_end, cpu);
+        spin_lock_irq(&ch->lock);
+
+        if ( cpumask_test_cpu(cpu, ch->cpumask) )
+        {
+            if ( per_cpu(timer_deadline_start, cpu) <= now )
+                cpu_set(cpu, mask);
+            else if ( per_cpu(timer_deadline_end, cpu) < next_event )
+                next_event = per_cpu(timer_deadline_end, cpu);
+        }
+
+        spin_unlock_irq(&ch->lock);
     }
 
     /* wakeup the cpus which have an expired event. */
@@ -204,10 +214,14 @@ again:
 
     if ( next_event != STIME_MAX )
     {
-        if ( reprogram_hpet_evt_channel(ch, next_event, now, 0) )
+        spin_lock_irq(&ch->lock);
+
+        if ( next_event < ch->next_event &&
+             reprogram_hpet_evt_channel(ch, next_event, now, 0) )
             goto again;
-    }
-    spin_unlock_irq(&ch->lock);
+
+        spin_unlock_irq(&ch->lock);
+    }
 }
 
 static void hpet_interrupt_handler(int irq, void *data,
@@ -656,17 +670,23 @@ void hpet_broadcast_enter(void)
     BUG_ON( !ch );
 
     ASSERT(!local_irq_is_enabled());
-    spin_lock(&ch->lock);
 
     if ( hpet_attach_channel )
+    {
+        spin_lock(&ch->lock);
+
         hpet_attach_channel(cpu, ch);
+
+        spin_unlock(&ch->lock);
+    }
 
     /* Cancel any outstanding LAPIC timer event and disable interrupts. */
     reprogram_timer(0);
     disable_APIC_timer();
 
+    spin_lock(&ch->lock);
+
     cpu_set(cpu, ch->cpumask);
-
     /* reprogram if current cpu expire time is nearer */
     if ( this_cpu(timer_deadline_end) < ch->next_event )
         reprogram_hpet_evt_channel(ch, this_cpu(timer_deadline_end), NOW(), 1);
@@ -684,23 +704,28 @@ void hpet_broadcast_exit(void)
 
     BUG_ON( !ch );
 
+    /* Reprogram the deadline; trigger timer work now if it has passed. */
+    enable_APIC_timer();
+    if ( !reprogram_timer(this_cpu(timer_deadline_start)) )
+        raise_softirq(TIMER_SOFTIRQ);
+
     spin_lock_irq(&ch->lock);
 
-    if ( cpu_test_and_clear(cpu, ch->cpumask) )
-    {
-        /* Reprogram the deadline; trigger timer work now if it has passed. */
-        enable_APIC_timer();
-        if ( !reprogram_timer(this_cpu(timer_deadline_start)) )
-            raise_softirq(TIMER_SOFTIRQ);
-
-        if ( cpus_empty(ch->cpumask) && ch->next_event != STIME_MAX )
-            reprogram_hpet_evt_channel(ch, STIME_MAX, 0, 0);
-    }
+    cpu_clear(cpu, ch->cpumask);
+    if ( cpus_empty(ch->cpumask) && ch->next_event != STIME_MAX )
+        reprogram_hpet_evt_channel(ch, STIME_MAX, 0, 0);
+
+    spin_unlock_irq(&ch->lock);
+
 
     if ( hpet_detach_channel )
+    {
+        spin_lock_irq(&ch->lock);
+
         hpet_detach_channel(cpu);
 
-    spin_unlock_irq(&ch->lock);
+        spin_unlock_irq(&ch->lock);
+    }
 }
 
 int hpet_broadcast_is_available(void)
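
[Illustration, not part of the patch] One subtlety in the diff above: because
the scan now runs with the lock dropped, ch->next_event can change underneath
the handler, e.g. hpet_broadcast_enter() on another CPU may program an earlier
deadline in the meantime. The patch therefore re-takes the lock and only
reprograms when its candidate deadline is still strictly earlier, retrying the
scan otherwise. A hedged sketch of that re-validate-then-commit step, reusing
the hypothetical struct channel from the earlier sketch (program_hw() is an
illustrative stand-in for reprogram_hpet_evt_channel()):

/* Sketch only: re-check under the lock before committing a deadline. */
#include <pthread.h>
#include <stdint.h>

struct channel {
    pthread_mutex_t lock;
    uint64_t next_event;   /* earliest deadline currently programmed */
};

/* Returns 1 if the caller should rescan because the deadline has already
   passed, mirroring the 'goto again' path in handle_hpet_broadcast(). */
static int commit_deadline(struct channel *ch, uint64_t next_event,
                           uint64_t now)
{
    int rescan = 0;

    pthread_mutex_lock(&ch->lock);
    /* Another CPU may have installed an earlier deadline while the lock
       was dropped; only reprogram if ours is still sooner. */
    if ( next_event < ch->next_event )
    {
        ch->next_event = next_event;
        if ( next_event <= now )   /* missed it: caller must rescan */
            rescan = 1;
        /* else program_hw(next_event);  (hypothetical hardware write) */
    }
    pthread_mutex_unlock(&ch->lock);

    return rescan;
}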

Attachment: hpet_short_lock_v2.patch
Description: hpet_short_lock_v2.patch
