
[Xen-devel] Re: [PATCH] CPUIDLE: shorten hpet spin_lock holding time



Is this a measurable win? The new locking looks like it could be dodgy on
32-bit Xen: the 64-bit reads of timer_deadline_{start,end} will be
non-atomic and unsynchronised, so you can read garbage. Even on 64-bit Xen
you can read stale values. I'd be surprised if you get a performance win
from chopping up critical regions in individual functions like that anyway.
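
To make the 32-bit concern concrete: a uint64_t load on a 32-bit target
compiles to two 32-bit loads, so a reader that does not hold the lock
serialising the writer can observe half of an old value and half of a new
one. A minimal standalone sketch (not Xen code; the struct and function
names are made-up stand-ins for the per-cpu timer_deadline_{start,end}
fields):

#include <stdint.h>

/* Stand-in for a per-cpu deadline field such as timer_deadline_end. */
struct percpu_sketch {
    volatile uint64_t deadline;
};

/* Writer: the CPU that owns the field updates it. */
static void set_deadline(struct percpu_sketch *p, uint64_t d)
{
    p->deadline = d;        /* two 32-bit stores on a 32-bit build */
}

/* Reader: another CPU peeking without the serialising lock held. */
static uint64_t peek_deadline(struct percpu_sketch *p)
{
    /*
     * Two 32-bit loads on a 32-bit build: if set_deadline() runs
     * concurrently, this can return the new low half combined with the
     * old high half (or vice versa), i.e. garbage, unless the access is
     * serialised by a lock or made atomic.
     */
    return p->deadline;
}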

 -- Keir

On 20/04/2010 06:39, "Wei, Gang" <gang.wei@xxxxxxxxx> wrote:

> CPUIDLE: shorten hpet spin_lock holding time
> 
> Try to reduce spin_lock overhead for deep C state entry/exit. This will
> benefit systems with many CPUs that need the hpet broadcast to wake up
> from deep C states.
> 
> Signed-off-by: Wei Gang <gang.wei@xxxxxxxxx>
> 
> diff -r 7ee8bb40200a xen/arch/x86/hpet.c
> --- a/xen/arch/x86/hpet.c Thu Apr 15 19:11:16 2010 +0100
> +++ b/xen/arch/x86/hpet.c Fri Apr 16 15:05:28 2010 +0800
> @@ -186,6 +186,9 @@ static void handle_hpet_broadcast(struct
>  
>  again:
>      ch->next_event = STIME_MAX;
> +
> +    spin_unlock_irq(&ch->lock);
> +
>      next_event = STIME_MAX;
>      mask = (cpumask_t)CPU_MASK_NONE;
>      now = NOW();
> @@ -204,10 +207,14 @@ again:
>  
>      if ( next_event != STIME_MAX )
>      {
> -        if ( reprogram_hpet_evt_channel(ch, next_event, now, 0) )
> +        spin_lock_irq(&ch->lock);
> +
> +        if ( next_event < ch->next_event &&
> +             reprogram_hpet_evt_channel(ch, next_event, now, 0) )
>              goto again;
> -    }
> -    spin_unlock_irq(&ch->lock);
> +
> +        spin_unlock_irq(&ch->lock);
> +    }
>  }
>  
>  static void hpet_interrupt_handler(int irq, void *data,
> @@ -656,10 +663,15 @@ void hpet_broadcast_enter(void)
>      BUG_ON( !ch );
>  
>      ASSERT(!local_irq_is_enabled());
> -    spin_lock(&ch->lock);
>  
>      if ( hpet_attach_channel )
> +    {
> +        spin_lock(&ch->lock);
> +
>          hpet_attach_channel(cpu, ch);
> +
> +        spin_unlock(&ch->lock);
> +    }
>  
>      /* Cancel any outstanding LAPIC timer event and disable interrupts. */
>      reprogram_timer(0);
> @@ -667,6 +679,8 @@ void hpet_broadcast_enter(void)
>  
>      cpu_set(cpu, ch->cpumask);
>  
> +    spin_lock(&ch->lock);
> +
>      /* reprogram if current cpu expire time is nearer */
>      if ( this_cpu(timer_deadline_end) < ch->next_event )
>          reprogram_hpet_evt_channel(ch, this_cpu(timer_deadline_end), NOW(),
> 1);
> @@ -683,8 +697,6 @@ void hpet_broadcast_exit(void)
>          return;
>  
>      BUG_ON( !ch );
> -
> -    spin_lock_irq(&ch->lock);
>  
>      if ( cpu_test_and_clear(cpu, ch->cpumask) )
>      {
> @@ -693,14 +705,22 @@ void hpet_broadcast_exit(void)
>          if ( !reprogram_timer(this_cpu(timer_deadline_start)) )
>              raise_softirq(TIMER_SOFTIRQ);
>  
> +        spin_lock_irq(&ch->lock);
> +
>          if ( cpus_empty(ch->cpumask) && ch->next_event != STIME_MAX )
>              reprogram_hpet_evt_channel(ch, STIME_MAX, 0, 0);
> +
> +        spin_unlock_irq(&ch->lock);
>      }
>  
>      if ( hpet_detach_channel )
> +    {
> +        spin_lock_irq(&ch->lock);
> +
>          hpet_detach_channel(cpu);
>  
> -    spin_unlock_irq(&ch->lock);
> +        spin_unlock_irq(&ch->lock);
> +    }
>  }
>  
>  int hpet_broadcast_is_available(void)
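
For reference, the handle_hpet_broadcast() hunk above follows a
lock-narrowing pattern: reset the channel state and drop the lock, scan the
per-cpu deadlines unlocked, then re-take the lock and re-validate before
reprogramming, retrying if the expiry has already passed. A condensed
sketch of that pattern, written as a loop rather than the patch's goto (not
the exact Xen code; earliest_deadline() is a made-up placeholder for the
scan over ch->cpumask):

    for ( ; ; )
    {
        spin_lock_irq(&ch->lock);
        ch->next_event = STIME_MAX;
        spin_unlock_irq(&ch->lock);

        /* Scan the per-cpu deadlines with the lock dropped; the values
         * read here may already be stale. */
        next_event = earliest_deadline(ch);

        if ( next_event == STIME_MAX )
            break;

        spin_lock_irq(&ch->lock);

        /* Re-validate under the lock: another cpu may have installed an
         * earlier event while the lock was dropped. */
        if ( next_event < ch->next_event &&
             reprogram_hpet_evt_channel(ch, next_event, NOW(), 0) )
        {
            /* Expiry already in the past by the time we reprogrammed:
             * drop the lock and rescan. */
            spin_unlock_irq(&ch->lock);
            continue;
        }

        spin_unlock_irq(&ch->lock);
        break;
    }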






 

