[Xen-devel] [PATCH 11/12] x86/hpet: allocate CPU masks dynamically

To: "xen-devel@xxxxxxxxxxxxxxxxxxx" <xen-devel@xxxxxxxxxxxxxxxxxxx>
Subject: [Xen-devel] [PATCH 11/12] x86/hpet: allocate CPU masks dynamically
From: "Jan Beulich" <JBeulich@xxxxxxxx>
Date: Thu, 20 Oct 2011 14:43:35 +0100
Delivery-date: Thu, 20 Oct 2011 06:54:10 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-devel-request@lists.xensource.com?subject=help>
List-id: Xen developer discussion <xen-devel.lists.xensource.com>
List-post: <mailto:xen-devel@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=unsubscribe>
Sender: xen-devel-bounces@xxxxxxxxxxxxxxxxxxx
Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>

--- 2011-10-18.orig/xen/arch/x86/hpet.c 2011-10-12 09:00:51.000000000 +0200
+++ 2011-10-18/xen/arch/x86/hpet.c      2011-10-19 15:46:36.000000000 +0200
@@ -33,7 +33,7 @@ struct hpet_event_channel
     unsigned long mult;
     int           shift;
     s_time_t      next_event;
-    cpumask_t     cpumask;
+    cpumask_var_t cpumask;
     spinlock_t    lock;
     void          (*event_handler)(struct hpet_event_channel *);
 
@@ -182,14 +182,14 @@ again:
     now = NOW();
 
     /* find all expired events */
-    for_each_cpu_mask(cpu, ch->cpumask)
+    for_each_cpu_mask(cpu, *ch->cpumask)
     {
         s_time_t deadline;
 
         rmb();
         deadline = per_cpu(timer_deadline, cpu);
         rmb();
-        if ( !cpumask_test_cpu(cpu, &ch->cpumask) )
+        if ( !cpumask_test_cpu(cpu, ch->cpumask) )
             continue;
 
         if ( deadline <= now )
@@ -377,6 +377,16 @@ static void __init hpet_fsb_cap_lookup(v
         if ( !(cfg & HPET_TN_FSB_CAP) )
             continue;
 
+        if ( !zalloc_cpumask_var(&ch->cpumask) )
+        {
+            if ( !num_hpets_used )
+            {
+                xfree(hpet_events);
+                hpet_events = NULL;
+            }
+            break;
+        }
+
         ch->flags = 0;
         ch->idx = i;
 
@@ -449,14 +459,14 @@ static void hpet_detach_channel(unsigned
     if ( cpu != ch->cpu )
         return;
 
-    if ( cpus_empty(ch->cpumask) )
+    if ( cpumask_empty(ch->cpumask) )
     {
         ch->cpu = -1;
         clear_bit(HPET_EVT_USED_BIT, &ch->flags);
         return;
     }
 
-    ch->cpu = first_cpu(ch->cpumask);
+    ch->cpu = cpumask_first(ch->cpumask);
     hpet_msi_set_affinity(irq_to_desc(ch->irq), cpumask_of(ch->cpu));
 }
 
@@ -502,7 +512,14 @@ void __init hpet_broadcast_init(void)
             return;
 
         if ( !hpet_events )
+        {
             hpet_events = xzalloc(struct hpet_event_channel);
+            if ( hpet_events && !zalloc_cpumask_var(&hpet_events->cpumask) )
+            {
+                xfree(hpet_events);
+                hpet_events = NULL;
+            }
+        }
         if ( !hpet_events )
             return;
         hpet_events->irq = -1;
@@ -635,7 +652,7 @@ void hpet_broadcast_enter(void)
 
     /* Disable LAPIC timer interrupts. */
     disable_APIC_timer();
-    cpu_set(cpu, ch->cpumask);
+    cpumask_set_cpu(cpu, ch->cpumask);
 
     spin_lock(&ch->lock);
     /* reprogram if current cpu expire time is nearer */
@@ -660,7 +677,7 @@ void hpet_broadcast_exit(void)
     if ( !reprogram_timer(per_cpu(timer_deadline, cpu)) )
         raise_softirq(TIMER_SOFTIRQ);
 
-    cpu_clear(cpu, ch->cpumask);
+    cpumask_clear_cpu(cpu, ch->cpumask);
 
     if ( !(ch->flags & HPET_EVT_LEGACY) )
     {
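
(Not part of the patch: for readers unfamiliar with cpumask_var_t, below is a
minimal sketch of the allocate/check/use pattern the patch switches to. The
struct and function names -- example_channel, example_setup(),
example_teardown() -- are made up for illustration; free_cpumask_var() does not
appear in the patch and is assumed from the usual cpumask API, while the other
calls are the ones the patch itself uses.)

#include <xen/cpumask.h>
#include <xen/errno.h>
#include <xen/smp.h>

/* Hypothetical example -- illustrates cpumask_var_t handling only. */
struct example_channel {
    cpumask_var_t cpumask;      /* now a pointer, allocated at run time */
};

static int example_setup(struct example_channel *ch)
{
    /* Allocation can fail, so the result must be checked (as the
     * hpet_fsb_cap_lookup()/hpet_broadcast_init() hunks above do). */
    if ( !zalloc_cpumask_var(&ch->cpumask) )
        return -ENOMEM;

    /* Accessors take the pointer directly -- no '&' as with cpumask_t. */
    cpumask_set_cpu(smp_processor_id(), ch->cpumask);

    return 0;
}

static void example_teardown(struct example_channel *ch)
{
    /* free_cpumask_var() is assumed here; the hunks above never free the
     * masks once a channel has been set up. */
    free_cpumask_var(ch->cpumask);
}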



Attachment: x86-hpet-cpumask-alloc.patch
Description: Text document

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel