
[Xen-devel] [PATCH 3/4] x86/HPET: use dynamic allocation for hpet_events[]



Typically there are far fewer than 32 counters available, so there is
no point in wasting the memory for the full static array on (almost)
every system.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxxxx>
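
(For readers outside the Xen tree: below is a minimal, standalone C
sketch of the pattern this patch applies: size the array from a
runtime-probed count instead of a compile-time maximum. It is not Xen
code; probe_num_channels() is a hypothetical stand-in for reading the
channel count out of HPET_ID, and calloc() stands in for Xen's
xmalloc_array() plus memset().)

#include <stdio.h>
#include <stdlib.h>

struct event_channel {
    int irq;
    unsigned int flags;
};

/* Hypothetical stand-in for reading the channel count from HPET_ID. */
static unsigned int probe_num_channels(void)
{
    return 3; /* typical systems expose far fewer than 32 */
}

int main(void)
{
    unsigned int num_chs = probe_num_channels();

    /* Allocate exactly num_chs entries instead of a fixed [32] array. */
    struct event_channel *chs = calloc(num_chs, sizeof(*chs));
    if ( !chs )
        return 1; /* degrade gracefully, as the patch does */

    for ( unsigned int i = 0; i < num_chs; i++ )
        chs[i].irq = -1; /* keep the old "-1 = unassigned" default */

    printf("allocated %u channels (%zu bytes) rather than 32 (%zu bytes)\n",
           num_chs, num_chs * sizeof(*chs), (size_t)32 * sizeof(*chs));

    free(chs);
    return 0;
}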

--- 2011-03-09.orig/xen/arch/x86/hpet.c
+++ 2011-03-09/xen/arch/x86/hpet.c
@@ -21,8 +21,6 @@
 #define MAX_DELTA_NS MILLISECS(10*1000)
 #define MIN_DELTA_NS MICROSECS(20)
 
-#define MAX_HPET_NUM 32
-
 #define HPET_EVT_USED_BIT    0
 #define HPET_EVT_USED       (1 << HPET_EVT_USED_BIT)
 #define HPET_EVT_DISABLE_BIT 1
@@ -56,8 +54,7 @@ struct hpet_event_channel
     int irq;            /* msi irq */
     unsigned int flags; /* HPET_EVT_x */
 } __cacheline_aligned;
-static struct hpet_event_channel hpet_events[MAX_HPET_NUM] = 
-    { [0 ... MAX_HPET_NUM-1].irq = -1 };
+static struct hpet_event_channel *__read_mostly hpet_events;
 
 /* msi hpet channels used for broadcast */
 static unsigned int __read_mostly num_hpets_used;
@@ -426,6 +423,11 @@ static int __init hpet_fsb_cap_lookup(vo
     num_chs = ((id & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT);
     num_chs++; /* Value read out starts from 0 */
 
+    hpet_events = xmalloc_array(struct hpet_event_channel, num_chs);
+    if ( !hpet_events )
+        return 0;
+    memset(hpet_events, 0, num_chs * sizeof(*hpet_events));
+
     num_chs_used = 0;
     for ( i = 0; i < num_chs; i++ )
     {
@@ -585,10 +587,16 @@ void __init hpet_broadcast_init(void)
         if ( !(hpet_id & HPET_ID_LEGSUP) )
             return;
 
+        if ( !hpet_events )
+            hpet_events = xmalloc(struct hpet_event_channel);
+        if ( !hpet_events )
+            return;
+        memset(hpet_events, 0, sizeof(*hpet_events));
+        hpet_events->irq = -1;
+
         /* Start HPET legacy interrupts */
         cfg |= HPET_CFG_LEGACY;
         n = 1;
-        hpet_events->idx = 0;
 
         if ( !force_hpet_broadcast )
             pv_rtc_handler = handle_rtc_once;
@@ -627,6 +635,9 @@ void hpet_broadcast_resume(void)
     u32 cfg;
     unsigned int i, n;
 
+    if ( !hpet_events )
+        return;
+
     hpet_resume();
 
     cfg = hpet_read32(HPET_CFG);
@@ -668,7 +679,7 @@ void hpet_disable_legacy_broadcast(void)
     u32 cfg;
     unsigned long flags;
 
-    if ( !(hpet_events->flags & HPET_EVT_LEGACY) )
+    if ( !hpet_events || !(hpet_events->flags & HPET_EVT_LEGACY) )
         return;
 
     spin_lock_irqsave(&hpet_events->lock, flags);
@@ -751,7 +762,7 @@ void hpet_broadcast_exit(void)
 
 int hpet_broadcast_is_available(void)
 {
-    return ((hpet_events->flags & HPET_EVT_LEGACY)
+    return ((hpet_events && (hpet_events->flags & HPET_EVT_LEGACY))
             || num_hpets_used > 0);
 }
 
@@ -759,7 +770,8 @@ int hpet_legacy_irq_tick(void)
 {
     this_cpu(irq_count)--;
 
-    if ( (hpet_events->flags & (HPET_EVT_DISABLE|HPET_EVT_LEGACY)) !=
+    if ( !hpet_events ||
+         (hpet_events->flags & (HPET_EVT_DISABLE|HPET_EVT_LEGACY)) !=
          HPET_EVT_LEGACY )
         return 0;
     hpet_events->event_handler(hpet_events);



