[Xen-devel] [Patch v4 1/5] x86/hpet: Pre-cleanup

These changes can be pulled out of the subsequent patch, making it easier
to understand and review.

They are all misc fixes with negligible functional changes.

* Rename hpet_next_event -> hpet_set_counter and convert it to take an
  hpet_event_channel pointer rather than a timer index (interface change
  sketched below).

* Rename reprogram_hpet_evt_channel -> hpet_program_time.

* Move the setting of HPET_EVT_LEGACY in hpet_broadcast_init() into the
  legacy-specific branch.  It didn't need to be where it was.
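
As an illustration of the first point, the interface change in isolation.
This is a sketch only; the real struct hpet_event_channel in
xen/arch/x86/hpet.c carries further fields (lock, next_event, shift,
mult, ...) beyond the comparator index used here.

    /* Forward declaration; full definition lives in xen/arch/x86/hpet.c. */
    struct hpet_event_channel;

    /* Before: callers passed a raw comparator (timer) index. */
    static int hpet_next_event(unsigned long delta, int timer);

    /* After: callers pass the whole event channel, and the comparator
     * index is read from ch->idx inside the function. */
    static int hpet_set_counter(struct hpet_event_channel *ch,
                                unsigned long delta);

Taking the channel pointer keeps the comparator index and the rest of the
per-channel state together at each call site, instead of passing ch->idx
alongside ch.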

Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
CC: Keir Fraser <keir@xxxxxxx>
CC: Jan Beulich <JBeulich@xxxxxxxx>
CC: Tim Deegan <tim@xxxxxxx>
---
 xen/arch/x86/hpet.c |   31 +++++++++++++++++++------------
 1 file changed, 19 insertions(+), 12 deletions(-)
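
Note for reviewers: the 'delta' passed to hpet_set_counter() is already in
HPET ticks; hpet_program_time() converts from nanoseconds via ns2ticks()
(see the context of the first hunk).  A minimal sketch of that conversion,
assuming the conventional scaled-multiply form (ticks = nsec * mult >>
shift, widened to 64 bits to avoid overflow):

    #include <stdint.h>

    /* Sketch of the assumed ns2ticks() form; the real helper sits just
     * above the first hunk in xen/arch/x86/hpet.c. */
    static inline unsigned long ns2ticks(unsigned long nsec, int shift,
                                         uint32_t mult)
    {
        /* Widen before multiplying so nsec * mult cannot overflow,
         * then scale down by the precomputed shift. */
        uint64_t tmp = ((uint64_t)nsec * mult) >> shift;

        return (unsigned long)tmp;
    }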

diff --git a/xen/arch/x86/hpet.c b/xen/arch/x86/hpet.c
index 3a4f7e8..fd44582 100644
--- a/xen/arch/x86/hpet.c
+++ b/xen/arch/x86/hpet.c
@@ -94,7 +94,12 @@ static inline unsigned long ns2ticks(unsigned long nsec, int shift,
     return (unsigned long) tmp;
 }
 
-static int hpet_next_event(unsigned long delta, int timer)
+/*
+ * Program an HPET channel's counter relative to now.  'delta' is specified in
+ * ticks, and should be calculated with ns2ticks().  The channel lock should
+ * be taken and interrupts must be disabled.
+ */
+static int hpet_set_counter(struct hpet_event_channel *ch, unsigned long delta)
 {
     uint32_t cnt, cmp;
     unsigned long flags;
@@ -102,7 +107,7 @@ static int hpet_next_event(unsigned long delta, int timer)
     local_irq_save(flags);
     cnt = hpet_read32(HPET_COUNTER);
     cmp = cnt + delta;
-    hpet_write32(cmp, HPET_Tn_CMP(timer));
+    hpet_write32(cmp, HPET_Tn_CMP(ch->idx));
     cmp = hpet_read32(HPET_COUNTER);
     local_irq_restore(flags);
 
@@ -110,9 +115,12 @@ static int hpet_next_event(unsigned long delta, int timer)
     return ((cmp + 2 - cnt) > delta) ? -ETIME : 0;
 }
 
-static int reprogram_hpet_evt_channel(
-    struct hpet_event_channel *ch,
-    s_time_t expire, s_time_t now, int force)
+/*
+ * Set the time at which an HPET channel should fire.  The channel lock should
+ * be held.
+ */
+static int hpet_program_time(struct hpet_event_channel *ch,
+                             s_time_t expire, s_time_t now, int force)
 {
     int64_t delta;
     int ret;
@@ -143,11 +151,11 @@ static int reprogram_hpet_evt_channel(
     delta = max_t(int64_t, delta, MIN_DELTA_NS);
     delta = ns2ticks(delta, ch->shift, ch->mult);
 
-    ret = hpet_next_event(delta, ch->idx);
+    ret = hpet_set_counter(ch, delta);
     while ( ret && force )
     {
         delta += delta;
-        ret = hpet_next_event(delta, ch->idx);
+        ret = hpet_set_counter(ch, delta);
     }
 
     return ret;
@@ -209,7 +217,7 @@ again:
         spin_lock_irqsave(&ch->lock, flags);
 
         if ( next_event < ch->next_event &&
-             reprogram_hpet_evt_channel(ch, next_event, now, 0) )
+             hpet_program_time(ch, next_event, now, 0) )
             goto again;
 
         spin_unlock_irqrestore(&ch->lock, flags);
@@ -583,6 +591,8 @@ void __init hpet_broadcast_init(void)
         cfg |= HPET_CFG_LEGACY;
         n = 1;
 
+        hpet_events->flags = HPET_EVT_LEGACY;
+
         if ( !force_hpet_broadcast )
             pv_rtc_handler = handle_rtc_once;
     }
@@ -615,9 +625,6 @@ void __init hpet_broadcast_init(void)
         hpet_events[i].msi.msi_attrib.maskbit = 1;
         hpet_events[i].msi.msi_attrib.pos = MSI_TYPE_HPET;
     }
-
-    if ( !num_hpets_used )
-        hpet_events->flags = HPET_EVT_LEGACY;
 }
 
 void hpet_broadcast_resume(void)
@@ -716,7 +723,7 @@ void hpet_broadcast_enter(void)
     spin_lock(&ch->lock);
     /* reprogram if current cpu expire time is nearer */
     if ( per_cpu(timer_deadline, cpu) < ch->next_event )
-        reprogram_hpet_evt_channel(ch, per_cpu(timer_deadline, cpu), NOW(), 1);
+        hpet_program_time(ch, per_cpu(timer_deadline, cpu), NOW(), 1);
     spin_unlock(&ch->lock);
 }
 
-- 
1.7.10.4

