[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-devel] [PATCH] x86: add irq count for IPIs


  • To: "xen-devel@xxxxxxxxxxxxxxxxxxx" <xen-devel@xxxxxxxxxxxxxxxxxxx>
  • From: "Tian, Kevin" <kevin.tian@xxxxxxxxx>
  • Date: Tue, 30 Aug 2011 16:11:51 +0800
  • Accept-language: en-US
  • Acceptlanguage: en-US
  • Delivery-date: Tue, 30 Aug 2011 01:13:26 -0700
  • List-id: Xen developer discussion <xen-devel.lists.xensource.com>
  • Thread-index: Acxm7GODAKpcKr0oTK62Z+TnfZWJ+g==
  • Thread-topic: [PATCH] x86: add irq count for IPIs

x86: add irq count for IPIs

Such a count is useful to assist decision making in the cpuidle
governor; without this patch only device interrupts going through
do_IRQ are currently counted.

Signed-off-by: Kevin Tian <kevin.tian@xxxxxxxxx>

diff -r 2c687e70a343 xen/arch/x86/apic.c
--- a/xen/arch/x86/apic.c       Sat Aug 27 12:20:19 2011 +0100
+++ b/xen/arch/x86/apic.c       Tue Aug 30 12:32:27 2011 +0800
@@ -1332,6 +1332,7 @@ fastcall void smp_apic_timer_interrupt(s
     struct cpu_user_regs *old_regs = set_irq_regs(regs);
     ack_APIC_irq();
     perfc_incr(apic_timer);
+    this_cpu(irq_count)++;
     raise_softirq(TIMER_SOFTIRQ);
     set_irq_regs(old_regs);
 }
@@ -1353,6 +1354,7 @@ fastcall void smp_spurious_interrupt(str
     unsigned long v;
     struct cpu_user_regs *old_regs = set_irq_regs(regs);
 
+    this_cpu(irq_count)++;
     irq_enter();
 
     /*
@@ -1388,6 +1390,7 @@ fastcall void smp_error_interrupt(struct
     unsigned long v, v1;
     struct cpu_user_regs *old_regs = set_irq_regs(regs);
 
+    this_cpu(irq_count)++;
     irq_enter();
     /* First tickle the hardware, only then report what went on. -- REW */
     v = apic_read(APIC_ESR);
@@ -1419,6 +1422,7 @@ fastcall void smp_pmu_apic_interrupt(str
 {
     struct cpu_user_regs *old_regs = set_irq_regs(regs);
     ack_APIC_irq();
+    this_cpu(irq_count)++;
     hvm_do_pmu_interrupt(regs);
     set_irq_regs(old_regs);
 }
diff -r 2c687e70a343 xen/arch/x86/cpu/mcheck/mce_intel.c
--- a/xen/arch/x86/cpu/mcheck/mce_intel.c       Sat Aug 27 12:20:19 2011 +0100
+++ b/xen/arch/x86/cpu/mcheck/mce_intel.c       Tue Aug 30 12:32:27 2011 +0800
@@ -77,6 +77,7 @@ static void (*__read_mostly vendor_therm
 fastcall void smp_thermal_interrupt(struct cpu_user_regs *regs)
 {
     struct cpu_user_regs *old_regs = set_irq_regs(regs);
+    this_cpu(irq_count)++;
     irq_enter();
     vendor_thermal_interrupt(regs);
     irq_exit();
@@ -1147,6 +1148,7 @@ fastcall void smp_cmci_interrupt(struct 
     struct cpu_user_regs *old_regs = set_irq_regs(regs);
 
     ack_APIC_irq();
+    this_cpu(irq_count)++;
     irq_enter();
 
     mctc = mcheck_mca_logout(
diff -r 2c687e70a343 xen/arch/x86/io_apic.c
--- a/xen/arch/x86/io_apic.c    Sat Aug 27 12:20:19 2011 +0100
+++ b/xen/arch/x86/io_apic.c    Tue Aug 30 12:32:27 2011 +0800
@@ -441,6 +441,7 @@ fastcall void smp_irq_move_cleanup_inter
     struct cpu_user_regs *old_regs = set_irq_regs(regs);
 
     ack_APIC_irq();
+    this_cpu(irq_count)++;
     irq_enter();
 
     me = smp_processor_id();
diff -r 2c687e70a343 xen/arch/x86/smp.c
--- a/xen/arch/x86/smp.c        Sat Aug 27 12:20:19 2011 +0100
+++ b/xen/arch/x86/smp.c        Tue Aug 30 12:32:27 2011 +0800
@@ -221,6 +221,7 @@ fastcall void smp_invalidate_interrupt(v
 {
     ack_APIC_irq();
     perfc_incr(ipis);
+    this_cpu(irq_count)++;
     irq_enter();
     if ( !__sync_local_execstate() ||
          (flush_flags & (FLUSH_TLB_GLOBAL | FLUSH_CACHE)) )
@@ -385,6 +386,7 @@ fastcall void smp_event_check_interrupt(
     struct cpu_user_regs *old_regs = set_irq_regs(regs);
     ack_APIC_irq();
     perfc_incr(ipis);
+    this_cpu(irq_count)++;
     set_irq_regs(old_regs);
 }
 
@@ -421,6 +423,7 @@ fastcall void smp_call_function_interrup
 
     ack_APIC_irq();
     perfc_incr(ipis);
+    this_cpu(irq_count)++;
     __smp_call_function_interrupt();
     set_irq_regs(old_regs);
 }

Attachment: 20110830_count_ipi.patch
Description: 20110830_count_ipi.patch

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel

 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.