
[Xen-devel] [PATCH 2 of 2] xen tracing: Add tracing for IRQ-related events



Add tracing for various IRQ-related events.  Also, move
the existing TRC_TRACE_IRQ from the "generic" class into the
new TRC_HW_IRQ sub-class.

Signed-off-by: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
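
For anyone correlating these events with raw trace buffers: below is a
minimal, hypothetical decoder sketch for the new TRC_HW_IRQ_HANDLED
record (three data words: irq, tsc_in, tsc_out).  It assumes the full
t_rec layout from xen/include/public/trace.h, of which only the leading
event:28 bitfield is quoted at the end of this patch; the helper name
is illustrative, not part of the patch.

#include <stdint.h>
#include <stdio.h>

/*
 * Mirror of the trace record layout.  Assumption: this matches the
 * full definition in xen/include/public/trace.h, of which only the
 * event:28 bitfield is visible in the hunk below.
 */
struct t_rec {
    uint32_t event:28;
    uint32_t extra_u32:3;        /* number of trailing data words */
    uint32_t cycles_included:1;  /* TSC stamp present? */
    union {
        struct {
            uint32_t cycles_lo, cycles_hi;
            uint32_t extra_u32[7];
        } cycles;
        struct {
            uint32_t extra_u32[7];
        } nocycles;
    } u;
};

#define TRC_HW_IRQ          0x00802000
#define TRC_HW_IRQ_HANDLED  (TRC_HW_IRQ + 0x8)

/*
 * Hypothetical helper: report how long a handler ran, from one
 * HANDLED record.  TRACE_3D stores its arguments as 32-bit words,
 * so the delta below is modulo 2^32.
 */
static void decode_irq_handled(const struct t_rec *rec)
{
    const uint32_t *d = rec->cycles_included ? rec->u.cycles.extra_u32
                                             : rec->u.nocycles.extra_u32;

    if (rec->event != TRC_HW_IRQ_HANDLED || rec->extra_u32 < 3)
        return;
    printf("irq %u handled in %u cycles\n", d[0], d[2] - d[1]);
}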

diff -r 9744f91e8c0c -r d925415a4f93 xen/arch/x86/io_apic.c
--- a/xen/arch/x86/io_apic.c    Thu Jun 30 10:32:12 2011 +0100
+++ b/xen/arch/x86/io_apic.c    Thu Jun 30 10:32:15 2011 +0100
@@ -37,6 +37,7 @@
 #include <mach_apic.h>
 #include <io_ports.h>
 #include <public/physdev.h>
+#include <xen/trace.h>
 
 /* Where if anywhere is the i8259 connect in external int mode */
 static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };
@@ -439,8 +440,14 @@ fastcall void smp_irq_move_cleanup_inter
          */
         if (irr  & (1 << (vector % 32))) {
             genapic->send_IPI_self(IRQ_MOVE_CLEANUP_VECTOR);
+            TRACE_3D(TRC_HW_IRQ_MOVE_CLEANUP_DELAY,
+                     irq, vector, smp_processor_id());
             goto unlock;
         }
+
+        TRACE_3D(TRC_HW_IRQ_MOVE_CLEANUP,
+                 irq, vector, smp_processor_id());
+
         __get_cpu_var(vector_irq)[vector] = -1;
         cfg->move_cleanup_count--;
 unlock:
diff -r 9744f91e8c0c -r d925415a4f93 xen/arch/x86/irq.c
--- a/xen/arch/x86/irq.c        Thu Jun 30 10:32:12 2011 +0100
+++ b/xen/arch/x86/irq.c        Thu Jun 30 10:32:15 2011 +0100
@@ -90,6 +90,20 @@ static int __init __bind_irq_vector(int 
         return 0;
     if (cfg->vector != IRQ_VECTOR_UNASSIGNED) 
         return -EBUSY;
+
+    {
+        struct {
+            int irq, vec;
+            cpumask_t mask;
+        } d;
+        d.irq = irq;
+        d.vec = vector;
+        d.mask = online_mask;
+        trace_var(TRC_HW_IRQ_BIND_VECTOR, 1,
+                  sizeof(d),
+                  (unsigned char *)&d);
+    }
+
     for_each_cpu_mask(cpu, online_mask)
         per_cpu(vector_irq, cpu)[vector] = irq;
     cfg->vector = vector;
@@ -181,6 +195,19 @@ static void __clear_irq_vector(int irq)
     vector = cfg->vector;
     cpus_and(tmp_mask, cfg->cpu_mask, cpu_online_map);
 
+    {
+        struct {
+            int irq, vec;
+            cpumask_t mask;
+        } d;
+        d.irq = irq;
+        d.vec = vector;
+        d.mask = tmp_mask;
+        trace_var(TRC_HW_IRQ_CLEAR_VECTOR, 1,
+                  sizeof(d),
+                  (unsigned char *)&d);
+    }
+
     for_each_cpu_mask(cpu, tmp_mask)
         per_cpu(vector_irq, cpu)[vector] = -1;
 
@@ -195,6 +222,8 @@ static void __clear_irq_vector(int irq)
                                 vector++) {
             if (per_cpu(vector_irq, cpu)[vector] != irq)
                 continue;
+            TRACE_3D(TRC_HW_IRQ_MOVE_FINISH,
+                     irq, vector, cpu);
             per_cpu(vector_irq, cpu)[vector] = -1;
              break;
         }
@@ -394,6 +423,18 @@ next:
             cfg->move_in_progress = 1;
             cpus_copy(cfg->old_cpu_mask, cfg->cpu_mask);
         }
+        {
+            struct {
+                int irq, vec;
+                cpumask_t mask;
+            } d;
+            d.irq = irq;
+            d.vec = vector;
+            d.mask = tmp_mask;
+            trace_var(TRC_HW_IRQ_ASSIGN_VECTOR, 1,
+                      sizeof(d),
+                      (unsigned char *)&d);
+        }
         for_each_cpu_mask(new_cpu, tmp_mask)
             per_cpu(vector_irq, new_cpu)[vector] = irq;
         cfg->vector = vector;
@@ -539,6 +580,7 @@ asmlinkage void do_IRQ(struct cpu_user_r
         printk("%s: %d.%d No irq handler for vector (irq %d)\n",
                 __func__, smp_processor_id(), vector, irq);
         set_irq_regs(old_regs);
+        TRACE_1D(TRC_HW_IRQ_UNMAPPED_VECTOR, vector);
         return;
     }
 
@@ -579,7 +621,7 @@ asmlinkage void do_IRQ(struct cpu_user_r
 
         tsc_in = tb_init_done ? get_cycles() : 0;
         __do_IRQ_guest(irq);
-        TRACE_3D(TRC_TRACE_IRQ, irq, tsc_in, get_cycles());
+        TRACE_3D(TRC_HW_IRQ_HANDLED, irq, tsc_in, get_cycles());
         goto out_no_end;
     }
 
@@ -602,7 +644,7 @@ asmlinkage void do_IRQ(struct cpu_user_r
         spin_unlock_irq(&desc->lock);
         tsc_in = tb_init_done ? get_cycles() : 0;
         action->handler(irq, action->dev_id, regs);
-        TRACE_3D(TRC_TRACE_IRQ, irq, tsc_in, get_cycles());
+        TRACE_3D(TRC_HW_IRQ_HANDLED, irq, tsc_in, get_cycles());
         spin_lock_irq(&desc->lock);
     }
 
diff -r 9744f91e8c0c -r d925415a4f93 xen/include/public/trace.h
--- a/xen/include/public/trace.h        Thu Jun 30 10:32:12 2011 +0100
+++ b/xen/include/public/trace.h        Thu Jun 30 10:32:15 2011 +0100
@@ -59,12 +59,12 @@
 
 /* Trace classes for Hardware */
 #define TRC_HW_PM           0x00801000   /* Power management traces */
+#define TRC_HW_IRQ          0x00802000   /* Traces relating to the handling of IRQs */
 
 /* Trace events per class */
 #define TRC_LOST_RECORDS        (TRC_GEN + 1)
 #define TRC_TRACE_WRAP_BUFFER  (TRC_GEN + 2)
 #define TRC_TRACE_CPU_CHANGE    (TRC_GEN + 3)
-#define TRC_TRACE_IRQ           (TRC_GEN + 4)
 
 #define TRC_SCHED_RUNSTATE_CHANGE   (TRC_SCHED_MIN + 1)
 #define TRC_SCHED_CONTINUE_RUNNING  (TRC_SCHED_MIN + 2)
@@ -173,6 +173,17 @@
 #define TRC_PM_IDLE_ENTRY       (TRC_HW_PM + 0x02)
 #define TRC_PM_IDLE_EXIT        (TRC_HW_PM + 0x03)
 
+/* Trace events for IRQs */
+#define TRC_HW_IRQ_MOVE_CLEANUP_DELAY (TRC_HW_IRQ + 0x1)
+#define TRC_HW_IRQ_MOVE_CLEANUP       (TRC_HW_IRQ + 0x2)
+#define TRC_HW_IRQ_BIND_VECTOR        (TRC_HW_IRQ + 0x3)
+#define TRC_HW_IRQ_CLEAR_VECTOR       (TRC_HW_IRQ + 0x4)
+#define TRC_HW_IRQ_MOVE_FINISH        (TRC_HW_IRQ + 0x5)
+#define TRC_HW_IRQ_ASSIGN_VECTOR      (TRC_HW_IRQ + 0x6)
+#define TRC_HW_IRQ_UNMAPPED_VECTOR    (TRC_HW_IRQ + 0x7)
+#define TRC_HW_IRQ_HANDLED            (TRC_HW_IRQ + 0x8)
+
+
 /* This structure represents a single trace buffer record. */
 struct t_rec {
     uint32_t event:28;
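
One note on the three vector events (BIND/CLEAR/ASSIGN): their payload
embeds a cpumask_t, whose size depends on NR_CPUS, which is why the
hunks above use trace_var() with an explicit sizeof rather than a
fixed TRACE_nD wrapper.  A consumer has to mirror that layout exactly.
A rough sketch, assuming NR_CPUS=64 and a decoder built with the same
word size and endianness as the hypervisor (all of which are
assumptions, not guarantees of the trace format):

#include <stdint.h>
#include <stdio.h>

#define NR_CPUS       64   /* assumption: must match the hypervisor build */
#define BITS_PER_LONG (8 * sizeof(unsigned long))

/*
 * Mirror of the anonymous struct traced by __bind_irq_vector(),
 * __clear_irq_vector() and __assign_irq_vector() above; mask_bits[]
 * stands in for the hypervisor's cpumask_t.
 */
struct irq_vector_payload {
    int32_t irq, vec;
    unsigned long mask_bits[(NR_CPUS + BITS_PER_LONG - 1) / BITS_PER_LONG];
};

static void print_vector_event(const struct irq_vector_payload *p)
{
    unsigned int cpu;

    printf("irq %d -> vector %d, cpus:", p->irq, p->vec);
    for (cpu = 0; cpu < NR_CPUS; cpu++)
        if (p->mask_bits[cpu / BITS_PER_LONG] & (1UL << (cpu % BITS_PER_LONG)))
            printf(" %u", cpu);
    printf("\n");
}

If NR_CPUS or the struct padding differs between the hypervisor and
the decoder, the fields silently misalign; that is the usual hazard
with binary trace payloads.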

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel