
[Xen-devel] [PATCH v4 1/4] x86: introduce an nmi_count tracking variable



This is modeled after the irq_count variable, and is used to account
for all the NMIs handled by the system.

This will allow repurposing the nmi_count() helper so it can be used
in a similar manner to local_irq_count(): to account for the NMIs
currently in service.

Signed-off-by: Roger Pau Monné <roger.pau@xxxxxxxxxx>
---
Changes since v3:
 - Remove nmi_count macro and __nmi_count field in irq_cpustat_t.
---
 xen/arch/x86/nmi.c            | 11 +++++------
 xen/arch/x86/traps.c          |  4 +++-
 xen/include/asm-x86/hardirq.h |  1 -
 xen/include/asm-x86/nmi.h     |  2 ++
 xen/include/xen/irq_cpustat.h |  1 -
 5 files changed, 10 insertions(+), 9 deletions(-)

diff --git a/xen/arch/x86/nmi.c b/xen/arch/x86/nmi.c
index a69b91a924..c3f92ed231 100644
--- a/xen/arch/x86/nmi.c
+++ b/xen/arch/x86/nmi.c
@@ -151,15 +151,14 @@ int nmi_active;
 
 static void __init wait_for_nmis(void *p)
 {
-    unsigned int cpu = smp_processor_id();
-    unsigned int start_count = nmi_count(cpu);
+    unsigned int start_count = this_cpu(nmi_count);
     unsigned long ticks = 10 * 1000 * cpu_khz / nmi_hz;
     unsigned long s, e;
 
     s = rdtsc();
     do {
         cpu_relax();
-        if ( nmi_count(cpu) >= start_count + 2 )
+        if ( this_cpu(nmi_count) >= start_count + 2 )
             break;
         e = rdtsc();
     } while( e - s < ticks );
@@ -177,7 +176,7 @@ void __init check_nmi_watchdog(void)
     printk("Testing NMI watchdog on all CPUs:");
 
     for_each_online_cpu ( cpu )
-        prev_nmi_count[cpu] = nmi_count(cpu);
+        prev_nmi_count[cpu] = per_cpu(nmi_count, cpu);
 
     /*
      * Wait at most 10 ticks for 2 watchdog NMIs on each CPU.
@@ -188,7 +187,7 @@ void __init check_nmi_watchdog(void)
 
     for_each_online_cpu ( cpu )
     {
-        if ( nmi_count(cpu) - prev_nmi_count[cpu] < 2 )
+        if ( per_cpu(nmi_count, cpu) - prev_nmi_count[cpu] < 2 )
         {
             printk(" %d", cpu);
             ok = false;
@@ -593,7 +592,7 @@ static void do_nmi_stats(unsigned char key)
 
     printk("CPU\tNMI\n");
     for_each_online_cpu ( cpu )
-        printk("%3u\t%3u\n", cpu, nmi_count(cpu));
+        printk("%3u\t%3u\n", cpu, per_cpu(nmi_count, cpu));
 
     if ( !hardware_domain || !(v = domain_vcpu(hardware_domain, 0)) )
         return;
diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
index 56067f85d1..3dbc66bb64 100644
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -1683,13 +1683,15 @@ static int dummy_nmi_callback(const struct cpu_user_regs *regs, int cpu)
 
 static nmi_callback_t *nmi_callback = dummy_nmi_callback;
 
+DEFINE_PER_CPU(unsigned int, nmi_count);
+
 void do_nmi(const struct cpu_user_regs *regs)
 {
     unsigned int cpu = smp_processor_id();
     unsigned char reason = 0;
     bool handle_unknown = false;
 
-    ++nmi_count(cpu);
+    this_cpu(nmi_count)++;
 
     if ( nmi_callback(regs, cpu) )
         return;
diff --git a/xen/include/asm-x86/hardirq.h b/xen/include/asm-x86/hardirq.h
index 34e1b49260..802f91cfdf 100644
--- a/xen/include/asm-x86/hardirq.h
+++ b/xen/include/asm-x86/hardirq.h
@@ -7,7 +7,6 @@
 typedef struct {
        unsigned int __softirq_pending;
        unsigned int __local_irq_count;
-       unsigned int __nmi_count;
        bool_t __mwait_wakeup;
 } __cacheline_aligned irq_cpustat_t;
 
diff --git a/xen/include/asm-x86/nmi.h b/xen/include/asm-x86/nmi.h
index f9dfca6afb..a288f02a50 100644
--- a/xen/include/asm-x86/nmi.h
+++ b/xen/include/asm-x86/nmi.h
@@ -31,5 +31,7 @@ nmi_callback_t *set_nmi_callback(nmi_callback_t *callback);
  * Remove the handler previously set.
  */
 void unset_nmi_callback(void);
+
+DECLARE_PER_CPU(unsigned int, nmi_count);
  
 #endif /* ASM_NMI_H */
diff --git a/xen/include/xen/irq_cpustat.h b/xen/include/xen/irq_cpustat.h
index 73629f6ec8..b9629f25c2 100644
--- a/xen/include/xen/irq_cpustat.h
+++ b/xen/include/xen/irq_cpustat.h
@@ -24,7 +24,6 @@ extern irq_cpustat_t irq_stat[];
   /* arch independent irq_stat fields */
 #define softirq_pending(cpu)   __IRQ_STAT((cpu), __softirq_pending)
 #define local_irq_count(cpu)   __IRQ_STAT((cpu), __local_irq_count)
-#define nmi_count(cpu)         __IRQ_STAT((cpu), __nmi_count)
 #define mwait_wakeup(cpu)      __IRQ_STAT((cpu), __mwait_wakeup)
 
 #endif /* __irq_cpustat_h */
-- 
2.25.0

