xen-devel

Re: [Xen-devel] Re: One question to IST stack for PV guest

To: Jeremy Fitzhardinge <jeremy@xxxxxxxx>
Subject: Re: [Xen-devel] Re: One question to IST stack for PV guest
From: Ian Campbell <Ian.Campbell@xxxxxxxxxx>
Date: Sat, 19 Dec 2009 09:24:32 +0000
Cc: "xen-devel@xxxxxxxxxxxxxxxxxxx" <xen-devel@xxxxxxxxxxxxxxxxxxx>, "Jiang, Yunhong" <yunhong.jiang@xxxxxxxxx>, Beulich <JBeulich@xxxxxxxxxx>, Jan, Keir Fraser <Keir.Fraser@xxxxxxxxxxxxx>, "Kleen, Andi" <andi.kleen@xxxxxxxxx>
In-reply-to: <4B2BF269.5040608@xxxxxxxx>
Organization: Citrix Systems, Inc.
References: <C8EDE645B81E5141A8C6B2F73FD9265105AE092F76@xxxxxxxxxxxxxxxxxxxxxxxxxxxxx> <4B2BF269.5040608@xxxxxxxx>
On Fri, 2009-12-18 at 21:21 +0000, Jeremy Fitzhardinge wrote:
> 
> > BTW, Jeremy, seems vNMI support is not included in pvops dom0, will
> > it be supported in future?
> 
> There's been no call for it so far, so I hadn't worried about it much.
> I was thinking it might be useful as a debug tool, but I don't know
> what it gets used for normally. 

SysRq-L (show all CPUs) uses it via arch_trigger_all_cpu_backtrace(),
which is a bit of a problem even in a domU because it goes through
apic->send_IPI_all(NMI_VECTOR) and ends in a "BUG: unable to handle
kernel paging request" in default_send_IPI_mask_logical.
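
For reference, the path from SysRq-L down to the failing IPI looks
roughly like this (a sketch from 2.6.32-era sources; details vary by
version):

/* include/linux/nmi.h (sketch): SysRq-L only takes the NMI path when
 * the architecture provides arch_trigger_all_cpu_backtrace() */
static inline bool trigger_all_cpu_backtrace(void)
{
        arch_trigger_all_cpu_backtrace();
        return true;
}

/* drivers/char/sysrq.c (sketch): the SysRq-L handler */
static void sysrq_handle_showallcpus(int key, struct tty_struct *tty)
{
        /* falls back to show_state() on arches without NMI backtrace */
        if (!trigger_all_cpu_backtrace())
                show_state();
}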

I started adding a new smp_op yesterday to allow this function to be
overridden (WIP appended), but having some sort of NMI support would be
useful to reduce the differences with native on the receiving end,
instead of using smp_call_function.
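
(For comparison, a sender using real NMI delivery might look something
like the sketch below. Purely illustrative: it assumes the interface
header carries VCPUOP_send_nmi and that the receiving guest registers
an NMI callback, which pvops doesn't do today.)

/* Illustrative sketch only: deliver a true NMI to each online vCPU
 * via the hypervisor rather than faking one with smp_call_function().
 * Assumes VCPUOP_send_nmi is available and an NMI callback is
 * registered on the receiving end. */
static void xen_send_nmi_ipi(void)
{
        int cpu;

        for_each_online_cpu(cpu)
                HYPERVISOR_vcpu_op(VCPUOP_send_nmi, cpu, NULL);
}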

Ian.


diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
index 1e79678..00ef5f7 100644
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -60,6 +60,8 @@ struct smp_ops {
 
        void (*send_call_func_ipi)(const struct cpumask *mask);
        void (*send_call_func_single_ipi)(int cpu);
+
+       void (*send_nmi_ipi)(void);
 };
 
 /* Globals due to paravirt */
@@ -126,6 +128,11 @@ static inline void arch_send_call_function_ipi_mask(const struct cpumask *mask)
        smp_ops.send_call_func_ipi(mask);
 }
 
+static inline void smp_send_nmi_ipi(void)
+{
+       smp_ops.send_nmi_ipi();
+}
+
 void cpu_disable_common(void);
 void native_smp_prepare_boot_cpu(void);
 void native_smp_prepare_cpus(unsigned int max_cpus);
@@ -139,6 +146,8 @@ void play_dead_common(void);
 void native_send_call_func_ipi(const struct cpumask *mask);
 void native_send_call_func_single_ipi(int cpu);
 
+void native_send_nmi_ipi(void);
+
 void smp_store_cpu_info(int id);
 #define cpu_physical_id(cpu)   per_cpu(x86_cpu_to_apicid, cpu)
 
diff --git a/arch/x86/kernel/apic/nmi.c b/arch/x86/kernel/apic/nmi.c
index 7ff61d6..40c1414 100644
--- a/arch/x86/kernel/apic/nmi.c
+++ b/arch/x86/kernel/apic/nmi.c
@@ -561,7 +561,7 @@ void arch_trigger_all_cpu_backtrace(void)
        cpumask_copy(&backtrace_mask, cpu_online_mask);
 
        printk(KERN_INFO "sending NMI to all CPUs:\n");
-       apic->send_IPI_all(NMI_VECTOR);
+       smp_send_nmi_ipi();
 
        /* Wait for up to 10 seconds for all CPUs to do the backtrace */
        for (i = 0; i < 10 * 1000; i++) {
diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
index ec1de97..f53437f 100644
--- a/arch/x86/kernel/smp.c
+++ b/arch/x86/kernel/smp.c
@@ -146,6 +146,11 @@ void native_send_call_func_ipi(const struct cpumask *mask)
        free_cpumask_var(allbutself);
 }
 
+void native_send_nmi_ipi(void)
+{
+       apic->send_IPI_all(NMI_VECTOR);
+}
+
 /*
  * this function calls the 'stop' function on all other CPUs in the system.
  */
@@ -236,5 +241,7 @@ struct smp_ops smp_ops = {
 
        .send_call_func_ipi     = native_send_call_func_ipi,
        .send_call_func_single_ipi = native_send_call_func_single_ipi,
+
+       .send_nmi_ipi           = native_send_nmi_ipi,
 };
 EXPORT_SYMBOL_GPL(smp_ops);
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 360f8d8..986f372 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -20,6 +20,7 @@
 #include <asm/desc.h>
 #include <asm/pgtable.h>
 #include <asm/cpu.h>
+#include <asm/nmi.h>
 
 #include <xen/interface/xen.h>
 #include <xen/interface/vcpu.h>
@@ -456,6 +457,16 @@ static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id)
        return IRQ_HANDLED;
 }
 
+static void xen_nmi_ipi_func(void *info)
+{
+       nmi_watchdog_tick(task_pt_regs(current), 0/*reason*/);
+}
+
+static void xen_send_nmi_ipi(void)
+{
+       smp_call_function(xen_nmi_ipi_func, NULL, 0);
+}
+
 static const struct smp_ops xen_smp_ops __initdata = {
        .smp_prepare_boot_cpu = xen_smp_prepare_boot_cpu,
        .smp_prepare_cpus = xen_smp_prepare_cpus,
@@ -471,6 +482,8 @@ static const struct smp_ops xen_smp_ops __initdata = {
 
        .send_call_func_ipi = xen_smp_send_call_function_ipi,
        .send_call_func_single_ipi = xen_smp_send_call_function_single_ipi,
+
+       .send_nmi_ipi = xen_send_nmi_ipi,
 };
 
 void __init xen_smp_init(void)
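
(Once this is in, the path can be exercised with "echo l >
/proc/sysrq-trigger", or Alt-SysRq-L on a console, which should give a
backtrace from every online CPU instead of the paging-request BUG.)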



_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel