To: <xen-devel@xxxxxxxxxxxxxxxxxxx>
Subject: [Xen-devel] [PATCH 6 of 6] KEXEC: disable iommu jumping into the kdump kernel
From: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
Date: Wed, 25 May 2011 15:32:08 +0100
Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
Delivery-date: Wed, 25 May 2011 07:39:59 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
In-reply-to: <patchbomb.1306333922@andrewcoop>
List-help: <mailto:xen-devel-request@lists.xensource.com?subject=help>
List-id: Xen developer discussion <xen-devel.lists.xensource.com>
List-post: <mailto:xen-devel@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=unsubscribe>
References: <patchbomb.1306333922@andrewcoop>
Sender: xen-devel-bounces@xxxxxxxxxxxxxxxxxxx
User-agent: Mercurial-patchbomb/1.4.3
kdump kernels are unable to boot with the IOMMU still enabled.
This patch disables the IOMMU on the crash path and removes some of
the generic code from the shutdown path which doesn't work after the
other CPUs have been shot down.

Because we need to replace the calls to disable_local_APIC, we remove
the calls to __stop_this_cpu and inline their contents, with suitable
modifications for interrupts and the local APIC.
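
For context, the sequence being open-coded on the crash path corresponds
to the body of __stop_this_cpu.  A rough sketch of that function (from
memory, not a verbatim copy of xen/arch/x86/smp.c) looks like:

    void __stop_this_cpu(void)
    {
        ASSERT(!local_irq_is_enabled());

        disable_local_APIC();   /* swapped for crash_disable_local_APIC() below */

        hvm_cpu_down();

        /* Clear the FPU, zapping any pending exceptions. */
        clts();
        asm volatile ( "fninit" );
    }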

At the bottom of nmi_shootdown_cpus, remove the call to
local_irq_enable, as this causes us to jump into purgatory with the
interrupt flag enabled.  From a quick grep through the current
kexec-tools source, there is a fair amount of time before the
interrupt flag is touched, meaning that we could be servicing
interrupts in the Xen context even though we have really crashed and
left.

hpet_disable_legacy_broadcast has been split sideways into a crash
version which forgoes the locks (as only one CPU is still running by
this point) and forgoes the IPI to the other processors.
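
For comparison, the regular hpet_disable_legacy_broadcast() performs the
same two HPET register writes, but under the event channel lock and
followed by an event-check IPI to the other CPUs.  Roughly (a sketch
which assumes the lock lives in the hpet_events structure; not the exact
upstream code):

    void hpet_disable_legacy_broadcast(void)
    {
        u32 cfg;
        unsigned long flags;

        spin_lock_irqsave(&hpet_events->lock, flags);

        hpet_events->flags |= HPET_EVT_DISABLE;

        /* Disable HPET T0. */
        cfg = hpet_read32(HPET_Tn_CFG(0));
        cfg &= ~HPET_TN_ENABLE;
        hpet_write32(cfg, HPET_Tn_CFG(0));

        /* Stop HPET legacy interrupts. */
        cfg = hpet_read32(HPET_CFG);
        cfg &= ~HPET_CFG_LEGACY;
        hpet_write32(cfg, HPET_CFG);

        spin_unlock_irqrestore(&hpet_events->lock, flags);

        /* Tell the other CPUs that the broadcast has gone away. */
        smp_send_event_check_mask(&cpu_online_map);
    }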

Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>

diff -r d3026545e9a0 -r cb005f9078d3 xen/arch/x86/crash.c
--- a/xen/arch/x86/crash.c      Wed May 25 15:12:37 2011 +0100
+++ b/xen/arch/x86/crash.c      Wed May 25 15:30:43 2011 +0100
@@ -27,6 +27,7 @@
 #include <asm/hvm/support.h>
 #include <asm/apic.h>
 #include <asm/io_apic.h>
+#include <xen/iommu.h>
 
 static atomic_t waiting_for_crash_ipi;
 static unsigned int crashing_cpu;
@@ -43,7 +44,10 @@ static int crash_nmi_callback(struct cpu
 
     kexec_crash_save_cpu();
 
-    __stop_this_cpu();
+    crash_disable_local_APIC(FALSE);
+    hvm_cpu_down();
+    clts();
+    asm volatile ( "fninit" );
 
     atomic_dec(&waiting_for_crash_ipi);
 
@@ -77,10 +81,20 @@ static void nmi_shootdown_cpus(void)
         msecs--;
     }
 
-    __stop_this_cpu();
+    crash_disable_local_APIC(TRUE);
+    hvm_cpu_down();
+    clts();
+    asm volatile ( "fninit" );
+
+    /* This is a bit of a hack due to the problems with the x2apic_enabled
+     * variable, but we can't do any better without a significant refactoring
+     * of the APIC code */
+    if ( current_local_apic_mode() == APIC_MODE_X2APIC )
+        x2apic_enabled = 1;
+    else
+        x2apic_enabled = 0;
+
     disable_IO_APIC();
-
-    local_irq_enable();
 }
 
 void machine_crash_shutdown(void)
@@ -89,6 +103,10 @@ void machine_crash_shutdown(void)
 
     nmi_shootdown_cpus();
 
+    /* Crash shutdown any IOMMU functionality as the crashdump kernel is not
+     * happy when booting if interrupt/dma remapping is still enabled */
+    iommu_crash_shutdown();
+
     info = kexec_crash_save_info();
     info->xen_phys_start = xen_phys_start;
     info->dom0_pfn_to_mfn_frame_list_list =
diff -r d3026545e9a0 -r cb005f9078d3 xen/arch/x86/hpet.c
--- a/xen/arch/x86/hpet.c       Wed May 25 15:12:37 2011 +0100
+++ b/xen/arch/x86/hpet.c       Wed May 25 15:30:43 2011 +0100
@@ -670,6 +670,32 @@ void hpet_disable_legacy_broadcast(void)
     smp_send_event_check_mask(&cpu_online_map);
 }
 
+/* This function is similar to the regular
+ * hpet_disable_legacy_broadcast function, except it is called
+ * on the crash path with only the current processor up, so we
+ * can forget the locks and really can't send an event check IPI
+ * to the other processors */
+void crash_hpet_disable_legacy_broadcast(void)
+{
+    u32 cfg;
+
+    if ( !hpet_events || !(hpet_events->flags & HPET_EVT_LEGACY) )
+        return;
+
+    hpet_events->flags |= HPET_EVT_DISABLE;
+
+    /* disable HPET T0 */
+    cfg = hpet_read32(HPET_Tn_CFG(0));
+    cfg &= ~HPET_TN_ENABLE;
+    hpet_write32(cfg, HPET_Tn_CFG(0));
+
+    /* Stop HPET legacy interrupts */
+    cfg = hpet_read32(HPET_CFG);
+    cfg &= ~HPET_CFG_LEGACY;
+    hpet_write32(cfg, HPET_CFG);
+
+}
+
 void hpet_broadcast_enter(void)
 {
     unsigned int cpu = smp_processor_id();
diff -r d3026545e9a0 -r cb005f9078d3 xen/arch/x86/machine_kexec.c
--- a/xen/arch/x86/machine_kexec.c      Wed May 25 15:12:37 2011 +0100
+++ b/xen/arch/x86/machine_kexec.c      Wed May 25 15:30:43 2011 +0100
@@ -89,7 +89,7 @@ void machine_kexec(xen_kexec_image_t *im
     };
 
     if ( hpet_broadcast_is_available() )
-        hpet_disable_legacy_broadcast();
+        crash_hpet_disable_legacy_broadcast();
 
     /*
      * compat_machine_kexec() returns to idle pagetables, which requires us
diff -r d3026545e9a0 -r cb005f9078d3 xen/include/asm-x86/hpet.h
--- a/xen/include/asm-x86/hpet.h        Wed May 25 15:12:37 2011 +0100
+++ b/xen/include/asm-x86/hpet.h        Wed May 25 15:30:43 2011 +0100
@@ -73,5 +73,6 @@ void hpet_broadcast_enter(void);
 void hpet_broadcast_exit(void);
 int hpet_broadcast_is_available(void);
 void hpet_disable_legacy_broadcast(void);
+void crash_hpet_disable_legacy_broadcast(void);
 
 #endif /* __X86_HPET_H__ */

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel