# HG changeset patch
# User Jimi Xenidis <jimix@xxxxxxxxxxxxxx>
# Node ID a9225284962e7056929d16684f562105b9b452f1
# Parent a83cd26714b0be6a52f94f4cc1379929799949b8
[POWERPC][XEN] Track HID4[RM_CI] state for machine check
This patch creates a PIR-indexed character array so we can note when a
processor is in RM_CI (real-mode, cache-inhibited) mode. Machine checks
can happen while we are in that mode, and we need to at least report
them (and maybe one day recover from them). Also some fun with the SCOM.
(A rough C sketch of the idea follows the diffstat.)
Signed-off-by: Jimi Xenidis <jimix@xxxxxxxxxxxxxx>
---
xen/arch/powerpc/powerpc64/exceptions.S | 42 ++++++++++--
xen/arch/powerpc/powerpc64/io.S | 26 ++++++--
xen/arch/powerpc/powerpc64/ppc970.c | 57 ++++++++++++++---
xen/include/asm-powerpc/percpu.h | 16 ++++
xen/include/asm-powerpc/powerpc64/ppc970-hid.h | 81 +++++++++++++++++++++++++
xen/include/asm-powerpc/processor.h | 7 ++
6 files changed, 206 insertions(+), 23 deletions(-)
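
Not part of the patch: a rough, free-standing C sketch of the idea, for readers
skimming the diff. The array size, the enter/exit/report helpers and main()
below are illustrative only; the real flags live in a byte array just below the
reset vector and are set from assembly.

#include <stdio.h>

#define EXAMPLE_NR_CPUS 16              /* assumed CPU count for the demo */
#define MCK_CPU_STAT_CI (-1)            /* marker: "in cache-inhibited mode" */

/* One status byte per processor, indexed by PIR, NOT by logical cpuid. */
static char example_mck_cpu_stats[EXAMPLE_NR_CPUS];

static void enter_ci_mode(unsigned int pir)   /* analogue of DISABLE_DCACHE */
{
    example_mck_cpu_stats[pir] = MCK_CPU_STAT_CI;
}

static void exit_ci_mode(unsigned int pir)    /* analogue of ENABLE_DCACHE */
{
    example_mck_cpu_stats[pir] = 0;
}

static void report_machine_check(unsigned int pir)
{
    if (example_mck_cpu_stats[pir] != 0)      /* what cpu_machinecheck() checks */
        printf("Machine check on PIR %u while in CI IO\n", pir);
    else
        printf("Machine check on PIR %u\n", pir);
}

int main(void)
{
    enter_ci_mode(3);
    report_machine_check(3);
    exit_ci_mode(3);
    report_machine_check(3);
    return 0;
}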
diff -r a83cd26714b0 -r a9225284962e xen/arch/powerpc/powerpc64/exceptions.S
--- a/xen/arch/powerpc/powerpc64/exceptions.S Thu Sep 07 12:09:18 2006 -0400
+++ b/xen/arch/powerpc/powerpc64/exceptions.S Thu Sep 07 21:46:33 2006 -0400
@@ -1,27 +1,30 @@
/*
- * Copyright (C) 2005 Jimi Xenidis <jimix@xxxxxxxxxxxxxx>, IBM Corporation
- *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
- *
+ *
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
+ *
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
+ * Copyright (C) IBM Corp. 2005, 2006
+ *
+ * Authors: Jimi Xenidis <jimix@xxxxxxxxxxxxxx>
+ * Hollis Blanchard <hollisb@xxxxxxxxxx>
*/
-
+
#include <asm/config.h>
#include <asm/asm-offsets.h>
#include <asm/reg_defs.h>
#include <asm/msr.h>
#include <asm/processor.h>
+#include <asm/percpu.h>
.macro SAVE_GPR regno uregs
std \regno, (UREGS_gprs + GPR_WIDTH * \regno)(\uregs)
@@ -190,6 +193,14 @@ zero:
li r0, 0x0 /* exception vector for GDB stub */
bctr
+ /* The following byte array holds any per-CPU state flags
* that can be used across interrupts. Currently it is only used
* to track Cache Inhibited Mode when a Machine Check occurs. */
+ /* NOTE: This array is indexed by PIR NOT CPUID */
+ . = MCK_CPU_STAT_BASE
+ .space NR_CPUS
+ . = MCK_GOOD_HID4
+ .quad 0
. = 0x100 # System Reset
ex_reset:
/* XXX thread initialization */
@@ -200,8 +211,19 @@ ex_reset:
. = 0x200 # Machine Check
ex_machcheck:
- GET_STACK r13 SPRN_SRR1
- EXCEPTION_HEAD r13 ex_program_continued
+ /* Restore HID4 to a known state early. We do not recover from
+ * machine check yet, but when we do we should be able to restore
+ * HID4 to its proper value. */
+ mtspr SPRN_HSPRG1, r13
+ ld r13, MCK_GOOD_HID4(0)
+ sync
+ mtspr SPRN_HID4, r13
+ isync
+ /* Hopefully we don't have to worry about the ERAT */
+ mfspr r13, SPRN_HSPRG1
+ /* and now back to our regularly scheduled program */
+ GET_STACK r13 SPRN_SRR1
+ EXCEPTION_HEAD r13 ex_machcheck_continued
li r0, 0x200 /* exception vector for GDB stub */
bctr
@@ -336,6 +358,10 @@ exception_vectors_end:
HRFID
b . /* prevent speculative icache fetch */
.endm
+
+/* Not a whole lot just yet */
+ex_machcheck_continued:
+
/* We enter with the exception number in r0. The EXCEPTION_SAVE_STATE macro
* clobbers r0 though, so we have to move it around a little bit. Not ideal,
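
Editorial note, not part of the patch: the machine check hunk above reloads
HID4 from the quadword stashed below the reset vector before doing anything
else. A minimal C-level sketch of that save/restore idea follows; fake_hid4
and the *_example helpers are stand-ins for mfspr/mtspr of HID4, since the
real path runs in assembly with only r13 free.

#include <stdint.h>
#include <stdio.h>

static uint64_t fake_hid4;                    /* stand-in for the HID4 SPR */
static uint64_t mck_good_hid4_example;        /* the quadword below 0x100 */

static uint64_t read_hid4_example(void)       { return fake_hid4; }
static void write_hid4_example(uint64_t v)    { fake_hid4 = v; }

/* At boot on cpu 0: remember the HID4 value we consider sane. */
static void save_good_hid4_example(void)
{
    mck_good_hid4_example = read_hid4_example();
}

/* First step of the machine check vector: force HID4 back to the known-good
 * value, since the fault may have arrived while RM_CI was temporarily set. */
static void machine_check_entry_example(void)
{
    write_hid4_example(mck_good_hid4_example);
    /* ... normal exception save/dispatch would continue here ... */
}

int main(void)
{
    fake_hid4 = 0x1000;                       /* arbitrary "sane" value */
    save_good_hid4_example();
    fake_hid4 |= 0x1;                         /* pretend a CI bit got flipped */
    machine_check_entry_example();
    printf("HID4 back to 0x%016llx\n", (unsigned long long)fake_hid4);
    return 0;
}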
diff -r a83cd26714b0 -r a9225284962e xen/arch/powerpc/powerpc64/io.S
--- a/xen/arch/powerpc/powerpc64/io.S Thu Sep 07 12:09:18 2006 -0400
+++ b/xen/arch/powerpc/powerpc64/io.S Thu Sep 07 21:46:33 2006 -0400
@@ -1,23 +1,27 @@
/*
- * Copyright (C) 2005 Hollis Blanchard <hollisb@xxxxxxxxxx>, IBM Corporation
- *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
- *
+ *
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
+ *
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright (C) IBM Corp. 2005, 2006
+ *
+ * Authors: Jimi Xenidis <jimix@xxxxxxxxxxxxxx>
+ * Hollis Blanchard <hollisb@xxxxxxxxxx>
*/
#include <asm/config.h>
#include <asm/processor.h>
+#include <asm/percpu.h>
/* Xen runs in real mode (i.e. untranslated, MMU disabled). This avoids TLB
* flushes and also makes it easy to access all domains' memory. However, on
@@ -54,6 +58,12 @@
sync
mtspr SPRN_HID4, r5
isync
+
+ /* Mark the processor as "in CI mode" */
+ mfspr r5, SPRN_PIR
+ li r6, MCK_CPU_STAT_CI
+ stb r6, MCK_CPU_STAT_BASE(r5)
+ sync
.endm
.macro ENABLE_DCACHE addr
@@ -63,6 +73,12 @@
mtspr SPRN_HID4, r9
isync
+ /* Mark the processor as "out of CI mode" */
+ mfspr r5, SPRN_PIR
+ li r6, 0
+ stb r6, MCK_CPU_STAT_BASE(r5)
+ sync
+
/* re-enable interrupts */
mtmsr r8
.endm
diff -r a83cd26714b0 -r a9225284962e xen/arch/powerpc/powerpc64/ppc970.c
--- a/xen/arch/powerpc/powerpc64/ppc970.c Thu Sep 07 12:09:18 2006 -0400
+++ b/xen/arch/powerpc/powerpc64/ppc970.c Thu Sep 07 21:46:33 2006 -0400
@@ -103,19 +103,28 @@ int cpu_io_mfn(ulong mfn)
return 0;
}
+#ifdef DEBUG
+static void scom_init(void)
+{
+ write_scom(SCOM_AMCS_AND_MASK, 0);
+
+ printk("scom MCKE: 0x%016lx\n", read_scom(SCOM_CMCE));
+ write_scom(SCOM_CMCE, ~0UL);
+ printk("scom MCKE: 0x%016lx\n", read_scom(SCOM_CMCE));
+}
+#else
+#define scom_init()
+#endif
+
static u64 cpu0_hids[6];
static u64 cpu0_hior;
void cpu_initialize(int cpuid)
{
- ulong r1, r2;
union hid0 hid0;
union hid1 hid1;
union hid4 hid4;
union hid5 hid5;
-
- __asm__ __volatile__ ("mr %0, 1" : "=r" (r1));
- __asm__ __volatile__ ("mr %0, 2" : "=r" (r2));
if (cpuid == 0) {
/* we can assume that these are sane to start with. We
@@ -139,6 +148,19 @@ void cpu_initialize(int cpuid)
mthsprg0((ulong)parea); /* now ready for exceptions */
+ printk("CPU[PIR:%u IPI:%u Logical:%u] Hello World!\n",
+ mfpir(), raw_smp_processor_id(), smp_processor_id());
+
+#ifdef DEBUG
+ {
+ ulong r1, r2;
+
+ asm volatile ("mr %0, 1" : "=r" (r1));
+ asm volatile ("mr %0, 2" : "=r" (r2));
+ printk(" SP = %lx TOC = %lx\n", r1, r2);
+ }
+#endif
+
/* Set decrementers for 1 second to keep them out of the way during
* initialization. */
/* XXX make tickless */
@@ -147,11 +169,12 @@ void cpu_initialize(int cpuid)
hid0.bits.nap = 1; /* NAP */
hid0.bits.dpm = 1; /* Dynamic Power Management */
- hid0.bits.nhr = 0; /* ! Not Hard Reset */
+ hid0.bits.nhr = 1; /* Not Hard Reset */
hid0.bits.hdice_en = 1; /* enable HDEC */
hid0.bits.en_therm = 0; /* ! Enable ext thermal ints */
/* only a debug Xen should do this */
- hid0.bits.en_attn = 1; /* Enable attn instruction */
+ hid0.bits.en_attn = 1; /* Enable attn instruction */
+ hid0.bits.en_mck = 1; /* Enable external machine check interrupts */
#ifdef SERIALIZE
hid0.bits.one_ppc = 1;
@@ -162,9 +185,6 @@ void cpu_initialize(int cpuid)
hid0.bits.ser-gp = 1;
#endif
- printk("CPU #%d: Hello World! SP = %lx TOC = %lx HID0 = %lx\n",
- smp_processor_id(), r1, r2, hid0.word);
-
mthid0(hid0.word);
hid1.bits.bht_pm = 7; /* branch history table prediction mode */
@@ -188,6 +208,7 @@ void cpu_initialize(int cpuid)
hid4.bits.lg_pg_dis = 0; /* make sure we enable large pages */
mthid4(hid4.word);
+ hid5.bits.DC_mck = 1; /* Machine check enabled for dcache errors */
hid5.bits.DCBZ_size = 0; /* make dcbz size 32 bytes */
hid5.bits.DCBZ32_ill = 0; /* make dcbz 32-byte illegal */
mthid5(hid5.word);
@@ -200,12 +221,27 @@ void cpu_initialize(int cpuid)
mfhid0(), mfhid1(), mfhid4(), mfhid5());
#endif
+ /* Make sure firmware has not left this dirty */
mthior(cpu0_hior);
+ /* some machine check goodness */
+ /* save this for checkstop processing */
+ if (cpuid == 0)
+ *mck_good_hid4 = hid4.word;
+
+ if (mfpir() >= NR_CPUS)
+ panic("we do not expect a processor to have a PIR (%u) "
+ "larger than NR_CPUS (%u)\n",
+ mfpir(), NR_CPUS);
+
+ scom_init();
+
+ /* initialize the SLB */
#ifdef DEBUG
dump_segments(1);
#endif
flush_segments();
+ local_flush_tlb();
}
void cpu_init_vcpu(struct vcpu *v)
@@ -252,6 +288,9 @@ int cpu_machinecheck(struct cpu_user_reg
recover = 1;
printk("MACHINE CHECK: %s Recoverable\n", recover ? "IS": "NOT");
+ if (mck_cpu_stats[mfpir()] != 0)
+ printk("While in CI IO\n");
+
printk("SRR1: 0x%016lx\n", regs->msr);
if (regs->msr & MCK_SRR1_INSN_FETCH_UNIT)
printk("42: Exception caused by Instruction Fetch Unit (IFU) "
diff -r a83cd26714b0 -r a9225284962e xen/include/asm-powerpc/percpu.h
--- a/xen/include/asm-powerpc/percpu.h Thu Sep 07 12:09:18 2006 -0400
+++ b/xen/include/asm-powerpc/percpu.h Thu Sep 07 21:46:33 2006 -0400
@@ -5,6 +5,20 @@
#define PERCPU_SHIFT 12
#define PERCPU_SIZE (1UL << PERCPU_SHIFT)
+
+/* We carve out NR_CPUS bytes (plus a quadword for a known-good HID4)
+ * below the reset vector (0x100) so we can track per-cpu state that we
+ * wish we had a register for. Currently it is only used to track Cache
+ * Inhibited Mode when a Machine Check occurs. */
+/* NOTE: This array is indexed by PIR NOT CPUID */
+#define MCK_GOOD_HID4 (0x100 - 8)
+#define MCK_CPU_STAT_BASE (MCK_GOOD_HID4 - NR_CPUS) /* accommodate a hid4 */
+/* CI mode is currently the only state we track, so let's keep it simple */
+#define MCK_CPU_STAT_CI -1
+
+#ifndef __ASSEMBLY__
+#define mck_cpu_stats ((char *)MCK_CPU_STAT_BASE)
+#define mck_good_hid4 ((ulong *)MCK_GOOD_HID4)
/* Separate out the type, so (int[3], foo) works. */
#define DEFINE_PER_CPU(type, name) \
@@ -18,5 +32,5 @@
(per_cpu(var, smp_processor_id()))
#define DECLARE_PER_CPU(type, name) extern __typeof__(type) per_cpu__##name
-
+#endif /* __ASSEMBLY__ */
#endif /* __PPC_PERCPU_H__ */
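
Editorial note, not part of the patch: a tiny worked example of the carve-out
arithmetic above, with an assumed NR_CPUS of 16; the real value depends on the
build configuration.

#include <assert.h>
#include <stdio.h>

#define EXAMPLE_NR_CPUS 16                    /* assumed for this demo only */
#define RESET_VECTOR    0x100
#define GOOD_HID4_ADDR  (RESET_VECTOR - 8)    /* room for one 64-bit HID4 */
#define CPU_STAT_BASE   (GOOD_HID4_ADDR - EXAMPLE_NR_CPUS)

int main(void)
{
    /* Layout: status bytes, then the saved HID4, then the reset vector. */
    assert(CPU_STAT_BASE + EXAMPLE_NR_CPUS == GOOD_HID4_ADDR);
    assert(GOOD_HID4_ADDR + 8 == RESET_VECTOR);
    printf("stats 0x%02x..0x%02x, good HID4 at 0x%02x, reset at 0x%03x\n",
           CPU_STAT_BASE, GOOD_HID4_ADDR - 1, GOOD_HID4_ADDR, RESET_VECTOR);
    return 0;
}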
diff -r a83cd26714b0 -r a9225284962e xen/include/asm-powerpc/powerpc64/ppc970-hid.h
--- a/xen/include/asm-powerpc/powerpc64/ppc970-hid.h Thu Sep 07 12:09:18 2006 -0400
+++ b/xen/include/asm-powerpc/powerpc64/ppc970-hid.h Thu Sep 07 21:46:33 2006 -0400
@@ -158,4 +158,85 @@ union hid5 {
#define MCK_DSISR_TLB_PAR 0x00000400 /* 21 */
#define MCK_DSISR_SLB_PAR 0x00000100 /* 23 */
+#define SPRN_SCOMC 276
+#define SPRN_SCOMD 277
+
+static inline void mtscomc(ulong scomc)
+{
+ __asm__ __volatile__ ("mtspr %1, %0" : : "r" (scomc), "i"(SPRN_SCOMC));
+}
+
+static inline ulong mfscomc(void)
+{
+ ulong scomc;
+ __asm__ __volatile__ ("mfspr %0, %1" : "=r" (scomc): "i"(SPRN_SCOMC));
+ return scomc;
+}
+
+static inline void mtscomd(ulong scomd)
+{
+ __asm__ __volatile__ ("mtspr %1, %0" : : "r" (scomd), "i"(SPRN_SCOMD));
+}
+
+static inline ulong mfscomd(void)
+{
+ ulong scomd;
+ __asm__ __volatile__ ("mfspr %0, %1" : "=r" (scomd): "i"(SPRN_SCOMD));
+ return scomd;
+}
+
+union scomc {
+ struct scomc_bits {
+ ulong _reserved_0_31: 32;
+ ulong addr: 16;
+ ulong RW: 1;
+ ulong _reserved_49_55: 7;
+ ulong _reserved_56_57: 2;
+ ulong addr_error: 1;
+ ulong iface_error: 1;
+ ulong disabled: 1;
+ ulong _reserved_61_62: 2;
+ ulong failure: 1;
+ } bits;
+ ulong word;
+};
+
+
+static inline ulong read_scom(ulong addr)
+{
+ union scomc c;
+ ulong d;
+
+ c.word = 0;
+ c.bits.addr = addr;
+ c.bits.RW = 0;
+
+ mtscomc(c.word);
+ d = mfscomd();
+ c.word = mfscomc();
+ if (c.bits.failure)
+ panic("scom status: 0x%016lx\n", c.word);
+
+ return d;
+}
+
+static inline void write_scom(ulong addr, ulong val)
+{
+ union scomc c;
+
+ c.word = 0;
+ c.bits.addr = addr;
+ c.bits.RW = 0;
+
+ mtscomd(val);
+ mtscomc(c.word);
+ c.word = mfscomc();
+ if (c.bits.failure)
+ panic("scom status: 0x%016lx\n", c.word);
+}
+
+#define SCOM_AMCS_REG 0x022601
+#define SCOM_AMCS_AND_MASK 0x022700
+#define SCOM_AMCS_OR_MASK 0x022800
+#define SCOM_CMCE 0x030901
#endif
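
Editorial note, not part of the patch: the AND/OR mask addresses above suggest
the usual SCOM read-modify idiom, where writing a mask clears or sets bits in
AMCS without a racy read-modify-write. The helpers below are an untested sketch
built on the accessors this header defines; treat the mask semantics as an
assumption.

/* Untested sketch; assumes write_scom()/read_scom() and the SCOM_AMCS_*
 * definitions from this header, plus Xen's ulong type. */
static inline void amcs_clear_bits_example(ulong bits)
{
    write_scom(SCOM_AMCS_AND_MASK, ~bits);    /* assumed: keep all but 'bits' */
}

static inline void amcs_set_bits_example(ulong bits)
{
    write_scom(SCOM_AMCS_OR_MASK, bits);      /* assumed: turn 'bits' on */
}

static inline ulong amcs_read_example(void)
{
    return read_scom(SCOM_AMCS_REG);
}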
diff -r a83cd26714b0 -r a9225284962e xen/include/asm-powerpc/processor.h
--- a/xen/include/asm-powerpc/processor.h Thu Sep 07 12:09:18 2006 -0400
+++ b/xen/include/asm-powerpc/processor.h Thu Sep 07 21:46:33 2006 -0400
@@ -84,6 +84,13 @@ static inline void nop(void) {
}
#define cpu_relax() nop()
+static inline unsigned int mfpir(void)
+{
+ unsigned int pir;
+ __asm__ __volatile__ ("mfspr %0, %1" : "=r" (pir): "i"(SPRN_PIR));
+ return pir;
+}
+
static inline unsigned int mftbu(void)
{
unsigned int tbu;