[Xen-changelog] [xen-unstable] [IA64] merge in INIT patches

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] [IA64] merge in INIT patches
From: Xen patchbot-unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Fri, 28 Jul 2006 16:20:49 +0000
Delivery-date: Fri, 28 Jul 2006 09:27:15 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User awilliam@xxxxxxxxxxx
# Node ID 97c290c7b0155f6c17fc48bfe394671e592fae75
# Parent  50ed5c116b4dcc7697eb79ee3036ccad2ebb26d4
# Parent  efdfbb40db3f124d1bcf8855b976cfc1fd95c363
[IA64] merge in INIT patches
---
 xen/include/asm-ia64/linux/asm/asmmacro.h        |  111 -
 xen/arch/ia64/asm-offsets.c                      |    4 
 xen/arch/ia64/linux-xen/Makefile                 |    2 
 xen/arch/ia64/linux-xen/README.origin            |    2 
 xen/arch/ia64/linux-xen/mca.c                    | 1600 +++++++++++++++++++++++
 xen/arch/ia64/linux-xen/mca_asm.S                |  970 +++++++++++++
 xen/arch/ia64/linux-xen/minstate.h               |   46 
 xen/arch/ia64/linux-xen/unwind.c                 |   22 
 xen/arch/ia64/xen/xenmisc.c                      |    2 
 xen/include/asm-ia64/linux-xen/asm/README.origin |    1 
 xen/include/asm-ia64/linux-xen/asm/asmmacro.h    |  119 +
 xen/include/asm-ia64/linux-xen/asm/mca_asm.h     |    4 
 xen/include/asm-ia64/linux-xen/asm/system.h      |    2 
 xen/include/asm-ia64/linux/asm/README.origin     |    1 
 xen/include/asm-ia64/xensystem.h                 |    1 
 15 files changed, 2767 insertions(+), 120 deletions(-)

diff -r 50ed5c116b4d -r 97c290c7b015 xen/arch/ia64/asm-offsets.c
--- a/xen/arch/ia64/asm-offsets.c       Fri Jul 07 10:36:31 2006 -0600
+++ b/xen/arch/ia64/asm-offsets.c       Sun Jul 09 20:04:23 2006 -0600
@@ -8,6 +8,7 @@
 #include <xen/sched.h>
 #include <asm/processor.h>
 #include <asm/ptrace.h>
+#include <asm/mca.h>
 #include <public/xen.h>
 #include <asm/tlb.h>
 #include <asm/regs.h>
@@ -31,6 +32,9 @@ void foo(void)
        DEFINE(IA64_CPU_SIZE, sizeof (struct cpuinfo_ia64));
        DEFINE(UNW_FRAME_INFO_SIZE, sizeof (struct unw_frame_info));
        DEFINE(SHARED_INFO_SIZE, sizeof (struct shared_info));
+
+       BLANK();
+       DEFINE(IA64_MCA_CPU_INIT_STACK_OFFSET, offsetof (struct ia64_mca_cpu, init_stack));
 
        BLANK();
 #ifdef   VTI_DEBUG
diff -r 50ed5c116b4d -r 97c290c7b015 xen/arch/ia64/linux-xen/Makefile
--- a/xen/arch/ia64/linux-xen/Makefile  Fri Jul 07 10:36:31 2006 -0600
+++ b/xen/arch/ia64/linux-xen/Makefile  Sun Jul 09 20:04:23 2006 -0600
@@ -1,6 +1,8 @@ obj-y += efi.o
 obj-y += efi.o
 obj-y += entry.o
 obj-y += irq_ia64.o
+obj-y += mca.o
+obj-y += mca_asm.o
 obj-y += mm_contig.o
 obj-y += pal.o
 obj-y += process-linux-xen.o
diff -r 50ed5c116b4d -r 97c290c7b015 xen/arch/ia64/linux-xen/README.origin
--- a/xen/arch/ia64/linux-xen/README.origin     Fri Jul 07 10:36:31 2006 -0600
+++ b/xen/arch/ia64/linux-xen/README.origin     Sun Jul 09 20:04:23 2006 -0600
@@ -11,6 +11,8 @@ head.S                        -> linux/arch/ia64/kernel/head.
 head.S                 -> linux/arch/ia64/kernel/head.S
 hpsim_ssc.h            -> linux/arch/ia64/hp/sim/hpsim_ssc.h
 irq_ia64.c             -> linux/arch/ia64/kernel/irq_ia64.c
+mca.c                  -> linux/arch/ia64/kernel/mca.c
+mca_asm.S              -> linux/arch/ia64/kernel/mca_asm.S
 minstate.h             -> linux/arch/ia64/kernel/minstate.h
 mm_contig.c            -> linux/arch/ia64/mm/contig.c
 pal.S                  -> linux/arch/ia64/kernel/pal.S
diff -r 50ed5c116b4d -r 97c290c7b015 xen/arch/ia64/linux-xen/minstate.h
--- a/xen/arch/ia64/linux-xen/minstate.h        Fri Jul 07 10:36:31 2006 -0600
+++ b/xen/arch/ia64/linux-xen/minstate.h        Sun Jul 09 20:04:23 2006 -0600
@@ -36,7 +36,31 @@
 * For mca_asm.S we want to access the stack physically since the state is saved before we
 * go virtual and don't want to destroy the iip or ipsr.
 */
-#define MINSTATE_START_SAVE_MIN_PHYS                                                           \
+#ifdef XEN
+# define MINSTATE_START_SAVE_MIN_PHYS                                                          \
+(pKStk)        movl r3=THIS_CPU(ia64_mca_data);;                                               \
+(pKStk)        tpa r3 = r3;;                                                                   \
+(pKStk)        ld8 r3 = [r3];;                                                                 \
+(pKStk)        addl r3=IA64_MCA_CPU_INIT_STACK_OFFSET,r3;;                                     \
+(pKStk)        addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r3;                                   \
+(pUStk)        mov ar.rsc=0;           /* set enforced lazy mode, pl 0, little-endian, loadrs=0 */     \
+(pUStk)        addl r22=IA64_RBS_OFFSET,r1;            /* compute base of register backing store */    \
+       ;;                                                                                      \
+(pUStk)        mov r24=ar.rnat;                                                                \
+(pUStk)        addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r1;   /* compute base of memory stack */      \
+(pUStk)        mov r23=ar.bspstore;                            /* save ar.bspstore */                  \
+(pUStk)        dep r22=-1,r22,60,4;                    /* compute Xen virtual addr of RBS */   \
+       ;;                                                                                      \
+(pUStk)        mov ar.bspstore=r22;                    /* switch to Xen RBS */                 \
+       ;;                                                                                      \
+(pUStk)        mov r18=ar.bsp;                                                                 \
+(pUStk)        mov ar.rsc=0x3;  /* set eager mode, pl 0, little-endian, loadrs=0 */                    \
+
+# define MINSTATE_END_SAVE_MIN_PHYS                                                            \
+       dep r12=-1,r12,60,4;        /* make sp a Xen virtual address */                         \
+       ;;
+#else
+# define MINSTATE_START_SAVE_MIN_PHYS                                                          \
 (pKStk) mov r3=IA64_KR(PER_CPU_DATA);;                                                         \
 (pKStk) addl r3=THIS_CPU(ia64_mca_data),r3;;                                                   \
 (pKStk) ld8 r3 = [r3];;                                                                        \
@@ -55,15 +79,17 @@
 (pUStk)        mov r18=ar.bsp;                                                                 \
 (pUStk)        mov ar.rsc=0x3;         /* set eager mode, pl 0, little-endian, loadrs=0 */             \
 
-#define MINSTATE_END_SAVE_MIN_PHYS                                                             \
+# define MINSTATE_END_SAVE_MIN_PHYS                                                            \
        dep r12=-1,r12,61,3;            /* make sp a kernel virtual address */                  \
        ;;
+#endif /* XEN */
 
 #ifdef MINSTATE_VIRT
 #ifdef XEN
 # define MINSTATE_GET_CURRENT(reg)                                     \
                movl reg=THIS_CPU(cpu_kr)+IA64_KR_CURRENT_OFFSET;;      \
                ld8 reg=[reg]
+# define MINSTATE_GET_CURRENT_VIRT(reg)        MINSTATE_GET_CURRENT(reg)
 #else
 # define MINSTATE_GET_CURRENT(reg)     mov reg=IA64_KR(CURRENT)
 #endif
@@ -72,7 +98,19 @@
 #endif
 
 #ifdef MINSTATE_PHYS
+# ifdef XEN
+# define MINSTATE_GET_CURRENT(reg)                                     \
+       movl reg=THIS_CPU(cpu_kr)+IA64_KR_CURRENT_OFFSET;;              \
+       tpa reg=reg;;                                                   \
+       ld8 reg=[reg];;                                                 \
+       tpa reg=reg;;
+# define MINSTATE_GET_CURRENT_VIRT(reg)                                        \
+       movl reg=THIS_CPU(cpu_kr)+IA64_KR_CURRENT_OFFSET;;              \
+       tpa reg=reg;;                                                   \
+       ld8 reg=[reg];;
+#else
 # define MINSTATE_GET_CURRENT(reg)     mov reg=IA64_KR(CURRENT);; tpa reg=reg
+#endif /* XEN */
 # define MINSTATE_START_SAVE_MIN       MINSTATE_START_SAVE_MIN_PHYS
 # define MINSTATE_END_SAVE_MIN         MINSTATE_END_SAVE_MIN_PHYS
 #endif
@@ -175,8 +213,8 @@
        ;;                                                                                      \
 .mem.offset 0,0; st8.spill [r16]=r13,16;                                                       \
 .mem.offset 8,0; st8.spill [r17]=r21,16;       /* save ar.fpsr */                              \
-       /* XEN mov r13=IA64_KR(CURRENT);*/      /* establish `current' */                       \
-       MINSTATE_GET_CURRENT(r13);              /* XEN establish `current' */                   \
+       /* XEN mov r13=IA64_KR(CURRENT);*/      /* establish `current' */                       \
+       MINSTATE_GET_CURRENT_VIRT(r13);         /* XEN establish `current' */                   \
        ;;                                                                                      \
 .mem.offset 0,0; st8.spill [r16]=r15,16;                                                       \
 .mem.offset 8,0; st8.spill [r17]=r14,16;                                                       \
diff -r 50ed5c116b4d -r 97c290c7b015 xen/arch/ia64/linux-xen/unwind.c
--- a/xen/arch/ia64/linux-xen/unwind.c  Fri Jul 07 10:36:31 2006 -0600
+++ b/xen/arch/ia64/linux-xen/unwind.c  Sun Jul 09 20:04:23 2006 -0600
@@ -2056,6 +2056,28 @@ init_frame_info (struct unw_frame_info *
 }
 
 void
+unw_init_from_interruption (struct unw_frame_info *info, struct task_struct *t,
+                           struct pt_regs *pt, struct switch_stack *sw)
+{
+       unsigned long sof;
+
+       init_frame_info(info, t, sw, pt->r12);
+       info->cfm_loc = &pt->cr_ifs;
+       info->unat_loc = &pt->ar_unat;
+       info->pfs_loc = &pt->ar_pfs;
+       sof = *info->cfm_loc & 0x7f;
+       info->bsp = (unsigned long) ia64_rse_skip_regs((unsigned long *) info->regstk.top, -sof);
+       info->ip = pt->cr_iip + ia64_psr(pt)->ri;
+       info->pt = (unsigned long) pt;
+       UNW_DPRINT(3, "unwind.%s:\n"
+                  "  bsp    0x%lx\n"
+                  "  sof    0x%lx\n"
+                  "  ip     0x%lx\n",
+                  __FUNCTION__, info->bsp, sof, info->ip);
+       find_save_locs(info);
+}
+
+void
 unw_init_frame_info (struct unw_frame_info *info, struct task_struct *t, struct switch_stack *sw)
 {
        unsigned long sol;
diff -r 50ed5c116b4d -r 97c290c7b015 xen/arch/ia64/xen/xenmisc.c
--- a/xen/arch/ia64/xen/xenmisc.c       Fri Jul 07 10:36:31 2006 -0600
+++ b/xen/arch/ia64/xen/xenmisc.c       Sun Jul 09 20:04:23 2006 -0600
@@ -28,8 +28,6 @@ unsigned long loops_per_jiffy = (1<<12);
 /* FIXME: where these declarations should be there ? */
 extern void show_registers(struct pt_regs *regs);
 
-void ia64_mca_init(void) { printf("ia64_mca_init() skipped (Machine check abort handling)\n"); }
-void ia64_mca_cpu_init(void *x) { }
 void hpsim_setup(char **x)
 {
 #ifdef CONFIG_SMP
diff -r 50ed5c116b4d -r 97c290c7b015 xen/include/asm-ia64/linux-xen/asm/README.origin
--- a/xen/include/asm-ia64/linux-xen/asm/README.origin  Fri Jul 07 10:36:31 2006 -0600
+++ b/xen/include/asm-ia64/linux-xen/asm/README.origin  Sun Jul 09 20:04:23 2006 -0600
@@ -5,6 +5,7 @@
 # (e.g. with #ifdef XEN or XEN in a comment) so that they can be
 # easily updated to future versions of the corresponding Linux files.
 
+asmmacro.h             -> linux/include/asm-ia64/asmmacro.h
 cache.h                        -> linux/include/asm-ia64/cache.h
 gcc_intrin.h           -> linux/include/asm-ia64/gcc_intrin.h
 ia64regs.h             -> linux/include/asm-ia64/ia64regs.h
diff -r 50ed5c116b4d -r 97c290c7b015 xen/include/asm-ia64/linux-xen/asm/mca_asm.h
--- a/xen/include/asm-ia64/linux-xen/asm/mca_asm.h      Fri Jul 07 10:36:31 2006 -0600
+++ b/xen/include/asm-ia64/linux-xen/asm/mca_asm.h      Sun Jul 09 20:04:23 2006 -0600
@@ -58,7 +58,9 @@
 #endif
 
 #ifdef XEN
-//FIXME LATER
+#define GET_THIS_PADDR(reg, var)               \
+       movl    reg = THIS_CPU(var)             \
+       tpa     reg = reg
 #else
 #define GET_THIS_PADDR(reg, var)               \
        mov     reg = IA64_KR(PER_CPU_DATA);;   \
diff -r 50ed5c116b4d -r 97c290c7b015 xen/include/asm-ia64/linux-xen/asm/system.h
--- a/xen/include/asm-ia64/linux-xen/asm/system.h       Fri Jul 07 10:36:31 2006 -0600
+++ b/xen/include/asm-ia64/linux-xen/asm/system.h       Sun Jul 09 20:04:23 2006 -0600
@@ -19,8 +19,8 @@
 #include <asm/pal.h>
 #include <asm/percpu.h>
 
+#ifndef XEN
 #define GATE_ADDR              __IA64_UL_CONST(0xa000000000000000)
-#ifndef XEN
 /*
  * 0xa000000000000000+2*PERCPU_PAGE_SIZE
  * - 0xa000000000000000+3*PERCPU_PAGE_SIZE remain unmapped (guard page)
diff -r 50ed5c116b4d -r 97c290c7b015 xen/include/asm-ia64/linux/asm/README.origin
--- a/xen/include/asm-ia64/linux/asm/README.origin      Fri Jul 07 10:36:31 2006 -0600
+++ b/xen/include/asm-ia64/linux/asm/README.origin      Sun Jul 09 20:04:23 2006 -0600
@@ -5,7 +5,6 @@
 # the instructions in the README there.
 
 acpi.h                 -> linux/include/asm-ia64/acpi.h
-asmmacro.h             -> linux/include/asm-ia64/asmmacro.h
 atomic.h               -> linux/include/asm-ia64/atomic.h
 bitops.h               -> linux/include/asm-ia64/bitops.h
 break.h                        -> linux/include/asm-ia64/break.h
diff -r 50ed5c116b4d -r 97c290c7b015 xen/include/asm-ia64/xensystem.h
--- a/xen/include/asm-ia64/xensystem.h  Fri Jul 07 10:36:31 2006 -0600
+++ b/xen/include/asm-ia64/xensystem.h  Sun Jul 09 20:04:23 2006 -0600
@@ -19,6 +19,7 @@
 
 #define HYPERVISOR_VIRT_START   0xe800000000000000
 #define KERNEL_START            0xf000000004000000
+#define GATE_ADDR              KERNEL_START
 #define DEFAULT_SHAREDINFO_ADDR         0xf100000000000000
 #define PERCPU_ADDR             (DEFAULT_SHAREDINFO_ADDR - PERCPU_PAGE_SIZE)
 #define VHPT_ADDR               0xf200000000000000
diff -r 50ed5c116b4d -r 97c290c7b015 xen/arch/ia64/linux-xen/mca.c
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/ia64/linux-xen/mca.c     Sun Jul 09 20:04:23 2006 -0600
@@ -0,0 +1,1600 @@
+/*
+ * File:       mca.c
+ * Purpose:    Generic MCA handling layer
+ *
+ * Updated for latest kernel
+ * Copyright (C) 2003 Hewlett-Packard Co
+ *     David Mosberger-Tang <davidm@xxxxxxxxxx>
+ *
+ * Copyright (C) 2002 Dell Inc.
+ * Copyright (C) Matt Domsch (Matt_Domsch@xxxxxxxx)
+ *
+ * Copyright (C) 2002 Intel
+ * Copyright (C) Jenna Hall (jenna.s.hall@xxxxxxxxx)
+ *
+ * Copyright (C) 2001 Intel
+ * Copyright (C) Fred Lewis (frederick.v.lewis@xxxxxxxxx)
+ *
+ * Copyright (C) 2000 Intel
+ * Copyright (C) Chuck Fleckenstein (cfleck@xxxxxxxxxxxx)
+ *
+ * Copyright (C) 1999, 2004 Silicon Graphics, Inc.
+ * Copyright (C) Vijay Chander(vijay@xxxxxxxxxxxx)
+ *
+ * 03/04/15 D. Mosberger Added INIT backtrace support.
+ * 02/03/25 M. Domsch  GUID cleanups
+ *
+ * 02/01/04 J. Hall    Aligned MCA stack to 16 bytes, added platform vs. CPU
+ *                     error flag, set SAL default return values, changed
+ *                     error record structure to linked list, added init call
+ *                     to sal_get_state_info_size().
+ *
+ * 01/01/03 F. Lewis    Added setup of CMCI and CPEI IRQs, logging of corrected
+ *                      platform errors, completed code for logging of
+ *                      corrected & uncorrected machine check errors, and
+ *                      updated for conformance with Nov. 2000 revision of the
+ *                      SAL 3.0 spec.
+ * 00/03/29 C. Fleckenstein  Fixed PAL/SAL update issues, began MCA bug fixes, logging issues,
+ *                           added min save state dump, added INIT handler.
+ *
+ * 2003-12-08 Keith Owens <kaos@xxxxxxx>
+ *            smp_call_function() must not be called from interrupt context (can
+ *            deadlock on tasklist_lock).  Use keventd to call smp_call_function().
+ *
+ * 2004-02-01 Keith Owens <kaos@xxxxxxx>
+ *            Avoid deadlock when using printk() for MCA and INIT records.
+ *            Delete all record printing code, moved to salinfo_decode in user space.
+ *            Mark variables and functions static where possible.
+ *            Delete dead variables and functions.
+ *            Reorder to remove the need for forward declarations and to consolidate
+ *            related code.
+ */
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/kallsyms.h>
+#include <linux/smp_lock.h>
+#include <linux/bootmem.h>
+#include <linux/acpi.h>
+#include <linux/timer.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/smp.h>
+#include <linux/workqueue.h>
+
+#include <asm/delay.h>
+#include <asm/machvec.h>
+#include <asm/meminit.h>
+#include <asm/page.h>
+#include <asm/ptrace.h>
+#include <asm/system.h>
+#include <asm/sal.h>
+#include <asm/mca.h>
+
+#include <asm/irq.h>
+#include <asm/hw_irq.h>
+
+#ifdef XEN
+#include <xen/symbols.h>
+#endif
+
+#if defined(IA64_MCA_DEBUG_INFO)
+# define IA64_MCA_DEBUG(fmt...)        printk(fmt)
+#else
+# define IA64_MCA_DEBUG(fmt...)
+#endif
+
+/* Used by mca_asm.S */
+#ifndef XEN
+ia64_mca_sal_to_os_state_t     ia64_sal_to_os_handoff_state;
+#else
+ia64_mca_sal_to_os_state_t     ia64_sal_to_os_handoff_state[NR_CPUS];
+DEFINE_PER_CPU(u64, ia64_sal_to_os_handoff_state_addr); 
+#endif
+ia64_mca_os_to_sal_state_t     ia64_os_to_sal_handoff_state;
+u64                            ia64_mca_serialize;
+DEFINE_PER_CPU(u64, ia64_mca_data); /* == __per_cpu_mca[smp_processor_id()] */
+DEFINE_PER_CPU(u64, ia64_mca_per_cpu_pte); /* PTE to map per-CPU area */
+DEFINE_PER_CPU(u64, ia64_mca_pal_pte);     /* PTE to map PAL code */
+DEFINE_PER_CPU(u64, ia64_mca_pal_base);    /* vaddr PAL code granule */
+
+unsigned long __per_cpu_mca[NR_CPUS];
+
+/* In mca_asm.S */
+extern void                    ia64_monarch_init_handler (void);
+extern void                    ia64_slave_init_handler (void);
+
+static ia64_mc_info_t          ia64_mc_info;
+
+#ifndef XEN
+#define MAX_CPE_POLL_INTERVAL (15*60*HZ) /* 15 minutes */
+#define MIN_CPE_POLL_INTERVAL (2*60*HZ)  /* 2 minutes */
+#define CMC_POLL_INTERVAL     (1*60*HZ)  /* 1 minute */
+#define CPE_HISTORY_LENGTH    5
+#define CMC_HISTORY_LENGTH    5
+
+static struct timer_list cpe_poll_timer;
+static struct timer_list cmc_poll_timer;
+/*
+ * This variable tells whether we are currently in polling mode.
+ * Start with this in the wrong state so we won't play w/ timers
+ * before the system is ready.
+ */
+static int cmc_polling_enabled = 1;
+
+/*
+ * Clearing this variable prevents CPE polling from getting activated
+ * in mca_late_init.  Use it if your system doesn't provide a CPEI,
+ * but encounters problems retrieving CPE logs.  This should only be
+ * necessary for debugging.
+ */
+static int cpe_poll_enabled = 1;
+
+extern void salinfo_log_wakeup(int type, u8 *buffer, u64 size, int irqsafe);
+#endif /* !XEN */
+
+static int mca_init;
+
+#ifndef XEN
+/*
+ * IA64_MCA log support
+ */
+#define IA64_MAX_LOGS          2       /* Double-buffering for nested MCAs */
+#define IA64_MAX_LOG_TYPES      4   /* MCA, INIT, CMC, CPE */
+
+typedef struct ia64_state_log_s
+{
+       spinlock_t      isl_lock;
+       int             isl_index;
+       unsigned long   isl_count;
+       ia64_err_rec_t  *isl_log[IA64_MAX_LOGS]; /* need space to store header + error log */
+} ia64_state_log_t;
+
+static ia64_state_log_t ia64_state_log[IA64_MAX_LOG_TYPES];
+
+#define IA64_LOG_ALLOCATE(it, size) \
+       {ia64_state_log[it].isl_log[IA64_LOG_CURR_INDEX(it)] = \
+               (ia64_err_rec_t *)alloc_bootmem(size); \
+       ia64_state_log[it].isl_log[IA64_LOG_NEXT_INDEX(it)] = \
+               (ia64_err_rec_t *)alloc_bootmem(size);}
+#define IA64_LOG_LOCK_INIT(it) spin_lock_init(&ia64_state_log[it].isl_lock)
+#define IA64_LOG_LOCK(it)      spin_lock_irqsave(&ia64_state_log[it].isl_lock, s)
+#define IA64_LOG_UNLOCK(it)    spin_unlock_irqrestore(&ia64_state_log[it].isl_lock,s)
+#define IA64_LOG_NEXT_INDEX(it)    ia64_state_log[it].isl_index
+#define IA64_LOG_CURR_INDEX(it)    1 - ia64_state_log[it].isl_index
+#define IA64_LOG_INDEX_INC(it) \
+    {ia64_state_log[it].isl_index = 1 - ia64_state_log[it].isl_index; \
+    ia64_state_log[it].isl_count++;}
+#define IA64_LOG_INDEX_DEC(it) \
+    ia64_state_log[it].isl_index = 1 - ia64_state_log[it].isl_index
+#define IA64_LOG_NEXT_BUFFER(it)   (void *)((ia64_state_log[it].isl_log[IA64_LOG_NEXT_INDEX(it)]))
+#define IA64_LOG_CURR_BUFFER(it)   (void *)((ia64_state_log[it].isl_log[IA64_LOG_CURR_INDEX(it)]))
+#define IA64_LOG_COUNT(it)         ia64_state_log[it].isl_count
+
+/*
+ * ia64_log_init
+ *     Reset the OS ia64 log buffer
+ * Inputs   :   info_type   (SAL_INFO_TYPE_{MCA,INIT,CMC,CPE})
+ * Outputs     :       None
+ */
+static void
+ia64_log_init(int sal_info_type)
+{
+       u64     max_size = 0;
+
+       IA64_LOG_NEXT_INDEX(sal_info_type) = 0;
+       IA64_LOG_LOCK_INIT(sal_info_type);
+
+       // SAL will tell us the maximum size of any error record of this type
+       max_size = ia64_sal_get_state_info_size(sal_info_type);
+       if (!max_size)
+               /* alloc_bootmem() doesn't like zero-sized allocations! */
+               return;
+
+       // set up OS data structures to hold error info
+       IA64_LOG_ALLOCATE(sal_info_type, max_size);
+       memset(IA64_LOG_CURR_BUFFER(sal_info_type), 0, max_size);
+       memset(IA64_LOG_NEXT_BUFFER(sal_info_type), 0, max_size);
+}
+
+/*
+ * ia64_log_get
+ *
+ *     Get the current MCA log from SAL and copy it into the OS log buffer.
+ *
+ *  Inputs  :   info_type   (SAL_INFO_TYPE_{MCA,INIT,CMC,CPE})
+ *              irq_safe    whether you can use printk at this point
+ *  Outputs :   size        (total record length)
+ *              *buffer     (ptr to error record)
+ *
+ */
+static u64
+ia64_log_get(int sal_info_type, u8 **buffer, int irq_safe)
+{
+       sal_log_record_header_t     *log_buffer;
+       u64                         total_len = 0;
+       int                         s;
+
+       IA64_LOG_LOCK(sal_info_type);
+
+       /* Get the process state information */
+       log_buffer = IA64_LOG_NEXT_BUFFER(sal_info_type);
+
+       total_len = ia64_sal_get_state_info(sal_info_type, (u64 *)log_buffer);
+
+       if (total_len) {
+               IA64_LOG_INDEX_INC(sal_info_type);
+               IA64_LOG_UNLOCK(sal_info_type);
+               if (irq_safe) {
+                       IA64_MCA_DEBUG("%s: SAL error record type %d retrieved. "
+                                      "Record length = %ld\n", __FUNCTION__, sal_info_type, total_len);
+               }
+               *buffer = (u8 *) log_buffer;
+               return total_len;
+       } else {
+               IA64_LOG_UNLOCK(sal_info_type);
+               return 0;
+       }
+}
+
+/*
+ *  ia64_mca_log_sal_error_record
+ *
+ *  This function retrieves a specified error record type from SAL
+ *  and wakes up any processes waiting for error records.
+ *
+ *  Inputs  :   sal_info_type   (Type of error record MCA/CMC/CPE/INIT)
+ */
+static void
+ia64_mca_log_sal_error_record(int sal_info_type)
+{
+       u8 *buffer;
+       sal_log_record_header_t *rh;
+       u64 size;
+       int irq_safe = sal_info_type != SAL_INFO_TYPE_MCA && sal_info_type != SAL_INFO_TYPE_INIT;
+#ifdef IA64_MCA_DEBUG_INFO
+       static const char * const rec_name[] = { "MCA", "INIT", "CMC", "CPE" };
+#endif
+
+       size = ia64_log_get(sal_info_type, &buffer, irq_safe);
+       if (!size)
+               return;
+
+       salinfo_log_wakeup(sal_info_type, buffer, size, irq_safe);
+
+       if (irq_safe)
+               IA64_MCA_DEBUG("CPU %d: SAL log contains %s error record\n",
+                       smp_processor_id(),
+                       sal_info_type < ARRAY_SIZE(rec_name) ? rec_name[sal_info_type] : "UNKNOWN");
+
+       /* Clear logs from corrected errors in case there's no user-level logger */
+       rh = (sal_log_record_header_t *)buffer;
+       if (rh->severity == sal_log_severity_corrected)
+               ia64_sal_clear_state_info(sal_info_type);
+}
+
+/*
+ * platform dependent error handling
+ */
+#endif /* !XEN */
+#ifndef PLATFORM_MCA_HANDLERS
+#ifndef XEN
+
+#ifdef CONFIG_ACPI
+
+int cpe_vector = -1;
+
+static irqreturn_t
+ia64_mca_cpe_int_handler (int cpe_irq, void *arg, struct pt_regs *ptregs)
+{
+       static unsigned long    cpe_history[CPE_HISTORY_LENGTH];
+       static int              index;
+       static DEFINE_SPINLOCK(cpe_history_lock);
+
+       IA64_MCA_DEBUG("%s: received interrupt vector = %#x on CPU %d\n",
+                      __FUNCTION__, cpe_irq, smp_processor_id());
+
+       /* SAL spec states this should run w/ interrupts enabled */
+       local_irq_enable();
+
+       /* Get the CPE error record and log it */
+       ia64_mca_log_sal_error_record(SAL_INFO_TYPE_CPE);
+
+       spin_lock(&cpe_history_lock);
+       if (!cpe_poll_enabled && cpe_vector >= 0) {
+
+               int i, count = 1; /* we know 1 happened now */
+               unsigned long now = jiffies;
+
+               for (i = 0; i < CPE_HISTORY_LENGTH; i++) {
+                       if (now - cpe_history[i] <= HZ)
+                               count++;
+               }
+
+               IA64_MCA_DEBUG(KERN_INFO "CPE threshold %d/%d\n", count, CPE_HISTORY_LENGTH);
+               if (count >= CPE_HISTORY_LENGTH) {
+
+                       cpe_poll_enabled = 1;
+                       spin_unlock(&cpe_history_lock);
+                       disable_irq_nosync(local_vector_to_irq(IA64_CPE_VECTOR));
+
+                       /*
+                        * Corrected errors will still be corrected, but
+                        * make sure there's a log somewhere that indicates
+                        * something is generating more than we can handle.
+                        */
+                       printk(KERN_WARNING "WARNING: Switching to polling CPE handler; error records may be lost\n");
+
+                       mod_timer(&cpe_poll_timer, jiffies + MIN_CPE_POLL_INTERVAL);
+
+                       /* lock already released, get out now */
+                       return IRQ_HANDLED;
+               } else {
+                       cpe_history[index++] = now;
+                       if (index == CPE_HISTORY_LENGTH)
+                               index = 0;
+               }
+       }
+       spin_unlock(&cpe_history_lock);
+       return IRQ_HANDLED;
+}
+
+#endif /* CONFIG_ACPI */
+#endif /* !XEN */
+
+static void
+show_min_state (pal_min_state_area_t *minstate)
+{
+       u64 iip = minstate->pmsa_iip + ((struct ia64_psr *)(&minstate->pmsa_ipsr))->ri;
+       u64 xip = minstate->pmsa_xip + ((struct ia64_psr *)(&minstate->pmsa_xpsr))->ri;
+
+       printk("NaT bits\t%016lx\n", minstate->pmsa_nat_bits);
+       printk("pr\t\t%016lx\n", minstate->pmsa_pr);
+       printk("b0\t\t%016lx ", minstate->pmsa_br0); print_symbol("%s\n", minstate->pmsa_br0);
+       printk("ar.rsc\t\t%016lx\n", minstate->pmsa_rsc);
+       printk("cr.iip\t\t%016lx ", iip); print_symbol("%s\n", iip);
+       printk("cr.ipsr\t\t%016lx\n", minstate->pmsa_ipsr);
+       printk("cr.ifs\t\t%016lx\n", minstate->pmsa_ifs);
+       printk("xip\t\t%016lx ", xip); print_symbol("%s\n", xip);
+       printk("xpsr\t\t%016lx\n", minstate->pmsa_xpsr);
+       printk("xfs\t\t%016lx\n", minstate->pmsa_xfs);
+       printk("b1\t\t%016lx ", minstate->pmsa_br1);
+       print_symbol("%s\n", minstate->pmsa_br1);
+
+       printk("\nstatic registers r0-r15:\n");
+       printk(" r0- 3 %016lx %016lx %016lx %016lx\n",
+              0UL, minstate->pmsa_gr[0], minstate->pmsa_gr[1], minstate->pmsa_gr[2]);
+       printk(" r4- 7 %016lx %016lx %016lx %016lx\n",
+              minstate->pmsa_gr[3], minstate->pmsa_gr[4],
+              minstate->pmsa_gr[5], minstate->pmsa_gr[6]);
+       printk(" r8-11 %016lx %016lx %016lx %016lx\n",
+              minstate->pmsa_gr[7], minstate->pmsa_gr[8],
+              minstate->pmsa_gr[9], minstate->pmsa_gr[10]);
+       printk("r12-15 %016lx %016lx %016lx %016lx\n",
+              minstate->pmsa_gr[11], minstate->pmsa_gr[12],
+              minstate->pmsa_gr[13], minstate->pmsa_gr[14]);
+
+       printk("\nbank 0:\n");
+       printk("r16-19 %016lx %016lx %016lx %016lx\n",
+              minstate->pmsa_bank0_gr[0], minstate->pmsa_bank0_gr[1],
+              minstate->pmsa_bank0_gr[2], minstate->pmsa_bank0_gr[3]);
+       printk("r20-23 %016lx %016lx %016lx %016lx\n",
+              minstate->pmsa_bank0_gr[4], minstate->pmsa_bank0_gr[5],
+              minstate->pmsa_bank0_gr[6], minstate->pmsa_bank0_gr[7]);
+       printk("r24-27 %016lx %016lx %016lx %016lx\n",
+              minstate->pmsa_bank0_gr[8], minstate->pmsa_bank0_gr[9],
+              minstate->pmsa_bank0_gr[10], minstate->pmsa_bank0_gr[11]);
+       printk("r28-31 %016lx %016lx %016lx %016lx\n",
+              minstate->pmsa_bank0_gr[12], minstate->pmsa_bank0_gr[13],
+              minstate->pmsa_bank0_gr[14], minstate->pmsa_bank0_gr[15]);
+
+       printk("\nbank 1:\n");
+       printk("r16-19 %016lx %016lx %016lx %016lx\n",
+              minstate->pmsa_bank1_gr[0], minstate->pmsa_bank1_gr[1],
+              minstate->pmsa_bank1_gr[2], minstate->pmsa_bank1_gr[3]);
+       printk("r20-23 %016lx %016lx %016lx %016lx\n",
+              minstate->pmsa_bank1_gr[4], minstate->pmsa_bank1_gr[5],
+              minstate->pmsa_bank1_gr[6], minstate->pmsa_bank1_gr[7]);
+       printk("r24-27 %016lx %016lx %016lx %016lx\n",
+              minstate->pmsa_bank1_gr[8], minstate->pmsa_bank1_gr[9],
+              minstate->pmsa_bank1_gr[10], minstate->pmsa_bank1_gr[11]);
+       printk("r28-31 %016lx %016lx %016lx %016lx\n",
+              minstate->pmsa_bank1_gr[12], minstate->pmsa_bank1_gr[13],
+              minstate->pmsa_bank1_gr[14], minstate->pmsa_bank1_gr[15]);
+}
+
+static void
+fetch_min_state (pal_min_state_area_t *ms, struct pt_regs *pt, struct switch_stack *sw)
+{
+       u64 *dst_banked, *src_banked, bit, shift, nat_bits;
+       int i;
+
+       /*
+        * First, update the pt-regs and switch-stack structures with the contents stored
+        * in the min-state area:
+        */
+       if (((struct ia64_psr *) &ms->pmsa_ipsr)->ic == 0) {
+               pt->cr_ipsr = ms->pmsa_xpsr;
+               pt->cr_iip = ms->pmsa_xip;
+               pt->cr_ifs = ms->pmsa_xfs;
+       } else {
+               pt->cr_ipsr = ms->pmsa_ipsr;
+               pt->cr_iip = ms->pmsa_iip;
+               pt->cr_ifs = ms->pmsa_ifs;
+       }
+       pt->ar_rsc = ms->pmsa_rsc;
+       pt->pr = ms->pmsa_pr;
+       pt->r1 = ms->pmsa_gr[0];
+       pt->r2 = ms->pmsa_gr[1];
+       pt->r3 = ms->pmsa_gr[2];
+       sw->r4 = ms->pmsa_gr[3];
+       sw->r5 = ms->pmsa_gr[4];
+       sw->r6 = ms->pmsa_gr[5];
+       sw->r7 = ms->pmsa_gr[6];
+       pt->r8 = ms->pmsa_gr[7];
+       pt->r9 = ms->pmsa_gr[8];
+       pt->r10 = ms->pmsa_gr[9];
+       pt->r11 = ms->pmsa_gr[10];
+       pt->r12 = ms->pmsa_gr[11];
+       pt->r13 = ms->pmsa_gr[12];
+       pt->r14 = ms->pmsa_gr[13];
+       pt->r15 = ms->pmsa_gr[14];
+       dst_banked = &pt->r16;          /* r16-r31 are contiguous in struct pt_regs */
+       src_banked = ms->pmsa_bank1_gr;
+       for (i = 0; i < 16; ++i)
+               dst_banked[i] = src_banked[i];
+       pt->b0 = ms->pmsa_br0;
+       sw->b1 = ms->pmsa_br1;
+
+       /* construct the NaT bits for the pt-regs structure: */
+#      define PUT_NAT_BIT(dst, addr)                                   \
+       do {                                                            \
+               bit = nat_bits & 1; nat_bits >>= 1;                     \
+               shift = ((unsigned long) addr >> 3) & 0x3f;             \
+               dst = ((dst) & ~(1UL << shift)) | (bit << shift);       \
+       } while (0)
+
+       /* Rotate the saved NaT bits such that bit 0 corresponds to pmsa_gr[0]: */
+       shift = ((unsigned long) &ms->pmsa_gr[0] >> 3) & 0x3f;
+       nat_bits = (ms->pmsa_nat_bits >> shift) | (ms->pmsa_nat_bits << (64 - shift));
+
+       PUT_NAT_BIT(sw->caller_unat, &pt->r1);
+       PUT_NAT_BIT(sw->caller_unat, &pt->r2);
+       PUT_NAT_BIT(sw->caller_unat, &pt->r3);
+       PUT_NAT_BIT(sw->ar_unat, &sw->r4);
+       PUT_NAT_BIT(sw->ar_unat, &sw->r5);
+       PUT_NAT_BIT(sw->ar_unat, &sw->r6);
+       PUT_NAT_BIT(sw->ar_unat, &sw->r7);
+       PUT_NAT_BIT(sw->caller_unat, &pt->r8);  PUT_NAT_BIT(sw->caller_unat, &pt->r9);
+       PUT_NAT_BIT(sw->caller_unat, &pt->r10); PUT_NAT_BIT(sw->caller_unat, &pt->r11);
+       PUT_NAT_BIT(sw->caller_unat, &pt->r12); PUT_NAT_BIT(sw->caller_unat, &pt->r13);
+       PUT_NAT_BIT(sw->caller_unat, &pt->r14); PUT_NAT_BIT(sw->caller_unat, &pt->r15);
+       nat_bits >>= 16;        /* skip over bank0 NaT bits */
+       PUT_NAT_BIT(sw->caller_unat, &pt->r16); PUT_NAT_BIT(sw->caller_unat, &pt->r17);
+       PUT_NAT_BIT(sw->caller_unat, &pt->r18); PUT_NAT_BIT(sw->caller_unat, &pt->r19);
+       PUT_NAT_BIT(sw->caller_unat, &pt->r20); PUT_NAT_BIT(sw->caller_unat, &pt->r21);
+       PUT_NAT_BIT(sw->caller_unat, &pt->r22); PUT_NAT_BIT(sw->caller_unat, &pt->r23);
+       PUT_NAT_BIT(sw->caller_unat, &pt->r24); PUT_NAT_BIT(sw->caller_unat, &pt->r25);
+       PUT_NAT_BIT(sw->caller_unat, &pt->r26); PUT_NAT_BIT(sw->caller_unat, &pt->r27);
+       PUT_NAT_BIT(sw->caller_unat, &pt->r28); PUT_NAT_BIT(sw->caller_unat, &pt->r29);
+       PUT_NAT_BIT(sw->caller_unat, &pt->r30); PUT_NAT_BIT(sw->caller_unat, &pt->r31);
+}
+
+#ifdef XEN
+static spinlock_t init_dump_lock = SPIN_LOCK_UNLOCKED;
+static spinlock_t show_stack_lock = SPIN_LOCK_UNLOCKED;
+
+static void
+save_ksp (struct unw_frame_info *info, void *arg)
+{
+       current->arch._thread.ksp = (__u64)(info->sw) - 16;
+       wmb();
+}
+
+/* FIXME */
+int try_crashdump(struct pt_regs *a) { return 0; }
+
+#define CPU_FLUSH_RETRY_MAX 5
+static void
+init_cache_flush (void)
+{
+       unsigned long flags;
+       int i;
+       s64 rval = 0;
+       u64 vector, progress = 0;
+
+       for (i = 0; i < CPU_FLUSH_RETRY_MAX; i++) {
+               local_irq_save(flags);
+               rval = ia64_pal_cache_flush(PAL_CACHE_TYPE_INSTRUCTION_DATA,
+                                           0, &progress, &vector);
+               local_irq_restore(flags);
+               if (rval == 0){
+                       printk("\nPAL cache flush success\n");
+                       return;
+               }
+       }
+       printk("\nPAL cache flush failed. status=%ld\n",rval);
+}
+#endif /* XEN */
+
+static void
+init_handler_platform (pal_min_state_area_t *ms,
+                      struct pt_regs *pt, struct switch_stack *sw)
+{
+       struct unw_frame_info info;
+
+       /* if a kernel debugger is available call it here else just dump the registers */
+
+       /*
+        * Wait for a bit.  On some machines (e.g., HP's zx2000 and zx6000, INIT can be
+        * generated via the BMC's command-line interface, but since the console is on the
+        * same serial line, the user will need some time to switch out of the BMC before
+        * the dump begins.
+        */
+       printk("Delaying for 5 seconds...\n");
+       udelay(5*1000000);
+#ifdef XEN
+       fetch_min_state(ms, pt, sw);
+       spin_lock(&show_stack_lock);
+#endif
+       show_min_state(ms);
+
+#ifdef XEN
+       printk("Backtrace of current vcpu (vcpu_id %d)\n", current->vcpu_id);
+#else
+       printk("Backtrace of current task (pid %d, %s)\n", current->pid, current->comm);
+       fetch_min_state(ms, pt, sw);
+#endif
+       unw_init_from_interruption(&info, current, pt, sw);
+       ia64_do_show_stack(&info, NULL);
+#ifdef XEN
+       unw_init_running(save_ksp, NULL);
+       spin_unlock(&show_stack_lock);
+       wmb();
+       init_cache_flush();
+
+       if (spin_trylock(&init_dump_lock)) {
+#ifdef CONFIG_SMP
+               udelay(5*1000000);
+#endif
+               if (try_crashdump(pt) == 0)
+                       printk("\nINIT dump complete.  Please reboot now.\n");
+       }
+       printk("%s: CPU%d init handler done\n",
+              __FUNCTION__, smp_processor_id());
+#else /* XEN */
+#ifdef CONFIG_SMP
+       /* read_trylock() would be handy... */
+       if (!tasklist_lock.write_lock)
+               read_lock(&tasklist_lock);
+#endif
+       {
+               struct task_struct *g, *t;
+               do_each_thread (g, t) {
+                       if (t == current)
+                               continue;
+
+                       printk("\nBacktrace of pid %d (%s)\n", t->pid, t->comm);
+                       show_stack(t, NULL);
+               } while_each_thread (g, t);
+       }
+#ifdef CONFIG_SMP
+       if (!tasklist_lock.write_lock)
+               read_unlock(&tasklist_lock);
+#endif
+
+       printk("\nINIT dump complete.  Please reboot now.\n");
+#endif /* XEN */
+       while (1);                      /* hang city if no debugger */
+}
+
+#ifndef XEN
+#ifdef CONFIG_ACPI
+/*
+ * ia64_mca_register_cpev
+ *
+ *  Register the corrected platform error vector with SAL.
+ *
+ *  Inputs
+ *      cpev        Corrected Platform Error Vector number
+ *
+ *  Outputs
+ *      None
+ */
+static void
+ia64_mca_register_cpev (int cpev)
+{
+       /* Register the CPE interrupt vector with SAL */
+       struct ia64_sal_retval isrv;
+
+       isrv = ia64_sal_mc_set_params(SAL_MC_PARAM_CPE_INT, SAL_MC_PARAM_MECHANISM_INT, cpev, 0, 0);
+       if (isrv.status) {
+               printk(KERN_ERR "Failed to register Corrected Platform "
+                      "Error interrupt vector with SAL (status %ld)\n", isrv.status);
+               return;
+       }
+
+       IA64_MCA_DEBUG("%s: corrected platform error "
+                      "vector %#x registered\n", __FUNCTION__, cpev);
+}
+#endif /* CONFIG_ACPI */
+
+#endif /* !XEN */
+#endif /* PLATFORM_MCA_HANDLERS */
+#ifndef XEN
+
+/*
+ * ia64_mca_cmc_vector_setup
+ *
+ *  Setup the corrected machine check vector register in the processor.
+ *  (The interrupt is masked on boot. ia64_mca_late_init unmask this.)
+ *  This function is invoked on a per-processor basis.
+ *
+ * Inputs
+ *      None
+ *
+ * Outputs
+ *     None
+ */
+void
+ia64_mca_cmc_vector_setup (void)
+{
+       cmcv_reg_t      cmcv;
+
+       cmcv.cmcv_regval        = 0;
+       cmcv.cmcv_mask          = 1;        /* Mask/disable interrupt at first */
+       cmcv.cmcv_vector        = IA64_CMC_VECTOR;
+       ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval);
+
+       IA64_MCA_DEBUG("%s: CPU %d corrected "
+                      "machine check vector %#x registered.\n",
+                      __FUNCTION__, smp_processor_id(), IA64_CMC_VECTOR);
+
+       IA64_MCA_DEBUG("%s: CPU %d CMCV = %#016lx\n",
+                      __FUNCTION__, smp_processor_id(), ia64_getreg(_IA64_REG_CR_CMCV));
+}
+
+/*
+ * ia64_mca_cmc_vector_disable
+ *
+ *  Mask the corrected machine check vector register in the processor.
+ *  This function is invoked on a per-processor basis.
+ *
+ * Inputs
+ *      dummy(unused)
+ *
+ * Outputs
+ *     None
+ */
+static void
+ia64_mca_cmc_vector_disable (void *dummy)
+{
+       cmcv_reg_t      cmcv;
+
+       cmcv.cmcv_regval = ia64_getreg(_IA64_REG_CR_CMCV);
+
+       cmcv.cmcv_mask = 1; /* Mask/disable interrupt */
+       ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval);
+
+       IA64_MCA_DEBUG("%s: CPU %d corrected "
+                      "machine check vector %#x disabled.\n",
+                      __FUNCTION__, smp_processor_id(), cmcv.cmcv_vector);
+}
+
+/*
+ * ia64_mca_cmc_vector_enable
+ *
+ *  Unmask the corrected machine check vector register in the processor.
+ *  This function is invoked on a per-processor basis.
+ *
+ * Inputs
+ *      dummy(unused)
+ *
+ * Outputs
+ *     None
+ */
+static void
+ia64_mca_cmc_vector_enable (void *dummy)
+{
+       cmcv_reg_t      cmcv;
+
+       cmcv.cmcv_regval = ia64_getreg(_IA64_REG_CR_CMCV);
+
+       cmcv.cmcv_mask = 0; /* Unmask/enable interrupt */
+       ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval);
+
+       IA64_MCA_DEBUG("%s: CPU %d corrected "
+                      "machine check vector %#x enabled.\n",
+                      __FUNCTION__, smp_processor_id(), cmcv.cmcv_vector);
+}
+
+/*
+ * ia64_mca_cmc_vector_disable_keventd
+ *
+ * Called via keventd (smp_call_function() is not safe in interrupt context) to
+ * disable the cmc interrupt vector.
+ */
+static void
+ia64_mca_cmc_vector_disable_keventd(void *unused)
+{
+       on_each_cpu(ia64_mca_cmc_vector_disable, NULL, 1, 0);
+}
+
+/*
+ * ia64_mca_cmc_vector_enable_keventd
+ *
+ * Called via keventd (smp_call_function() is not safe in interrupt context) to
+ * enable the cmc interrupt vector.
+ */
+static void
+ia64_mca_cmc_vector_enable_keventd(void *unused)
+{
+       on_each_cpu(ia64_mca_cmc_vector_enable, NULL, 1, 0);
+}
+
+/*
+ * ia64_mca_wakeup_ipi_wait
+ *
+ *     Wait for the inter-cpu interrupt to be sent by the
+ *     monarch processor once it is done with handling the
+ *     MCA.
+ *
+ *  Inputs  :   None
+ *  Outputs :   None
+ */
+static void
+ia64_mca_wakeup_ipi_wait(void)
+{
+       int     irr_num = (IA64_MCA_WAKEUP_VECTOR >> 6);
+       int     irr_bit = (IA64_MCA_WAKEUP_VECTOR & 0x3f);
+       u64     irr = 0;
+
+       do {
+               switch(irr_num) {
+                     case 0:
+                       irr = ia64_getreg(_IA64_REG_CR_IRR0);
+                       break;
+                     case 1:
+                       irr = ia64_getreg(_IA64_REG_CR_IRR1);
+                       break;
+                     case 2:
+                       irr = ia64_getreg(_IA64_REG_CR_IRR2);
+                       break;
+                     case 3:
+                       irr = ia64_getreg(_IA64_REG_CR_IRR3);
+                       break;
+               }
+               cpu_relax();
+       } while (!(irr & (1UL << irr_bit))) ;
+}
+
+/*
+ * ia64_mca_wakeup
+ *
+ *     Send an inter-cpu interrupt to wake-up a particular cpu
+ *     and mark that cpu to be out of rendez.
+ *
+ *  Inputs  :   cpuid
+ *  Outputs :   None
+ */
+static void
+ia64_mca_wakeup(int cpu)
+{
+       platform_send_ipi(cpu, IA64_MCA_WAKEUP_VECTOR, IA64_IPI_DM_INT, 0);
+       ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
+
+}
+
+/*
+ * ia64_mca_wakeup_all
+ *
+ *     Wakeup all the cpus which have rendez'ed previously.
+ *
+ *  Inputs  :   None
+ *  Outputs :   None
+ */
+static void
+ia64_mca_wakeup_all(void)
+{
+       int cpu;
+
+       /* Clear the Rendez checkin flag for all cpus */
+       for(cpu = 0; cpu < NR_CPUS; cpu++) {
+               if (!cpu_online(cpu))
+                       continue;
+               if (ia64_mc_info.imi_rendez_checkin[cpu] == IA64_MCA_RENDEZ_CHECKIN_DONE)
+                       ia64_mca_wakeup(cpu);
+       }
+
+}
+
+/*
+ * ia64_mca_rendez_interrupt_handler
+ *
+ *     This is handler used to put slave processors into spinloop
+ *     while the monarch processor does the mca handling and later
+ *     wake each slave up once the monarch is done.
+ *
+ *  Inputs  :   None
+ *  Outputs :   None
+ */
+static irqreturn_t
+ia64_mca_rendez_int_handler(int rendez_irq, void *arg, struct pt_regs *ptregs)
+{
+       unsigned long flags;
+       int cpu = smp_processor_id();
+
+       /* Mask all interrupts */
+       local_irq_save(flags);
+
+       ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_DONE;
+       /* Register with the SAL monarch that the slave has
+        * reached SAL
+        */
+       ia64_sal_mc_rendez();
+
+       /* Wait for the wakeup IPI from the monarch
+        * This waiting is done by polling on the wakeup-interrupt
+        * vector bit in the processor's IRRs
+        */
+       ia64_mca_wakeup_ipi_wait();
+
+       /* Enable all interrupts */
+       local_irq_restore(flags);
+       return IRQ_HANDLED;
+}
+
+/*
+ * ia64_mca_wakeup_int_handler
+ *
+ *     The interrupt handler for processing the inter-cpu interrupt to the
+ *     slave cpu which was spinning in the rendez loop.
+ *     Since this spinning is done by turning off the interrupts and
+ *     polling on the wakeup-interrupt bit in the IRR, there is
+ *     nothing useful to be done in the handler.
+ *
+ *  Inputs  :   wakeup_irq  (Wakeup-interrupt bit)
+ *     arg             (Interrupt handler specific argument)
+ *     ptregs          (Exception frame at the time of the interrupt)
+ *  Outputs :   None
+ *
+ */
+static irqreturn_t
+ia64_mca_wakeup_int_handler(int wakeup_irq, void *arg, struct pt_regs *ptregs)
+{
+       return IRQ_HANDLED;
+}
+
+/*
+ * ia64_return_to_sal_check
+ *
+ *     This is function called before going back from the OS_MCA handler
+ *     to the OS_MCA dispatch code which finally takes the control back
+ *     to the SAL.
+ *     The main purpose of this routine is to setup the OS_MCA to SAL
+ *     return state which can be used by the OS_MCA dispatch code
+ *     just before going back to SAL.
+ *
+ *  Inputs  :   None
+ *  Outputs :   None
+ */
+
+static void
+ia64_return_to_sal_check(int recover)
+{
+
+       /* Copy over some relevant stuff from the sal_to_os_mca_handoff
+        * so that it can be used at the time of os_mca_to_sal_handoff
+        */
+       ia64_os_to_sal_handoff_state.imots_sal_gp =
+               ia64_sal_to_os_handoff_state.imsto_sal_gp;
+
+       ia64_os_to_sal_handoff_state.imots_sal_check_ra =
+               ia64_sal_to_os_handoff_state.imsto_sal_check_ra;
+
+       if (recover)
+               ia64_os_to_sal_handoff_state.imots_os_status = IA64_MCA_CORRECTED;
+       else
+               ia64_os_to_sal_handoff_state.imots_os_status = IA64_MCA_COLD_BOOT;
+
+       /* Default = tell SAL to return to same context */
+       ia64_os_to_sal_handoff_state.imots_context = IA64_MCA_SAME_CONTEXT;
+
+       ia64_os_to_sal_handoff_state.imots_new_min_state =
+               (u64 *)ia64_sal_to_os_handoff_state.pal_min_state;
+
+}
+
+/* Function pointer for extra MCA recovery */
+int (*ia64_mca_ucmc_extension)
+       (void*,ia64_mca_sal_to_os_state_t*,ia64_mca_os_to_sal_state_t*)
+       = NULL;
+
+int
+ia64_reg_MCA_extension(void *fn)
+{
+       if (ia64_mca_ucmc_extension)
+               return 1;
+
+       ia64_mca_ucmc_extension = fn;
+       return 0;
+}
+
+void
+ia64_unreg_MCA_extension(void)
+{
+       if (ia64_mca_ucmc_extension)
+               ia64_mca_ucmc_extension = NULL;
+}
+
+EXPORT_SYMBOL(ia64_reg_MCA_extension);
+EXPORT_SYMBOL(ia64_unreg_MCA_extension);
+
+/*
+ * ia64_mca_ucmc_handler
+ *
+ *     This is uncorrectable machine check handler called from OS_MCA
+ *     dispatch code which is in turn called from SAL_CHECK().
+ *     This is the place where the core of OS MCA handling is done.
+ *     Right now the logs are extracted and displayed in a well-defined
+ *     format. This handler code is supposed to be run only on the
+ *     monarch processor. Once the monarch is done with MCA handling
+ *     further MCA logging is enabled by clearing logs.
+ *     Monarch also has the duty of sending wakeup-IPIs to pull the
+ *     slave processors out of rendezvous spinloop.
+ *
+ *  Inputs  :   None
+ *  Outputs :   None
+ */
+void
+ia64_mca_ucmc_handler(void)
+{
+       pal_processor_state_info_t *psp = (pal_processor_state_info_t *)
+               &ia64_sal_to_os_handoff_state.proc_state_param;
+       int recover; 
+
+       /* Get the MCA error record and log it */
+       ia64_mca_log_sal_error_record(SAL_INFO_TYPE_MCA);
+
+       /* TLB error is only exist in this SAL error record */
+       recover = (psp->tc && !(psp->cc || psp->bc || psp->rc || psp->uc))
+       /* other error recovery */
+          || (ia64_mca_ucmc_extension 
+               && ia64_mca_ucmc_extension(
+                       IA64_LOG_CURR_BUFFER(SAL_INFO_TYPE_MCA),
+                       &ia64_sal_to_os_handoff_state,
+                       &ia64_os_to_sal_handoff_state)); 
+
+       if (recover) {
+               sal_log_record_header_t *rh = IA64_LOG_CURR_BUFFER(SAL_INFO_TYPE_MCA);
+               rh->severity = sal_log_severity_corrected;
+               ia64_sal_clear_state_info(SAL_INFO_TYPE_MCA);
+       }
+       /*
+        *  Wakeup all the processors which are spinning in the rendezvous
+        *  loop.
+        */
+       ia64_mca_wakeup_all();
+
+       /* Return to SAL */
+       ia64_return_to_sal_check(recover);
+}
+
+static DECLARE_WORK(cmc_disable_work, ia64_mca_cmc_vector_disable_keventd, NULL);
+static DECLARE_WORK(cmc_enable_work, ia64_mca_cmc_vector_enable_keventd, NULL);
+
+/*
+ * ia64_mca_cmc_int_handler
+ *
+ *  This is corrected machine check interrupt handler.
+ *     Right now the logs are extracted and displayed in a well-defined
+ *     format.
+ *
+ * Inputs
+ *      interrupt number
+ *      client data arg ptr
+ *      saved registers ptr
+ *
+ * Outputs
+ *     None
+ */
+static irqreturn_t
+ia64_mca_cmc_int_handler(int cmc_irq, void *arg, struct pt_regs *ptregs)
+{
+       static unsigned long    cmc_history[CMC_HISTORY_LENGTH];
+       static int              index;
+       static DEFINE_SPINLOCK(cmc_history_lock);
+
+       IA64_MCA_DEBUG("%s: received interrupt vector = %#x on CPU %d\n",
+                      __FUNCTION__, cmc_irq, smp_processor_id());
+
+       /* SAL spec states this should run w/ interrupts enabled */
+       local_irq_enable();
+
+       /* Get the CMC error record and log it */
+       ia64_mca_log_sal_error_record(SAL_INFO_TYPE_CMC);
+
+       spin_lock(&cmc_history_lock);
+       if (!cmc_polling_enabled) {
+               int i, count = 1; /* we know 1 happened now */
+               unsigned long now = jiffies;
+
+               for (i = 0; i < CMC_HISTORY_LENGTH; i++) {
+                       if (now - cmc_history[i] <= HZ)
+                               count++;
+               }
+
+               IA64_MCA_DEBUG(KERN_INFO "CMC threshold %d/%d\n", count, CMC_HISTORY_LENGTH);
+               if (count >= CMC_HISTORY_LENGTH) {
+
+                       cmc_polling_enabled = 1;
+                       spin_unlock(&cmc_history_lock);
+                       schedule_work(&cmc_disable_work);
+
+                       /*
+                        * Corrected errors will still be corrected, but
+                        * make sure there's a log somewhere that indicates
+                        * something is generating more than we can handle.
+                        */
+                       printk(KERN_WARNING "WARNING: Switching to polling CMC handler; error records may be lost\n");
+
+                       mod_timer(&cmc_poll_timer, jiffies + CMC_POLL_INTERVAL);
+
+                       /* lock already released, get out now */
+                       return IRQ_HANDLED;
+               } else {
+                       cmc_history[index++] = now;
+                       if (index == CMC_HISTORY_LENGTH)
+                               index = 0;
+               }
+       }
+       spin_unlock(&cmc_history_lock);
+       return IRQ_HANDLED;
+}
+
+/*
+ *  ia64_mca_cmc_int_caller
+ *
+ *     Triggered by sw interrupt from CMC polling routine.  Calls
+ *     real interrupt handler and either triggers a sw interrupt
+ *     on the next cpu or does cleanup at the end.
+ *
+ * Inputs
+ *     interrupt number
+ *     client data arg ptr
+ *     saved registers ptr
+ * Outputs
+ *     handled
+ */
+static irqreturn_t
+ia64_mca_cmc_int_caller(int cmc_irq, void *arg, struct pt_regs *ptregs)
+{
+       static int start_count = -1;
+       unsigned int cpuid;
+
+       cpuid = smp_processor_id();
+
+       /* If first cpu, update count */
+       if (start_count == -1)
+               start_count = IA64_LOG_COUNT(SAL_INFO_TYPE_CMC);
+
+       ia64_mca_cmc_int_handler(cmc_irq, arg, ptregs);
+
+       for (++cpuid ; cpuid < NR_CPUS && !cpu_online(cpuid) ; cpuid++);
+
+       if (cpuid < NR_CPUS) {
+               platform_send_ipi(cpuid, IA64_CMCP_VECTOR, IA64_IPI_DM_INT, 0);
+       } else {
+               /* If no log record, switch out of polling mode */
+               if (start_count == IA64_LOG_COUNT(SAL_INFO_TYPE_CMC)) {
+
+                       printk(KERN_WARNING "Returning to interrupt driven CMC handler\n");
+                       schedule_work(&cmc_enable_work);
+                       cmc_polling_enabled = 0;
+
+               } else {
+
+                       mod_timer(&cmc_poll_timer, jiffies + CMC_POLL_INTERVAL);
+               }
+
+               start_count = -1;
+       }
+
+       return IRQ_HANDLED;
+}
+
+/*
+ *  ia64_mca_cmc_poll
+ *
+ *     Poll for Corrected Machine Checks (CMCs)
+ *
+ * Inputs   :   dummy(unused)
+ * Outputs  :   None
+ *
+ */
+static void
+ia64_mca_cmc_poll (unsigned long dummy)
+{
+       /* Trigger a CMC interrupt cascade  */
+       platform_send_ipi(first_cpu(cpu_online_map), IA64_CMCP_VECTOR, IA64_IPI_DM_INT, 0);
+}
+
+/*
+ *  ia64_mca_cpe_int_caller
+ *
+ *     Triggered by sw interrupt from CPE polling routine.  Calls
+ *     real interrupt handler and either triggers a sw interrupt
+ *     on the next cpu or does cleanup at the end.
+ *
+ * Inputs
+ *     interrupt number
+ *     client data arg ptr
+ *     saved registers ptr
+ * Outputs
+ *     handled
+ */
+#ifdef CONFIG_ACPI
+
+static irqreturn_t
+ia64_mca_cpe_int_caller(int cpe_irq, void *arg, struct pt_regs *ptregs)
+{
+       static int start_count = -1;
+       static int poll_time = MIN_CPE_POLL_INTERVAL;
+       unsigned int cpuid;
+
+       cpuid = smp_processor_id();
+
+       /* If first cpu, update count */
+       if (start_count == -1)
+               start_count = IA64_LOG_COUNT(SAL_INFO_TYPE_CPE);
+
+       ia64_mca_cpe_int_handler(cpe_irq, arg, ptregs);
+
+       for (++cpuid ; cpuid < NR_CPUS && !cpu_online(cpuid) ; cpuid++);
+
+       if (cpuid < NR_CPUS) {
+               platform_send_ipi(cpuid, IA64_CPEP_VECTOR, IA64_IPI_DM_INT, 0);
+       } else {
+               /*
+                * If a log was recorded, increase our polling frequency,
+                * otherwise, backoff or return to interrupt mode.
+                */
+               if (start_count != IA64_LOG_COUNT(SAL_INFO_TYPE_CPE)) {
+                       poll_time = max(MIN_CPE_POLL_INTERVAL, poll_time / 2);
+               } else if (cpe_vector < 0) {
+                       poll_time = min(MAX_CPE_POLL_INTERVAL, poll_time * 2);
+               } else {
+                       poll_time = MIN_CPE_POLL_INTERVAL;
+
+                       printk(KERN_WARNING "Returning to interrupt driven CPE handler\n");
+                       enable_irq(local_vector_to_irq(IA64_CPE_VECTOR));
+                       cpe_poll_enabled = 0;
+               }
+
+               if (cpe_poll_enabled)
+                       mod_timer(&cpe_poll_timer, jiffies + poll_time);
+               start_count = -1;
+       }
+
+       return IRQ_HANDLED;
+}
+
+/*
+ *  ia64_mca_cpe_poll
+ *
+ *     Poll for Corrected Platform Errors (CPEs), trigger interrupt
+ *     on first cpu, from there it will trickle through all the cpus.
+ *
+ * Inputs   :   dummy(unused)
+ * Outputs  :   None
+ *
+ */
+static void
+ia64_mca_cpe_poll (unsigned long dummy)
+{
+       /* Trigger a CPE interrupt cascade  */
+       platform_send_ipi(first_cpu(cpu_online_map), IA64_CPEP_VECTOR, IA64_IPI_DM_INT, 0);
+}
+
+#endif /* CONFIG_ACPI */
+#endif /* !XEN */
+
+/*
+ * C portion of the OS INIT handler
+ *
+ * Called from ia64_monarch_init_handler
+ *
+ * Inputs: pointer to pt_regs where processor info was saved.
+ *
+ * Returns:
+ *   0 if SAL must warm boot the System
+ *   1 if SAL must return to interrupted context using PAL_MC_RESUME
+ *
+ */
+void
+ia64_init_handler (struct pt_regs *pt, struct switch_stack *sw)
+{
+       pal_min_state_area_t *ms;
+#ifdef XEN
+       int cpu = smp_processor_id();
+
+       printk(KERN_INFO "Entered OS INIT handler. PSP=%lx\n",
+              ia64_sal_to_os_handoff_state[cpu].proc_state_param);
+#endif
+
+#ifndef XEN
+       oops_in_progress = 1;   /* avoid deadlock in printk, but it makes recovery dodgy */
+       console_loglevel = 15;  /* make sure printks make it to console */
+
+       printk(KERN_INFO "Entered OS INIT handler. PSP=%lx\n",
+               ia64_sal_to_os_handoff_state.proc_state_param);
+
+       /*
+        * Address of minstate area provided by PAL is physical,
+        * uncacheable (bit 63 set). Convert to Linux virtual
+        * address in region 6.
+        */
+       ms = (pal_min_state_area_t *)(ia64_sal_to_os_handoff_state.pal_min_state | (6ul<<61));
+#else
+       /* Xen virtual address in region 7. */
+       ms = __va((pal_min_state_area_t *)(ia64_sal_to_os_handoff_state[cpu].pal_min_state));
+#endif
+
+       init_handler_platform(ms, pt, sw);      /* call platform specific routines */
+}
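
For readers not used to ia64 region arithmetic, the conversion above can be
sketched in plain C: PAL reports the min-state area as a physical,
uncacheable address (bit 63 set); the Linux path ORs in (6ul<<61) to get a
region-6 virtual address, while the Xen path uses __va(), i.e. the region-7
direct map.  The sketch strips the UC bit first only to make the region bits
visible, and the address itself is made up.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t pal_min_state = (1ULL << 63) | 0x4000ULL;      /* made-up handoff value */
        uint64_t phys = pal_min_state & ~(1ULL << 63);          /* strip the UC bit */
        uint64_t rgn6 = phys | (6ULL << 61);                    /* Linux: region-6 vaddr */
        uint64_t rgn7 = phys | (7ULL << 61);                    /* Xen __va(): region-7 vaddr */

        printf("phys=%#llx region6=%#llx region7=%#llx\n",
               (unsigned long long)phys, (unsigned long long)rgn6,
               (unsigned long long)rgn7);
        return 0;
}
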
+
+#ifndef XEN
+static int __init
+ia64_mca_disable_cpe_polling(char *str)
+{
+       cpe_poll_enabled = 0;
+       return 1;
+}
+
+__setup("disable_cpe_poll", ia64_mca_disable_cpe_polling);
+
+static struct irqaction cmci_irqaction = {
+       .handler =      ia64_mca_cmc_int_handler,
+       .flags =        SA_INTERRUPT,
+       .name =         "cmc_hndlr"
+};
+
+static struct irqaction cmcp_irqaction = {
+       .handler =      ia64_mca_cmc_int_caller,
+       .flags =        SA_INTERRUPT,
+       .name =         "cmc_poll"
+};
+
+static struct irqaction mca_rdzv_irqaction = {
+       .handler =      ia64_mca_rendez_int_handler,
+       .flags =        SA_INTERRUPT,
+       .name =         "mca_rdzv"
+};
+
+static struct irqaction mca_wkup_irqaction = {
+       .handler =      ia64_mca_wakeup_int_handler,
+       .flags =        SA_INTERRUPT,
+       .name =         "mca_wkup"
+};
+
+#ifdef CONFIG_ACPI
+static struct irqaction mca_cpe_irqaction = {
+       .handler =      ia64_mca_cpe_int_handler,
+       .flags =        SA_INTERRUPT,
+       .name =         "cpe_hndlr"
+};
+
+static struct irqaction mca_cpep_irqaction = {
+       .handler =      ia64_mca_cpe_int_caller,
+       .flags =        SA_INTERRUPT,
+       .name =         "cpe_poll"
+};
+#endif /* CONFIG_ACPI */
+#endif /* !XEN */
+
+/* Do per-CPU MCA-related initialization.  */
+
+void __devinit
+ia64_mca_cpu_init(void *cpu_data)
+{
+       void *pal_vaddr;
+
+       if (smp_processor_id() == 0) {
+               void *mca_data;
+               int cpu;
+
+#ifdef XEN
+               unsigned int pageorder;
+               pageorder  = get_order_from_bytes(sizeof(struct ia64_mca_cpu));
+#else
+               mca_data = alloc_bootmem(sizeof(struct ia64_mca_cpu)
+                                        * NR_CPUS);
+#endif
+               for (cpu = 0; cpu < NR_CPUS; cpu++) {
+#ifdef XEN
+                       mca_data = alloc_xenheap_pages(pageorder);
+                       __per_cpu_mca[cpu] = __pa(mca_data);
+                       IA64_MCA_DEBUG("%s: __per_cpu_mca[%d]=%lx"
+                                      "(mca_data[%d]=%lx)\n",
+                                      __FUNCTION__, cpu, __per_cpu_mca[cpu],
+                                      cpu, (u64)mca_data);
+#else
+                       __per_cpu_mca[cpu] = __pa(mca_data);
+                       mca_data += sizeof(struct ia64_mca_cpu);
+#endif
+               }
+       }
+
+        /*
+         * The MCA info structure was allocated earlier and its
+         * physical address saved in __per_cpu_mca[cpu].  Copy that
+         * address to ia64_mca_data so we can access it as a per-CPU
+         * variable.
+         */
+       __get_cpu_var(ia64_mca_data) = __per_cpu_mca[smp_processor_id()];
+#ifdef XEN
+       IA64_MCA_DEBUG("%s: CPU#%d, ia64_mca_data=%lx\n", __FUNCTION__,
+                      smp_processor_id(), __get_cpu_var(ia64_mca_data));
+
+       /* sal_to_os_handoff for smp support */
+       __get_cpu_var(ia64_sal_to_os_handoff_state_addr) =
+                     __pa(&ia64_sal_to_os_handoff_state[smp_processor_id()]);
+       IA64_MCA_DEBUG("%s: CPU#%d, ia64_sal_to_os=%lx\n", __FUNCTION__,
+                      smp_processor_id(),
+                      __get_cpu_var(ia64_sal_to_os_handoff_state_addr));
+#endif
+
+       /*
+        * Stash away a copy of the PTE needed to map the per-CPU page.
+        * We may need it during MCA recovery.
+        */
+       __get_cpu_var(ia64_mca_per_cpu_pte) =
+               pte_val(mk_pte_phys(__pa(cpu_data), PAGE_KERNEL));
+
+        /*
+         * Also, stash away a copy of the PAL address and the PTE
+         * needed to map it.
+         */
+        pal_vaddr = efi_get_pal_addr();
+       if (!pal_vaddr)
+               return;
+       __get_cpu_var(ia64_mca_pal_base) =
+               GRANULEROUNDDOWN((unsigned long) pal_vaddr);
+       __get_cpu_var(ia64_mca_pal_pte) = pte_val(mk_pte_phys(__pa(pal_vaddr),
+                                                             PAGE_KERNEL));
+}
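
A compressed user-space sketch of the allocation scheme above: the boot CPU
sets aside one ia64_mca_cpu-sized area per possible CPU and records each
area's physical address in a global table, and every CPU later copies its own
slot into a per-CPU variable so the MCA path can find it with translation
off.  struct mca_cpu_area and fake_pa() are stand-ins, not the real types or
__pa().

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

#define NR_CPUS_SKETCH 4

struct mca_cpu_area { char init_stack[1024]; };         /* stands in for struct ia64_mca_cpu */

static uintptr_t per_cpu_mca[NR_CPUS_SKETCH];           /* "physical" address, one per CPU */
static uintptr_t this_cpu_mca_data[NR_CPUS_SKETCH];     /* stands in for per-CPU ia64_mca_data */

static uintptr_t fake_pa(void *v) { return (uintptr_t)v; }      /* __pa() placeholder */

static void mca_cpu_init(int cpu)
{
        if (cpu == 0)                                   /* boot CPU allocates for everyone */
                for (int i = 0; i < NR_CPUS_SKETCH; i++)
                        per_cpu_mca[i] = fake_pa(malloc(sizeof(struct mca_cpu_area)));

        this_cpu_mca_data[cpu] = per_cpu_mca[cpu];      /* each CPU caches its own slot */
}

int main(void)
{
        for (int cpu = 0; cpu < NR_CPUS_SKETCH; cpu++) {
                mca_cpu_init(cpu);
                printf("cpu%d mca data at %#lx\n", cpu,
                       (unsigned long)this_cpu_mca_data[cpu]);
        }
        return 0;
}
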
+
+/*
+ * ia64_mca_init
+ *
+ *  Do all the system level mca specific initialization.
+ *
+ *     1. Register spinloop and wakeup request interrupt vectors
+ *
+ *     2. Register OS_MCA handler entry point
+ *
+ *     3. Register OS_INIT handler entry point
+ *
+ *  4. Initialize MCA/CMC/INIT related log buffers maintained by the OS.
+ *
+ *  Note that this initialization is done very early before some kernel
+ *  services are available.
+ *
+ *  Inputs  :   None
+ *
+ *  Outputs :   None
+ */
+void __init
+ia64_mca_init(void)
+{
+       ia64_fptr_t *mon_init_ptr = (ia64_fptr_t *)ia64_monarch_init_handler;
+       ia64_fptr_t *slave_init_ptr = (ia64_fptr_t *)ia64_slave_init_handler;
+       ia64_fptr_t *mca_hldlr_ptr = (ia64_fptr_t *)ia64_os_mca_dispatch;
+#ifdef XEN
+       s64 rc;
+
+       slave_init_ptr = (ia64_fptr_t *)ia64_monarch_init_handler;
+
+       IA64_MCA_DEBUG("%s: begin\n", __FUNCTION__);
+#else
+       int i;
+       s64 rc;
+       struct ia64_sal_retval isrv;
+       u64 timeout = IA64_MCA_RENDEZ_TIMEOUT;  /* platform specific */
+
+       IA64_MCA_DEBUG("%s: begin\n", __FUNCTION__);
+
+       /* Clear the Rendez checkin flag for all cpus */
+       for(i = 0 ; i < NR_CPUS; i++)
+               ia64_mc_info.imi_rendez_checkin[i] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
+
+       /*
+        * Register the rendezvous spinloop and wakeup mechanism with SAL
+        */
+
+       /* Register the rendezvous interrupt vector with SAL */
+       while (1) {
+               isrv = ia64_sal_mc_set_params(SAL_MC_PARAM_RENDEZ_INT,
+                                             SAL_MC_PARAM_MECHANISM_INT,
+                                             IA64_MCA_RENDEZ_VECTOR,
+                                             timeout,
+                                             SAL_MC_PARAM_RZ_ALWAYS);
+               rc = isrv.status;
+               if (rc == 0)
+                       break;
+               if (rc == -2) {
+                       printk(KERN_INFO "Increasing MCA rendezvous timeout from "
+                               "%ld to %ld milliseconds\n", timeout, isrv.v0);
+                       timeout = isrv.v0;
+                       continue;
+               }
+               printk(KERN_ERR "Failed to register rendezvous interrupt "
+                      "with SAL (status %ld)\n", rc);
+               return;
+       }
+
+       /* Register the wakeup interrupt vector with SAL */
+       isrv = ia64_sal_mc_set_params(SAL_MC_PARAM_RENDEZ_WAKEUP,
+                                     SAL_MC_PARAM_MECHANISM_INT,
+                                     IA64_MCA_WAKEUP_VECTOR,
+                                     0, 0);
+       rc = isrv.status;
+       if (rc) {
+               printk(KERN_ERR "Failed to register wakeup interrupt with SAL "
+                      "(status %ld)\n", rc);
+               return;
+       }
+
+       IA64_MCA_DEBUG("%s: registered MCA rendezvous spinloop and wakeup mech.\n", __FUNCTION__);
+#endif /* !XEN */
+
+       ia64_mc_info.imi_mca_handler        = ia64_tpa(mca_hldlr_ptr->fp);
+       /*
+        * XXX - disable SAL checksum by setting size to 0; should be
+        *      ia64_tpa(ia64_os_mca_dispatch_end) - ia64_tpa(ia64_os_mca_dispatch);
+        */
+       ia64_mc_info.imi_mca_handler_size       = 0;
+
+       /* Register the os mca handler with SAL */
+       if ((rc = ia64_sal_set_vectors(SAL_VECTOR_OS_MCA,
+                                      ia64_mc_info.imi_mca_handler,
+                                      ia64_tpa(mca_hldlr_ptr->gp),
+                                      ia64_mc_info.imi_mca_handler_size,
+                                      0, 0, 0)))
+       {
+               printk(KERN_ERR "Failed to register OS MCA handler with SAL "
+                      "(status %ld)\n", rc);
+               return;
+       }
+
+       IA64_MCA_DEBUG("%s: registered OS MCA handler with SAL at 0x%lx, gp = 0x%lx\n", __FUNCTION__,
+                      ia64_mc_info.imi_mca_handler, ia64_tpa(mca_hldlr_ptr->gp));
+
+       /*
+        * XXX - disable SAL checksum by setting size to 0, should be
+        * size of the actual init handler in mca_asm.S.
+        */
+       ia64_mc_info.imi_monarch_init_handler           = ia64_tpa(mon_init_ptr->fp);
+       ia64_mc_info.imi_monarch_init_handler_size      = 0;
+       ia64_mc_info.imi_slave_init_handler             = ia64_tpa(slave_init_ptr->fp);
+       ia64_mc_info.imi_slave_init_handler_size        = 0;
+
+       IA64_MCA_DEBUG("%s: OS INIT handler at %lx\n", __FUNCTION__,
+                      ia64_mc_info.imi_monarch_init_handler);
+
+       /* Register the os init handler with SAL */
+       if ((rc = ia64_sal_set_vectors(SAL_VECTOR_OS_INIT,
+                                      ia64_mc_info.imi_monarch_init_handler,
+                                      ia64_tpa(ia64_getreg(_IA64_REG_GP)),
+                                      ia64_mc_info.imi_monarch_init_handler_size,
+                                      ia64_mc_info.imi_slave_init_handler,
+                                      ia64_tpa(ia64_getreg(_IA64_REG_GP)),
+                                      ia64_mc_info.imi_slave_init_handler_size)))
+       {
+               printk(KERN_ERR "Failed to register m/s INIT handlers with SAL "
+                      "(status %ld)\n", rc);
+               return;
+       }
+
+       IA64_MCA_DEBUG("%s: registered OS INIT handler with SAL\n", __FUNCTION__);
+
+#ifndef XEN
+       /*
+        *  Configure the CMCI/P vector and handler. Interrupts for CMC are
+        *  per-processor, so AP CMC interrupts are setup in smp_callin() (smpboot.c).
+        */
+       register_percpu_irq(IA64_CMC_VECTOR, &cmci_irqaction);
+       register_percpu_irq(IA64_CMCP_VECTOR, &cmcp_irqaction);
+       ia64_mca_cmc_vector_setup();       /* Setup vector on BSP */
+
+       /* Setup the MCA rendezvous interrupt vector */
+       register_percpu_irq(IA64_MCA_RENDEZ_VECTOR, &mca_rdzv_irqaction);
+
+       /* Setup the MCA wakeup interrupt vector */
+       register_percpu_irq(IA64_MCA_WAKEUP_VECTOR, &mca_wkup_irqaction);
+
+#ifdef CONFIG_ACPI
+       /* Setup the CPEI/P handler */
+       register_percpu_irq(IA64_CPEP_VECTOR, &mca_cpep_irqaction);
+#endif
+
+       /* Initialize the areas set aside by the OS to buffer the
+        * platform/processor error states for MCA/INIT/CMC
+        * handling.
+        */
+       ia64_log_init(SAL_INFO_TYPE_MCA);
+       ia64_log_init(SAL_INFO_TYPE_INIT);
+       ia64_log_init(SAL_INFO_TYPE_CMC);
+       ia64_log_init(SAL_INFO_TYPE_CPE);
+#endif /* !XEN */
+
+       mca_init = 1;
+       printk(KERN_INFO "MCA related initialization done\n");
+}
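
The rendezvous registration above retries whenever SAL answers with status -2
and suggests a longer timeout.  A self-contained sketch of that negotiation,
with an invented sal_set_params() standing in for ia64_sal_mc_set_params():

#include <stdio.h>

struct sal_ret { long status; unsigned long v0; };

static struct sal_ret sal_set_params(unsigned long timeout)
{
        if (timeout < 20000)            /* pretend SAL insists on at least 20000 ms */
                return (struct sal_ret){ .status = -2, .v0 = 20000 };
        return (struct sal_ret){ .status = 0, .v0 = 0 };
}

int main(void)
{
        unsigned long timeout = 10000;

        for (;;) {
                struct sal_ret r = sal_set_params(timeout);

                if (r.status == 0)
                        break;
                if (r.status == -2) {
                        printf("increasing rendezvous timeout from %lu to %lu ms\n",
                               timeout, r.v0);
                        timeout = r.v0;
                        continue;
                }
                printf("registration failed: %ld\n", r.status);
                return 1;
        }
        printf("registered with timeout %lu ms\n", timeout);
        return 0;
}
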
+
+#ifndef XEN
+/*
+ * ia64_mca_late_init
+ *
+ *     Opportunity to setup things that require initialization later
+ *     than ia64_mca_init.  Setup a timer to poll for CPEs if the
+ *     platform doesn't support an interrupt driven mechanism.
+ *
+ *  Inputs  :   None
+ *  Outputs :   Status
+ */
+static int __init
+ia64_mca_late_init(void)
+{
+       if (!mca_init)
+               return 0;
+
+       /* Setup the CMCI/P vector and handler */
+       init_timer(&cmc_poll_timer);
+       cmc_poll_timer.function = ia64_mca_cmc_poll;
+
+       /* Unmask/enable the vector */
+       cmc_polling_enabled = 0;
+       schedule_work(&cmc_enable_work);
+
+       IA64_MCA_DEBUG("%s: CMCI/P setup and enabled.\n", __FUNCTION__);
+
+#ifdef CONFIG_ACPI
+       /* Setup the CPEI/P vector and handler */
+       cpe_vector = acpi_request_vector(ACPI_INTERRUPT_CPEI);
+       init_timer(&cpe_poll_timer);
+       cpe_poll_timer.function = ia64_mca_cpe_poll;
+
+       {
+               irq_desc_t *desc;
+               unsigned int irq;
+
+               if (cpe_vector >= 0) {
+                       /* If platform supports CPEI, enable the irq. */
+                       cpe_poll_enabled = 0;
+                       for (irq = 0; irq < NR_IRQS; ++irq)
+                               if (irq_to_vector(irq) == cpe_vector) {
+                                       desc = irq_descp(irq);
+                                       desc->status |= IRQ_PER_CPU;
+                                       setup_irq(irq, &mca_cpe_irqaction);
+                               }
+                       ia64_mca_register_cpev(cpe_vector);
+                       IA64_MCA_DEBUG("%s: CPEI/P setup and enabled.\n", __FUNCTION__);
+               } else {
+                       /* If platform doesn't support CPEI, get the timer going. */
+                       if (cpe_poll_enabled) {
+                               ia64_mca_cpe_poll(0UL);
+                               IA64_MCA_DEBUG("%s: CPEP setup and enabled.\n", __FUNCTION__);
+                       }
+               }
+       }
+#endif
+
+       return 0;
+}
+
+device_initcall(ia64_mca_late_init);
+#endif /* !XEN */
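
The CPEI-versus-polling decision made by ia64_mca_late_init() above reduces
to the following sketch; acpi_cpei_vector() and arm_poll_timer() are invented
placeholders for acpi_request_vector(ACPI_INTERRUPT_CPEI) and the
cpe_poll_timer setup, not real kernel interfaces.

#include <stdio.h>

static int acpi_cpei_vector(void) { return -1; }        /* pretend there is no CPEI */
static void arm_poll_timer(void)  { printf("CPE poll timer armed\n"); }

int main(void)
{
        int cpe_vector = acpi_cpei_vector();

        if (cpe_vector >= 0)
                printf("platform has CPEI: keep interrupt-driven CPE handling\n");
        else
                arm_poll_timer();                       /* no CPEI: fall back to polling */
        return 0;
}
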
diff -r 50ed5c116b4d -r 97c290c7b015 xen/arch/ia64/linux-xen/mca_asm.S
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/ia64/linux-xen/mca_asm.S Sun Jul 09 20:04:23 2006 -0600
@@ -0,0 +1,970 @@
+//
+// assembly portion of the IA64 MCA handling
+//
+// Mods by cfleck to integrate into kernel build
+// 00/03/15 davidm Added various stop bits to get a clean compile
+//
+// 00/03/29 cfleck Added code to save INIT handoff state in pt_regs format, switch to temp
+//                kstack, switch modes, jump to C INIT handler
+//
+// 02/01/04 J.Hall <jenna.s.hall@xxxxxxxxx>
+//                Before entering virtual mode code:
+//                1. Check for TLB CPU error
+//                2. Restore current thread pointer to kr6
+//                3. Move stack ptr 16 bytes to conform to C calling convention
+//
+// 04/11/12 Russ Anderson <rja@xxxxxxx>
+//                Added per cpu MCA/INIT stack save areas.
+//
+#include <linux/config.h>
+#include <linux/threads.h>
+
+#include <asm/asmmacro.h>
+#include <asm/pgtable.h>
+#include <asm/processor.h>
+#include <asm/mca_asm.h>
+#include <asm/mca.h>
+
+/*
+ * When we get a machine check, the kernel stack pointer is no longer
+ * valid, so we need to set a new stack pointer.
+ */
+#define        MINSTATE_PHYS   /* Make sure stack access is physical for MINSTATE */
+
+/*
+ * Needed for return context to SAL
+ */
+#define IA64_MCA_SAME_CONTEXT  0
+#define IA64_MCA_COLD_BOOT     -2
+
+#include "minstate.h"
+
+/*
+ * SAL_TO_OS_MCA_HANDOFF_STATE (SAL 3.0 spec)
+ *             1. GR1 = OS GP
+ *             2. GR8 = PAL_PROC physical address
+ *             3. GR9 = SAL_PROC physical address
+ *             4. GR10 = SAL GP (physical)
+ *             5. GR11 = Rendez state
+ *             6. GR12 = Return address to location within SAL_CHECK
+ */
+#ifdef XEN
+#define SAL_TO_OS_MCA_HANDOFF_STATE_SAVE(_tmp)         \
+       movl    _tmp=THIS_CPU(ia64_sal_to_os_handoff_state_addr);;      \
+       tpa     _tmp=_tmp;;                             \
+       ld8     _tmp=[_tmp];;                           \
+       st8     [_tmp]=r1,0x08;;                        \
+       st8     [_tmp]=r8,0x08;;                        \
+       st8     [_tmp]=r9,0x08;;                        \
+       st8     [_tmp]=r10,0x08;;                       \
+       st8     [_tmp]=r11,0x08;;                       \
+       st8     [_tmp]=r12,0x08;;                       \
+       st8     [_tmp]=r17,0x08;;                       \
+       st8     [_tmp]=r18,0x08
+#else
+#define SAL_TO_OS_MCA_HANDOFF_STATE_SAVE(_tmp)         \
+       LOAD_PHYSICAL(p0, _tmp, ia64_sal_to_os_handoff_state);; \
+       st8     [_tmp]=r1,0x08;;                        \
+       st8     [_tmp]=r8,0x08;;                        \
+       st8     [_tmp]=r9,0x08;;                        \
+       st8     [_tmp]=r10,0x08;;                       \
+       st8     [_tmp]=r11,0x08;;                       \
+       st8     [_tmp]=r12,0x08;;                       \
+       st8     [_tmp]=r17,0x08;;                       \
+       st8     [_tmp]=r18,0x08
+
+/*
+ * OS_MCA_TO_SAL_HANDOFF_STATE (SAL 3.0 spec)
+ * (p6) is executed if we never entered virtual mode (TLB error)
+ * (p7) is executed if we entered virtual mode as expected (normal case)
+ *     1. GR8 = OS_MCA return status
+ *     2. GR9 = SAL GP (physical)
+ *     3. GR10 = 0/1 returning same/new context
+ *     4. GR22 = New min state save area pointer
+ *     returns ptr to SAL rtn save loc in _tmp
+ */
+#define OS_MCA_TO_SAL_HANDOFF_STATE_RESTORE(_tmp)      \
+       movl    _tmp=ia64_os_to_sal_handoff_state;;     \
+       DATA_VA_TO_PA(_tmp);;                           \
+       ld8     r8=[_tmp],0x08;;                        \
+       ld8     r9=[_tmp],0x08;;                        \
+       ld8     r10=[_tmp],0x08;;                       \
+       ld8     r22=[_tmp],0x08;;
+       // now _tmp is pointing to SAL rtn save location
+
+/*
+ * COLD_BOOT_HANDOFF_STATE() sets ia64_mca_os_to_sal_state
+ *     imots_os_status=IA64_MCA_COLD_BOOT
+ *     imots_sal_gp=SAL GP
+ *     imots_context=IA64_MCA_SAME_CONTEXT
+ *     imots_new_min_state=Min state save area pointer
+ *     imots_sal_check_ra=Return address to location within SAL_CHECK
+ *
+ */
+#define COLD_BOOT_HANDOFF_STATE(sal_to_os_handoff,os_to_sal_handoff,tmp)\
+       movl    tmp=IA64_MCA_COLD_BOOT;                                 \
+       movl    sal_to_os_handoff=__pa(ia64_sal_to_os_handoff_state);   \
+       movl    os_to_sal_handoff=__pa(ia64_os_to_sal_handoff_state);;  \
+       st8     [os_to_sal_handoff]=tmp,8;;                             \
+       ld8     tmp=[sal_to_os_handoff],48;;                            \
+       st8     [os_to_sal_handoff]=tmp,8;;                             \
+       movl    tmp=IA64_MCA_SAME_CONTEXT;;                             \
+       st8     [os_to_sal_handoff]=tmp,8;;                             \
+       ld8     tmp=[sal_to_os_handoff],-8;;                            \
+       st8     [os_to_sal_handoff]=tmp,8;;                             \
+       ld8     tmp=[sal_to_os_handoff];;                               \
+       st8     [os_to_sal_handoff]=tmp;;
+
+#define GET_IA64_MCA_DATA(reg)                                         \
+       GET_THIS_PADDR(reg, ia64_mca_data)                              \
+       ;;                                                              \
+       ld8 reg=[reg]
+
+#endif /* XEN */
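
As a reading aid for the save order used by SAL_TO_OS_MCA_HANDOFF_STATE_SAVE
above (eight st8 stores, 8 bytes apart), here is an illustrative C layout.
The first six slots follow the SAL 3.0 handoff list in the comment; r17 and
r18 are the two extra values this implementation also stashes.  This struct
is only a sketch, not the real handoff-state type.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

struct sal_to_os_handoff_sketch {
        uint64_t os_gp;                 /* GR1  */
        uint64_t pal_proc;              /* GR8  */
        uint64_t sal_proc;              /* GR9  */
        uint64_t sal_gp;                /* GR10 */
        uint64_t rendez_state;          /* GR11 */
        uint64_t sal_check_ra;          /* GR12 */
        uint64_t extra_r17;             /* GR17, also saved by the macro */
        uint64_t extra_r18;             /* GR18, also saved by the macro */
};

int main(void)
{
        /* each "st8 [_tmp]=rX,0x08" advances by 8, so the slots sit at 0, 8, ..., 56 */
        printf("sal_check_ra lives at offset %zu\n",
               offsetof(struct sal_to_os_handoff_sketch, sal_check_ra));
        return 0;
}
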
+       .global ia64_os_mca_dispatch
+       .global ia64_os_mca_dispatch_end
+#ifndef XEN
+       .global ia64_sal_to_os_handoff_state
+       .global ia64_os_to_sal_handoff_state
+       .global ia64_do_tlb_purge
+#endif
+
+       .text
+       .align 16
+
+#ifndef XEN
+/*
+ * Just the TLB purge part is moved to a separate function
+ * so we can re-use the code for cpu hotplug code as well
+ * Caller should now setup b1, so we can branch once the
+ * tlb flush is complete.
+ */
+
+ia64_do_tlb_purge:
+#define O(member)      IA64_CPUINFO_##member##_OFFSET
+
+       GET_THIS_PADDR(r2, cpu_info)    // load phys addr of cpu_info into r2
+       ;;
+       addl r17=O(PTCE_STRIDE),r2
+       addl r2=O(PTCE_BASE),r2
+       ;;
+       ld8 r18=[r2],(O(PTCE_COUNT)-O(PTCE_BASE));;     // r18=ptce_base
+       ld4 r19=[r2],4                                  // r19=ptce_count[0]
+       ld4 r21=[r17],4                                 // r21=ptce_stride[0]
+       ;;
+       ld4 r20=[r2]                                    // r20=ptce_count[1]
+       ld4 r22=[r17]                                   // r22=ptce_stride[1]
+       mov r24=0
+       ;;
+       adds r20=-1,r20
+       ;;
+#undef O
+
+2:
+       cmp.ltu p6,p7=r24,r19
+(p7)   br.cond.dpnt.few 4f
+       mov ar.lc=r20
+3:
+       ptc.e r18
+       ;;
+       add r18=r22,r18
+       br.cloop.sptk.few 3b
+       ;;
+       add r18=r21,r18
+       add r24=1,r24
+       ;;
+       br.sptk.few 2b
+4:
+       srlz.i                  // srlz.i implies srlz.d
+       ;;
+
+        // Now purge addresses formerly mapped by TR registers
+       // 1. Purge ITR&DTR for kernel.
+       movl r16=KERNEL_START
+       mov r18=KERNEL_TR_PAGE_SHIFT<<2
+       ;;
+       ptr.i r16, r18
+       ptr.d r16, r18
+       ;;
+       srlz.i
+       ;;
+       srlz.d
+       ;;
+       // 2. Purge DTR for PERCPU data.
+       movl r16=PERCPU_ADDR
+       mov r18=PERCPU_PAGE_SHIFT<<2
+       ;;
+       ptr.d r16,r18
+       ;;
+       srlz.d
+       ;;
+       // 3. Purge ITR for PAL code.
+       GET_THIS_PADDR(r2, ia64_mca_pal_base)
+       ;;
+       ld8 r16=[r2]
+       mov r18=IA64_GRANULE_SHIFT<<2
+       ;;
+       ptr.i r16,r18
+       ;;
+       srlz.i
+       ;;
+       // 4. Purge DTR for stack.
+       mov r16=IA64_KR(CURRENT_STACK)
+       ;;
+       shl r16=r16,IA64_GRANULE_SHIFT
+       movl r19=PAGE_OFFSET
+       ;;
+       add r16=r19,r16
+       mov r18=IA64_GRANULE_SHIFT<<2
+       ;;
+       ptr.d r16,r18
+       ;;
+       srlz.i
+       ;;
+       // Now branch away to caller.
+       br.sptk.many b1
+       ;;
+
+ia64_os_mca_dispatch:
+
+       // Serialize all MCA processing
+       mov     r3=1;;
+       LOAD_PHYSICAL(p0,r2,ia64_mca_serialize);;
+ia64_os_mca_spin:
+       xchg8   r4=[r2],r3;;
+       cmp.ne  p6,p0=r4,r0
+(p6)   br ia64_os_mca_spin
+
+       // Save the SAL to OS MCA handoff state as defined
+       // by SAL SPEC 3.0
+       // NOTE : The order in which the state gets saved
+       //        is dependent on the way the C-structure
+       //        for ia64_mca_sal_to_os_state_t has been
+       //        defined in include/asm/mca.h
+       SAL_TO_OS_MCA_HANDOFF_STATE_SAVE(r2)
+       ;;
+
+       // LOG PROCESSOR STATE INFO FROM HERE ON..
+begin_os_mca_dump:
+       br      ia64_os_mca_proc_state_dump;;
+
+ia64_os_mca_done_dump:
+
+       LOAD_PHYSICAL(p0,r16,ia64_sal_to_os_handoff_state+56)
+       ;;
+       ld8 r18=[r16]           // Get processor state parameter on existing PALE_CHECK.
+       ;;
+       tbit.nz p6,p7=r18,60
+(p7)   br.spnt done_tlb_purge_and_reload
+
+       // The following code purges TC and TR entries. Then reload all TC entries.
+       // Purge percpu data TC entries.
+begin_tlb_purge_and_reload:
+       movl r18=ia64_reload_tr;;
+       LOAD_PHYSICAL(p0,r18,ia64_reload_tr);;
+       mov b1=r18;;
+       br.sptk.many ia64_do_tlb_purge;;
+
+ia64_reload_tr:
+       // Finally reload the TR registers.
+       // 1. Reload DTR/ITR registers for kernel.
+       mov r18=KERNEL_TR_PAGE_SHIFT<<2
+       movl r17=KERNEL_START
+       ;;
+       mov cr.itir=r18
+       mov cr.ifa=r17
+        mov r16=IA64_TR_KERNEL
+       mov r19=ip
+       movl r18=PAGE_KERNEL
+       ;;
+        dep r17=0,r19,0, KERNEL_TR_PAGE_SHIFT
+       ;;
+       or r18=r17,r18
+       ;;
+        itr.i itr[r16]=r18
+       ;;
+        itr.d dtr[r16]=r18
+        ;;
+       srlz.i
+       srlz.d
+       ;;
+       // 2. Reload DTR register for PERCPU data.
+       GET_THIS_PADDR(r2, ia64_mca_per_cpu_pte)
+       ;;
+       movl r16=PERCPU_ADDR            // vaddr
+       movl r18=PERCPU_PAGE_SHIFT<<2
+       ;;
+       mov cr.itir=r18
+       mov cr.ifa=r16
+       ;;
+       ld8 r18=[r2]                    // load per-CPU PTE
+       mov r16=IA64_TR_PERCPU_DATA;
+       ;;
+       itr.d dtr[r16]=r18
+       ;;
+       srlz.d
+       ;;
+       // 3. Reload ITR for PAL code.
+       GET_THIS_PADDR(r2, ia64_mca_pal_pte)
+       ;;
+       ld8 r18=[r2]                    // load PAL PTE
+       ;;
+       GET_THIS_PADDR(r2, ia64_mca_pal_base)
+       ;;
+       ld8 r16=[r2]                    // load PAL vaddr
+       mov r19=IA64_GRANULE_SHIFT<<2
+       ;;
+       mov cr.itir=r19
+       mov cr.ifa=r16
+       mov r20=IA64_TR_PALCODE
+       ;;
+       itr.i itr[r20]=r18
+       ;;
+       srlz.i
+       ;;
+       // 4. Reload DTR for stack.
+       mov r16=IA64_KR(CURRENT_STACK)
+       ;;
+       shl r16=r16,IA64_GRANULE_SHIFT
+       movl r19=PAGE_OFFSET
+       ;;
+       add r18=r19,r16
+       movl r20=PAGE_KERNEL
+       ;;
+       add r16=r20,r16
+       mov r19=IA64_GRANULE_SHIFT<<2
+       ;;
+       mov cr.itir=r19
+       mov cr.ifa=r18
+       mov r20=IA64_TR_CURRENT_STACK
+       ;;
+       itr.d dtr[r20]=r16
+       ;;
+       srlz.d
+       ;;
+       br.sptk.many done_tlb_purge_and_reload
+err:
+       COLD_BOOT_HANDOFF_STATE(r20,r21,r22)
+       br.sptk.many ia64_os_mca_done_restore
+
+done_tlb_purge_and_reload:
+
+       // Setup new stack frame for OS_MCA handling
+       GET_IA64_MCA_DATA(r2)
+       ;;
+       add r3 = IA64_MCA_CPU_STACKFRAME_OFFSET, r2
+       add r2 = IA64_MCA_CPU_RBSTORE_OFFSET, r2
+       ;;
+       rse_switch_context(r6,r3,r2);;  // RSC management in this new context
+
+       GET_IA64_MCA_DATA(r2)
+       ;;
+       add r2 = IA64_MCA_CPU_STACK_OFFSET+IA64_MCA_STACK_SIZE-16, r2
+       ;;
+       mov r12=r2              // establish new stack-pointer
+
+        // Enter virtual mode from physical mode
+       VIRTUAL_MODE_ENTER(r2, r3, ia64_os_mca_virtual_begin, r4)
+ia64_os_mca_virtual_begin:
+
+       // Call virtual mode handler
+       movl            r2=ia64_mca_ucmc_handler;;
+       mov             b6=r2;;
+       br.call.sptk.many    b0=b6;;
+.ret0:
+       // Revert back to physical mode before going back to SAL
+       PHYSICAL_MODE_ENTER(r2, r3, ia64_os_mca_virtual_end, r4)
+ia64_os_mca_virtual_end:
+
+       // restore the original stack frame here
+       GET_IA64_MCA_DATA(r2)
+       ;;
+       add r2 = IA64_MCA_CPU_STACKFRAME_OFFSET, r2
+       ;;
+       movl    r4=IA64_PSR_MC
+       ;;
+       rse_return_context(r4,r3,r2)    // switch from interrupt context for RSE
+
+       // let us restore all the registers from our PSI structure
+       mov     r8=gp
+       ;;
+begin_os_mca_restore:
+       br      ia64_os_mca_proc_state_restore;;
+
+ia64_os_mca_done_restore:
+       OS_MCA_TO_SAL_HANDOFF_STATE_RESTORE(r2);;
+       // branch back to SALE_CHECK
+       ld8             r3=[r2];;
+       mov             b0=r3;;         // SAL_CHECK return address
+
+       // release lock
+       movl            r3=ia64_mca_serialize;;
+       DATA_VA_TO_PA(r3);;
+       st8.rel         [r3]=r0
+
+       br              b0
+       ;;
+ia64_os_mca_dispatch_end:
+//EndMain//////////////////////////////////////////////////////////////////////
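
The serialization taken at ia64_os_mca_spin above is an exchange-based
spinlock on the ia64_mca_serialize word, released with st8.rel before
branching back to SAL.  A minimal C11 equivalent, purely as an illustration
of the idiom rather than how the hypervisor actually takes locks:

#include <stdatomic.h>
#include <stdio.h>

static atomic_ulong mca_serialize;

static void mca_lock(void)
{
        while (atomic_exchange(&mca_serialize, 1UL) != 0UL)
                ;                               /* spin until the previous owner releases */
}

static void mca_unlock(void)
{
        atomic_store(&mca_serialize, 0UL);      /* the st8.rel [r3]=r0 step */
}

int main(void)
{
        mca_lock();
        printf("this CPU owns MCA processing\n");
        mca_unlock();
        return 0;
}
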
+
+
+//++
+// Name:
+//      ia64_os_mca_proc_state_dump()
+//
+// Stub Description:
+//
+//       This stub dumps the processor state during MCHK to a data area
+//
+//--
+
+ia64_os_mca_proc_state_dump:
+// Save bank 1 GRs 16-31 which will be used by c-language code when we switch
+//  to virtual addressing mode.
+       GET_IA64_MCA_DATA(r2)
+       ;;
+       add r2 = IA64_MCA_CPU_PROC_STATE_DUMP_OFFSET, r2
+       ;;
+// save ar.NaT
+       mov             r5=ar.unat                  // ar.unat
+
+// save banked GRs 16-31 along with NaT bits
+       bsw.1;;
+       st8.spill       [r2]=r16,8;;
+       st8.spill       [r2]=r17,8;;
+       st8.spill       [r2]=r18,8;;
+       st8.spill       [r2]=r19,8;;
+       st8.spill       [r2]=r20,8;;
+       st8.spill       [r2]=r21,8;;
+       st8.spill       [r2]=r22,8;;
+       st8.spill       [r2]=r23,8;;
+       st8.spill       [r2]=r24,8;;
+       st8.spill       [r2]=r25,8;;
+       st8.spill       [r2]=r26,8;;
+       st8.spill       [r2]=r27,8;;
+       st8.spill       [r2]=r28,8;;
+       st8.spill       [r2]=r29,8;;
+       st8.spill       [r2]=r30,8;;
+       st8.spill       [r2]=r31,8;;
+
+       mov             r4=ar.unat;;
+       st8             [r2]=r4,8                // save User NaT bits for r16-r31
+       mov             ar.unat=r5                  // restore original unat
+       bsw.0;;
+
+//save BRs
+       add             r4=8,r2                  // duplicate r2 in r4
+       add             r6=2*8,r2                // duplicate r2 in r6
+
+       mov             r3=b0
+       mov             r5=b1
+       mov             r7=b2;;
+       st8             [r2]=r3,3*8
+       st8             [r4]=r5,3*8
+       st8             [r6]=r7,3*8;;
+
+       mov             r3=b3
+       mov             r5=b4
+       mov             r7=b5;;
+       st8             [r2]=r3,3*8
+       st8             [r4]=r5,3*8
+       st8             [r6]=r7,3*8;;
+
+       mov             r3=b6
+       mov             r5=b7;;
+       st8             [r2]=r3,2*8
+       st8             [r4]=r5,2*8;;
+
+cSaveCRs:
+// save CRs
+       add             r4=8,r2                  // duplicate r2 in r4
+       add             r6=2*8,r2                // duplicate r2 in r6
+
+       mov             r3=cr.dcr
+       mov             r5=cr.itm
+       mov             r7=cr.iva;;
+
+       st8             [r2]=r3,8*8
+       st8             [r4]=r5,3*8
+       st8             [r6]=r7,3*8;;            // 48 byte increments
+
+       mov             r3=cr.pta;;
+       st8             [r2]=r3,8*8;;            // 64 byte increments
+
+// if PSR.ic=0, reading interruption registers causes an illegal operation fault
+       mov             r3=psr;;
+       tbit.nz.unc     p6,p0=r3,PSR_IC;;           // PSI Valid Log bit pos. test
+(p6)    st8     [r2]=r0,9*8+160             // increment by 232 byte inc.
+begin_skip_intr_regs:
+(p6)   br              SkipIntrRegs;;
+
+       add             r4=8,r2                  // duplicate r2 in r4
+       add             r6=2*8,r2                // duplicate r2 in r6
+
+       mov             r3=cr.ipsr
+       mov             r5=cr.isr
+       mov             r7=r0;;
+       st8             [r2]=r3,3*8
+       st8             [r4]=r5,3*8
+       st8             [r6]=r7,3*8;;
+
+       mov             r3=cr.iip
+       mov             r5=cr.ifa
+       mov             r7=cr.itir;;
+       st8             [r2]=r3,3*8
+       st8             [r4]=r5,3*8
+       st8             [r6]=r7,3*8;;
+
+       mov             r3=cr.iipa
+       mov             r5=cr.ifs
+       mov             r7=cr.iim;;
+       st8             [r2]=r3,3*8
+       st8             [r4]=r5,3*8
+       st8             [r6]=r7,3*8;;
+
+       mov             r3=cr25;;                   // cr.iha
+       st8             [r2]=r3,160;;               // 160 byte increment
+
+SkipIntrRegs:
+       st8             [r2]=r0,152;;               // another 152 byte inc.
+
+       add             r4=8,r2                     // duplicate r2 in r4
+       add             r6=2*8,r2                   // duplicate r2 in r6
+
+       mov             r3=cr.lid
+//     mov             r5=cr.ivr                     // cr.ivr, don't read it
+       mov             r7=cr.tpr;;
+       st8             [r2]=r3,3*8
+       st8             [r4]=r5,3*8
+       st8             [r6]=r7,3*8;;
+
+       mov             r3=r0                       // cr.eoi => cr67
+       mov             r5=r0                       // cr.irr0 => cr68
+       mov             r7=r0;;                     // cr.irr1 => cr69
+       st8             [r2]=r3,3*8
+       st8             [r4]=r5,3*8
+       st8             [r6]=r7,3*8;;
+
+       mov             r3=r0                       // cr.irr2 => cr70
+       mov             r5=r0                       // cr.irr3 => cr71
+       mov             r7=cr.itv;;
+       st8             [r2]=r3,3*8
+       st8             [r4]=r5,3*8
+       st8             [r6]=r7,3*8;;
+
+       mov             r3=cr.pmv
+       mov             r5=cr.cmcv;;
+       st8             [r2]=r3,7*8
+       st8             [r4]=r5,7*8;;
+
+       mov             r3=r0                       // cr.lrr0 => cr80
+       mov             r5=r0;;                     // cr.lrr1 => cr81
+       st8             [r2]=r3,23*8
+       st8             [r4]=r5,23*8;;
+
+       adds            r2=25*8,r2;;
+
+cSaveARs:
+// save ARs
+       add             r4=8,r2                  // duplicate r2 in r4
+       add             r6=2*8,r2                // duplicate r2 in r6
+
+       mov             r3=ar.k0
+       mov             r5=ar.k1
+       mov             r7=ar.k2;;
+       st8             [r2]=r3,3*8
+       st8             [r4]=r5,3*8
+       st8             [r6]=r7,3*8;;
+
+       mov             r3=ar.k3
+       mov             r5=ar.k4
+       mov             r7=ar.k5;;
+       st8             [r2]=r3,3*8
+       st8             [r4]=r5,3*8
+       st8             [r6]=r7,3*8;;
+
+       mov             r3=ar.k6
+       mov             r5=ar.k7
+       mov             r7=r0;;                     // ar.kr8
+       st8             [r2]=r3,10*8
+       st8             [r4]=r5,10*8
+       st8             [r6]=r7,10*8;;           // increment by 72 bytes
+
+       mov             r3=ar.rsc
+       mov             ar.rsc=r0                           // put RSE in enforced lazy mode
+       mov             r5=ar.bsp
+       ;;
+       mov             r7=ar.bspstore;;
+       st8             [r2]=r3,3*8
+       st8             [r4]=r5,3*8
+       st8             [r6]=r7,3*8;;
+
+       mov             r3=ar.rnat;;
+       st8             [r2]=r3,8*13             // increment by 13x8 bytes
+
+       mov             r3=ar.ccv;;
+       st8             [r2]=r3,8*4
+
+       mov             r3=ar.unat;;
+       st8             [r2]=r3,8*4
+
+       mov             r3=ar.fpsr;;
+       st8             [r2]=r3,8*4
+
+       mov             r3=ar.itc;;
+       st8             [r2]=r3,160                 // 160
+
+       mov             r3=ar.pfs;;
+       st8             [r2]=r3,8
+
+       mov             r3=ar.lc;;
+       st8             [r2]=r3,8
+
+       mov             r3=ar.ec;;
+       st8             [r2]=r3
+       add             r2=8*62,r2               //padding
+
+// save RRs
+       mov             ar.lc=0x08-1
+       movl            r4=0x00;;
+
+cStRR:
+       dep.z           r5=r4,61,3;;
+       mov             r3=rr[r5];;
+       st8             [r2]=r3,8
+       add             r4=1,r4
+       br.cloop.sptk.few       cStRR
+       ;;
+end_os_mca_dump:
+       br      ia64_os_mca_done_dump;;
+
+//EndStub//////////////////////////////////////////////////////////////////////
+
+
+//++
+// Name:
+//       ia64_os_mca_proc_state_restore()
+//
+// Stub Description:
+//
+//       This is a stub to restore the saved processor state during MCHK
+//
+//--
+
+ia64_os_mca_proc_state_restore:
+
+// Restore bank1 GR16-31
+       GET_IA64_MCA_DATA(r2)
+       ;;
+       add r2 = IA64_MCA_CPU_PROC_STATE_DUMP_OFFSET, r2
+
+restore_GRs:                                    // restore bank-1 GRs 16-31
+       bsw.1;;
+       add             r3=16*8,r2;;                // to get to NaT of GR 16-31
+       ld8             r3=[r3];;
+       mov             ar.unat=r3;;                // first restore NaT
+
+       ld8.fill        r16=[r2],8;;
+       ld8.fill        r17=[r2],8;;
+       ld8.fill        r18=[r2],8;;
+       ld8.fill        r19=[r2],8;;
+       ld8.fill        r20=[r2],8;;
+       ld8.fill        r21=[r2],8;;
+       ld8.fill        r22=[r2],8;;
+       ld8.fill        r23=[r2],8;;
+       ld8.fill        r24=[r2],8;;
+       ld8.fill        r25=[r2],8;;
+       ld8.fill        r26=[r2],8;;
+       ld8.fill        r27=[r2],8;;
+       ld8.fill        r28=[r2],8;;
+       ld8.fill        r29=[r2],8;;
+       ld8.fill        r30=[r2],8;;
+       ld8.fill        r31=[r2],8;;
+
+       ld8             r3=[r2],8;;              // increment to skip NaT
+       bsw.0;;
+
+restore_BRs:
+       add             r4=8,r2                  // duplicate r2 in r4
+       add             r6=2*8,r2;;              // duplicate r2 in r6
+
+       ld8             r3=[r2],3*8
+       ld8             r5=[r4],3*8
+       ld8             r7=[r6],3*8;;
+       mov             b0=r3
+       mov             b1=r5
+       mov             b2=r7;;
+
+       ld8             r3=[r2],3*8
+       ld8             r5=[r4],3*8
+       ld8             r7=[r6],3*8;;
+       mov             b3=r3
+       mov             b4=r5
+       mov             b5=r7;;
+
+       ld8             r3=[r2],2*8
+       ld8             r5=[r4],2*8;;
+       mov             b6=r3
+       mov             b7=r5;;
+
+restore_CRs:
+       add             r4=8,r2                  // duplicate r2 in r4
+       add             r6=2*8,r2;;              // duplicate r2 in r6
+
+       ld8             r3=[r2],8*8
+       ld8             r5=[r4],3*8
+       ld8             r7=[r6],3*8;;            // 48 byte increments
+       mov             cr.dcr=r3
+       mov             cr.itm=r5
+       mov             cr.iva=r7;;
+
+       ld8             r3=[r2],8*8;;            // 64 byte increments
+//      mov            cr.pta=r3
+
+
+// if PSR.ic=1, reading interruption registers causes an illegal operation fault
+       mov             r3=psr;;
+       tbit.nz.unc     p6,p0=r3,PSR_IC;;           // PSI Valid Log bit pos. test
+(p6)    st8     [r2]=r0,9*8+160             // increment by 232 byte inc.
+
+begin_rskip_intr_regs:
+(p6)   br              rSkipIntrRegs;;
+
+       add             r4=8,r2                  // duplicate r2 in r4
+       add             r6=2*8,r2;;              // duplicate r2 in r6
+
+       ld8             r3=[r2],3*8
+       ld8             r5=[r4],3*8
+       ld8             r7=[r6],3*8;;
+       mov             cr.ipsr=r3
+//     mov             cr.isr=r5                   // cr.isr is read only
+
+       ld8             r3=[r2],3*8
+       ld8             r5=[r4],3*8
+       ld8             r7=[r6],3*8;;
+       mov             cr.iip=r3
+       mov             cr.ifa=r5
+       mov             cr.itir=r7;;
+
+       ld8             r3=[r2],3*8
+       ld8             r5=[r4],3*8
+       ld8             r7=[r6],3*8;;
+       mov             cr.iipa=r3
+       mov             cr.ifs=r5
+       mov             cr.iim=r7
+
+       ld8             r3=[r2],160;;               // 160 byte increment
+       mov             cr.iha=r3
+
+rSkipIntrRegs:
+       ld8             r3=[r2],152;;               // another 152 byte inc.
+
+       add             r4=8,r2                     // duplicate r2 in r4
+       add             r6=2*8,r2;;                 // duplicate r2 in r6
+
+       ld8             r3=[r2],8*3
+       ld8             r5=[r4],8*3
+       ld8             r7=[r6],8*3;;
+       mov             cr.lid=r3
+//     mov             cr.ivr=r5                   // cr.ivr is read only
+       mov             cr.tpr=r7;;
+
+       ld8             r3=[r2],8*3
+       ld8             r5=[r4],8*3
+       ld8             r7=[r6],8*3;;
+//     mov             cr.eoi=r3
+//     mov             cr.irr0=r5                  // cr.irr0 is read only
+//     mov             cr.irr1=r7;;                // cr.irr1 is read only
+
+       ld8             r3=[r2],8*3
+       ld8             r5=[r4],8*3
+       ld8             r7=[r6],8*3;;
+//     mov             cr.irr2=r3                  // cr.irr2 is read only
+//     mov             cr.irr3=r5                  // cr.irr3 is read only
+       mov             cr.itv=r7;;
+
+       ld8             r3=[r2],8*7
+       ld8             r5=[r4],8*7;;
+       mov             cr.pmv=r3
+       mov             cr.cmcv=r5;;
+
+       ld8             r3=[r2],8*23
+       ld8             r5=[r4],8*23;;
+       adds            r2=8*23,r2
+       adds            r4=8*23,r4;;
+//     mov             cr.lrr0=r3
+//     mov             cr.lrr1=r5
+
+       adds            r2=8*2,r2;;
+
+restore_ARs:
+       add             r4=8,r2                  // duplicate r2 in r4
+       add             r6=2*8,r2;;              // duplicate r2 in r6
+
+       ld8             r3=[r2],3*8
+       ld8             r5=[r4],3*8
+       ld8             r7=[r6],3*8;;
+       mov             ar.k0=r3
+       mov             ar.k1=r5
+       mov             ar.k2=r7;;
+
+       ld8             r3=[r2],3*8
+       ld8             r5=[r4],3*8
+       ld8             r7=[r6],3*8;;
+       mov             ar.k3=r3
+       mov             ar.k4=r5
+       mov             ar.k5=r7;;
+
+       ld8             r3=[r2],10*8
+       ld8             r5=[r4],10*8
+       ld8             r7=[r6],10*8;;
+       mov             ar.k6=r3
+       mov             ar.k7=r5
+       ;;
+
+       ld8             r3=[r2],3*8
+       ld8             r5=[r4],3*8
+       ld8             r7=[r6],3*8;;
+//     mov             ar.rsc=r3
+//     mov             ar.bsp=r5                   // ar.bsp is read only
+       mov             ar.rsc=r0                           // make sure that RSE is in enforced lazy mode
+       ;;
+       mov             ar.bspstore=r7;;
+
+       ld8             r9=[r2],8*13;;
+       mov             ar.rnat=r9
+
+       mov             ar.rsc=r3
+       ld8             r3=[r2],8*4;;
+       mov             ar.ccv=r3
+
+       ld8             r3=[r2],8*4;;
+       mov             ar.unat=r3
+
+       ld8             r3=[r2],8*4;;
+       mov             ar.fpsr=r3
+
+       ld8             r3=[r2],160;;               // 160
+//      mov            ar.itc=r3
+
+       ld8             r3=[r2],8;;
+       mov             ar.pfs=r3
+
+       ld8             r3=[r2],8;;
+       mov             ar.lc=r3
+
+       ld8             r3=[r2];;
+       mov             ar.ec=r3
+       add             r2=8*62,r2;;             // padding
+
+restore_RRs:
+       mov             r5=ar.lc
+       mov             ar.lc=0x08-1
+       movl            r4=0x00;;
+cStRRr:
+       dep.z           r7=r4,61,3
+       ld8             r3=[r2],8;;
+       mov             rr[r7]=r3                   // what are its access privileges?
+       add             r4=1,r4
+       br.cloop.sptk.few       cStRRr
+       ;;
+       mov             ar.lc=r5
+       ;;
+end_os_mca_restore:
+       br      ia64_os_mca_done_restore;;
+
+//EndStub//////////////////////////////////////////////////////////////////////
+#else
+ia64_os_mca_dispatch:
+1:
+       br.sptk 1b
+ia64_os_mca_dispatch_end:
+#endif /* !XEN */
+
+
+// ok, the issue here is that we need to save state information so
+// it can be usable by the kernel debugger and show_regs routines.
+// In order to do this, our best bet is to save the current state (plus
+// the state information obtained from the MIN_STATE_AREA) into a pt_regs
+// format.  This way we can pass it on in a usable format.
+//
+
+//
+// SAL to OS entry point for INIT on the monarch processor
+// This has been defined for registration purposes with SAL
+// as a part of ia64_mca_init.
+//
+// When we get here, the following registers have been
+// set by the SAL for our use
+//
+//             1. GR1 = OS INIT GP
+//             2. GR8 = PAL_PROC physical address
+//             3. GR9 = SAL_PROC physical address
+//             4. GR10 = SAL GP (physical)
+//             5. GR11 = Init Reason
+//                     0 = Received INIT for event other than crash dump switch
+//                     1 = Received wakeup at the end of an OS_MCA corrected machine check
+//                     2 = Received INIT due to CrashDump switch assertion
+//
+//             6. GR12 = Return address to location within SAL_INIT procedure
+
+
+GLOBAL_ENTRY(ia64_monarch_init_handler)
+       .prologue
+       // stash the information the SAL passed to os
+       SAL_TO_OS_MCA_HANDOFF_STATE_SAVE(r2)
+       ;;
+       SAVE_MIN_WITH_COVER
+       ;;
+       mov r8=cr.ifa
+       mov r9=cr.isr
+       adds r3=8,r2                            // set up second base pointer
+       ;;
+       SAVE_REST
+
+// ok, enough should be saved at this point to be dangerous, and supply
+// information for a dump
+// We need to switch to Virtual mode before hitting the C functions.
+
+       movl    r2=IA64_PSR_IT|IA64_PSR_IC|IA64_PSR_DT|IA64_PSR_RT|IA64_PSR_DFH|IA64_PSR_BN
+       mov     r3=psr  // get the current psr, minimum enabled at this point
+       ;;
+       or      r2=r2,r3
+       ;;
+       movl    r3=IVirtual_Switch
+       ;;
+       mov     cr.iip=r3       // short return to set the appropriate bits
+       mov     cr.ipsr=r2      // need to do an rfi to set appropriate bits
+       ;;
+       rfi
+       ;;
+IVirtual_Switch:
+       //
+       // We should now be running virtual
+       //
+       // Let's call the C handler to get the rest of the state info
+       //
+       alloc r14=ar.pfs,0,0,2,0                // now it's safe (must be first in insn group!)
+       ;;
+       adds out0=16,sp                         // out0 = pointer to pt_regs
+       ;;
+       DO_SAVE_SWITCH_STACK
+       .body
+       adds out1=16,sp                         // out1 = pointer to switch_stack
+
+       br.call.sptk.many rp=ia64_init_handler
+.ret1:
+
+return_from_init:
+       br.sptk return_from_init
+END(ia64_monarch_init_handler)
+
+//
+// SAL to OS entry point for INIT on the slave processor
+// This has been defined for registration purposes with SAL
+// as a part of ia64_mca_init.
+//
+
+GLOBAL_ENTRY(ia64_slave_init_handler)
+1:     br.sptk 1b
+END(ia64_slave_init_handler)
diff -r 50ed5c116b4d -r 97c290c7b015 xen/include/asm-ia64/linux-xen/asm/asmmacro.h
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-ia64/linux-xen/asm/asmmacro.h     Sun Jul 09 20:04:23 2006 -0600
@@ -0,0 +1,119 @@
+#ifndef _ASM_IA64_ASMMACRO_H
+#define _ASM_IA64_ASMMACRO_H
+
+/*
+ * Copyright (C) 2000-2001, 2003-2004 Hewlett-Packard Co
+ *     David Mosberger-Tang <davidm@xxxxxxxxxx>
+ */
+
+#include <linux/config.h>
+
+#define ENTRY(name)                            \
+       .align 32;                              \
+       .proc name;                             \
+name:
+
+#define ENTRY_MIN_ALIGN(name)                  \
+       .align 16;                              \
+       .proc name;                             \
+name:
+
+#define GLOBAL_ENTRY(name)                     \
+       .global name;                           \
+       ENTRY(name)
+
+#define END(name)                              \
+       .endp name
+
+/*
+ * Helper macros to make unwind directives more readable:
+ */
+
+/* prologue_gr: */
+#define ASM_UNW_PRLG_RP                        0x8
+#define ASM_UNW_PRLG_PFS               0x4
+#define ASM_UNW_PRLG_PSP               0x2
+#define ASM_UNW_PRLG_PR                        0x1
+#define ASM_UNW_PRLG_GRSAVE(ninputs)   (32+(ninputs))
+
+/*
+ * Helper macros for accessing user memory.
+ */
+
+       .section "__ex_table", "a"              // declare section & section attributes
+       .previous
+
+# define EX(y,x...)                            \
+       .xdata4 "__ex_table", 99f-., y-.;       \
+  [99:]        x
+# define EXCLR(y,x...)                         \
+       .xdata4 "__ex_table", 99f-., y-.+4;     \
+  [99:]        x
+
+/*
+ * Mark instructions that need a load of a virtual address patched to be
+ * a load of a physical address.  We use this either in critical performance
+ * path (ivt.S - TLB miss processing) or in places where it might not be
+ * safe to use a "tpa" instruction (mca_asm.S - error recovery).
+ */
+       .section ".data.patch.vtop", "a"        // declare section & section attributes
+       .previous
+
+#ifdef XEN
+#define        LOAD_PHYSICAL(pr, reg, obj)             \
+[1:](pr)movl reg = obj;;                       \
+       shl reg = reg,4;;                       \
+       shr.u reg = reg,4;;                     \
+       .xdata4 ".data.patch.vtop", 1b-.
+#else
+#define        LOAD_PHYSICAL(pr, reg, obj)             \
+[1:](pr)movl reg = obj;                                \
+       .xdata4 ".data.patch.vtop", 1b-.
+#endif
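
At run time the XEN flavour of LOAD_PHYSICAL above converts the loaded
virtual address by shifting left and then logically right by 4, which clears
the top four bits; for Xen's direct-mapped addresses that leaves the physical
address.  A tiny sketch of the arithmetic with a made-up address:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t vaddr = 0xf000000004008000ULL;         /* made-up region-7 virtual address */
        uint64_t paddr = (vaddr << 4) >> 4;             /* the shl 4 ; shr.u 4 pair */

        printf("vaddr=%#llx -> paddr=%#llx\n",
               (unsigned long long)vaddr, (unsigned long long)paddr);
        return 0;
}
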
+
+/*
+ * For now, we always put in the McKinley E9 workaround.  On CPUs that don't need it,
+ * we'll patch out the work-around bundles with NOPs, so their impact is minimal.
+ */
+#define DO_MCKINLEY_E9_WORKAROUND
+
+#ifdef DO_MCKINLEY_E9_WORKAROUND
+       .section ".data.patch.mckinley_e9", "a"
+       .previous
+/* workaround for Itanium 2 Errata 9: */
+# define FSYS_RETURN                                   \
+       .xdata4 ".data.patch.mckinley_e9", 1f-.;        \
+1:{ .mib;                                              \
+       nop.m 0;                                        \
+       mov r16=ar.pfs;                                 \
+       br.call.sptk.many b7=2f;;                       \
+  };                                                   \
+2:{ .mib;                                              \
+       nop.m 0;                                        \
+       mov ar.pfs=r16;                                 \
+       br.ret.sptk.many b6;;                           \
+  }
+#else
+# define FSYS_RETURN   br.ret.sptk.many b6
+#endif
+
+/*
+ * Up until early 2004, use of .align within a function caused bad unwind info.
+ * TEXT_ALIGN(n) expands into ".align n" if a fixed GAS is available or into nothing
+ * otherwise.
+ */
+#ifdef HAVE_WORKING_TEXT_ALIGN
+# define TEXT_ALIGN(n) .align n
+#else
+# define TEXT_ALIGN(n)
+#endif
+
+#ifdef HAVE_SERIALIZE_DIRECTIVE
+# define dv_serialize_data             .serialize.data
+# define dv_serialize_instruction      .serialize.instruction
+#else
+# define dv_serialize_data
+# define dv_serialize_instruction
+#endif
+
+#endif /* _ASM_IA64_ASMMACRO_H */
diff -r 50ed5c116b4d -r 97c290c7b015 xen/include/asm-ia64/linux/asm/asmmacro.h
--- a/xen/include/asm-ia64/linux/asm/asmmacro.h Fri Jul 07 10:36:31 2006 -0600
+++ /dev/null   Thu Jan 01 00:00:00 1970 +0000
@@ -1,111 +0,0 @@
-#ifndef _ASM_IA64_ASMMACRO_H
-#define _ASM_IA64_ASMMACRO_H
-
-/*
- * Copyright (C) 2000-2001, 2003-2004 Hewlett-Packard Co
- *     David Mosberger-Tang <davidm@xxxxxxxxxx>
- */
-
-#include <linux/config.h>
-
-#define ENTRY(name)                            \
-       .align 32;                              \
-       .proc name;                             \
-name:
-
-#define ENTRY_MIN_ALIGN(name)                  \
-       .align 16;                              \
-       .proc name;                             \
-name:
-
-#define GLOBAL_ENTRY(name)                     \
-       .global name;                           \
-       ENTRY(name)
-
-#define END(name)                              \
-       .endp name
-
-/*
- * Helper macros to make unwind directives more readable:
- */
-
-/* prologue_gr: */
-#define ASM_UNW_PRLG_RP                        0x8
-#define ASM_UNW_PRLG_PFS               0x4
-#define ASM_UNW_PRLG_PSP               0x2
-#define ASM_UNW_PRLG_PR                        0x1
-#define ASM_UNW_PRLG_GRSAVE(ninputs)   (32+(ninputs))
-
-/*
- * Helper macros for accessing user memory.
- */
-
-       .section "__ex_table", "a"              // declare section & section attributes
-       .previous
-
-# define EX(y,x...)                            \
-       .xdata4 "__ex_table", 99f-., y-.;       \
-  [99:]        x
-# define EXCLR(y,x...)                         \
-       .xdata4 "__ex_table", 99f-., y-.+4;     \
-  [99:]        x
-
-/*
- * Mark instructions that need a load of a virtual address patched to be
- * a load of a physical address.  We use this either in critical performance
- * path (ivt.S - TLB miss processing) or in places where it might not be
- * safe to use a "tpa" instruction (mca_asm.S - error recovery).
- */
-       .section ".data.patch.vtop", "a"        // declare section & section attributes
-       .previous
-
-#define        LOAD_PHYSICAL(pr, reg, obj)             \
-[1:](pr)movl reg = obj;                                \
-       .xdata4 ".data.patch.vtop", 1b-.
-
-/*
- * For now, we always put in the McKinley E9 workaround.  On CPUs that don't need it,
- * we'll patch out the work-around bundles with NOPs, so their impact is minimal.
- */
-#define DO_MCKINLEY_E9_WORKAROUND
-
-#ifdef DO_MCKINLEY_E9_WORKAROUND
-       .section ".data.patch.mckinley_e9", "a"
-       .previous
-/* workaround for Itanium 2 Errata 9: */
-# define FSYS_RETURN                                   \
-       .xdata4 ".data.patch.mckinley_e9", 1f-.;        \
-1:{ .mib;                                              \
-       nop.m 0;                                        \
-       mov r16=ar.pfs;                                 \
-       br.call.sptk.many b7=2f;;                       \
-  };                                                   \
-2:{ .mib;                                              \
-       nop.m 0;                                        \
-       mov ar.pfs=r16;                                 \
-       br.ret.sptk.many b6;;                           \
-  }
-#else
-# define FSYS_RETURN   br.ret.sptk.many b6
-#endif
-
-/*
- * Up until early 2004, use of .align within a function caused bad unwind info.
- * TEXT_ALIGN(n) expands into ".align n" if a fixed GAS is available or into nothing
- * otherwise.
- */
-#ifdef HAVE_WORKING_TEXT_ALIGN
-# define TEXT_ALIGN(n) .align n
-#else
-# define TEXT_ALIGN(n)
-#endif
-
-#ifdef HAVE_SERIALIZE_DIRECTIVE
-# define dv_serialize_data             .serialize.data
-# define dv_serialize_instruction      .serialize.instruction
-#else
-# define dv_serialize_data
-# define dv_serialize_instruction
-#endif
-
-#endif /* _ASM_IA64_ASMMACRO_H */

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
