xen-changelog

[Xen-changelog] [xen-unstable] merge with ia64 sn2+machvec tree

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] merge with ia64 sn2+machvec tree
From: Xen patchbot-unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Thu, 18 Jan 2007 21:10:25 -0800
Delivery-date: Thu, 18 Jan 2007 21:11:18 -0800
Envelope-to: www-data@xxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User awilliam@xxxxxxxxxxxx
# Date 1166651702 25200
# Node ID 80c5b5914b795b28249c8f7251ecd24b53186339
# Parent  46c44b5e6a1b4d462cf02990098a63dfcc36913a
# Parent  37309be2686190a25aa11241ac1a754437faca9d
merge with ia64 sn2+machvec tree
---
 xen/include/asm-ia64/linux-null/linux/ioport.h           |    1 
 xen/include/asm-ia64/linux-xen/asm/sn/sn_sal.h           |  994 ----
 xen/include/asm-ia64/linux/asm/machvec.h                 |  390 -
 xen/include/asm-ia64/linux/asm/pci.h                     |  161 
 xen/arch/ia64/linux-xen/Makefile                         |    2 
 xen/arch/ia64/linux-xen/sn/Makefile                      |    1 
 xen/arch/ia64/linux-xen/sn/kernel/Makefile               |    5 
 xen/arch/ia64/linux-xen/sn/kernel/README.origin          |   12 
 xen/arch/ia64/linux-xen/sn/kernel/io_init.c              |  783 +++
 xen/arch/ia64/linux-xen/sn/kernel/iomv.c                 |   82 
 xen/arch/ia64/linux-xen/sn/kernel/irq.c                  |  542 ++
 xen/arch/ia64/linux-xen/sn/kernel/setup.c                |  808 +++
 xen/arch/ia64/linux-xen/sn/kernel/sn2_smp.c              |  548 ++
 xen/arch/ia64/linux/Makefile                             |    5 
 xen/arch/ia64/linux/README.origin                        |    3 
 xen/arch/ia64/linux/dig/Makefile                         |    1 
 xen/arch/ia64/linux/dig/README.origin                    |    7 
 xen/arch/ia64/linux/dig/machvec.c                        |    3 
 xen/arch/ia64/linux/hp/Makefile                          |    1 
 xen/arch/ia64/linux/hp/zx1/Makefile                      |    1 
 xen/arch/ia64/linux/hp/zx1/README.origin                 |    7 
 xen/arch/ia64/linux/hp/zx1/hpzx1_machvec.c               |    3 
 xen/arch/ia64/linux/io.c                                 |  164 
 xen/arch/ia64/linux/sn/Makefile                          |    2 
 xen/arch/ia64/linux/sn/kernel/Makefile                   |    3 
 xen/arch/ia64/linux/sn/kernel/README.origin              |    9 
 xen/arch/ia64/linux/sn/kernel/machvec.c                  |   11 
 xen/arch/ia64/linux/sn/kernel/pio_phys.S                 |   71 
 xen/arch/ia64/linux/sn/kernel/ptc_deadlock.S             |   92 
 xen/arch/ia64/linux/sn/pci/Makefile                      |    1 
 xen/arch/ia64/linux/sn/pci/pcibr/Makefile                |    1 
 xen/arch/ia64/linux/sn/pci/pcibr/README.origin           |    7 
 xen/arch/ia64/linux/sn/pci/pcibr/pcibr_reg.c             |  285 +
 xen/arch/ia64/xen/irq.c                                  |    7 
 xen/arch/ia64/xen/vhpt.c                                 |    8 
 xen/include/asm-ia64/config.h                            |   23 
 xen/include/asm-ia64/linux-null/linux/dmapool.h          |    1 
 xen/include/asm-ia64/linux-null/linux/rwsem.h            |    1 
 xen/include/asm-ia64/linux-xen/asm/README.origin         |    7 
 xen/include/asm-ia64/linux-xen/asm/machvec.h             |  498 ++
 xen/include/asm-ia64/linux-xen/asm/machvec_dig.h         |   46 
 xen/include/asm-ia64/linux-xen/asm/machvec_hpzx1.h       |   66 
 xen/include/asm-ia64/linux-xen/asm/machvec_sn2.h         |  166 
 xen/include/asm-ia64/linux-xen/asm/page.h                |   10 
 xen/include/asm-ia64/linux-xen/asm/pci.h                 |  185 
 xen/include/asm-ia64/linux-xen/asm/sn/README.origin      |   16 
 xen/include/asm-ia64/linux-xen/asm/sn/addrs.h            |  299 +
 xen/include/asm-ia64/linux-xen/asm/sn/arch.h             |   92 
 xen/include/asm-ia64/linux-xen/asm/sn/hubdev.h           |   95 
 xen/include/asm-ia64/linux-xen/asm/sn/intr.h             |   73 
 xen/include/asm-ia64/linux-xen/asm/sn/io.h               |  281 +
 xen/include/asm-ia64/linux-xen/asm/sn/nodepda.h          |   87 
 xen/include/asm-ia64/linux-xen/asm/sn/pcibr_provider.h   |  153 
 xen/include/asm-ia64/linux-xen/asm/sn/rw_mmr.h           |   32 
 xen/include/asm-ia64/linux-xen/asm/sn/types.h            |   28 
 xen/include/asm-ia64/linux-xen/asm/system.h              |    1 
 xen/include/asm-ia64/linux-xen/asm/types.h               |    8 
 xen/include/asm-ia64/linux-xen/linux/README.origin       |    5 
 xen/include/asm-ia64/linux-xen/linux/device.h            |  489 ++
 xen/include/asm-ia64/linux-xen/linux/kobject.h           |  286 +
 xen/include/asm-ia64/linux-xen/linux/pci.h               |  820 +++
 xen/include/asm-ia64/linux/README.origin                 |   12 
 xen/include/asm-ia64/linux/asm/README.origin             |    4 
 xen/include/asm-ia64/linux/asm/machvec_init.h            |   32 
 xen/include/asm-ia64/linux/asm/sn/README.origin          |   24 
 xen/include/asm-ia64/linux/asm/sn/geo.h                  |  132 
 xen/include/asm-ia64/linux/asm/sn/klconfig.h             |  246 +
 xen/include/asm-ia64/linux/asm/sn/l1.h                   |   51 
 xen/include/asm-ia64/linux/asm/sn/leds.h                 |   33 
 xen/include/asm-ia64/linux/asm/sn/module.h               |  127 
 xen/include/asm-ia64/linux/asm/sn/pcibus_provider_defs.h |   68 
 xen/include/asm-ia64/linux/asm/sn/pcidev.h               |   83 
 xen/include/asm-ia64/linux/asm/sn/pda.h                  |   69 
 xen/include/asm-ia64/linux/asm/sn/pic.h                  |  261 +
 xen/include/asm-ia64/linux/asm/sn/shub_mmr.h             |  502 ++
 xen/include/asm-ia64/linux/asm/sn/shubio.h               | 3358 +++++++++++++++
 xen/include/asm-ia64/linux/asm/sn/simulator.h            |   20 
 xen/include/asm-ia64/linux/asm/sn/sn_cpuid.h             |  132 
 xen/include/asm-ia64/linux/asm/sn/sn_feature_sets.h      |   51 
 xen/include/asm-ia64/linux/asm/sn/sn_sal.h               | 1157 +++++
 xen/include/asm-ia64/linux/asm/sn/tiocp.h                |  257 +
 xen/include/asm-ia64/linux/asm/sn/xbow.h                 |  301 +
 xen/include/asm-ia64/linux/asm/sn/xwidgetdev.h           |   70 
 xen/include/asm-ia64/linux/completion.h                  |   57 
 xen/include/asm-ia64/linux/ioport.h                      |  136 
 xen/include/asm-ia64/linux/klist.h                       |   61 
 xen/include/asm-ia64/linux/kref.h                        |   32 
 xen/include/asm-ia64/linux/mod_devicetable.h             |  323 +
 xen/include/asm-ia64/linux/pci_ids.h                     | 2356 ++++++++++
 xen/include/asm-ia64/linux/pci_regs.h                    |  488 ++
 xen/include/asm-ia64/linux/pm.h                          |  279 +
 xen/include/asm-ia64/linux/sysfs.h                       |  206 
 92 files changed, 18155 insertions(+), 1562 deletions(-)

diff -r 46c44b5e6a1b -r 80c5b5914b79 xen/arch/ia64/linux-xen/Makefile
--- a/xen/arch/ia64/linux-xen/Makefile  Wed Dec 20 08:53:42 2006 -0700
+++ b/xen/arch/ia64/linux-xen/Makefile  Wed Dec 20 14:55:02 2006 -0700
@@ -1,3 +1,5 @@ obj-y += efi.o
+subdir-y += sn
+
 obj-y += efi.o
 obj-y += entry.o
 obj-y += irq_ia64.o
diff -r 46c44b5e6a1b -r 80c5b5914b79 xen/arch/ia64/linux-xen/sn/Makefile
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/ia64/linux-xen/sn/Makefile       Wed Dec 20 14:55:02 2006 -0700
@@ -0,0 +1,1 @@
+subdir-y += kernel
diff -r 46c44b5e6a1b -r 80c5b5914b79 xen/arch/ia64/linux-xen/sn/kernel/Makefile
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/ia64/linux-xen/sn/kernel/Makefile        Wed Dec 20 14:55:02 2006 -0700
@@ -0,0 +1,5 @@
+obj-y += sn2_smp.o
+obj-y += setup.o
+obj-y += iomv.o
+obj-y += irq.o
+obj-y += io_init.o
diff -r 46c44b5e6a1b -r 80c5b5914b79 xen/arch/ia64/linux-xen/sn/kernel/README.origin
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/ia64/linux-xen/sn/kernel/README.origin   Wed Dec 20 14:55:02 2006 -0700
@@ -0,0 +1,12 @@
+# Source files in this directory are near-identical copies of linux-2.6.19
+# files:
+
+# NOTE: ALL changes to these files should be clearly marked
+# (e.g. with #ifdef XEN or XEN in a comment) so that they can be
+# easily updated to future versions of the corresponding Linux files.
+
+io_init.c              -> linux/arch/ia64/sn/kernel/io_init.c
+iomv.c                 -> linux/arch/ia64/sn/kernel/iomv.c
+irq.c                  -> linux/arch/ia64/sn/kernel/irq.c
+setup.c                        -> linux/arch/ia64/sn/kernel/setup.c
+sn2_smp.c              -> linux/arch/ia64/sn/kernel/sn2/sn2_smp.c
diff -r 46c44b5e6a1b -r 80c5b5914b79 xen/arch/ia64/linux-xen/sn/kernel/io_init.c
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/ia64/linux-xen/sn/kernel/io_init.c       Wed Dec 20 14:55:02 2006 -0700
@@ -0,0 +1,783 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000-2005 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#include <linux/bootmem.h>
+#include <linux/nodemask.h>
+#ifdef XEN
+#include <linux/init.h>
+#endif
+#include <asm/sn/types.h>
+#include <asm/sn/addrs.h>
+#include <asm/sn/sn_feature_sets.h>
+#include <asm/sn/geo.h>
+#include <asm/sn/io.h>
+#include <asm/sn/l1.h>
+#include <asm/sn/module.h>
+#include <asm/sn/pcibr_provider.h>
+#include <asm/sn/pcibus_provider_defs.h>
+#ifndef XEN
+#include <asm/sn/pcidev.h>
+#endif
+#include <asm/sn/simulator.h>
+#include <asm/sn/sn_sal.h>
+#ifndef XEN
+#include <asm/sn/tioca_provider.h>
+#include <asm/sn/tioce_provider.h>
+#endif
+#ifdef XEN
+#include "asm/sn/hubdev.h"
+#include "asm/sn/xwidgetdev.h"
+#else
+#include "xtalk/hubdev.h"
+#include "xtalk/xwidgetdev.h"
+#endif
+
+
+extern void sn_init_cpei_timer(void);
+extern void register_sn_procfs(void);
+#ifdef XEN
+extern void sn_irq_lh_init(void);
+#endif
+
+static struct list_head sn_sysdata_list;
+
+/* sysdata list struct */
+struct sysdata_el {
+       struct list_head entry;
+       void *sysdata;
+};
+
+struct slab_info {
+       struct hubdev_info hubdev;
+};
+
+struct brick {
+       moduleid_t id;          /* Module ID of this module        */
+       struct slab_info slab_info[MAX_SLABS + 1];
+};
+
+int sn_ioif_inited;            /* SN I/O infrastructure initialized? */
+
+struct sn_pcibus_provider *sn_pci_provider[PCIIO_ASIC_MAX_TYPES];      /* indexed by asic type */
+
+#ifndef XEN
+static int max_segment_number;          /* Default highest segment number */
+static int max_pcibus_number = 255;    /* Default highest pci bus number */
+
+/*
+ * Hooks and struct for unsupported pci providers
+ */
+
+static dma_addr_t
+sn_default_pci_map(struct pci_dev *pdev, unsigned long paddr, size_t size, int type)
+{
+       return 0;
+}
+
+static void
+sn_default_pci_unmap(struct pci_dev *pdev, dma_addr_t addr, int direction)
+{
+       return;
+}
+
+static void *
+sn_default_pci_bus_fixup(struct pcibus_bussoft *soft, struct pci_controller *controller)
+{
+       return NULL;
+}
+
+static struct sn_pcibus_provider sn_pci_default_provider = {
+       .dma_map = sn_default_pci_map,
+       .dma_map_consistent = sn_default_pci_map,
+       .dma_unmap = sn_default_pci_unmap,
+       .bus_fixup = sn_default_pci_bus_fixup,
+};
+#endif
+
+/*
+ * Retrieve the DMA Flush List given nasid, widget, and device.
+ * This list is needed to implement the WAR - Flush DMA data on PIO Reads.
+ */
+static inline u64
+sal_get_device_dmaflush_list(u64 nasid, u64 widget_num, u64 device_num,
+                            u64 address)
+{
+       struct ia64_sal_retval ret_stuff;
+       ret_stuff.status = 0;
+       ret_stuff.v0 = 0;
+
+       SAL_CALL_NOLOCK(ret_stuff,
+                       (u64) SN_SAL_IOIF_GET_DEVICE_DMAFLUSH_LIST,
+                       (u64) nasid, (u64) widget_num,
+                       (u64) device_num, (u64) address, 0, 0, 0);
+       return ret_stuff.status;
+}
+
+/*
+ * Retrieve the hub device info structure for the given nasid.
+ */
+static inline u64 sal_get_hubdev_info(u64 handle, u64 address)
+{
+       struct ia64_sal_retval ret_stuff;
+       ret_stuff.status = 0;
+       ret_stuff.v0 = 0;
+
+       SAL_CALL_NOLOCK(ret_stuff,
+                       (u64) SN_SAL_IOIF_GET_HUBDEV_INFO,
+                       (u64) handle, (u64) address, 0, 0, 0, 0, 0);
+       return ret_stuff.v0;
+}
+
+/*
+ * Retrieve the pci bus information given the bus number.
+ */
+static inline u64 sal_get_pcibus_info(u64 segment, u64 busnum, u64 address)
+{
+       struct ia64_sal_retval ret_stuff;
+       ret_stuff.status = 0;
+       ret_stuff.v0 = 0;
+
+       SAL_CALL_NOLOCK(ret_stuff,
+                       (u64) SN_SAL_IOIF_GET_PCIBUS_INFO,
+                       (u64) segment, (u64) busnum, (u64) address, 0, 0, 0, 0);
+       return ret_stuff.v0;
+}
+
+#ifndef XEN
+/*
+ * Retrieve the pci device information given the bus and device|function number.
+ */
+static inline u64
+sal_get_pcidev_info(u64 segment, u64 bus_number, u64 devfn, u64 pci_dev,
+                   u64 sn_irq_info)
+{
+       struct ia64_sal_retval ret_stuff;
+       ret_stuff.status = 0;
+       ret_stuff.v0 = 0;
+
+       SAL_CALL_NOLOCK(ret_stuff,
+                       (u64) SN_SAL_IOIF_GET_PCIDEV_INFO,
+                       (u64) segment, (u64) bus_number, (u64) devfn,
+                       (u64) pci_dev,
+                       sn_irq_info, 0, 0);
+       return ret_stuff.v0;
+}
+
+/*
+ * sn_pcidev_info_get() - Retrieve the pcidev_info struct for the specified
+ *                       device.
+ */
+inline struct pcidev_info *
+sn_pcidev_info_get(struct pci_dev *dev)
+{
+       struct pcidev_info *pcidev;
+
+       list_for_each_entry(pcidev,
+                           &(SN_PCI_CONTROLLER(dev)->pcidev_info), pdi_list) {
+               if (pcidev->pdi_linux_pcidev == dev) {
+                       return pcidev;
+               }
+       }
+       return NULL;
+}
+
+/* Older PROM flush WAR
+ *
+ * 01/16/06 -- This war will be in place until a new official PROM is released.
+ * Additionally note that the struct sn_flush_device_war also has to be
+ * removed from arch/ia64/sn/include/xtalk/hubdev.h
+ */
+static u8 war_implemented = 0;
+
+static s64 sn_device_fixup_war(u64 nasid, u64 widget, int device,
+                              struct sn_flush_device_common *common)
+{
+       struct sn_flush_device_war *war_list;
+       struct sn_flush_device_war *dev_entry;
+       struct ia64_sal_retval isrv = {0,0,0,0};
+
+       if (!war_implemented) {
+               printk(KERN_WARNING "PROM version < 4.50 -- implementing old "
+                      "PROM flush WAR\n");
+               war_implemented = 1;
+       }
+
+       war_list = kzalloc(DEV_PER_WIDGET * sizeof(*war_list), GFP_KERNEL);
+       if (!war_list)
+               BUG();
+
+       SAL_CALL_NOLOCK(isrv, SN_SAL_IOIF_GET_WIDGET_DMAFLUSH_LIST,
+                       nasid, widget, __pa(war_list), 0, 0, 0 ,0);
+       if (isrv.status)
+               panic("sn_device_fixup_war failed: %s\n",
+                     ia64_sal_strerror(isrv.status));
+
+       dev_entry = war_list + device;
+       memcpy(common,dev_entry, sizeof(*common));
+       kfree(war_list);
+
+       return isrv.status;
+}
+
+/*
+ * sn_fixup_ionodes() - This routine initializes the HUB data strcuture for
+ *     each node in the system.
+ */
+static void __init sn_fixup_ionodes(void)
+{
+       struct sn_flush_device_kernel *sn_flush_device_kernel;
+       struct sn_flush_device_kernel *dev_entry;
+       struct hubdev_info *hubdev;
+       u64 status;
+       u64 nasid;
+       int i, widget, device, size;
+
+       /*
+        * Get SGI Specific HUB chipset information.
+        * Inform Prom that this kernel can support domain bus numbering.
+        */
+       for (i = 0; i < num_cnodes; i++) {
+               hubdev = (struct hubdev_info *)(NODEPDA(i)->pdinfo);
+               nasid = cnodeid_to_nasid(i);
+               hubdev->max_segment_number = 0xffffffff;
+               hubdev->max_pcibus_number = 0xff;
+               status = sal_get_hubdev_info(nasid, (u64) __pa(hubdev));
+               if (status)
+                       continue;
+
+               /* Save the largest Domain and pcibus numbers found. */
+               if (hubdev->max_segment_number) {
+                       /*
+                        * Dealing with a Prom that supports segments.
+                        */
+                       max_segment_number = hubdev->max_segment_number;
+                       max_pcibus_number = hubdev->max_pcibus_number;
+               }
+
+               /* Attach the error interrupt handlers */
+               if (nasid & 1)
+                       ice_error_init(hubdev);
+               else
+                       hub_error_init(hubdev);
+
+               for (widget = 0; widget <= HUB_WIDGET_ID_MAX; widget++)
+                       hubdev->hdi_xwidget_info[widget].xwi_hubinfo = hubdev;
+
+               if (!hubdev->hdi_flush_nasid_list.widget_p)
+                       continue;
+
+               size = (HUB_WIDGET_ID_MAX + 1) *
+                       sizeof(struct sn_flush_device_kernel *);
+               hubdev->hdi_flush_nasid_list.widget_p =
+                       kzalloc(size, GFP_KERNEL);
+               if (!hubdev->hdi_flush_nasid_list.widget_p)
+                       BUG();
+
+               for (widget = 0; widget <= HUB_WIDGET_ID_MAX; widget++) {
+                       size = DEV_PER_WIDGET *
+                               sizeof(struct sn_flush_device_kernel);
+                       sn_flush_device_kernel = kzalloc(size, GFP_KERNEL);
+                       if (!sn_flush_device_kernel)
+                               BUG();
+
+                       dev_entry = sn_flush_device_kernel;
+                       for (device = 0; device < DEV_PER_WIDGET;
+                            device++,dev_entry++) {
+                               size = sizeof(struct sn_flush_device_common);
+                               dev_entry->common = kzalloc(size, GFP_KERNEL);
+                               if (!dev_entry->common)
+                                       BUG();
+
+                               if (sn_prom_feature_available(
+                                                      PRF_DEVICE_FLUSH_LIST))
+                                       status = sal_get_device_dmaflush_list(
+                                                    nasid, widget, device,
+                                                    (u64)(dev_entry->common));
+                               else
+#ifdef XEN
+                                       BUG();
+#else
+                                       status = sn_device_fixup_war(nasid,
+                                                    widget, device,
+                                                    dev_entry->common);
+#endif
+                               if (status != SALRET_OK)
+                                       panic("SAL call failed: %s\n",
+                                             ia64_sal_strerror(status));
+
+                               spin_lock_init(&dev_entry->sfdl_flush_lock);
+                       }
+
+                       if (sn_flush_device_kernel)
+                               hubdev->hdi_flush_nasid_list.widget_p[widget] =
+                                                      sn_flush_device_kernel;
+               }
+       }
+}
+
+/*
+ * sn_pci_window_fixup() - Create a pci_window for each device resource.
+ *                        Until ACPI support is added, we need this code
+ *                        to setup pci_windows for use by
+ *                        pcibios_bus_to_resource(),
+ *                        pcibios_resource_to_bus(), etc.
+ */
+static void
+sn_pci_window_fixup(struct pci_dev *dev, unsigned int count,
+                   s64 * pci_addrs)
+{
+       struct pci_controller *controller = PCI_CONTROLLER(dev->bus);
+       unsigned int i;
+       unsigned int idx;
+       unsigned int new_count;
+       struct pci_window *new_window;
+
+       if (count == 0)
+               return;
+       idx = controller->windows;
+       new_count = controller->windows + count;
+       new_window = kcalloc(new_count, sizeof(struct pci_window), GFP_KERNEL);
+       if (new_window == NULL)
+               BUG();
+       if (controller->window) {
+               memcpy(new_window, controller->window,
+                      sizeof(struct pci_window) * controller->windows);
+               kfree(controller->window);
+       }
+
+       /* Setup a pci_window for each device resource. */
+       for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
+               if (pci_addrs[i] == -1)
+                       continue;
+
+               new_window[idx].offset = dev->resource[i].start - pci_addrs[i];
+               new_window[idx].resource = dev->resource[i];
+               idx++;
+       }
+
+       controller->windows = new_count;
+       controller->window = new_window;
+}
+
+void sn_pci_unfixup_slot(struct pci_dev *dev)
+{
+       struct pci_dev *host_pci_dev = SN_PCIDEV_INFO(dev)->host_pci_dev;
+
+       sn_irq_unfixup(dev);
+       pci_dev_put(host_pci_dev);
+       pci_dev_put(dev);
+}
+
+/*
+ * sn_pci_fixup_slot() - This routine sets up a slot's resources
+ * consistent with the Linux PCI abstraction layer.  Resources acquired
+ * from our PCI provider include PIO maps to BAR space and interrupt
+ * objects.
+ */
+void sn_pci_fixup_slot(struct pci_dev *dev)
+{
+       unsigned int count = 0;
+       int idx;
+       int segment = pci_domain_nr(dev->bus);
+       int status = 0;
+       struct pcibus_bussoft *bs;
+       struct pci_bus *host_pci_bus;
+       struct pci_dev *host_pci_dev;
+       struct pcidev_info *pcidev_info;
+       s64 pci_addrs[PCI_ROM_RESOURCE + 1];
+       struct sn_irq_info *sn_irq_info;
+       unsigned long size;
+       unsigned int bus_no, devfn;
+
+       pci_dev_get(dev); /* for the sysdata pointer */
+       pcidev_info = kzalloc(sizeof(struct pcidev_info), GFP_KERNEL);
+       if (!pcidev_info)
+               BUG();          /* Cannot afford to run out of memory */
+
+       sn_irq_info = kzalloc(sizeof(struct sn_irq_info), GFP_KERNEL);
+       if (!sn_irq_info)
+               BUG();          /* Cannot afford to run out of memory */
+
+       /* Call to retrieve pci device information needed by kernel. */
+       status = sal_get_pcidev_info((u64) segment, (u64) dev->bus->number, 
+                                    dev->devfn,
+                                    (u64) __pa(pcidev_info),
+                                    (u64) __pa(sn_irq_info));
+       if (status)
+               BUG(); /* Cannot get platform pci device information */
+
+       /* Add pcidev_info to list in sn_pci_controller struct */
+       list_add_tail(&pcidev_info->pdi_list,
+                     &(SN_PCI_CONTROLLER(dev->bus)->pcidev_info));
+
+       /* Copy over PIO Mapped Addresses */
+       for (idx = 0; idx <= PCI_ROM_RESOURCE; idx++) {
+               unsigned long start, end, addr;
+
+               if (!pcidev_info->pdi_pio_mapped_addr[idx]) {
+                       pci_addrs[idx] = -1;
+                       continue;
+               }
+
+               start = dev->resource[idx].start;
+               end = dev->resource[idx].end;
+               size = end - start;
+               if (size == 0) {
+                       pci_addrs[idx] = -1;
+                       continue;
+               }
+               pci_addrs[idx] = start;
+               count++;
+               addr = pcidev_info->pdi_pio_mapped_addr[idx];
+               addr = ((addr << 4) >> 4) | __IA64_UNCACHED_OFFSET;
+               dev->resource[idx].start = addr;
+               dev->resource[idx].end = addr + size;
+               if (dev->resource[idx].flags & IORESOURCE_IO)
+                       dev->resource[idx].parent = &ioport_resource;
+               else
+                       dev->resource[idx].parent = &iomem_resource;
+       }
+       /* Create a pci_window in the pci_controller struct for
+        * each device resource.
+        */
+       if (count > 0)
+               sn_pci_window_fixup(dev, count, pci_addrs);
+
+       /*
+        * Using the PROMs values for the PCI host bus, get the Linux
+        * PCI host_pci_dev struct and set up host bus linkages
+        */
+
+       bus_no = (pcidev_info->pdi_slot_host_handle >> 32) & 0xff;
+       devfn = pcidev_info->pdi_slot_host_handle & 0xffffffff;
+       host_pci_bus = pci_find_bus(segment, bus_no);
+       host_pci_dev = pci_get_slot(host_pci_bus, devfn);
+
+       pcidev_info->host_pci_dev = host_pci_dev;
+       pcidev_info->pdi_linux_pcidev = dev;
+       pcidev_info->pdi_host_pcidev_info = SN_PCIDEV_INFO(host_pci_dev);
+       bs = SN_PCIBUS_BUSSOFT(dev->bus);
+       pcidev_info->pdi_pcibus_info = bs;
+
+       if (bs && bs->bs_asic_type < PCIIO_ASIC_MAX_TYPES) {
+               SN_PCIDEV_BUSPROVIDER(dev) = sn_pci_provider[bs->bs_asic_type];
+       } else {
+               SN_PCIDEV_BUSPROVIDER(dev) = &sn_pci_default_provider;
+       }
+
+       /* Only set up IRQ stuff if this device has a host bus context */
+       if (bs && sn_irq_info->irq_irq) {
+               pcidev_info->pdi_sn_irq_info = sn_irq_info;
+               dev->irq = pcidev_info->pdi_sn_irq_info->irq_irq;
+               sn_irq_fixup(dev, sn_irq_info);
+       } else {
+               pcidev_info->pdi_sn_irq_info = NULL;
+               kfree(sn_irq_info);
+       }
+}
+
+/*
+ * sn_pci_controller_fixup() - This routine sets up a bus's resources
+ * consistent with the Linux PCI abstraction layer.
+ */
+void sn_pci_controller_fixup(int segment, int busnum, struct pci_bus *bus)
+{
+       int status;
+       int nasid, cnode;
+       struct pci_controller *controller;
+       struct sn_pci_controller *sn_controller;
+       struct pcibus_bussoft *prom_bussoft_ptr;
+       struct hubdev_info *hubdev_info;
+       void *provider_soft;
+       struct sn_pcibus_provider *provider;
+
+       status = sal_get_pcibus_info((u64) segment, (u64) busnum,
+                                    (u64) ia64_tpa(&prom_bussoft_ptr));
+       if (status > 0)
+               return;         /*bus # does not exist */
+       prom_bussoft_ptr = __va(prom_bussoft_ptr);
+
+       /* Allocate a sn_pci_controller, which has a pci_controller struct
+        * as the first member.
+        */
+       sn_controller = kzalloc(sizeof(struct sn_pci_controller), GFP_KERNEL);
+       if (!sn_controller)
+               BUG();
+       INIT_LIST_HEAD(&sn_controller->pcidev_info);
+       controller = &sn_controller->pci_controller;
+       controller->segment = segment;
+
+       if (bus == NULL) {
+               bus = pci_scan_bus(busnum, &pci_root_ops, controller);
+               if (bus == NULL)
+                       goto error_return; /* error, or bus already scanned */
+               bus->sysdata = NULL;
+       }
+
+       if (bus->sysdata)
+               goto error_return; /* sysdata already alloc'd */
+
+       /*
+        * Per-provider fixup.  Copies the contents from prom to local
+        * area and links SN_PCIBUS_BUSSOFT().
+        */
+
+       if (prom_bussoft_ptr->bs_asic_type >= PCIIO_ASIC_MAX_TYPES)
+               goto error_return; /* unsupported asic type */
+
+       if (prom_bussoft_ptr->bs_asic_type == PCIIO_ASIC_TYPE_PPB)
+               goto error_return; /* no further fixup necessary */
+
+       provider = sn_pci_provider[prom_bussoft_ptr->bs_asic_type];
+       if (provider == NULL)
+               goto error_return; /* no provider registerd for this asic */
+
+       bus->sysdata = controller;
+       if (provider->bus_fixup)
+               provider_soft = (*provider->bus_fixup) (prom_bussoft_ptr, controller);
+       else
+               provider_soft = NULL;
+
+       if (provider_soft == NULL) {
+               /* fixup failed or not applicable */
+               bus->sysdata = NULL;
+               goto error_return;
+       }
+
+       /*
+        * Setup pci_windows for legacy IO and MEM space.
+        * (Temporary until ACPI support is in place.)
+        */
+       controller->window = kcalloc(2, sizeof(struct pci_window), GFP_KERNEL);
+       if (controller->window == NULL)
+               BUG();
+       controller->window[0].offset = prom_bussoft_ptr->bs_legacy_io;
+       controller->window[0].resource.name = "legacy_io";
+       controller->window[0].resource.flags = IORESOURCE_IO;
+       controller->window[0].resource.start = prom_bussoft_ptr->bs_legacy_io;
+       controller->window[0].resource.end =
+           controller->window[0].resource.start + 0xffff;
+       controller->window[0].resource.parent = &ioport_resource;
+       controller->window[1].offset = prom_bussoft_ptr->bs_legacy_mem;
+       controller->window[1].resource.name = "legacy_mem";
+       controller->window[1].resource.flags = IORESOURCE_MEM;
+       controller->window[1].resource.start = prom_bussoft_ptr->bs_legacy_mem;
+       controller->window[1].resource.end =
+           controller->window[1].resource.start + (1024 * 1024) - 1;
+       controller->window[1].resource.parent = &iomem_resource;
+       controller->windows = 2;
+
+       /*
+        * Generic bus fixup goes here.  Don't reference prom_bussoft_ptr
+        * after this point.
+        */
+
+       PCI_CONTROLLER(bus)->platform_data = provider_soft;
+       nasid = NASID_GET(SN_PCIBUS_BUSSOFT(bus)->bs_base);
+       cnode = nasid_to_cnodeid(nasid);
+       hubdev_info = (struct hubdev_info *)(NODEPDA(cnode)->pdinfo);
+       SN_PCIBUS_BUSSOFT(bus)->bs_xwidget_info =
+           &(hubdev_info->hdi_xwidget_info[SN_PCIBUS_BUSSOFT(bus)->bs_xid]);
+
+       /*
+        * If the node information we obtained during the fixup phase is invalid
+        * then set controller->node to -1 (undetermined)
+        */
+       if (controller->node >= num_online_nodes()) {
+               struct pcibus_bussoft *b = SN_PCIBUS_BUSSOFT(bus);
+
+               printk(KERN_WARNING "Device ASIC=%u XID=%u PBUSNUM=%u"
+                                   "L_IO=%lx L_MEM=%lx BASE=%lx\n",
+                       b->bs_asic_type, b->bs_xid, b->bs_persist_busnum,
+                       b->bs_legacy_io, b->bs_legacy_mem, b->bs_base);
+               printk(KERN_WARNING "on node %d but only %d nodes online."
+                       "Association set to undetermined.\n",
+                       controller->node, num_online_nodes());
+               controller->node = -1;
+       }
+       return;
+
+error_return:
+
+       kfree(sn_controller);
+       return;
+}
+
+void sn_bus_store_sysdata(struct pci_dev *dev)
+{
+       struct sysdata_el *element;
+
+       element = kzalloc(sizeof(struct sysdata_el), GFP_KERNEL);
+       if (!element) {
+               dev_dbg(dev, "%s: out of memory!\n", __FUNCTION__);
+               return;
+       }
+       element->sysdata = SN_PCIDEV_INFO(dev);
+       list_add(&element->entry, &sn_sysdata_list);
+}
+
+void sn_bus_free_sysdata(void)
+{
+       struct sysdata_el *element;
+       struct list_head *list, *safe;
+
+       list_for_each_safe(list, safe, &sn_sysdata_list) {
+               element = list_entry(list, struct sysdata_el, entry);
+               list_del(&element->entry);
+               list_del(&(((struct pcidev_info *)
+                            (element->sysdata))->pdi_list));
+               kfree(element->sysdata);
+               kfree(element);
+       }
+       return;
+}
+#endif
+
+/*
+ * Ugly hack to get PCI setup until we have a proper ACPI namespace.
+ */
+
+#define PCI_BUSES_TO_SCAN 256
+
+static int __init sn_pci_init(void)
+{
+#ifndef XEN
+       int i, j;
+       struct pci_dev *pci_dev = NULL;
+#endif
+
+       if (!ia64_platform_is("sn2") || IS_RUNNING_ON_FAKE_PROM())
+               return 0;
+
+#ifndef XEN
+       /*
+        * prime sn_pci_provider[].  Individial provider init routines will
+        * override their respective default entries.
+        */
+
+       for (i = 0; i < PCIIO_ASIC_MAX_TYPES; i++)
+               sn_pci_provider[i] = &sn_pci_default_provider;
+
+       pcibr_init_provider();
+       tioca_init_provider();
+       tioce_init_provider();
+#endif
+
+       /*
+        * This is needed to avoid bounce limit checks in the blk layer
+        */
+       ia64_max_iommu_merge_mask = ~PAGE_MASK;
+#ifndef XEN
+       sn_fixup_ionodes();
+#endif
+       sn_irq_lh_init();
+       INIT_LIST_HEAD(&sn_sysdata_list);
+#ifndef XEN
+       sn_init_cpei_timer();
+
+#ifdef CONFIG_PROC_FS
+       register_sn_procfs();
+#endif
+
+       /* busses are not known yet ... */
+       for (i = 0; i <= max_segment_number; i++)
+               for (j = 0; j <= max_pcibus_number; j++)
+                       sn_pci_controller_fixup(i, j, NULL);
+
+       /*
+        * Generic Linux PCI Layer has created the pci_bus and pci_dev 
+        * structures - time for us to add our SN PLatform specific 
+        * information.
+        */
+
+       while ((pci_dev =
+               pci_get_device(PCI_ANY_ID, PCI_ANY_ID, pci_dev)) != NULL)
+               sn_pci_fixup_slot(pci_dev);
+#endif
+
+       sn_ioif_inited = 1;     /* sn I/O infrastructure now initialized */
+
+       return 0;
+}
+
+/*
+ * hubdev_init_node() - Creates the HUB data structure and link them to it's 
+ *     own NODE specific data area.
+ */
+void hubdev_init_node(nodepda_t * npda, cnodeid_t node)
+{
+       struct hubdev_info *hubdev_info;
+       int size;
+#ifndef XEN
+       pg_data_t *pg;
+#else
+       struct pglist_data *pg;
+#endif
+
+       size = sizeof(struct hubdev_info);
+
+       if (node >= num_online_nodes()) /* Headless/memless IO nodes */
+               pg = NODE_DATA(0);
+       else
+               pg = NODE_DATA(node);
+
+       hubdev_info = (struct hubdev_info *)alloc_bootmem_node(pg, size);
+
+       npda->pdinfo = (void *)hubdev_info;
+}
+
+geoid_t
+cnodeid_get_geoid(cnodeid_t cnode)
+{
+       struct hubdev_info *hubdev;
+
+       hubdev = (struct hubdev_info *)(NODEPDA(cnode)->pdinfo);
+       return hubdev->hdi_geoid;
+}
+
+#ifndef XEN
+void sn_generate_path(struct pci_bus *pci_bus, char *address)
+{
+       nasid_t nasid;
+       cnodeid_t cnode;
+       geoid_t geoid;
+       moduleid_t moduleid;
+       u16 bricktype;
+
+       nasid = NASID_GET(SN_PCIBUS_BUSSOFT(pci_bus)->bs_base);
+       cnode = nasid_to_cnodeid(nasid);
+       geoid = cnodeid_get_geoid(cnode);
+       moduleid = geo_module(geoid);
+
+       sprintf(address, "module_%c%c%c%c%.2d",
+               '0'+RACK_GET_CLASS(MODULE_GET_RACK(moduleid)),
+               '0'+RACK_GET_GROUP(MODULE_GET_RACK(moduleid)),
+               '0'+RACK_GET_NUM(MODULE_GET_RACK(moduleid)),
+               MODULE_GET_BTCHAR(moduleid), MODULE_GET_BPOS(moduleid));
+
+       /* Tollhouse requires slot id to be displayed */
+       bricktype = MODULE_GET_BTYPE(moduleid);
+       if ((bricktype == L1_BRICKTYPE_191010) ||
+           (bricktype == L1_BRICKTYPE_1932))
+                       sprintf(address, "%s^%d", address, geo_slot(geoid));
+}
+#endif
+
+#ifdef XEN
+__initcall(sn_pci_init);
+#else
+subsys_initcall(sn_pci_init);
+#endif
+#ifndef XEN
+EXPORT_SYMBOL(sn_pci_fixup_slot);
+EXPORT_SYMBOL(sn_pci_unfixup_slot);
+EXPORT_SYMBOL(sn_pci_controller_fixup);
+EXPORT_SYMBOL(sn_bus_store_sysdata);
+EXPORT_SYMBOL(sn_bus_free_sysdata);
+EXPORT_SYMBOL(sn_generate_path);
+#endif
diff -r 46c44b5e6a1b -r 80c5b5914b79 xen/arch/ia64/linux-xen/sn/kernel/iomv.c
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/ia64/linux-xen/sn/kernel/iomv.c  Wed Dec 20 14:55:02 2006 -0700
@@ -0,0 +1,82 @@
+/* 
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2000-2003 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#include <linux/module.h>
+#include <asm/io.h>
+#include <asm/delay.h>
+#ifndef XEN
+#include <asm/vga.h>
+#endif
+#include <asm/sn/nodepda.h>
+#include <asm/sn/simulator.h>
+#include <asm/sn/pda.h>
+#include <asm/sn/sn_cpuid.h>
+#include <asm/sn/shub_mmr.h>
+
+#define IS_LEGACY_VGA_IOPORT(p) \
+       (((p) >= 0x3b0 && (p) <= 0x3bb) || ((p) >= 0x3c0 && (p) <= 0x3df))
+
+#ifdef XEN
+#define vga_console_iobase     0
+#endif
+
+/**
+ * sn_io_addr - convert an in/out port to an i/o address
+ * @port: port to convert
+ *
+ * Legacy in/out instructions are converted to ld/st instructions
+ * on IA64.  This routine will convert a port number into a valid 
+ * SN i/o address.  Used by sn_in*() and sn_out*().
+ */
+void *sn_io_addr(unsigned long port)
+{
+       if (!IS_RUNNING_ON_SIMULATOR()) {
+               if (IS_LEGACY_VGA_IOPORT(port))
+                       port += vga_console_iobase;
+               /* On sn2, legacy I/O ports don't point at anything */
+               if (port < (64 * 1024))
+                       return NULL;
+               return ((void *)(port | __IA64_UNCACHED_OFFSET));
+       } else {
+               /* but the simulator uses them... */
+               unsigned long addr;
+
+               /*
+                * word align port, but need more than 10 bits
+                * for accessing registers in bedrock local block
+                * (so we don't do port&0xfff)
+                */
+               addr = (is_shub2() ? 0xc00000028c000000UL : 0xc0000087cc000000UL) | ((port >> 2) << 12);
+               if ((port >= 0x1f0 && port <= 0x1f7) || port == 0x3f6 || port == 0x3f7)
+                       addr |= port;
+               return (void *)addr;
+       }
+}
+
+EXPORT_SYMBOL(sn_io_addr);
+
+/**
+ * __sn_mmiowb - I/O space memory barrier
+ *
+ * See include/asm-ia64/io.h and Documentation/DocBook/deviceiobook.tmpl
+ * for details.
+ *
+ * On SN2, we wait for the PIO_WRITE_STATUS SHub register to clear.
+ * See PV 871084 for details about the WAR about zero value.
+ *
+ */
+void __sn_mmiowb(void)
+{
+       volatile unsigned long *adr = pda->pio_write_status_addr;
+       unsigned long val = pda->pio_write_status_val;
+
+       while ((*adr & SH_PIO_WRITE_STATUS_PENDING_WRITE_COUNT_MASK) != val)
+               cpu_relax();
+}
+
+EXPORT_SYMBOL(__sn_mmiowb);
diff -r 46c44b5e6a1b -r 80c5b5914b79 xen/arch/ia64/linux-xen/sn/kernel/irq.c
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/ia64/linux-xen/sn/kernel/irq.c   Wed Dec 20 14:55:02 2006 -0700
@@ -0,0 +1,542 @@
+/*
+ * Platform dependent support for SGI SN
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2000-2006 Silicon Graphics, Inc.  All Rights Reserved.
+ */
+
+#include <linux/irq.h>
+#include <linux/spinlock.h>
+#include <linux/init.h>
+#ifdef XEN
+#include <linux/pci.h>
+#include <asm/hw_irq.h>
+#endif
+#include <asm/sn/addrs.h>
+#include <asm/sn/arch.h>
+#include <asm/sn/intr.h>
+#include <asm/sn/pcibr_provider.h>
+#include <asm/sn/pcibus_provider_defs.h>
+#ifndef XEN
+#include <asm/sn/pcidev.h>
+#endif
+#include <asm/sn/shub_mmr.h>
+#include <asm/sn/sn_sal.h>
+
+#ifdef XEN
+#define move_native_irq(foo)   do {} while(0)
+#endif
+
+static void force_interrupt(int irq);
+#ifndef XEN
+static void register_intr_pda(struct sn_irq_info *sn_irq_info);
+static void unregister_intr_pda(struct sn_irq_info *sn_irq_info);
+#endif
+
+int sn_force_interrupt_flag = 1;
+extern int sn_ioif_inited;
+struct list_head **sn_irq_lh;
+static DEFINE_SPINLOCK(sn_irq_info_lock); /* non-IRQ lock */
+
+u64 sn_intr_alloc(nasid_t local_nasid, int local_widget,
+                                    struct sn_irq_info *sn_irq_info,
+                                    int req_irq, nasid_t req_nasid,
+                                    int req_slice)
+{
+       struct ia64_sal_retval ret_stuff;
+       ret_stuff.status = 0;
+       ret_stuff.v0 = 0;
+
+       SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_INTERRUPT,
+                       (u64) SAL_INTR_ALLOC, (u64) local_nasid,
+                       (u64) local_widget, __pa(sn_irq_info), (u64) req_irq,
+                       (u64) req_nasid, (u64) req_slice);
+
+       return ret_stuff.status;
+}
+
+void sn_intr_free(nasid_t local_nasid, int local_widget,
+                               struct sn_irq_info *sn_irq_info)
+{
+       struct ia64_sal_retval ret_stuff;
+       ret_stuff.status = 0;
+       ret_stuff.v0 = 0;
+
+       SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_INTERRUPT,
+                       (u64) SAL_INTR_FREE, (u64) local_nasid,
+                       (u64) local_widget, (u64) sn_irq_info->irq_irq,
+                       (u64) sn_irq_info->irq_cookie, 0, 0);
+}
+
+static unsigned int sn_startup_irq(unsigned int irq)
+{
+       return 0;
+}
+
+static void sn_shutdown_irq(unsigned int irq)
+{
+}
+
+static void sn_disable_irq(unsigned int irq)
+{
+}
+
+static void sn_enable_irq(unsigned int irq)
+{
+}
+
+static void sn_ack_irq(unsigned int irq)
+{
+       u64 event_occurred, mask;
+
+       irq = irq & 0xff;
+       event_occurred = HUB_L((u64*)LOCAL_MMR_ADDR(SH_EVENT_OCCURRED));
+       mask = event_occurred & SH_ALL_INT_MASK;
+       HUB_S((u64*)LOCAL_MMR_ADDR(SH_EVENT_OCCURRED_ALIAS), mask);
+       __set_bit(irq, (volatile void *)pda->sn_in_service_ivecs);
+
+       move_native_irq(irq);
+}
+
+static void sn_end_irq(unsigned int irq)
+{
+       int ivec;
+       u64 event_occurred;
+
+       ivec = irq & 0xff;
+       if (ivec == SGI_UART_VECTOR) {
+               event_occurred = HUB_L((u64*)LOCAL_MMR_ADDR (SH_EVENT_OCCURRED));
+               /* If the UART bit is set here, we may have received an
+                * interrupt from the UART that the driver missed.  To
+                * make sure, we IPI ourselves to force us to look again.
+                */
+               if (event_occurred & SH_EVENT_OCCURRED_UART_INT_MASK) {
+                       platform_send_ipi(smp_processor_id(), SGI_UART_VECTOR,
+                                         IA64_IPI_DM_INT, 0);
+               }
+       }
+       __clear_bit(ivec, (volatile void *)pda->sn_in_service_ivecs);
+       if (sn_force_interrupt_flag)
+               force_interrupt(irq);
+}
+
+#ifndef XEN
+static void sn_irq_info_free(struct rcu_head *head);
+
+struct sn_irq_info *sn_retarget_vector(struct sn_irq_info *sn_irq_info,
+                                      nasid_t nasid, int slice)
+{
+       int vector;
+       int cpuphys;
+       int64_t bridge;
+       int local_widget, status;
+       nasid_t local_nasid;
+       struct sn_irq_info *new_irq_info;
+       struct sn_pcibus_provider *pci_provider;
+
+       new_irq_info = kmalloc(sizeof(struct sn_irq_info), GFP_ATOMIC);
+       if (new_irq_info == NULL)
+               return NULL;
+
+       memcpy(new_irq_info, sn_irq_info, sizeof(struct sn_irq_info));
+
+       bridge = (u64) new_irq_info->irq_bridge;
+       if (!bridge) {
+               kfree(new_irq_info);
+               return NULL; /* irq is not a device interrupt */
+       }
+
+       local_nasid = NASID_GET(bridge);
+
+       if (local_nasid & 1)
+               local_widget = TIO_SWIN_WIDGETNUM(bridge);
+       else
+               local_widget = SWIN_WIDGETNUM(bridge);
+
+       vector = sn_irq_info->irq_irq;
+       /* Free the old PROM new_irq_info structure */
+       sn_intr_free(local_nasid, local_widget, new_irq_info);
+       /* Update kernels new_irq_info with new target info */
+       unregister_intr_pda(new_irq_info);
+
+       /* allocate a new PROM new_irq_info struct */
+       status = sn_intr_alloc(local_nasid, local_widget,
+                              new_irq_info, vector,
+                              nasid, slice);
+
+       /* SAL call failed */
+       if (status) {
+               kfree(new_irq_info);
+               return NULL;
+       }
+
+       cpuphys = nasid_slice_to_cpuid(nasid, slice);
+       new_irq_info->irq_cpuid = cpuphys;
+       register_intr_pda(new_irq_info);
+
+       pci_provider = sn_pci_provider[new_irq_info->irq_bridge_type];
+
+       /*
+        * If this represents a line interrupt, target it.  If it's
+        * an msi (irq_int_bit < 0), it's already targeted.
+        */
+       if (new_irq_info->irq_int_bit >= 0 &&
+           pci_provider && pci_provider->target_interrupt)
+               (pci_provider->target_interrupt)(new_irq_info);
+
+       spin_lock(&sn_irq_info_lock);
+#ifdef XEN
+       list_replace(&sn_irq_info->list, &new_irq_info->list);
+#else
+       list_replace_rcu(&sn_irq_info->list, &new_irq_info->list);
+#endif
+       spin_unlock(&sn_irq_info_lock);
+#ifndef XEN
+       call_rcu(&sn_irq_info->rcu, sn_irq_info_free);
+#endif
+
+#ifdef CONFIG_SMP
+       set_irq_affinity_info((vector & 0xff), cpuphys, 0);
+#endif
+
+       return new_irq_info;
+}
+
+static void sn_set_affinity_irq(unsigned int irq, cpumask_t mask)
+{
+       struct sn_irq_info *sn_irq_info, *sn_irq_info_safe;
+       nasid_t nasid;
+       int slice;
+
+       nasid = cpuid_to_nasid(first_cpu(mask));
+       slice = cpuid_to_slice(first_cpu(mask));
+
+       list_for_each_entry_safe(sn_irq_info, sn_irq_info_safe,
+                                sn_irq_lh[irq], list)
+               (void)sn_retarget_vector(sn_irq_info, nasid, slice);
+}
+#endif
+
+struct hw_interrupt_type irq_type_sn = {
+#ifndef XEN
+       .name           = "SN hub",
+#endif
+       .startup        = sn_startup_irq,
+       .shutdown       = sn_shutdown_irq,
+       .enable         = sn_enable_irq,
+       .disable        = sn_disable_irq,
+       .ack            = sn_ack_irq,
+       .end            = sn_end_irq,
+#ifndef XEN
+       .set_affinity   = sn_set_affinity_irq
+#endif
+};
+
+unsigned int sn_local_vector_to_irq(u8 vector)
+{
+       return (CPU_VECTOR_TO_IRQ(smp_processor_id(), vector));
+}
+
+void sn_irq_init(void)
+{
+#ifndef XEN
+       int i;
+       irq_desc_t *base_desc = irq_desc;
+
+       ia64_first_device_vector = IA64_SN2_FIRST_DEVICE_VECTOR;
+       ia64_last_device_vector = IA64_SN2_LAST_DEVICE_VECTOR;
+
+       for (i = 0; i < NR_IRQS; i++) {
+               if (base_desc[i].chip == &no_irq_type) {
+                       base_desc[i].chip = &irq_type_sn;
+               }
+       }
+#endif
+}
+
+#ifndef XEN
+static void register_intr_pda(struct sn_irq_info *sn_irq_info)
+{
+       int irq = sn_irq_info->irq_irq;
+       int cpu = sn_irq_info->irq_cpuid;
+
+       if (pdacpu(cpu)->sn_last_irq < irq) {
+               pdacpu(cpu)->sn_last_irq = irq;
+       }
+
+       if (pdacpu(cpu)->sn_first_irq == 0 || pdacpu(cpu)->sn_first_irq > irq)
+               pdacpu(cpu)->sn_first_irq = irq;
+}
+
+static void unregister_intr_pda(struct sn_irq_info *sn_irq_info)
+{
+       int irq = sn_irq_info->irq_irq;
+       int cpu = sn_irq_info->irq_cpuid;
+       struct sn_irq_info *tmp_irq_info;
+       int i, foundmatch;
+
+#ifndef XEN
+       rcu_read_lock();
+#else
+       spin_lock(&sn_irq_info_lock);
+#endif
+       if (pdacpu(cpu)->sn_last_irq == irq) {
+               foundmatch = 0;
+               for (i = pdacpu(cpu)->sn_last_irq - 1;
+                    i && !foundmatch; i--) {
+#ifdef XEN
+                       list_for_each_entry(tmp_irq_info,
+                                               sn_irq_lh[i],
+                                               list) {
+#else
+                       list_for_each_entry_rcu(tmp_irq_info,
+                                               sn_irq_lh[i],
+                                               list) {
+#endif
+                               if (tmp_irq_info->irq_cpuid == cpu) {
+                                       foundmatch = 1;
+                                       break;
+                               }
+                       }
+               }
+               pdacpu(cpu)->sn_last_irq = i;
+       }
+
+       if (pdacpu(cpu)->sn_first_irq == irq) {
+               foundmatch = 0;
+               for (i = pdacpu(cpu)->sn_first_irq + 1;
+                    i < NR_IRQS && !foundmatch; i++) {
+#ifdef XEN
+                       list_for_each_entry(tmp_irq_info,
+                                               sn_irq_lh[i],
+                                               list) {
+#else
+                       list_for_each_entry_rcu(tmp_irq_info,
+                                               sn_irq_lh[i],
+                                               list) {
+#endif
+                               if (tmp_irq_info->irq_cpuid == cpu) {
+                                       foundmatch = 1;
+                                       break;
+                               }
+                       }
+               }
+               pdacpu(cpu)->sn_first_irq = ((i == NR_IRQS) ? 0 : i);
+       }
+#ifndef XEN
+       rcu_read_unlock();
+#else
+       spin_unlock(&sn_irq_info_lock);
+#endif
+}
+#endif /* XEN */
+
+#ifndef XEN
+static void sn_irq_info_free(struct rcu_head *head)
+{
+       struct sn_irq_info *sn_irq_info;
+
+       sn_irq_info = container_of(head, struct sn_irq_info, rcu);
+       kfree(sn_irq_info);
+}
+#endif
+
+#ifndef XEN
+void sn_irq_fixup(struct pci_dev *pci_dev, struct sn_irq_info *sn_irq_info)
+{
+       nasid_t nasid = sn_irq_info->irq_nasid;
+       int slice = sn_irq_info->irq_slice;
+       int cpu = nasid_slice_to_cpuid(nasid, slice);
+
+       pci_dev_get(pci_dev);
+       sn_irq_info->irq_cpuid = cpu;
+       sn_irq_info->irq_pciioinfo = SN_PCIDEV_INFO(pci_dev);
+
+       /* link it into the sn_irq[irq] list */
+       spin_lock(&sn_irq_info_lock);
+#ifdef XEN
+       list_add(&sn_irq_info->list, sn_irq_lh[sn_irq_info->irq_irq]);
+#else
+       list_add_rcu(&sn_irq_info->list, sn_irq_lh[sn_irq_info->irq_irq]);
+#endif
+#ifndef XEN
+       reserve_irq_vector(sn_irq_info->irq_irq);
+#endif
+       spin_unlock(&sn_irq_info_lock);
+
+       register_intr_pda(sn_irq_info);
+}
+
+void sn_irq_unfixup(struct pci_dev *pci_dev)
+{
+       struct sn_irq_info *sn_irq_info;
+
+       /* Only cleanup IRQ stuff if this device has a host bus context */
+       if (!SN_PCIDEV_BUSSOFT(pci_dev))
+               return;
+
+       sn_irq_info = SN_PCIDEV_INFO(pci_dev)->pdi_sn_irq_info;
+       if (!sn_irq_info)
+               return;
+       if (!sn_irq_info->irq_irq) {
+               kfree(sn_irq_info);
+               return;
+       }
+
+       unregister_intr_pda(sn_irq_info);
+       spin_lock(&sn_irq_info_lock);
+#ifdef XEN
+       list_del(&sn_irq_info->list);
+#else
+       list_del_rcu(&sn_irq_info->list);
+#endif
+       spin_unlock(&sn_irq_info_lock);
+       if (list_empty(sn_irq_lh[sn_irq_info->irq_irq]))
+               free_irq_vector(sn_irq_info->irq_irq);
+#ifndef XEN
+       call_rcu(&sn_irq_info->rcu, sn_irq_info_free);
+#endif
+       pci_dev_put(pci_dev);
+
+}
+#endif
+
+static inline void
+sn_call_force_intr_provider(struct sn_irq_info *sn_irq_info)
+{
+       struct sn_pcibus_provider *pci_provider;
+
+       pci_provider = sn_pci_provider[sn_irq_info->irq_bridge_type];
+       if (pci_provider && pci_provider->force_interrupt)
+               (*pci_provider->force_interrupt)(sn_irq_info);
+}
+
+static void force_interrupt(int irq)
+{
+       struct sn_irq_info *sn_irq_info;
+
+#ifndef XEN
+       if (!sn_ioif_inited)
+               return;
+#endif
+
+#ifdef XEN
+       spin_lock(&sn_irq_info_lock);
+#else
+       rcu_read_lock();
+#endif
+#ifdef XEN
+       list_for_each_entry(sn_irq_info, sn_irq_lh[irq], list)
+#else
+       list_for_each_entry_rcu(sn_irq_info, sn_irq_lh[irq], list)
+#endif
+               sn_call_force_intr_provider(sn_irq_info);
+
+#ifdef XEN
+       spin_unlock(&sn_irq_info_lock);
+#else
+       rcu_read_unlock();
+#endif
+}
+
+#ifndef XEN
+/*
+ * Check for lost interrupts.  If the PIC int_status reg. says that
+ * an interrupt has been sent, but not handled, and the interrupt
+ * is not pending in either the cpu irr regs or in the soft irr regs,
+ * and the interrupt is not in service, then the interrupt may have
+ * been lost.  Force an interrupt on that pin.  It is possible that
+ * the interrupt is in flight, so we may generate a spurious interrupt,
+ * but we should never miss a real lost interrupt.
+ */
+static void sn_check_intr(int irq, struct sn_irq_info *sn_irq_info)
+{
+       u64 regval;
+       struct pcidev_info *pcidev_info;
+       struct pcibus_info *pcibus_info;
+
+       /*
+        * Bridge types attached to TIO (anything but PIC) do not need this WAR
+        * since they do not target Shub II interrupt registers.  If that
+        * ever changes, this check needs to accomodate.
+        */
+       if (sn_irq_info->irq_bridge_type != PCIIO_ASIC_TYPE_PIC)
+               return;
+
+       pcidev_info = (struct pcidev_info *)sn_irq_info->irq_pciioinfo;
+       if (!pcidev_info)
+               return;
+
+       pcibus_info =
+           (struct pcibus_info *)pcidev_info->pdi_host_pcidev_info->
+           pdi_pcibus_info;
+       regval = pcireg_intr_status_get(pcibus_info);
+
+       if (!ia64_get_irr(irq_to_vector(irq))) {
+               if (!test_bit(irq, pda->sn_in_service_ivecs)) {
+                       regval &= 0xff;
+                       if (sn_irq_info->irq_int_bit & regval &
+                           sn_irq_info->irq_last_intr) {
+                               regval &= ~(sn_irq_info->irq_int_bit & regval);
+                               sn_call_force_intr_provider(sn_irq_info);
+                       }
+               }
+       }
+       sn_irq_info->irq_last_intr = regval;
+}
+#endif
+
+void sn_lb_int_war_check(void)
+{
+#ifndef XEN
+       struct sn_irq_info *sn_irq_info;
+       int i;
+
+#ifdef XEN
+       if (pda->sn_first_irq == 0)
+#else
+       if (!sn_ioif_inited || pda->sn_first_irq == 0)
+#endif
+               return;
+
+#ifdef XEN
+       spin_lock(&sn_irq_info_lock);
+#else
+       rcu_read_lock();
+#endif
+       for (i = pda->sn_first_irq; i <= pda->sn_last_irq; i++) {
+#ifdef XEN
+               list_for_each_entry(sn_irq_info, sn_irq_lh[i], list) {
+#else
+               list_for_each_entry_rcu(sn_irq_info, sn_irq_lh[i], list) {
+#endif
+                       sn_check_intr(i, sn_irq_info);
+               }
+       }
+#ifdef XEN
+       spin_unlock(&sn_irq_info_lock);
+#else
+       rcu_read_unlock();
+#endif
+#endif
+}
+
+void __init sn_irq_lh_init(void)
+{
+       int i;
+
+       sn_irq_lh = kmalloc(sizeof(struct list_head *) * NR_IRQS, GFP_KERNEL);
+       if (!sn_irq_lh)
+               panic("SN PCI INIT: Failed to allocate memory for PCI init\n");
+
+       for (i = 0; i < NR_IRQS; i++) {
+               sn_irq_lh[i] = kmalloc(sizeof(struct list_head), GFP_KERNEL);
+               if (!sn_irq_lh[i])
+                       panic("SN PCI INIT: Failed IRQ memory allocation\n");
+
+               INIT_LIST_HEAD(sn_irq_lh[i]);
+       }
+}
diff -r 46c44b5e6a1b -r 80c5b5914b79 xen/arch/ia64/linux-xen/sn/kernel/setup.c
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/ia64/linux-xen/sn/kernel/setup.c Wed Dec 20 14:55:02 2006 -0700
@@ -0,0 +1,808 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1999,2001-2006 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#ifndef XEN
+#include <linux/kdev_t.h>
+#endif
+#include <linux/string.h>
+#ifndef XEN
+#include <linux/screen_info.h>
+#endif
+#include <linux/console.h>
+#include <linux/timex.h>
+#include <linux/sched.h>
+#include <linux/ioport.h>
+#include <linux/mm.h>
+#include <linux/serial.h>
+#include <linux/irq.h>
+#include <linux/bootmem.h>
+#include <linux/mmzone.h>
+#include <linux/interrupt.h>
+#include <linux/acpi.h>
+#include <linux/compiler.h>
+#include <linux/sched.h>
+#ifndef XEN
+#include <linux/root_dev.h>
+#endif
+#include <linux/nodemask.h>
+#include <linux/pm.h>
+#include <linux/efi.h>
+
+#include <asm/io.h>
+#include <asm/sal.h>
+#include <asm/machvec.h>
+#include <asm/system.h>
+#include <asm/processor.h>
+#ifndef XEN
+#include <asm/vga.h>
+#endif
+#include <asm/sn/arch.h>
+#include <asm/sn/addrs.h>
+#include <asm/sn/pda.h>
+#include <asm/sn/nodepda.h>
+#include <asm/sn/sn_cpuid.h>
+#include <asm/sn/simulator.h>
+#include <asm/sn/leds.h>
+#ifndef XEN
+#include <asm/sn/bte.h>
+#endif
+#include <asm/sn/shub_mmr.h>
+#ifndef XEN
+#include <asm/sn/clksupport.h>
+#endif
+#include <asm/sn/sn_sal.h>
+#include <asm/sn/geo.h>
+#include <asm/sn/sn_feature_sets.h>
+#ifndef XEN
+#include "xtalk/xwidgetdev.h"
+#include "xtalk/hubdev.h"
+#else
+#include "asm/sn/xwidgetdev.h"
+#include "asm/sn/hubdev.h"
+#endif
+#include <asm/sn/klconfig.h>
+#ifdef XEN
+#include <asm/sn/shubio.h>
+
+/* Xen has no clue about NUMA ....  grrrr */
+#define pxm_to_node(foo)               0
+#define node_to_pxm(foo)               0
+#define numa_node_id()                 0
+#endif
+
+
+DEFINE_PER_CPU(struct pda_s, pda_percpu);
+
+#define MAX_PHYS_MEMORY                (1UL << IA64_MAX_PHYS_BITS)     /* Max physical address supported */
+
+extern void bte_init_node(nodepda_t *, cnodeid_t);
+
+extern void sn_timer_init(void);
+extern unsigned long last_time_offset;
+extern void (*ia64_mark_idle) (int);
+extern void snidle(int);
+extern unsigned long long (*ia64_printk_clock)(void);
+
+unsigned long sn_rtc_cycles_per_second;
+EXPORT_SYMBOL(sn_rtc_cycles_per_second);
+
+DEFINE_PER_CPU(struct sn_hub_info_s, __sn_hub_info);
+EXPORT_PER_CPU_SYMBOL(__sn_hub_info);
+
+DEFINE_PER_CPU(short, __sn_cnodeid_to_nasid[MAX_COMPACT_NODES]);
+EXPORT_PER_CPU_SYMBOL(__sn_cnodeid_to_nasid);
+
+DEFINE_PER_CPU(struct nodepda_s *, __sn_nodepda);
+EXPORT_PER_CPU_SYMBOL(__sn_nodepda);
+
+char sn_system_serial_number_string[128];
+EXPORT_SYMBOL(sn_system_serial_number_string);
+u64 sn_partition_serial_number;
+EXPORT_SYMBOL(sn_partition_serial_number);
+u8 sn_partition_id;
+EXPORT_SYMBOL(sn_partition_id);
+u8 sn_system_size;
+EXPORT_SYMBOL(sn_system_size);
+u8 sn_sharing_domain_size;
+EXPORT_SYMBOL(sn_sharing_domain_size);
+u8 sn_coherency_id;
+EXPORT_SYMBOL(sn_coherency_id);
+u8 sn_region_size;
+EXPORT_SYMBOL(sn_region_size);
+int sn_prom_type;      /* 0=hardware, 1=medusa/realprom, 2=medusa/fakeprom */
+
+short physical_node_map[MAX_NUMALINK_NODES];
+static unsigned long sn_prom_features[MAX_PROM_FEATURE_SETS];
+
+EXPORT_SYMBOL(physical_node_map);
+
+int num_cnodes;
+
+static void sn_init_pdas(char **);
+static void build_cnode_tables(void);
+
+static nodepda_t *nodepdaindr[MAX_COMPACT_NODES];
+
+#ifndef XEN
+/*
+ * The format of "screen_info" is strange, and due to early i386-setup
+ * code. This is just enough to make the console code think we're on a
+ * VGA color display.
+ */
+struct screen_info sn_screen_info = {
+       .orig_x = 0,
+       .orig_y = 0,
+       .orig_video_mode = 3,
+       .orig_video_cols = 80,
+       .orig_video_ega_bx = 3,
+       .orig_video_lines = 25,
+       .orig_video_isVGA = 1,
+       .orig_video_points = 16
+};
+#endif
+
+/*
+ * This routine can only be used during init, since
+ * smp_boot_data is an init data structure.
+ * We have to use smp_boot_data.cpu_phys_id to find
+ * the physical id of the processor because the normal
+ * cpu_physical_id() relies on data structures that
+ * may not be initialized yet.
+ */
+
+static int __init pxm_to_nasid(int pxm)
+{
+       int i;
+       int nid;
+
+       nid = pxm_to_node(pxm);
+       for (i = 0; i < num_node_memblks; i++) {
+               if (node_memblk[i].nid == nid) {
+                       return NASID_GET(node_memblk[i].start_paddr);
+               }
+       }
+       return -1;
+}
+
+/**
+ * early_sn_setup - early setup routine for SN platforms
+ *
+ * Sets up an initial console to aid debugging.  Intended primarily
+ * for bringup.  See start_kernel() in init/main.c.
+ */
+
+void __init early_sn_setup(void)
+{
+       efi_system_table_t *efi_systab;
+       efi_config_table_t *config_tables;
+       struct ia64_sal_systab *sal_systab;
+       struct ia64_sal_desc_entry_point *ep;
+       char *p;
+       int i, j;
+
+       /*
+        * Parse enough of the SAL tables to locate the SAL entry point.  Since
+        * console IO on SN2 is done via SAL calls, early_printk won't work
+        * without this.
+        *
+        * This code duplicates some of the ACPI table parsing that is in
+        * efi.c & sal.c.  Any changes to those files may have to be made here
+        * as well.
+        */
+       efi_systab = (efi_system_table_t *) __va(ia64_boot_param->efi_systab);
+       config_tables = __va(efi_systab->tables);
+       for (i = 0; i < efi_systab->nr_tables; i++) {
+               if (efi_guidcmp(config_tables[i].guid, SAL_SYSTEM_TABLE_GUID) ==
+                   0) {
+                       sal_systab = __va(config_tables[i].table);
+                       p = (char *)(sal_systab + 1);
+                       for (j = 0; j < sal_systab->entry_count; j++) {
+                               if (*p == SAL_DESC_ENTRY_POINT) {
+                                       ep = (struct ia64_sal_desc_entry_point
+                                             *)p;
+                                       ia64_sal_handler_init(__va
+                                                             (ep->sal_proc),
+                                                             __va(ep->gp));
+                                       return;
+                               }
+                               p += SAL_DESC_SIZE(*p);
+                       }
+               }
+       }
+       /* Uh-oh, SAL not available?? */
+       printk(KERN_ERR "failed to find SAL entry point\n");
+}
+
+extern int platform_intr_list[];
+static int __initdata shub_1_1_found;
+
+/*
+ * sn_check_for_wars
+ *
+ * Set flag for enabling shub specific wars
+ */
+
+static inline int __init is_shub_1_1(int nasid)
+{
+       unsigned long id;
+       int rev;
+
+       if (is_shub2())
+               return 0;
+       id = REMOTE_HUB_L(nasid, SH1_SHUB_ID);
+       rev = (id & SH1_SHUB_ID_REVISION_MASK) >> SH1_SHUB_ID_REVISION_SHFT;
+       return rev <= 2;
+}
+
+static void __init sn_check_for_wars(void)
+{
+       int cnode;
+
+       if (is_shub2()) {
+               /* none yet */
+       } else {
+               for_each_online_node(cnode) {
+                       if (is_shub_1_1(cnodeid_to_nasid(cnode)))
+                               shub_1_1_found = 1;
+               }
+       }
+}
+
+#ifndef XEN
+/*
+ * Scan the EFI PCDP table (if it exists) for an acceptable VGA console
+ * output device.  If one exists, pick it and set sn_legacy_{io,mem} to
+ * reflect the bus offsets needed to address it.
+ *
+ * Since pcdp support in SN is not supported in the 2.4 kernel (or at least
+ * the one lbs is based on) just declare the needed structs here.
+ *
+ * Reference spec http://www.dig64.org/specifications/DIG64_PCDPv20.pdf
+ *
+ * Returns 0 if no acceptable vga is found, !0 otherwise.
+ *
+ * Note:  This stuff is duped here because Altix requires the PCDP to
+ * locate a usable VGA device due to lack of proper ACPI support.  Structures
+ * could be used from drivers/firmware/pcdp.h, but it was decided that moving
+ * this file to a more public location just for Altix use was undesirable.
+ */
+
+struct hcdp_uart_desc {
+       u8      pad[45];
+};
+
+struct pcdp {
+       u8      signature[4];   /* should be 'HCDP' */
+       u32     length;
+       u8      rev;            /* should be >=3 for pcdp, <3 for hcdp */
+       u8      sum;
+       u8      oem_id[6];
+       u64     oem_tableid;
+       u32     oem_rev;
+       u32     creator_id;
+       u32     creator_rev;
+       u32     num_type0;
+       struct hcdp_uart_desc uart[0];  /* num_type0 of these */
+       /* pcdp descriptors follow */
+}  __attribute__((packed));
+
+struct pcdp_device_desc {
+       u8      type;
+       u8      primary;
+       u16     length;
+       u16     index;
+       /* interconnect specific structure follows */
+       /* device specific structure follows that */
+}  __attribute__((packed));
+
+struct pcdp_interface_pci {
+       u8      type;           /* 1 == pci */
+       u8      reserved;
+       u16     length;
+       u8      segment;
+       u8      bus;
+       u8      dev;
+       u8      fun;
+       u16     devid;
+       u16     vendid;
+       u32     acpi_interrupt;
+       u64     mmio_tra;
+       u64     ioport_tra;
+       u8      flags;
+       u8      translation;
+}  __attribute__((packed));
+
+struct pcdp_vga_device {
+       u8      num_eas_desc;
+       /* ACPI Extended Address Space Desc follows */
+}  __attribute__((packed));
+
+/* from pcdp_device_desc.primary */
+#define PCDP_PRIMARY_CONSOLE   0x01
+
+/* from pcdp_device_desc.type */
+#define PCDP_CONSOLE_INOUT     0x0
+#define PCDP_CONSOLE_DEBUG     0x1
+#define PCDP_CONSOLE_OUT       0x2
+#define PCDP_CONSOLE_IN                0x3
+#define PCDP_CONSOLE_TYPE_VGA  0x8
+
+#define PCDP_CONSOLE_VGA       (PCDP_CONSOLE_TYPE_VGA | PCDP_CONSOLE_OUT)
+
+/* from pcdp_interface_pci.type */
+#define PCDP_IF_PCI            1
+
+/* from pcdp_interface_pci.translation */
+#define PCDP_PCI_TRANS_IOPORT  0x02
+#define PCDP_PCI_TRANS_MMIO    0x01
+
+#if defined(CONFIG_VT) && defined(CONFIG_VGA_CONSOLE)
+static void
+sn_scan_pcdp(void)
+{
+       u8 *bp;
+       struct pcdp *pcdp;
+       struct pcdp_device_desc device;
+       struct pcdp_interface_pci if_pci;
+       extern struct efi efi;
+
+       if (efi.hcdp == EFI_INVALID_TABLE_ADDR)
+               return;         /* no hcdp/pcdp table */
+
+       pcdp = __va(efi.hcdp);
+
+       if (pcdp->rev < 3)
+               return;         /* only support PCDP (rev >= 3) */
+
+       for (bp = (u8 *)&pcdp->uart[pcdp->num_type0];
+            bp < (u8 *)pcdp + pcdp->length;
+            bp += device.length) {
+               memcpy(&device, bp, sizeof(device));
+               if (! (device.primary & PCDP_PRIMARY_CONSOLE))
+                       continue;       /* not primary console */
+
+               if (device.type != PCDP_CONSOLE_VGA)
+                       continue;       /* not VGA descriptor */
+
+               memcpy(&if_pci, bp+sizeof(device), sizeof(if_pci));
+               if (if_pci.type != PCDP_IF_PCI)
+                       continue;       /* not PCI interconnect */
+
+               if (if_pci.translation & PCDP_PCI_TRANS_IOPORT)
+                       vga_console_iobase =
+                               if_pci.ioport_tra | __IA64_UNCACHED_OFFSET;
+
+               if (if_pci.translation & PCDP_PCI_TRANS_MMIO)
+                       vga_console_membase =
+                               if_pci.mmio_tra | __IA64_UNCACHED_OFFSET;
+
+               break; /* once we find the primary, we're done */
+       }
+}
+#endif
+
+static unsigned long sn2_rtc_initial;
+
+static unsigned long long ia64_sn2_printk_clock(void)
+{
+       unsigned long rtc_now = rtc_time();
+
+       return (rtc_now - sn2_rtc_initial) *
+               (1000000000 / sn_rtc_cycles_per_second);
+}
+#endif
+
+/**
+ * sn_setup - SN platform setup routine
+ * @cmdline_p: kernel command line
+ *
+ * Handles platform setup for SN machines.  This includes determining
+ * the RTC frequency (via a SAL call), initializing secondary CPUs, and
+ * setting up per-node data areas.  The console is also initialized here.
+ */
+#ifdef XEN
+void __cpuinit sn_cpu_init(void);
+#endif
+
+void __init sn_setup(char **cmdline_p)
+{
+#ifndef XEN
+       long status, ticks_per_sec, drift;
+#else
+       unsigned long status, ticks_per_sec, drift;
+#endif
+       u32 version = sn_sal_rev();
+#ifndef XEN
+       extern void sn_cpu_init(void);
+
+       sn2_rtc_initial = rtc_time();
+       ia64_sn_plat_set_error_handling_features();     // obsolete
+       ia64_sn_set_os_feature(OSF_MCA_SLV_TO_OS_INIT_SLV);
+       ia64_sn_set_os_feature(OSF_FEAT_LOG_SBES);
+
+
+#if defined(CONFIG_VT) && defined(CONFIG_VGA_CONSOLE)
+       /*
+        * Handle SN vga console.
+        *
+        * SN systems do not have enough ACPI table information
+        * being passed from prom to identify VGA adapters and the legacy
+        * addresses to access them.  Until that is done, SN systems rely
+        * on the PCDP table to identify the primary VGA console if one
+        * exists.
+        *
+        * However, kernel PCDP support is optional, and even if it is built
+        * into the kernel, it will not be used if the boot cmdline contains
+        * console= directives.
+        *
+        * So, to work around this mess, we duplicate some of the PCDP code
+        * here so that the primary VGA console (as defined by PCDP) will
+        * work on SN systems even if a different console (e.g. serial) is
+        * selected on the boot line (or CONFIG_EFI_PCDP is off).
+        */
+
+       if (! vga_console_membase)
+               sn_scan_pcdp();
+
+       if (vga_console_membase) {
+               /* usable vga ... make tty0 the preferred default console */
+               if (!strstr(*cmdline_p, "console="))
+                       add_preferred_console("tty", 0, NULL);
+       } else {
+               printk(KERN_DEBUG "SGI: Disabling VGA console\n");
+               if (!strstr(*cmdline_p, "console="))
+                       add_preferred_console("ttySG", 0, NULL);
+#ifdef CONFIG_DUMMY_CONSOLE
+               conswitchp = &dummy_con;
+#else
+               conswitchp = NULL;
+#endif                         /* CONFIG_DUMMY_CONSOLE */
+       }
+#endif                         /* def(CONFIG_VT) && def(CONFIG_VGA_CONSOLE) */
+
+       MAX_DMA_ADDRESS = PAGE_OFFSET + MAX_PHYS_MEMORY;
+#endif
+
+       /*
+        * Build the tables for managing cnodes.
+        */
+       build_cnode_tables();
+
+       status =
+           ia64_sal_freq_base(SAL_FREQ_BASE_REALTIME_CLOCK, &ticks_per_sec,
+                              &drift);
+       if (status != 0 || ticks_per_sec < 100000) {
+               printk(KERN_WARNING
+                      "unable to determine platform RTC clock frequency, 
guessing.\n");
+               /* PROM gives wrong value for clock freq. so guess */
+               sn_rtc_cycles_per_second = 1000000000000UL / 30000UL;
+       } else
+               sn_rtc_cycles_per_second = ticks_per_sec;
+#ifndef XEN
+
+       platform_intr_list[ACPI_INTERRUPT_CPEI] = IA64_CPE_VECTOR;
+
+       ia64_printk_clock = ia64_sn2_printk_clock;
+#endif
+
+       printk("SGI SAL version %x.%02x\n", version >> 8, version & 0x00FF);
+
+       /*
+        * we set the default root device to /dev/hda
+        * to make simulation easy
+        */
+#ifndef XEN
+       ROOT_DEV = Root_HDA1;
+#endif
+
+       /*
+        * Create the PDAs and NODEPDAs for all the cpus.
+        */
+       sn_init_pdas(cmdline_p);
+
+#ifndef XEN
+       ia64_mark_idle = &snidle;
+#endif
+
+       /*
+        * For the bootcpu, we do this here. All other cpus will make the
+        * call as part of cpu_init in slave cpu initialization.
+        */
+       sn_cpu_init();
+
+#ifndef XEN
+#ifdef CONFIG_SMP
+       init_smp_config();
+#endif
+       screen_info = sn_screen_info;
+
+       sn_timer_init();
+
+       /*
+        * set pm_power_off to a SAL call to allow
+        * sn machines to power off. The SAL call can be replaced
+        * by an ACPI interface call when ACPI is fully implemented
+        * for sn.
+        */
+       pm_power_off = ia64_sn_power_down;
+       current->thread.flags |= IA64_THREAD_MIGRATION;
+#endif
+}
+
+/**
+ * sn_init_pdas - setup node data areas
+ *
+ * One time setup for Node Data Area.  Called by sn_setup().
+ */
+static void __init sn_init_pdas(char **cmdline_p)
+{
+       cnodeid_t cnode;
+
+       /*
+        * Allocate & initialize the nodepda for each node.
+        */
+       for_each_online_node(cnode) {
+               nodepdaindr[cnode] =
+                   alloc_bootmem_node(NODE_DATA(cnode), sizeof(nodepda_t));
+               memset(nodepdaindr[cnode], 0, sizeof(nodepda_t));
+               memset(nodepdaindr[cnode]->phys_cpuid, -1,
+                   sizeof(nodepdaindr[cnode]->phys_cpuid));
+               spin_lock_init(&nodepdaindr[cnode]->ptc_lock);
+       }
+
+       /*
+        * Allocate & initialize nodepda for TIOs.  For now, put them on node 0.
+        */
+       for (cnode = num_online_nodes(); cnode < num_cnodes; cnode++) {
+               nodepdaindr[cnode] =
+                   alloc_bootmem_node(NODE_DATA(0), sizeof(nodepda_t));
+               memset(nodepdaindr[cnode], 0, sizeof(nodepda_t));
+       }
+
+       /*
+        * Now copy the array of nodepda pointers to each nodepda.
+        */
+       for (cnode = 0; cnode < num_cnodes; cnode++)
+               memcpy(nodepdaindr[cnode]->pernode_pdaindr, nodepdaindr,
+                      sizeof(nodepdaindr));
+
+#ifndef XEN
+       /*
+        * Set up IO related platform-dependent nodepda fields.
+        * The following routine actually sets up the hubinfo struct
+        * in nodepda.
+        */
+       for_each_online_node(cnode) {
+               bte_init_node(nodepdaindr[cnode], cnode);
+       }
+
+       /*
+        * Initialize the per node hubdev.  This includes IO Nodes and
+        * headless/memless nodes.
+        */
+       for (cnode = 0; cnode < num_cnodes; cnode++) {
+               hubdev_init_node(nodepdaindr[cnode], cnode);
+       }
+#endif
+}
+
+/**
+ * sn_cpu_init - initialize per-cpu data areas
+ * @cpuid: cpuid of the caller
+ *
+ * Called during cpu initialization on each cpu as it starts.
+ * Currently, initializes the per-cpu data area for SNIA.
+ * Also sets up a few fields in the nodepda.  Also known as
+ * platform_cpu_init() by the ia64 machvec code.
+ */
+void __cpuinit sn_cpu_init(void)
+{
+       int cpuid;
+       int cpuphyid;
+       int nasid;
+       int subnode;
+       int slice;
+       int cnode;
+       int i;
+       static int wars_have_been_checked;
+
+       cpuid = smp_processor_id();
+#ifndef XEN
+       if (cpuid == 0 && IS_MEDUSA()) {
+               if (ia64_sn_is_fake_prom())
+                       sn_prom_type = 2;
+               else
+                       sn_prom_type = 1;
+               printk(KERN_INFO "Running on medusa with %s PROM\n",
+                      (sn_prom_type == 1) ? "real" : "fake");
+       }
+#endif
+
+       memset(pda, 0, sizeof(pda));
+       if (ia64_sn_get_sn_info(0, &sn_hub_info->shub2,
+                               &sn_hub_info->nasid_bitmask,
+                               &sn_hub_info->nasid_shift,
+                               &sn_system_size, &sn_sharing_domain_size,
+                               &sn_partition_id, &sn_coherency_id,
+                               &sn_region_size))
+               BUG();
+       sn_hub_info->as_shift = sn_hub_info->nasid_shift - 2;
+
+       /*
+        * Don't check status. The SAL call is not supported on all PROMs
+        * but a failure is harmless.
+        */
+       (void) ia64_sn_set_cpu_number(cpuid);
+
+       /*
+        * The boot cpu makes this call again after platform initialization is
+        * complete.
+        */
+       if (nodepdaindr[0] == NULL)
+               return;
+
+       for (i = 0; i < MAX_PROM_FEATURE_SETS; i++)
+               if (ia64_sn_get_prom_feature_set(i, &sn_prom_features[i]) != 0)
+                       break;
+
+       cpuphyid = get_sapicid();
+
+       if (ia64_sn_get_sapic_info(cpuphyid, &nasid, &subnode, &slice))
+               BUG();
+
+       for (i=0; i < MAX_NUMNODES; i++) {
+               if (nodepdaindr[i]) {
+                       nodepdaindr[i]->phys_cpuid[cpuid].nasid = nasid;
+                       nodepdaindr[i]->phys_cpuid[cpuid].slice = slice;
+                       nodepdaindr[i]->phys_cpuid[cpuid].subnode = subnode;
+               }
+       }
+
+       cnode = nasid_to_cnodeid(nasid);
+
+       sn_nodepda = nodepdaindr[cnode];
+
+       pda->led_address =
+           (typeof(pda->led_address)) (LED0 + (slice << LED_CPU_SHIFT));
+       pda->led_state = LED_ALWAYS_SET;
+       pda->hb_count = HZ / 2;
+       pda->hb_state = 0;
+       pda->idle_flag = 0;
+
+       if (cpuid != 0) {
+               /* copy cpu 0's sn_cnodeid_to_nasid table to this cpu's */
+               memcpy(sn_cnodeid_to_nasid,
+                      (&per_cpu(__sn_cnodeid_to_nasid, 0)),
+                      sizeof(__ia64_per_cpu_var(__sn_cnodeid_to_nasid)));
+       }
+
+       /*
+        * Check for WARs.
+        * Only needs to be done once, on BSP.
+        * Has to be done after loop above, because it uses this cpu's
+        * sn_cnodeid_to_nasid table which was just initialized if this
+        * isn't cpu 0.
+        * Has to be done before assignment below.
+        */
+       if (!wars_have_been_checked) {
+               sn_check_for_wars();
+               wars_have_been_checked = 1;
+       }
+       sn_hub_info->shub_1_1_found = shub_1_1_found;
+
+       /*
+        * Set up addresses of PIO/MEM write status registers.
+        */
+       {
+               u64 pio1[] = {SH1_PIO_WRITE_STATUS_0, 0, SH1_PIO_WRITE_STATUS_1, 0};
+               u64 pio2[] = {SH2_PIO_WRITE_STATUS_0, SH2_PIO_WRITE_STATUS_2,
+                       SH2_PIO_WRITE_STATUS_1, SH2_PIO_WRITE_STATUS_3};
+               u64 *pio;
+               pio = is_shub1() ? pio1 : pio2;
+               pda->pio_write_status_addr =
+                  (volatile unsigned long *)GLOBAL_MMR_ADDR(nasid, pio[slice]);
+               pda->pio_write_status_val = is_shub1() ? SH_PIO_WRITE_STATUS_PENDING_WRITE_COUNT_MASK : 0;
+       }
+
+#ifndef XEN  /* local_node_data is not allocated .... yet */
+       /*
+        * WAR addresses for SHUB 1.x.
+        */
+       if (local_node_data->active_cpu_count++ == 0 && is_shub1()) {
+               int buddy_nasid;
+               buddy_nasid =
+                   cnodeid_to_nasid(numa_node_id() ==
+                                    num_online_nodes() - 1 ? 0 : numa_node_id() + 1);
+               pda->pio_shub_war_cam_addr =
+                   (volatile unsigned long *)GLOBAL_MMR_ADDR(nasid,
+                                                             SH1_PI_CAM_CONTROL);
+       }
+#endif
+}
+
+/*
+ * Build tables for converting between NASIDs and cnodes.
+ */
+static inline int __init board_needs_cnode(int type)
+{
+       return (type == KLTYPE_SNIA || type == KLTYPE_TIO);
+}
+
+void __init build_cnode_tables(void)
+{
+       int nasid;
+       int node;
+       lboard_t *brd;
+
+       memset(physical_node_map, -1, sizeof(physical_node_map));
+       memset(sn_cnodeid_to_nasid, -1,
+                       sizeof(__ia64_per_cpu_var(__sn_cnodeid_to_nasid)));
+
+       /*
+        * First populate the tables with C/M bricks. This ensures that
+        * cnode == node for all C & M bricks.
+        */
+       for_each_online_node(node) {
+               nasid = pxm_to_nasid(node_to_pxm(node));
+               sn_cnodeid_to_nasid[node] = nasid;
+               physical_node_map[nasid] = node;
+       }
+
+       /*
+        * num_cnodes is total number of C/M/TIO bricks. Because of the 256 node
+        * limit on the number of nodes, we can't use the generic node numbers 
+        * for this. Note that num_cnodes is incremented below as TIOs or
+        * headless/memoryless nodes are discovered.
+        */
+       num_cnodes = num_online_nodes();
+
+       /* fakeprom does not support klgraph */
+       if (IS_RUNNING_ON_FAKE_PROM())
+               return;
+
+       /* Find TIOs & headless/memoryless nodes and add them to the tables */
+       for_each_online_node(node) {
+               kl_config_hdr_t *klgraph_header;
+               nasid = cnodeid_to_nasid(node);
+               klgraph_header = ia64_sn_get_klconfig_addr(nasid);
+               if (klgraph_header == NULL)
+                       BUG();
+               brd = NODE_OFFSET_TO_LBOARD(nasid, klgraph_header->ch_board_info);
+               while (brd) {
+                       if (board_needs_cnode(brd->brd_type) && physical_node_map[brd->brd_nasid] < 0) {
+                               sn_cnodeid_to_nasid[num_cnodes] = brd->brd_nasid;
+                               physical_node_map[brd->brd_nasid] = num_cnodes++;
+                       }
+                       brd = find_lboard_next(brd);
+               }
+       }
+}
+
+int
+nasid_slice_to_cpuid(int nasid, int slice)
+{
+       long cpu;
+
+       for (cpu = 0; cpu < NR_CPUS; cpu++)
+               if (cpuid_to_nasid(cpu) == nasid &&
+                                       cpuid_to_slice(cpu) == slice)
+                       return cpu;
+
+       return -1;
+}
+
+int sn_prom_feature_available(int id)
+{
+       if (id >= BITS_PER_LONG * MAX_PROM_FEATURE_SETS)
+               return 0;
+       return test_bit(id, sn_prom_features);
+}
+EXPORT_SYMBOL(sn_prom_feature_available);
+
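The cnode/nasid tables that build_cnode_tables() fills above are simple inverse maps: sn_cnodeid_to_nasid[] translates a compact node id to a NASID, and physical_node_map[] translates back. A minimal userspace sketch of that invariant, with made-up sizes and NASIDs (not part of the changeset):

#include <assert.h>
#include <string.h>

#define MAX_COMPACT_NODES   8    /* hypothetical, just for the sketch */
#define MAX_NUMALINK_NODES 64

static short cnodeid_to_nasid_tbl[MAX_COMPACT_NODES];
static short physical_node_map_tbl[MAX_NUMALINK_NODES];

/* Register one brick the way build_cnode_tables() does: the two tables
 * are kept as inverses of each other. */
static void add_brick(int cnode, int nasid)
{
	cnodeid_to_nasid_tbl[cnode] = nasid;
	physical_node_map_tbl[nasid] = cnode;
}

int main(void)
{
	memset(cnodeid_to_nasid_tbl, -1, sizeof(cnodeid_to_nasid_tbl));
	memset(physical_node_map_tbl, -1, sizeof(physical_node_map_tbl));

	add_brick(0, 0);    /* C-brick                                  */
	add_brick(1, 4);    /* another C-brick; NASIDs need not be dense */
	add_brick(2, 6);    /* TIO appended after the online nodes       */

	/* Round trip: nasid -> cnode -> nasid. */
	assert(cnodeid_to_nasid_tbl[physical_node_map_tbl[6]] == 6);
	return 0;
}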
diff -r 46c44b5e6a1b -r 80c5b5914b79 xen/arch/ia64/linux-xen/sn/kernel/sn2_smp.c
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/ia64/linux-xen/sn/kernel/sn2_smp.c       Wed Dec 20 14:55:02 2006 -0700
@@ -0,0 +1,548 @@
+/*
+ * SN2 Platform specific SMP Support
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2000-2006 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/threads.h>
+#include <linux/sched.h>
+#include <linux/smp.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/mmzone.h>
+#include <linux/module.h>
+#include <linux/bitops.h>
+#include <linux/nodemask.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+
+#include <asm/processor.h>
+#include <asm/irq.h>
+#include <asm/sal.h>
+#include <asm/system.h>
+#include <asm/delay.h>
+#include <asm/io.h>
+#include <asm/smp.h>
+#include <asm/tlb.h>
+#include <asm/numa.h>
+#include <asm/hw_irq.h>
+#include <asm/current.h>
+#ifdef XEN
+#include <asm/sn/arch.h>
+#endif
+#include <asm/sn/sn_cpuid.h>
+#include <asm/sn/sn_sal.h>
+#include <asm/sn/addrs.h>
+#include <asm/sn/shub_mmr.h>
+#include <asm/sn/nodepda.h>
+#include <asm/sn/rw_mmr.h>
+
+DEFINE_PER_CPU(struct ptc_stats, ptcstats);
+DECLARE_PER_CPU(struct ptc_stats, ptcstats);
+
+static  __cacheline_aligned DEFINE_SPINLOCK(sn2_global_ptc_lock);
+
+extern unsigned long
+sn2_ptc_deadlock_recovery_core(volatile unsigned long *, unsigned long,
+                              volatile unsigned long *, unsigned long,
+                              volatile unsigned long *, unsigned long);
+void
+sn2_ptc_deadlock_recovery(short *, short, short, int,
+                         volatile unsigned long *, unsigned long,
+                         volatile unsigned long *, unsigned long);
+
+/*
+ * Note: some of the following is captured here to make debugging easier
+ * (the macros make more sense if you see the debug patch - not posted)
+ */
+#define sn2_ptctest    0
+#define local_node_uses_ptc_ga(sh1)    ((sh1) ? 1 : 0)
+#define max_active_pio(sh1)            ((sh1) ? 32 : 7)
+#define reset_max_active_on_deadlock() 1
+#ifndef XEN
+#define PTC_LOCK(sh1)                  ((sh1) ? &sn2_global_ptc_lock : &sn_nodepda->ptc_lock)
+#else
+#define PTC_LOCK(sh1)                  &sn2_global_ptc_lock
+#endif
+
+struct ptc_stats {
+       unsigned long ptc_l;
+       unsigned long change_rid;
+       unsigned long shub_ptc_flushes;
+       unsigned long nodes_flushed;
+       unsigned long deadlocks;
+       unsigned long deadlocks2;
+       unsigned long lock_itc_clocks;
+       unsigned long shub_itc_clocks;
+       unsigned long shub_itc_clocks_max;
+       unsigned long shub_ptc_flushes_not_my_mm;
+};
+
+#define sn2_ptctest    0
+
+static inline unsigned long wait_piowc(void)
+{
+       volatile unsigned long *piows;
+       unsigned long zeroval, ws;
+
+       piows = pda->pio_write_status_addr;
+       zeroval = pda->pio_write_status_val;
+       do {
+               cpu_relax();
+       } while (((ws = *piows) & SH_PIO_WRITE_STATUS_PENDING_WRITE_COUNT_MASK) != zeroval);
+       return (ws & SH_PIO_WRITE_STATUS_WRITE_DEADLOCK_MASK) != 0;
+}
+
+#ifndef XEN  /* No idea if Xen will ever support this */
+/**
+ * sn_migrate - SN-specific task migration actions
+ * @task: Task being migrated to new CPU
+ *
+ * SN2 PIO writes from separate CPUs are not guaranteed to arrive in order.
+ * Context switching user threads which have memory-mapped MMIO may cause
+ * PIOs to issue from separate CPUs, thus the PIO writes must be drained
+ * from the previous CPU's Shub before execution resumes on the new CPU.
+ */
+void sn_migrate(struct task_struct *task)
+{
+       pda_t *last_pda = pdacpu(task_thread_info(task)->last_cpu);
+       volatile unsigned long *adr = last_pda->pio_write_status_addr;
+       unsigned long val = last_pda->pio_write_status_val;
+
+       /* Drain PIO writes from old CPU's Shub */
+       while (unlikely((*adr & SH_PIO_WRITE_STATUS_PENDING_WRITE_COUNT_MASK)
+                       != val))
+               cpu_relax();
+}
+
+void sn_tlb_migrate_finish(struct mm_struct *mm)
+{
+       /* flush_tlb_mm is inefficient if more than 1 users of mm */
+#ifndef XEN
+       if (mm == current->mm && mm && atomic_read(&mm->mm_users) == 1)
+#else
+       if (mm == &current->arch.mm && mm && atomic_read(&mm->mm_users) == 1)
+#endif
+               flush_tlb_mm(mm);
+}
+#endif
+
+/**
+ * sn2_global_tlb_purge - globally purge translation cache of virtual address range
+ * @mm: mm_struct containing virtual address range
+ * @start: start of virtual address range
+ * @end: end of virtual address range
+ * @nbits: specifies number of bytes to purge per instruction (num = 1<<(nbits & 0xfc))
+ *
+ * Purges the translation caches of all processors of the given virtual address
+ * range.
+ *
+ * Note:
+ *     - cpu_vm_mask is a bit mask that indicates which cpus have loaded the context.
+ *     - cpu_vm_mask is converted into a nodemask of the nodes containing the
+ *       cpus in cpu_vm_mask.
+ *     - if only one bit is set in cpu_vm_mask & it is the current cpu & the
+ *       process is purging its own virtual address range, then only the
+ *       local TLB needs to be flushed. This flushing can be done using
+ *       ptc.l. This is the common case & avoids the global spinlock.
+ *     - if multiple cpus have loaded the context, then flushing has to be
+ *       done with ptc.g/MMRs under protection of the global ptc_lock.
+ */
+
+#ifdef XEN  /* Xen is soooooooo stupid! */
+// static cpumask_t mask_all = CPU_MASK_ALL;
+#endif
+
+void
+#ifndef XEN
+sn2_global_tlb_purge(struct mm_struct *mm, unsigned long start,
+#else
+sn2_global_tlb_purge(unsigned long start,
+#endif
+                    unsigned long end, unsigned long nbits)
+{
+       int i, ibegin, shub1, cnode, mynasid, cpu, lcpu = 0, nasid;
+#ifndef XEN
+       int mymm = (mm == current->active_mm && mm == current->mm);
+#else
+       // struct mm_struct *mm;
+       int mymm = 0;
+#endif
+       int use_cpu_ptcga;
+       volatile unsigned long *ptc0, *ptc1;
+       unsigned long itc, itc2, flags, data0 = 0, data1 = 0, rr_value, old_rr = 0;
+       short nasids[MAX_NUMNODES], nix;
+       nodemask_t nodes_flushed;
+       int active, max_active, deadlock;
+
+       nodes_clear(nodes_flushed);
+       i = 0;
+
+#ifndef XEN  /* One day Xen will grow up! */
+       for_each_cpu_mask(cpu, mm->cpu_vm_mask) {
+               cnode = cpu_to_node(cpu);
+               node_set(cnode, nodes_flushed);
+               lcpu = cpu;
+               i++;
+       }
+#else
+       for_each_cpu(cpu) {
+               cnode = cpu_to_node(cpu);
+               node_set(cnode, nodes_flushed);
+               lcpu = cpu;
+               i++;
+       }
+#endif
+
+       if (i == 0)
+               return;
+
+       preempt_disable();
+
+       if (likely(i == 1 && lcpu == smp_processor_id() && mymm)) {
+               do {
+                       ia64_ptcl(start, nbits << 2);
+                       start += (1UL << nbits);
+               } while (start < end);
+               ia64_srlz_i();
+               __get_cpu_var(ptcstats).ptc_l++;
+               preempt_enable();
+               return;
+       }
+
+#ifndef XEN
+       if (atomic_read(&mm->mm_users) == 1 && mymm) {
+#ifndef XEN  /* I hate Xen! */
+               flush_tlb_mm(mm);
+#else
+               flush_tlb_mask(mask_all);
+#endif
+               __get_cpu_var(ptcstats).change_rid++;
+               preempt_enable();
+               return;
+       }
+#endif
+
+       itc = ia64_get_itc();
+       nix = 0;
+       for_each_node_mask(cnode, nodes_flushed)
+               nasids[nix++] = cnodeid_to_nasid(cnode);
+
+#ifndef XEN
+       rr_value = (mm->context << 3) | REGION_NUMBER(start);
+#else
+       rr_value = REGION_NUMBER(start);
+#endif
+
+       shub1 = is_shub1();
+       if (shub1) {
+               data0 = (1UL << SH1_PTC_0_A_SHFT) |
+                       (nbits << SH1_PTC_0_PS_SHFT) |
+                       (rr_value << SH1_PTC_0_RID_SHFT) |
+                       (1UL << SH1_PTC_0_START_SHFT);
+#ifndef XEN
+               ptc0 = (long *)GLOBAL_MMR_PHYS_ADDR(0, SH1_PTC_0);
+               ptc1 = (long *)GLOBAL_MMR_PHYS_ADDR(0, SH1_PTC_1);
+#else
+               ptc0 = (unsigned long *)GLOBAL_MMR_PHYS_ADDR(0, SH1_PTC_0);
+               ptc1 = (unsigned long *)GLOBAL_MMR_PHYS_ADDR(0, SH1_PTC_1);
+#endif
+       } else {
+               data0 = (1UL << SH2_PTC_A_SHFT) |
+                       (nbits << SH2_PTC_PS_SHFT) |
+                       (1UL << SH2_PTC_START_SHFT);
+#ifndef XEN
+               ptc0 = (long *)GLOBAL_MMR_PHYS_ADDR(0, SH2_PTC + 
+#else
+               ptc0 = (unsigned long *)GLOBAL_MMR_PHYS_ADDR(0, SH2_PTC + 
+#endif
+                       (rr_value << SH2_PTC_RID_SHFT));
+               ptc1 = NULL;
+       }
+       
+
+       mynasid = get_nasid();
+       use_cpu_ptcga = local_node_uses_ptc_ga(shub1);
+       max_active = max_active_pio(shub1);
+
+       itc = ia64_get_itc();
+       spin_lock_irqsave(PTC_LOCK(shub1), flags);
+       itc2 = ia64_get_itc();
+
+       __get_cpu_var(ptcstats).lock_itc_clocks += itc2 - itc;
+       __get_cpu_var(ptcstats).shub_ptc_flushes++;
+       __get_cpu_var(ptcstats).nodes_flushed += nix;
+       if (!mymm)
+                __get_cpu_var(ptcstats).shub_ptc_flushes_not_my_mm++;
+
+       if (use_cpu_ptcga && !mymm) {
+               old_rr = ia64_get_rr(start);
+               ia64_set_rr(start, (old_rr & 0xff) | (rr_value << 8));
+               ia64_srlz_d();
+       }
+
+       wait_piowc();
+       do {
+               if (shub1)
+                       data1 = start | (1UL << SH1_PTC_1_START_SHFT);
+               else
+                       data0 = (data0 & ~SH2_PTC_ADDR_MASK) | (start & SH2_PTC_ADDR_MASK);
+               deadlock = 0;
+               active = 0;
+               for (ibegin = 0, i = 0; i < nix; i++) {
+                       nasid = nasids[i];
+                       if (use_cpu_ptcga && unlikely(nasid == mynasid)) {
+                               ia64_ptcga(start, nbits << 2);
+                               ia64_srlz_i();
+                       } else {
+                               ptc0 = CHANGE_NASID(nasid, ptc0);
+                               if (ptc1)
+                                       ptc1 = CHANGE_NASID(nasid, ptc1);
+                               pio_atomic_phys_write_mmrs(ptc0, data0, ptc1, data1);
+                               active++;
+                       }
+                       if (active >= max_active || i == (nix - 1)) {
+                               if ((deadlock = wait_piowc())) {
+                                       sn2_ptc_deadlock_recovery(nasids, ibegin, i, mynasid, ptc0, data0, ptc1, data1);
+                                       if (reset_max_active_on_deadlock())
+                                               max_active = 1;
+                               }
+                               active = 0;
+                               ibegin = i + 1;
+                       }
+               }
+               start += (1UL << nbits);
+       } while (start < end);
+
+       itc2 = ia64_get_itc() - itc2;
+       __get_cpu_var(ptcstats).shub_itc_clocks += itc2;
+       if (itc2 > __get_cpu_var(ptcstats).shub_itc_clocks_max)
+               __get_cpu_var(ptcstats).shub_itc_clocks_max = itc2;
+
+       if (old_rr) {
+               ia64_set_rr(start, old_rr);
+               ia64_srlz_d();
+       }
+
+       spin_unlock_irqrestore(PTC_LOCK(shub1), flags);
+
+       preempt_enable();
+}
+
+/*
+ * sn2_ptc_deadlock_recovery
+ *
+ * Recover from PTC deadlock conditions. Recovery requires stepping through each
+ * TLB flush transaction.  The recovery sequence is somewhat tricky & is
+ * coded in assembly language.
+ */
+
+void
+sn2_ptc_deadlock_recovery(short *nasids, short ib, short ie, int mynasid,
+                         volatile unsigned long *ptc0, unsigned long data0,
+                         volatile unsigned long *ptc1, unsigned long data1)
+{
+       short nasid, i;
+       unsigned long *piows, zeroval, n;
+
+       __get_cpu_var(ptcstats).deadlocks++;
+
+       piows = (unsigned long *) pda->pio_write_status_addr;
+       zeroval = pda->pio_write_status_val;
+
+
+       for (i=ib; i <= ie; i++) {
+               nasid = nasids[i];
+               if (local_node_uses_ptc_ga(is_shub1()) && nasid == mynasid)
+                       continue;
+               ptc0 = CHANGE_NASID(nasid, ptc0);
+               if (ptc1)
+                       ptc1 = CHANGE_NASID(nasid, ptc1);
+
+               n = sn2_ptc_deadlock_recovery_core(ptc0, data0, ptc1, data1, piows, zeroval);
+               __get_cpu_var(ptcstats).deadlocks2 += n;
+       }
+
+}
+
+/**
+ * sn_send_IPI_phys - send an IPI to a Nasid and slice
+ * @nasid: nasid to receive the interrupt (may be outside partition)
+ * @physid: physical cpuid to receive the interrupt.
+ * @vector: command to send
+ * @delivery_mode: delivery mechanism
+ *
+ * Sends an IPI (interprocessor interrupt) to the processor specified by
+ * @physid
+ *
+ * @delivery_mode can be one of the following
+ *
+ * %IA64_IPI_DM_INT - pend an interrupt
+ * %IA64_IPI_DM_PMI - pend a PMI
+ * %IA64_IPI_DM_NMI - pend an NMI
+ * %IA64_IPI_DM_INIT - pend an INIT interrupt
+ */
+void sn_send_IPI_phys(int nasid, long physid, int vector, int delivery_mode)
+{
+       long val;
+       unsigned long flags = 0;
+       volatile long *p;
+
+       p = (long *)GLOBAL_MMR_PHYS_ADDR(nasid, SH_IPI_INT);
+       val = (1UL << SH_IPI_INT_SEND_SHFT) |
+           (physid << SH_IPI_INT_PID_SHFT) |
+           ((long)delivery_mode << SH_IPI_INT_TYPE_SHFT) |
+           ((long)vector << SH_IPI_INT_IDX_SHFT) |
+           (0x000feeUL << SH_IPI_INT_BASE_SHFT);
+
+       mb();
+       if (enable_shub_wars_1_1()) {
+               spin_lock_irqsave(&sn2_global_ptc_lock, flags);
+       }
+       pio_phys_write_mmr(p, val);
+       if (enable_shub_wars_1_1()) {
+               wait_piowc();
+               spin_unlock_irqrestore(&sn2_global_ptc_lock, flags);
+       }
+
+}
+
+EXPORT_SYMBOL(sn_send_IPI_phys);
+
+/**
+ * sn2_send_IPI - send an IPI to a processor
+ * @cpuid: target of the IPI
+ * @vector: command to send
+ * @delivery_mode: delivery mechanism
+ * @redirect: redirect the IPI?
+ *
+ * Sends an IPI (InterProcessor Interrupt) to the processor specified by
+ * @cpuid.  @vector specifies the command to send, while @delivery_mode can 
+ * be one of the following
+ *
+ * %IA64_IPI_DM_INT - pend an interrupt
+ * %IA64_IPI_DM_PMI - pend a PMI
+ * %IA64_IPI_DM_NMI - pend an NMI
+ * %IA64_IPI_DM_INIT - pend an INIT interrupt
+ */
+void sn2_send_IPI(int cpuid, int vector, int delivery_mode, int redirect)
+{
+       long physid;
+       int nasid;
+
+       physid = cpu_physical_id(cpuid);
+#ifdef XEN
+       if (!sn_nodepda) {
+               ia64_sn_get_sapic_info(physid, &nasid, NULL, NULL);
+       } else
+#endif
+       nasid = cpuid_to_nasid(cpuid);
+
+       /* the following is used only when starting cpus at boot time */
+       if (unlikely(nasid == -1))
+               ia64_sn_get_sapic_info(physid, &nasid, NULL, NULL);
+
+       sn_send_IPI_phys(nasid, physid, vector, delivery_mode);
+}
+
+#ifdef CONFIG_PROC_FS
+
+#define PTC_BASENAME   "sgi_sn/ptc_statistics"
+
+static void *sn2_ptc_seq_start(struct seq_file *file, loff_t * offset)
+{
+       if (*offset < NR_CPUS)
+               return offset;
+       return NULL;
+}
+
+static void *sn2_ptc_seq_next(struct seq_file *file, void *data, loff_t * offset)
+{
+       (*offset)++;
+       if (*offset < NR_CPUS)
+               return offset;
+       return NULL;
+}
+
+static void sn2_ptc_seq_stop(struct seq_file *file, void *data)
+{
+}
+
+static int sn2_ptc_seq_show(struct seq_file *file, void *data)
+{
+       struct ptc_stats *stat;
+       int cpu;
+
+       cpu = *(loff_t *) data;
+
+       if (!cpu) {
+               seq_printf(file,
+                          "# cpu ptc_l newrid ptc_flushes nodes_flushed 
deadlocks lock_nsec shub_nsec shub_nsec_max not_my_mm deadlock2\n");
+               seq_printf(file, "# ptctest %d\n", sn2_ptctest);
+       }
+
+       if (cpu < NR_CPUS && cpu_online(cpu)) {
+               stat = &per_cpu(ptcstats, cpu);
+               seq_printf(file, "cpu %d %ld %ld %ld %ld %ld %ld %ld %ld %ld 
%ld\n", cpu, stat->ptc_l,
+                               stat->change_rid, stat->shub_ptc_flushes, 
stat->nodes_flushed,
+                               stat->deadlocks,
+                               1000 * stat->lock_itc_clocks / 
per_cpu(cpu_info, cpu).cyc_per_usec,
+                               1000 * stat->shub_itc_clocks / 
per_cpu(cpu_info, cpu).cyc_per_usec,
+                               1000 * stat->shub_itc_clocks_max / 
per_cpu(cpu_info, cpu).cyc_per_usec,
+                               stat->shub_ptc_flushes_not_my_mm,
+                               stat->deadlocks2);
+       }
+       return 0;
+}
+
+static struct seq_operations sn2_ptc_seq_ops = {
+       .start = sn2_ptc_seq_start,
+       .next = sn2_ptc_seq_next,
+       .stop = sn2_ptc_seq_stop,
+       .show = sn2_ptc_seq_show
+};
+
+static int sn2_ptc_proc_open(struct inode *inode, struct file *file)
+{
+       return seq_open(file, &sn2_ptc_seq_ops);
+}
+
+static struct file_operations proc_sn2_ptc_operations = {
+       .open = sn2_ptc_proc_open,
+       .read = seq_read,
+       .llseek = seq_lseek,
+       .release = seq_release,
+};
+
+static struct proc_dir_entry *proc_sn2_ptc;
+
+static int __init sn2_ptc_init(void)
+{
+       if (!ia64_platform_is("sn2"))
+               return 0;
+
+       if (!(proc_sn2_ptc = create_proc_entry(PTC_BASENAME, 0444, NULL))) {
+               printk(KERN_ERR "unable to create %s proc entry", PTC_BASENAME);
+               return -EINVAL;
+       }
+       proc_sn2_ptc->proc_fops = &proc_sn2_ptc_operations;
+       spin_lock_init(&sn2_global_ptc_lock);
+       return 0;
+}
+
+static void __exit sn2_ptc_exit(void)
+{
+       remove_proc_entry(PTC_BASENAME, NULL);
+}
+
+module_init(sn2_ptc_init);
+module_exit(sn2_ptc_exit);
+#endif /* CONFIG_PROC_FS */
+
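The fast-path decision described in the sn2_global_tlb_purge() comment above (use a local ptc.l loop when only the calling cpu has the context loaded and it is purging its own range, otherwise fall back to the MMR-driven global flush under the ptc lock) can be sketched roughly as follows; all names here are stand-ins, not part of the changeset:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the real primitives. */
static void local_ptc_l_loop(void) { puts("ptc.l loop on this cpu"); }
static void global_mmr_flush(void) { puts("ptc.g/MMR flush under the global ptc lock"); }

/* Mirrors the test "i == 1 && lcpu == smp_processor_id() && mymm" above. */
static void purge(int cpus_with_context, bool caller_is_that_cpu, bool mymm)
{
	if (cpus_with_context == 1 && caller_is_that_cpu && mymm)
		local_ptc_l_loop();     /* common case: no global lock taken */
	else
		global_mmr_flush();     /* shared context: flush every node  */
}

int main(void)
{
	purge(1, true, true);    /* cheap local flush    */
	purge(3, false, false);  /* global flush required */
	return 0;
}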
diff -r 46c44b5e6a1b -r 80c5b5914b79 xen/arch/ia64/linux/Makefile
--- a/xen/arch/ia64/linux/Makefile      Wed Dec 20 08:53:42 2006 -0700
+++ b/xen/arch/ia64/linux/Makefile      Wed Dec 20 14:55:02 2006 -0700
@@ -1,3 +1,7 @@ obj-y += bitop.o
+subdir-y += dig
+subdir-y += hp
+subdir-y += sn
+
 obj-y += bitop.o
 obj-y += clear_page.o
 obj-y += cmdline.o
@@ -23,6 +27,7 @@ obj-y += __moddi3.o
 obj-y += __moddi3.o
 obj-y += __umoddi3.o
 obj-y += carta_random.o
+obj-y += io.o
 
 ## variants of divide/modulo
 ## see files in xen/arch/ia64/linux/lib (linux/arch/ia64/lib)
diff -r 46c44b5e6a1b -r 80c5b5914b79 xen/arch/ia64/linux/README.origin
--- a/xen/arch/ia64/linux/README.origin Wed Dec 20 08:53:42 2006 -0700
+++ b/xen/arch/ia64/linux/README.origin Wed Dec 20 14:55:02 2006 -0700
@@ -27,3 +27,6 @@ strlen.S              -> linux/arch/ia64/lib/strlen.
 
 # The files below are from Linux-2.6.16.33
 carta_random.S         -> linux/arch/ia64/lib/carta_random.S
+
+# The files below are from Linux-2.6.19
+io.c                   -> linux/arch/ia64/lib/io.c
diff -r 46c44b5e6a1b -r 80c5b5914b79 xen/arch/ia64/linux/dig/Makefile
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/ia64/linux/dig/Makefile  Wed Dec 20 14:55:02 2006 -0700
@@ -0,0 +1,1 @@
+obj-y += machvec.o
diff -r 46c44b5e6a1b -r 80c5b5914b79 xen/arch/ia64/linux/dig/README.origin
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/ia64/linux/dig/README.origin     Wed Dec 20 14:55:02 2006 -0700
@@ -0,0 +1,7 @@
+Source files in this directory are identical copies of linux-2.6.19 files:
+
+NOTE: DO NOT commit changes to these files!   If a file
+needs to be changed, move it to ../linux-xen and follow
+the instructions in the README there.
+
+machvec.c              -> linux/arch/ia64/dig/machvec.c
diff -r 46c44b5e6a1b -r 80c5b5914b79 xen/arch/ia64/linux/dig/machvec.c
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/ia64/linux/dig/machvec.c Wed Dec 20 14:55:02 2006 -0700
@@ -0,0 +1,3 @@
+#define MACHVEC_PLATFORM_NAME          dig
+#define MACHVEC_PLATFORM_HEADER                <asm/machvec_dig.h>
+#include <asm/machvec_init.h>
diff -r 46c44b5e6a1b -r 80c5b5914b79 xen/arch/ia64/linux/hp/Makefile
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/ia64/linux/hp/Makefile   Wed Dec 20 14:55:02 2006 -0700
@@ -0,0 +1,1 @@
+subdir-y += zx1
diff -r 46c44b5e6a1b -r 80c5b5914b79 xen/arch/ia64/linux/hp/zx1/Makefile
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/ia64/linux/hp/zx1/Makefile       Wed Dec 20 14:55:02 2006 -0700
@@ -0,0 +1,1 @@
+obj-y += hpzx1_machvec.o
diff -r 46c44b5e6a1b -r 80c5b5914b79 xen/arch/ia64/linux/hp/zx1/README.origin
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/ia64/linux/hp/zx1/README.origin  Wed Dec 20 14:55:02 2006 -0700
@@ -0,0 +1,7 @@
+Source files in this directory are identical copies of linux-2.6.19 files:
+
+NOTE: DO NOT commit changes to these files!   If a file
+needs to be changed, move it to ../linux-xen and follow
+the instructions in the README there.
+
+hpzx1_machvec.c                -> linux/arch/ia64/hp/zx1/hpzx1_machvec.c
diff -r 46c44b5e6a1b -r 80c5b5914b79 xen/arch/ia64/linux/hp/zx1/hpzx1_machvec.c
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/ia64/linux/hp/zx1/hpzx1_machvec.c        Wed Dec 20 14:55:02 2006 -0700
@@ -0,0 +1,3 @@
+#define MACHVEC_PLATFORM_NAME          hpzx1
+#define MACHVEC_PLATFORM_HEADER                <asm/machvec_hpzx1.h>
+#include <asm/machvec_init.h>
diff -r 46c44b5e6a1b -r 80c5b5914b79 xen/arch/ia64/linux/io.c
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/ia64/linux/io.c  Wed Dec 20 14:55:02 2006 -0700
@@ -0,0 +1,164 @@
+#include <linux/module.h>
+#include <linux/types.h>
+
+#include <asm/io.h>
+
+/*
+ * Copy data from IO memory space to "real" memory space.
+ * This needs to be optimized.
+ */
+void memcpy_fromio(void *to, const volatile void __iomem *from, long count)
+{
+       char *dst = to;
+
+       while (count) {
+               count--;
+               *dst++ = readb(from++);
+       }
+}
+EXPORT_SYMBOL(memcpy_fromio);
+
+/*
+ * Copy data from "real" memory space to IO memory space.
+ * This needs to be optimized.
+ */
+void memcpy_toio(volatile void __iomem *to, const void *from, long count)
+{
+       const char *src = from;
+
+       while (count) {
+               count--;
+               writeb(*src++, to++);
+       }
+}
+EXPORT_SYMBOL(memcpy_toio);
+
+/*
+ * "memset" on IO memory space.
+ * This needs to be optimized.
+ */
+void memset_io(volatile void __iomem *dst, int c, long count)
+{
+       unsigned char ch = (char)(c & 0xff);
+
+       while (count) {
+               count--;
+               writeb(ch, dst);
+               dst++;
+       }
+}
+EXPORT_SYMBOL(memset_io);
+
+#ifdef CONFIG_IA64_GENERIC
+
+#undef __ia64_inb
+#undef __ia64_inw
+#undef __ia64_inl
+#undef __ia64_outb
+#undef __ia64_outw
+#undef __ia64_outl
+#undef __ia64_readb
+#undef __ia64_readw
+#undef __ia64_readl
+#undef __ia64_readq
+#undef __ia64_readb_relaxed
+#undef __ia64_readw_relaxed
+#undef __ia64_readl_relaxed
+#undef __ia64_readq_relaxed
+#undef __ia64_writeb
+#undef __ia64_writew
+#undef __ia64_writel
+#undef __ia64_writeq
+#undef __ia64_mmiowb
+
+unsigned int
+__ia64_inb (unsigned long port)
+{
+       return ___ia64_inb(port);
+}
+
+unsigned int
+__ia64_inw (unsigned long port)
+{
+       return ___ia64_inw(port);
+}
+
+unsigned int
+__ia64_inl (unsigned long port)
+{
+       return ___ia64_inl(port);
+}
+
+void
+__ia64_outb (unsigned char val, unsigned long port)
+{
+       ___ia64_outb(val, port);
+}
+
+void
+__ia64_outw (unsigned short val, unsigned long port)
+{
+       ___ia64_outw(val, port);
+}
+
+void
+__ia64_outl (unsigned int val, unsigned long port)
+{
+       ___ia64_outl(val, port);
+}
+
+unsigned char
+__ia64_readb (void __iomem *addr)
+{
+       return ___ia64_readb (addr);
+}
+
+unsigned short
+__ia64_readw (void __iomem *addr)
+{
+       return ___ia64_readw (addr);
+}
+
+unsigned int
+__ia64_readl (void __iomem *addr)
+{
+       return ___ia64_readl (addr);
+}
+
+unsigned long
+__ia64_readq (void __iomem *addr)
+{
+       return ___ia64_readq (addr);
+}
+
+unsigned char
+__ia64_readb_relaxed (void __iomem *addr)
+{
+       return ___ia64_readb (addr);
+}
+
+unsigned short
+__ia64_readw_relaxed (void __iomem *addr)
+{
+       return ___ia64_readw (addr);
+}
+
+unsigned int
+__ia64_readl_relaxed (void __iomem *addr)
+{
+       return ___ia64_readl (addr);
+}
+
+unsigned long
+__ia64_readq_relaxed (void __iomem *addr)
+{
+       return ___ia64_readq (addr);
+}
+
+void
+__ia64_mmiowb(void)
+{
+       ___ia64_mmiowb();
+}
+
+#endif /* CONFIG_IA64_GENERIC */
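A rough sketch of how a driver would typically use the helpers above on an ioremap()ed window; the physical address and size are made up for illustration and this fragment is not part of the changeset:

#include <asm/io.h>

/* Illustration only: copy to/from a hypothetical 4KB device window. */
static void example_mmio_copy(void)
{
	void __iomem *win = ioremap(0xf8000000UL, 0x1000);  /* made-up address */
	unsigned char buf[256] = { 0 };

	if (!win)
		return;
	memset_io(win, 0, 0x1000);              /* clear the window            */
	memcpy_toio(win, buf, sizeof(buf));     /* push a buffer to the device */
	memcpy_fromio(buf, win, sizeof(buf));   /* and read it back            */
	iounmap(win);
}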
diff -r 46c44b5e6a1b -r 80c5b5914b79 xen/arch/ia64/linux/sn/Makefile
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/ia64/linux/sn/Makefile   Wed Dec 20 14:55:02 2006 -0700
@@ -0,0 +1,2 @@
+subdir-y += kernel
+subdir-y += pci
diff -r 46c44b5e6a1b -r 80c5b5914b79 xen/arch/ia64/linux/sn/kernel/Makefile
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/ia64/linux/sn/kernel/Makefile    Wed Dec 20 14:55:02 2006 -0700
@@ -0,0 +1,3 @@
+obj-y += machvec.o
+obj-y += pio_phys.o
+obj-y += ptc_deadlock.o
diff -r 46c44b5e6a1b -r 80c5b5914b79 xen/arch/ia64/linux/sn/kernel/README.origin
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/ia64/linux/sn/kernel/README.origin       Wed Dec 20 14:55:02 2006 -0700
@@ -0,0 +1,9 @@
+Source files in this directory are identical copies of linux-2.6.19 files:
+
+NOTE: DO NOT commit changes to these files!   If a file
+needs to be changed, move it to ../linux-xen and follow
+the instructions in the README there.
+
+machvec.c              -> linux/arch/ia64/sn/kernel/machvec.c
+pio_phys.S             -> linux/arch/ia64/sn/kernel/pio_phys.S
+ptc_deadlock.S         -> linux/arch/ia64/sn/kernel/sn2/ptc_deadlock.S
diff -r 46c44b5e6a1b -r 80c5b5914b79 xen/arch/ia64/linux/sn/kernel/machvec.c
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/ia64/linux/sn/kernel/machvec.c   Wed Dec 20 14:55:02 2006 -0700
@@ -0,0 +1,11 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2002-2003 Silicon Graphics, Inc.  All Rights Reserved.
+ */
+
+#define MACHVEC_PLATFORM_NAME  sn2
+#define MACHVEC_PLATFORM_HEADER        <asm/machvec_sn2.h>
+#include <asm/machvec_init.h>
diff -r 46c44b5e6a1b -r 80c5b5914b79 xen/arch/ia64/linux/sn/kernel/pio_phys.S
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/ia64/linux/sn/kernel/pio_phys.S  Wed Dec 20 14:55:02 2006 -0700
@@ -0,0 +1,71 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2000-2005 Silicon Graphics, Inc. All rights reserved.
+ *
+ * This file contains macros used to access MMR registers via
+ * uncached physical addresses.
+ *      pio_phys_read_mmr  - read an MMR
+ *      pio_phys_write_mmr - write an MMR
+ *      pio_atomic_phys_write_mmrs - atomically write 1 or 2 MMRs with psr.ic=0
+ *              Second MMR will be skipped if address is NULL
+ *
+ * Addresses passed to these routines should be uncached physical addresses
+ *     ie., 0x80000....
+ */
+
+
+
+#include <asm/asmmacro.h>
+#include <asm/page.h>
+
+GLOBAL_ENTRY(pio_phys_read_mmr)
+       .prologue
+       .regstk 1,0,0,0
+       .body
+       mov r2=psr
+       rsm psr.i | psr.dt
+       ;;
+       srlz.d
+       ld8.acq r8=[r32]
+       ;;
+       mov psr.l=r2;;
+       srlz.d
+       br.ret.sptk.many rp
+END(pio_phys_read_mmr)
+
+GLOBAL_ENTRY(pio_phys_write_mmr)
+       .prologue
+       .regstk 2,0,0,0
+       .body
+       mov r2=psr
+       rsm psr.i | psr.dt
+       ;;
+       srlz.d
+       st8.rel [r32]=r33
+       ;;
+       mov psr.l=r2;;
+       srlz.d
+       br.ret.sptk.many rp
+END(pio_phys_write_mmr)
+
+GLOBAL_ENTRY(pio_atomic_phys_write_mmrs)
+       .prologue
+       .regstk 4,0,0,0
+       .body
+       mov r2=psr
+       cmp.ne p9,p0=r34,r0;
+       rsm psr.i | psr.dt | psr.ic
+       ;;
+       srlz.d
+       st8.rel [r32]=r33
+(p9)   st8.rel [r34]=r35
+       ;;
+       mov psr.l=r2;;
+       srlz.d
+       br.ret.sptk.many rp
+END(pio_atomic_phys_write_mmrs)
+
+
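From C these entry points are reached through prototypes along the lines of those in asm/sn/rw_mmr.h (paraphrased here; signedness may differ), and sn2_smp.c earlier in this patch calls pio_atomic_phys_write_mmrs() in essentially this way. A sketch, not part of the changeset:

/* Paraphrased prototypes for the assembly routines above. */
extern long pio_phys_read_mmr(volatile long *mmr);
extern void pio_phys_write_mmr(volatile long *mmr, long val);
extern void pio_atomic_phys_write_mmrs(volatile long *mmr1, long val1,
					volatile long *mmr2, long val2);

static void example_ptc_write(volatile long *ptc0, long data0,
			      volatile long *ptc1, long data1)
{
	/* ptc0/ptc1 must be uncached physical addresses (0x8000... form);
	 * the second write is skipped when ptc1 is NULL, as on shub2. */
	pio_atomic_phys_write_mmrs(ptc0, data0, ptc1, data1);
}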
diff -r 46c44b5e6a1b -r 80c5b5914b79 xen/arch/ia64/linux/sn/kernel/ptc_deadlock.S
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/ia64/linux/sn/kernel/ptc_deadlock.S      Wed Dec 20 14:55:02 
2006 -0700
@@ -0,0 +1,92 @@
+/* 
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2000-2005 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#include <asm/types.h>
+#include <asm/sn/shub_mmr.h>
+
+#define DEADLOCKBIT    SH_PIO_WRITE_STATUS_WRITE_DEADLOCK_SHFT
+#define WRITECOUNTMASK SH_PIO_WRITE_STATUS_PENDING_WRITE_COUNT_MASK
+#define ALIAS_OFFSET   8
+
+
+       .global sn2_ptc_deadlock_recovery_core
+       .proc   sn2_ptc_deadlock_recovery_core
+
+sn2_ptc_deadlock_recovery_core:
+       .regstk 6,0,0,0
+
+       ptc0     = in0
+       data0    = in1
+       ptc1     = in2
+       data1    = in3
+       piowc    = in4
+       zeroval  = in5
+       piowcphy = r30
+       psrsave  = r2
+       scr1     = r16
+       scr2     = r17
+       mask     = r18
+
+
+       extr.u  piowcphy=piowc,0,61;;   // Convert piowc to uncached physical address
+       dep     piowcphy=-1,piowcphy,63,1
+       movl    mask=WRITECOUNTMASK
+       mov     r8=r0
+
+1:
+       cmp.ne  p8,p9=r0,ptc1           // Test for shub type (ptc1 non-null on shub1)
+                                       // p8 = 1 if shub1, p9 = 1 if shub2
+
+       add     scr2=ALIAS_OFFSET,piowc // Address of WRITE_STATUS alias register
+       mov     scr1=7;;                // Clear DEADLOCK, WRITE_ERROR, MULTI_WRITE_ERROR
+(p8)   st8.rel [scr2]=scr1;;
+(p9)   ld8.acq scr1=[scr2];;
+
+5:     ld8.acq scr1=[piowc];;          // Wait for PIOs to complete.
+       hint    @pause
+       and     scr2=scr1,mask;;        // mask of writecount bits
+       cmp.ne  p6,p0=zeroval,scr2
+(p6)   br.cond.sptk 5b
+       
+
+
+       ////////////// BEGIN PHYSICAL MODE ////////////////////
+       mov psrsave=psr                 // Disable IC (no PMIs)
+       rsm psr.i | psr.dt | psr.ic;;
+       srlz.i;;
+
+       st8.rel [ptc0]=data0            // Write PTC0 & wait for completion.
+
+5:     ld8.acq scr1=[piowcphy];;       // Wait for PIOs to complete.
+       hint    @pause
+       and     scr2=scr1,mask;;        // mask of writecount bits
+       cmp.ne  p6,p0=zeroval,scr2
+(p6)   br.cond.sptk 5b;;
+
+       tbit.nz p8,p7=scr1,DEADLOCKBIT;;// Test for DEADLOCK
+(p7)   cmp.ne p7,p0=r0,ptc1;;          // Test for non-null ptc1
+       
+(p7)   st8.rel [ptc1]=data1;;          // Now write PTC1.
+
+5:     ld8.acq scr1=[piowcphy];;       // Wait for PIOs to complete.
+       hint    @pause
+       and     scr2=scr1,mask;;        // mask of writecount bits
+       cmp.ne  p6,p0=zeroval,scr2
+(p6)   br.cond.sptk 5b
+       
+       tbit.nz p8,p0=scr1,DEADLOCKBIT;;// Test for DEADLOCK
+
+       mov psr.l=psrsave;;             // Reenable IC
+       srlz.i;;
+       ////////////// END   PHYSICAL MODE ////////////////////
+
+(p8)   add     r8=1,r8
+(p8)   br.cond.spnt 1b;;               // Repeat if DEADLOCK occurred.
+
+       br.ret.sptk     rp
+       .endp sn2_ptc_deadlock_recovery_core
diff -r 46c44b5e6a1b -r 80c5b5914b79 xen/arch/ia64/linux/sn/pci/Makefile
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/ia64/linux/sn/pci/Makefile       Wed Dec 20 14:55:02 2006 -0700
@@ -0,0 +1,1 @@
+subdir-y += pcibr
diff -r 46c44b5e6a1b -r 80c5b5914b79 xen/arch/ia64/linux/sn/pci/pcibr/Makefile
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/ia64/linux/sn/pci/pcibr/Makefile Wed Dec 20 14:55:02 2006 -0700
@@ -0,0 +1,1 @@
+obj-y += pcibr_reg.o
diff -r 46c44b5e6a1b -r 80c5b5914b79 xen/arch/ia64/linux/sn/pci/pcibr/README.origin
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/ia64/linux/sn/pci/pcibr/README.origin    Wed Dec 20 14:55:02 2006 -0700
@@ -0,0 +1,7 @@
+Source files in this directory are identical copies of linux-2.6.19 files:
+
+NOTE: DO NOT commit changes to these files!   If a file
+needs to be changed, move it to ../linux-xen and follow
+the instructions in the README there.
+
+pcibr_reg.c            -> linux/arch/ia64/sn/pci/pcibr/pcibr_reg.c
diff -r 46c44b5e6a1b -r 80c5b5914b79 xen/arch/ia64/linux/sn/pci/pcibr/pcibr_reg.c
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/ia64/linux/sn/pci/pcibr/pcibr_reg.c      Wed Dec 20 14:55:02 2006 -0700
@@ -0,0 +1,285 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2004 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/types.h>
+#include <asm/sn/io.h>
+#include <asm/sn/pcibr_provider.h>
+#include <asm/sn/pcibus_provider_defs.h>
+#include <asm/sn/pcidev.h>
+#include <asm/sn/pic.h>
+#include <asm/sn/tiocp.h>
+
+union br_ptr {
+       struct tiocp tio;
+       struct pic pic;
+};
+
+/*
+ * Control Register Access -- Read/Write                            0000_0020
+ */
+void pcireg_control_bit_clr(struct pcibus_info *pcibus_info, u64 bits)
+{
+       union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base;
+
+       if (pcibus_info) {
+               switch (pcibus_info->pbi_bridge_type) {
+               case PCIBR_BRIDGETYPE_TIOCP:
+                       __sn_clrq_relaxed(&ptr->tio.cp_control, bits);
+                       break;
+               case PCIBR_BRIDGETYPE_PIC:
+                       __sn_clrq_relaxed(&ptr->pic.p_wid_control, bits);
+                       break;
+               default:
+                       panic
+                           ("pcireg_control_bit_clr: unknown bridgetype bridge 0x%p",
+                            ptr);
+               }
+       }
+}
+
+void pcireg_control_bit_set(struct pcibus_info *pcibus_info, u64 bits)
+{
+       union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base;
+
+       if (pcibus_info) {
+               switch (pcibus_info->pbi_bridge_type) {
+               case PCIBR_BRIDGETYPE_TIOCP:
+                       __sn_setq_relaxed(&ptr->tio.cp_control, bits);
+                       break;
+               case PCIBR_BRIDGETYPE_PIC:
+                       __sn_setq_relaxed(&ptr->pic.p_wid_control, bits);
+                       break;
+               default:
+                       panic
+                           ("pcireg_control_bit_set: unknown bridgetype bridge 0x%p",
+                            ptr);
+               }
+       }
+}
+
+/*
+ * PCI/PCIX Target Flush Register Access -- Read Only              0000_0050
+ */
+u64 pcireg_tflush_get(struct pcibus_info *pcibus_info)
+{
+       union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base;
+       u64 ret = 0;
+
+       if (pcibus_info) {
+               switch (pcibus_info->pbi_bridge_type) {
+               case PCIBR_BRIDGETYPE_TIOCP:
+                       ret = __sn_readq_relaxed(&ptr->tio.cp_tflush);
+                       break;
+               case PCIBR_BRIDGETYPE_PIC:
+                       ret = __sn_readq_relaxed(&ptr->pic.p_wid_tflush);
+                       break;
+               default:
+                       panic
+                           ("pcireg_tflush_get: unknown bridgetype bridge 0x%p",
+                            ptr);
+               }
+       }
+
+       /* Read of the Target Flush should always return zero */
+       if (ret != 0)
+               panic("pcireg_tflush_get:Target Flush failed\n");
+
+       return ret;
+}
+
+/*
+ * Interrupt Status Register Access -- Read Only                   0000_0100
+ */
+u64 pcireg_intr_status_get(struct pcibus_info * pcibus_info)
+{
+       union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base;
+       u64 ret = 0;
+
+       if (pcibus_info) {
+               switch (pcibus_info->pbi_bridge_type) {
+               case PCIBR_BRIDGETYPE_TIOCP:
+                       ret = __sn_readq_relaxed(&ptr->tio.cp_int_status);
+                       break;
+               case PCIBR_BRIDGETYPE_PIC:
+                       ret = __sn_readq_relaxed(&ptr->pic.p_int_status);
+                       break;
+               default:
+                       panic
+                           ("pcireg_intr_status_get: unknown bridgetype bridge 0x%p",
+                            ptr);
+               }
+       }
+       return ret;
+}
+
+/*
+ * Interrupt Enable Register Access -- Read/Write                   0000_0108
+ */
+void pcireg_intr_enable_bit_clr(struct pcibus_info *pcibus_info, u64 bits)
+{
+       union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base;
+
+       if (pcibus_info) {
+               switch (pcibus_info->pbi_bridge_type) {
+               case PCIBR_BRIDGETYPE_TIOCP:
+                       __sn_clrq_relaxed(&ptr->tio.cp_int_enable, bits);
+                       break;
+               case PCIBR_BRIDGETYPE_PIC:
+                       __sn_clrq_relaxed(&ptr->pic.p_int_enable, bits);
+                       break;
+               default:
+                       panic
+                           ("pcireg_intr_enable_bit_clr: unknown bridgetype bridge 0x%p",
+                            ptr);
+               }
+       }
+}
+
+void pcireg_intr_enable_bit_set(struct pcibus_info *pcibus_info, u64 bits)
+{
+       union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base;
+
+       if (pcibus_info) {
+               switch (pcibus_info->pbi_bridge_type) {
+               case PCIBR_BRIDGETYPE_TIOCP:
+                       __sn_setq_relaxed(&ptr->tio.cp_int_enable, bits);
+                       break;
+               case PCIBR_BRIDGETYPE_PIC:
+                       __sn_setq_relaxed(&ptr->pic.p_int_enable, bits);
+                       break;
+               default:
+                       panic
+                           ("pcireg_intr_enable_bit_set: unknown bridgetype bridge 0x%p",
+                            ptr);
+               }
+       }
+}
+
+/*
+ * Intr Host Address Register (int_addr) -- Read/Write  0000_0130 - 0000_0168
+ */
+void pcireg_intr_addr_addr_set(struct pcibus_info *pcibus_info, int int_n,
+                              u64 addr)
+{
+       union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base;
+
+       if (pcibus_info) {
+               switch (pcibus_info->pbi_bridge_type) {
+               case PCIBR_BRIDGETYPE_TIOCP:
+                       __sn_clrq_relaxed(&ptr->tio.cp_int_addr[int_n],
+                           TIOCP_HOST_INTR_ADDR);
+                       __sn_setq_relaxed(&ptr->tio.cp_int_addr[int_n],
+                           (addr & TIOCP_HOST_INTR_ADDR));
+                       break;
+               case PCIBR_BRIDGETYPE_PIC:
+                       __sn_clrq_relaxed(&ptr->pic.p_int_addr[int_n],
+                           PIC_HOST_INTR_ADDR);
+                       __sn_setq_relaxed(&ptr->pic.p_int_addr[int_n],
+                           (addr & PIC_HOST_INTR_ADDR));
+                       break;
+               default:
+                       panic
+                           ("pcireg_intr_addr_addr_get: unknown bridgetype bridge 0x%p",
+                            ptr);
+               }
+       }
+}
+
+/*
+ * Force Interrupt Register Access -- Write Only       0000_01C0 - 0000_01F8
+ */
+void pcireg_force_intr_set(struct pcibus_info *pcibus_info, int int_n)
+{
+       union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base;
+
+       if (pcibus_info) {
+               switch (pcibus_info->pbi_bridge_type) {
+               case PCIBR_BRIDGETYPE_TIOCP:
+                       writeq(1, &ptr->tio.cp_force_pin[int_n]);
+                       break;
+               case PCIBR_BRIDGETYPE_PIC:
+                       writeq(1, &ptr->pic.p_force_pin[int_n]);
+                       break;
+               default:
+                       panic
+                           ("pcireg_force_intr_set: unknown bridgetype bridge 0x%p",
+                            ptr);
+               }
+       }
+}
+
+/*
+ * Device(x) Write Buffer Flush Reg Access -- Read Only 0000_0240 - 0000_0258
+ */
+u64 pcireg_wrb_flush_get(struct pcibus_info *pcibus_info, int device)
+{
+       union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base;
+       u64 ret = 0;
+
+       if (pcibus_info) {
+               switch (pcibus_info->pbi_bridge_type) {
+               case PCIBR_BRIDGETYPE_TIOCP:
+                       ret =
+                           __sn_readq_relaxed(&ptr->tio.cp_wr_req_buf[device]);
+                       break;
+               case PCIBR_BRIDGETYPE_PIC:
+                       ret =
+                           __sn_readq_relaxed(&ptr->pic.p_wr_req_buf[device]);
+                       break;
+               default:
+                     panic("pcireg_wrb_flush_get: unknown bridgetype bridge 0x%p", ptr);
+               }
+
+       }
+       /* Read of the Write Buffer Flush should always return zero */
+       return ret;
+}
+
+void pcireg_int_ate_set(struct pcibus_info *pcibus_info, int ate_index,
+                       u64 val)
+{
+       union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base;
+
+       if (pcibus_info) {
+               switch (pcibus_info->pbi_bridge_type) {
+               case PCIBR_BRIDGETYPE_TIOCP:
+                       writeq(val, &ptr->tio.cp_int_ate_ram[ate_index]);
+                       break;
+               case PCIBR_BRIDGETYPE_PIC:
+                       writeq(val, &ptr->pic.p_int_ate_ram[ate_index]);
+                       break;
+               default:
+                       panic
+                           ("pcireg_int_ate_set: unknown bridgetype bridge 0x%p",
+                            ptr);
+               }
+       }
+}
+
+u64 __iomem *pcireg_int_ate_addr(struct pcibus_info *pcibus_info, int ate_index)
+{
+       union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base;
+       u64 __iomem *ret = NULL;
+
+       if (pcibus_info) {
+               switch (pcibus_info->pbi_bridge_type) {
+               case PCIBR_BRIDGETYPE_TIOCP:
+                       ret = &ptr->tio.cp_int_ate_ram[ate_index];
+                       break;
+               case PCIBR_BRIDGETYPE_PIC:
+                       ret = &ptr->pic.p_int_ate_ram[ate_index];
+                       break;
+               default:
+                           ("pcireg_int_ate_addr: unknown bridgetype bridge 0x%p",
0x%p",
+                            ptr);
+               }
+       }
+       return ret;
+}
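Every accessor in pcibr_reg.c follows one pattern: reinterpret the bridge base address as a union br_ptr and choose the TIOCP or PIC register layout from pbi_bridge_type, panicking on anything else. A condensed sketch of that dispatch, using only identifiers that already appear in the file above (the function name itself is invented):

/* Sketch of the common dispatch pattern; only the function name is new. */
static u64 example_control_get(struct pcibus_info *pcibus_info)
{
        union br_ptr __iomem *ptr =
            (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base;
        u64 ret = 0;

        switch (pcibus_info->pbi_bridge_type) {
        case PCIBR_BRIDGETYPE_TIOCP:
                ret = __sn_readq_relaxed(&ptr->tio.cp_control);
                break;
        case PCIBR_BRIDGETYPE_PIC:
                ret = __sn_readq_relaxed(&ptr->pic.p_wid_control);
                break;
        default:
                panic("example_control_get: unknown bridgetype bridge 0x%p", ptr);
        }
        return ret;
}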
diff -r 46c44b5e6a1b -r 80c5b5914b79 xen/arch/ia64/xen/irq.c
--- a/xen/arch/ia64/xen/irq.c   Wed Dec 20 08:53:42 2006 -0700
+++ b/xen/arch/ia64/xen/irq.c   Wed Dec 20 14:55:02 2006 -0700
@@ -47,6 +47,13 @@
 
 #include <xen/event.h>
 #define apicid_to_phys_cpu_present(x)  1
+
+#ifdef CONFIG_IA64_GENERIC
+unsigned int __ia64_local_vector_to_irq (ia64_vector vec)
+{
+       return (unsigned int) vec;
+}
+#endif
 
 /*
  * Linux has a controller-independent x86 interrupt architecture.
diff -r 46c44b5e6a1b -r 80c5b5914b79 xen/arch/ia64/xen/vhpt.c
--- a/xen/arch/ia64/xen/vhpt.c  Wed Dec 20 08:53:42 2006 -0700
+++ b/xen/arch/ia64/xen/vhpt.c  Wed Dec 20 14:55:02 2006 -0700
@@ -20,9 +20,6 @@
 #include <asm/vcpu.h>
 #include <asm/vcpumask.h>
 #include <asm/vmmu.h>
-
-/* Defined in tlb.c  */
-extern void ia64_global_tlb_purge(u64 start, u64 end, u64 nbits);
 
 extern long running_on_sim;
 
@@ -364,7 +361,7 @@ void domain_flush_vtlb_range (struct dom
        // ptc.ga has release semantics.
 
        /* ptc.ga  */
-       ia64_global_tlb_purge(vadr,vadr+addr_range,PAGE_SHIFT);
+       platform_global_tlb_purge(vadr, vadr + addr_range, PAGE_SHIFT);
        perfc_incrc(domain_flush_vtlb_range);
 }
 
@@ -442,7 +439,8 @@ __domain_flush_vtlb_track_entry(struct d
                perfc_incrc(domain_flush_vtlb_local);
        } else {
                /* ptc.ga has release semantics. */
-               ia64_global_tlb_purge(vaddr, vaddr + PAGE_SIZE, PAGE_SHIFT);
+               platform_global_tlb_purge(vaddr, vaddr + PAGE_SIZE,
+                                         PAGE_SHIFT);
                perfc_incrc(domain_flush_vtlb_global);
        }
 
diff -r 46c44b5e6a1b -r 80c5b5914b79 xen/include/asm-ia64/config.h
--- a/xen/include/asm-ia64/config.h     Wed Dec 20 08:53:42 2006 -0700
+++ b/xen/include/asm-ia64/config.h     Wed Dec 20 14:55:02 2006 -0700
@@ -8,8 +8,9 @@
 // manufactured from component pieces
 
 // defined in linux/arch/ia64/defconfig
-//#define      CONFIG_IA64_GENERIC
-#define        CONFIG_IA64_HP_SIM
+#define        CONFIG_IA64_GENERIC
+#define CONFIG_HZ      32
+
 #define        CONFIG_IA64_L1_CACHE_SHIFT 7
 // needed by include/asm-ia64/page.h
 #define        CONFIG_IA64_PAGE_SIZE_16KB      // 4KB doesn't work?!?
@@ -145,14 +146,6 @@ extern int smp_num_siblings;
 // avoid redefining task_struct in asm/current.h
 #define task_struct vcpu
 
-// linux/include/asm-ia64/machvec.h (linux/arch/ia64/lib/io.c)
-#define platform_inb   __ia64_inb
-#define platform_inw   __ia64_inw
-#define platform_inl   __ia64_inl
-#define platform_outb  __ia64_outb
-#define platform_outw  __ia64_outw
-#define platform_outl  __ia64_outl
-
 #include <xen/cache.h>
 #ifndef CONFIG_SMP
 #define __cacheline_aligned_in_smp
@@ -206,6 +199,16 @@ void sort_main_extable(void);
 // Deprivated linux inf and put here for short time compatibility
 #define kmalloc(s, t) xmalloc_bytes((s))
 #define kfree(s) xfree((s))
+#define kzalloc(size, flags)                           \
+({                                                     \
+       unsigned char *mem;                             \
+       mem = (unsigned char *)xmalloc_bytes(size);     \
+       if (mem)                                        \
+               memset(mem, 0, size);                   \
+       (void *)mem;                                    \
+})
+#define kcalloc(n, size, flags)                kzalloc(n * size, flags)
+#define alloc_bootmem_node(pg, size)   xmalloc_bytes(size)
 
 // see common/keyhandler.c
 #define        nop()   asm volatile ("nop 0")
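The kzalloc()/kcalloc()/alloc_bootmem_node() wrappers added above let the imported SN2 code allocate memory through Xen's xmalloc_bytes() without modification; the flags argument is accepted for source compatibility and then dropped by the macro. A minimal usage sketch (the function is illustrative):

/* Sketch only: kzalloc() expands to a zeroed xmalloc_bytes(); pair it with
 * kfree(), which the block above maps to xfree(). */
static void *example_alloc_zeroed(size_t bytes)
{
        void *p = kzalloc(bytes, 0);    /* the flags value is ignored */

        return p;                       /* may be NULL on failure */
}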
diff -r 46c44b5e6a1b -r 80c5b5914b79 xen/include/asm-ia64/linux-null/linux/dmapool.h
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-ia64/linux-null/linux/dmapool.h   Wed Dec 20 14:55:02 2006 -0700
@@ -0,0 +1,1 @@
+/* This file is intentionally left empty. */
diff -r 46c44b5e6a1b -r 80c5b5914b79 xen/include/asm-ia64/linux-null/linux/ioport.h
--- a/xen/include/asm-ia64/linux-null/linux/ioport.h    Wed Dec 20 08:53:42 2006 -0700
+++ /dev/null   Thu Jan 01 00:00:00 1970 +0000
@@ -1,1 +0,0 @@
-/* This file is intentionally left empty. */
diff -r 46c44b5e6a1b -r 80c5b5914b79 xen/include/asm-ia64/linux-null/linux/rwsem.h
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-ia64/linux-null/linux/rwsem.h     Wed Dec 20 14:55:02 2006 -0700
@@ -0,0 +1,1 @@
+/* This file is intentionally left empty. */
diff -r 46c44b5e6a1b -r 80c5b5914b79 xen/include/asm-ia64/linux-xen/asm/README.origin
--- a/xen/include/asm-ia64/linux-xen/asm/README.origin  Wed Dec 20 08:53:42 2006 -0700
+++ b/xen/include/asm-ia64/linux-xen/asm/README.origin  Wed Dec 20 14:55:02 2006 -0700
@@ -34,3 +34,10 @@ iosapic.h            -> linux/include/asm-ia64/ios
 # The files below are from Linux-2.6.16.33
 perfmon.h              -> linux/include/asm-ia64/perfmon.h
 perfmon_default_smpl.h -> linux/include/asm-ia64/perfmon_default_smpl.h
+
+# The files below are from Linux-2.6.19
+machvec.h              -> linux/include/asm-ia64/machvec.h
+machvec_dig.h          -> linux/include/asm-ia64/machvec_dig.h
+machvec_sn2.h          -> linux/include/asm-ia64/machvec_sn2.h
+machvec_hpzx1.h                -> linux/include/asm-ia64/machvec_hpzx1.h
+machvec_pci.h          -> linux/include/asm-ia64/pci.h
diff -r 46c44b5e6a1b -r 80c5b5914b79 xen/include/asm-ia64/linux-xen/asm/machvec.h
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-ia64/linux-xen/asm/machvec.h      Wed Dec 20 14:55:02 2006 -0700
@@ -0,0 +1,498 @@
+/*
+ * Machine vector for IA-64.
+ *
+ * Copyright (C) 1999 Silicon Graphics, Inc.
+ * Copyright (C) Srinivasa Thirumalachar <sprasad@xxxxxxxxxxxx>
+ * Copyright (C) Vijay Chander <vijay@xxxxxxxxxxxx>
+ * Copyright (C) 1999-2001, 2003-2004 Hewlett-Packard Co.
+ *     David Mosberger-Tang <davidm@xxxxxxxxxx>
+ */
+#ifndef _ASM_IA64_MACHVEC_H
+#define _ASM_IA64_MACHVEC_H
+
+#include <linux/config.h>
+#include <linux/types.h>
+
+/* forward declarations: */
+struct device;
+struct pt_regs;
+struct scatterlist;
+struct page;
+struct mm_struct;
+struct pci_bus;
+
+typedef void ia64_mv_setup_t (char **);
+typedef void ia64_mv_cpu_init_t (void);
+typedef void ia64_mv_irq_init_t (void);
+typedef void ia64_mv_send_ipi_t (int, int, int, int);
+typedef void ia64_mv_timer_interrupt_t (int, void *, struct pt_regs *);
+typedef void ia64_mv_global_tlb_purge_t (unsigned long, unsigned long, unsigned long);
+typedef void ia64_mv_tlb_migrate_finish_t (struct mm_struct *);
+typedef unsigned int ia64_mv_local_vector_to_irq (u8);
+typedef char *ia64_mv_pci_get_legacy_mem_t (struct pci_bus *);
+typedef int ia64_mv_pci_legacy_read_t (struct pci_bus *, u16 port, u32 *val,
+                                      u8 size);
+typedef int ia64_mv_pci_legacy_write_t (struct pci_bus *, u16 port, u32 val,
+                                       u8 size);
+
+/* DMA-mapping interface: */
+typedef void ia64_mv_dma_init (void);
+typedef void *ia64_mv_dma_alloc_coherent (struct device *, size_t, dma_addr_t *, int);
+typedef void ia64_mv_dma_free_coherent (struct device *, size_t, void *, dma_addr_t);
+typedef dma_addr_t ia64_mv_dma_map_single (struct device *, void *, size_t, int);
+typedef void ia64_mv_dma_unmap_single (struct device *, dma_addr_t, size_t, int);
+typedef int ia64_mv_dma_map_sg (struct device *, struct scatterlist *, int, int);
+typedef void ia64_mv_dma_unmap_sg (struct device *, struct scatterlist *, int, int);
+typedef void ia64_mv_dma_sync_single_for_cpu (struct device *, dma_addr_t, size_t, int);
+typedef void ia64_mv_dma_sync_sg_for_cpu (struct device *, struct scatterlist *, int, int);
+typedef void ia64_mv_dma_sync_single_for_device (struct device *, dma_addr_t, size_t, int);
+typedef void ia64_mv_dma_sync_sg_for_device (struct device *, struct scatterlist *, int, int);
+typedef int ia64_mv_dma_mapping_error (dma_addr_t dma_addr);
+typedef int ia64_mv_dma_supported (struct device *, u64);
+
+/*
+ * WARNING: The legacy I/O space is _architected_.  Platforms are
+ * expected to follow this architected model (see Section 10.7 in the
+ * IA-64 Architecture Software Developer's Manual).  Unfortunately,
+ * some broken machines do not follow that model, which is why we have
+ * to make the inX/outX operations part of the machine vector.
+ * Platform designers should follow the architected model whenever
+ * possible.
+ */
+typedef unsigned int ia64_mv_inb_t (unsigned long);
+typedef unsigned int ia64_mv_inw_t (unsigned long);
+typedef unsigned int ia64_mv_inl_t (unsigned long);
+typedef void ia64_mv_outb_t (unsigned char, unsigned long);
+typedef void ia64_mv_outw_t (unsigned short, unsigned long);
+typedef void ia64_mv_outl_t (unsigned int, unsigned long);
+typedef void ia64_mv_mmiowb_t (void);
+typedef unsigned char ia64_mv_readb_t (const volatile void __iomem *);
+typedef unsigned short ia64_mv_readw_t (const volatile void __iomem *);
+typedef unsigned int ia64_mv_readl_t (const volatile void __iomem *);
+typedef unsigned long ia64_mv_readq_t (const volatile void __iomem *);
+typedef unsigned char ia64_mv_readb_relaxed_t (const volatile void __iomem *);
+typedef unsigned short ia64_mv_readw_relaxed_t (const volatile void __iomem *);
+typedef unsigned int ia64_mv_readl_relaxed_t (const volatile void __iomem *);
+typedef unsigned long ia64_mv_readq_relaxed_t (const volatile void __iomem *);
+
+static inline void
+machvec_noop (void)
+{
+}
+
+static inline void
+machvec_noop_mm (struct mm_struct *mm)
+{
+}
+
+#ifdef XEN
+#include <xen/lib.h>
+/*
+ * These should never get called, they just fill out the machine
+ * vectors and make the compiler happy.
+ */
+static inline void*
+machvec_noop_dma_alloc_coherent (struct device *dev, size_t size,
+                                 dma_addr_t *addr, int dir)
+{
+       panic("%s() called", __FUNCTION__);
+       return (void *)0;
+}
+
+static inline void
+machvec_noop_dma_free_coherent (struct device *dev, size_t size,
+                                void *vaddr, dma_addr_t handle)
+{
+       panic("%s() called", __FUNCTION__);
+}
+
+static inline dma_addr_t
+machvec_noop_dma_map_single (struct device *dev, void *addr,
+                             size_t size, int dir)
+{
+       panic("%s() called", __FUNCTION__);
+       return (dma_addr_t)0;
+}
+
+static inline void
+machvec_noop_dma_unmap_single (struct device *dev, dma_addr_t vaddr,
+                               size_t size, int dir)
+{
+       panic("%s() called", __FUNCTION__);
+}
+
+static inline int
+machvec_noop_dma_map_sg (struct device *dev, struct scatterlist *sglist,
+                         int nents, int dir)
+{
+       panic("%s() called", __FUNCTION__);
+       return 0;
+}
+
+static inline void
+machvec_noop_dma_unmap_sg (struct device *dev, struct scatterlist *sglist,
+                           int nents, int dir)
+{
+       panic("%s() called", __FUNCTION__);
+}
+
+static inline void
+machvec_noop_dma_sync_single_for_cpu (struct device *dev, dma_addr_t vaddr,
+                                      size_t size, int dir)
+{
+       panic("%s() called", __FUNCTION__);
+}
+
+#define machvec_noop_dma_sync_single_for_device                \
+       machvec_noop_dma_sync_single_for_cpu
+
+static inline void
+machvec_noop_dma_sync_sg_for_cpu (struct device *dev,
+                                  struct scatterlist *sglist,
+                                  int nents, int dir)
+{
+       panic("%s() called", __FUNCTION__);
+}
+
+#define machvec_noop_dma_sync_sg_for_device            \
+       machvec_noop_dma_sync_sg_for_cpu
+
+static inline int
+machvec_noop_dma_mapping_error (dma_addr_t dma_addr)
+{
+       panic("%s() called", __FUNCTION__);
+       return 1;
+}
+
+static inline int
+machvec_noop_dma_supported (struct device *dev, u64 mask)
+{
+       panic("%s() called", __FUNCTION__);
+       return 0;
+}
+
+static inline char*
+machvec_noop_pci_get_legacy_mem (struct pci_bus *bus)
+{
+       panic("%s() called", __FUNCTION__);
+       return 0;
+}
+
+static inline int
+machvec_noop_pci_legacy_read (struct pci_bus *bus, u16 port, u32 *val, u8 size)
+{
+       panic("%s() called", __FUNCTION__);
+       return 0;
+}
+
+static inline int
+machvec_noop_pci_legacy_write (struct pci_bus *bus, u16 port, u32 val, u8 size)
+{
+       panic("%s() called", __FUNCTION__);
+       return 0;
+}
+#endif
+
+extern void machvec_setup (char **);
+extern void machvec_timer_interrupt (int, void *, struct pt_regs *);
+extern void machvec_dma_sync_single (struct device *, dma_addr_t, size_t, int);
+extern void machvec_dma_sync_sg (struct device *, struct scatterlist *, int, int);
+extern void machvec_tlb_migrate_finish (struct mm_struct *);
+
+# if defined (CONFIG_IA64_HP_SIM)
+#  include <asm/machvec_hpsim.h>
+# elif defined (CONFIG_IA64_DIG)
+#  include <asm/machvec_dig.h>
+# elif defined (CONFIG_IA64_HP_ZX1)
+#  include <asm/machvec_hpzx1.h>
+# elif defined (CONFIG_IA64_HP_ZX1_SWIOTLB)
+#  include <asm/machvec_hpzx1_swiotlb.h>
+# elif defined (CONFIG_IA64_SGI_SN2)
+#  include <asm/machvec_sn2.h>
+# elif defined (CONFIG_IA64_GENERIC)
+
+# ifdef MACHVEC_PLATFORM_HEADER
+#  include MACHVEC_PLATFORM_HEADER
+# else
+#  define platform_name                ia64_mv.name
+#  define platform_setup       ia64_mv.setup
+#  define platform_cpu_init    ia64_mv.cpu_init
+#  define platform_irq_init    ia64_mv.irq_init
+#  define platform_send_ipi    ia64_mv.send_ipi
+#  define platform_timer_interrupt     ia64_mv.timer_interrupt
+#  define platform_global_tlb_purge    ia64_mv.global_tlb_purge
+#  define platform_tlb_migrate_finish  ia64_mv.tlb_migrate_finish
+#  define platform_dma_init            ia64_mv.dma_init
+#  define platform_dma_alloc_coherent  ia64_mv.dma_alloc_coherent
+#  define platform_dma_free_coherent   ia64_mv.dma_free_coherent
+#  define platform_dma_map_single      ia64_mv.dma_map_single
+#  define platform_dma_unmap_single    ia64_mv.dma_unmap_single
+#  define platform_dma_map_sg          ia64_mv.dma_map_sg
+#  define platform_dma_unmap_sg                ia64_mv.dma_unmap_sg
+#  define platform_dma_sync_single_for_cpu ia64_mv.dma_sync_single_for_cpu
+#  define platform_dma_sync_sg_for_cpu ia64_mv.dma_sync_sg_for_cpu
+#  define platform_dma_sync_single_for_device ia64_mv.dma_sync_single_for_device
+#  define platform_dma_sync_sg_for_device ia64_mv.dma_sync_sg_for_device
+#  define platform_dma_mapping_error           ia64_mv.dma_mapping_error
+#  define platform_dma_supported       ia64_mv.dma_supported
+#  define platform_local_vector_to_irq ia64_mv.local_vector_to_irq
+#  define platform_pci_get_legacy_mem  ia64_mv.pci_get_legacy_mem
+#  define platform_pci_legacy_read     ia64_mv.pci_legacy_read
+#  define platform_pci_legacy_write    ia64_mv.pci_legacy_write
+#  define platform_inb         ia64_mv.inb
+#  define platform_inw         ia64_mv.inw
+#  define platform_inl         ia64_mv.inl
+#  define platform_outb                ia64_mv.outb
+#  define platform_outw                ia64_mv.outw
+#  define platform_outl                ia64_mv.outl
+#  define platform_mmiowb      ia64_mv.mmiowb
+#  define platform_readb        ia64_mv.readb
+#  define platform_readw        ia64_mv.readw
+#  define platform_readl        ia64_mv.readl
+#  define platform_readq        ia64_mv.readq
+#  define platform_readb_relaxed        ia64_mv.readb_relaxed
+#  define platform_readw_relaxed        ia64_mv.readw_relaxed
+#  define platform_readl_relaxed        ia64_mv.readl_relaxed
+#  define platform_readq_relaxed        ia64_mv.readq_relaxed
+# endif
+
+/* __attribute__((__aligned__(16))) is required to make size of the
+ * structure multiple of 16 bytes.
+ * This will fillup the holes created because of section 3.3.1 in
+ * Software Conventions guide.
+ */
+struct ia64_machine_vector {
+       const char *name;
+       ia64_mv_setup_t *setup;
+       ia64_mv_cpu_init_t *cpu_init;
+       ia64_mv_irq_init_t *irq_init;
+       ia64_mv_send_ipi_t *send_ipi;
+       ia64_mv_timer_interrupt_t *timer_interrupt;
+       ia64_mv_global_tlb_purge_t *global_tlb_purge;
+       ia64_mv_tlb_migrate_finish_t *tlb_migrate_finish;
+       ia64_mv_dma_init *dma_init;
+       ia64_mv_dma_alloc_coherent *dma_alloc_coherent;
+       ia64_mv_dma_free_coherent *dma_free_coherent;
+       ia64_mv_dma_map_single *dma_map_single;
+       ia64_mv_dma_unmap_single *dma_unmap_single;
+       ia64_mv_dma_map_sg *dma_map_sg;
+       ia64_mv_dma_unmap_sg *dma_unmap_sg;
+       ia64_mv_dma_sync_single_for_cpu *dma_sync_single_for_cpu;
+       ia64_mv_dma_sync_sg_for_cpu *dma_sync_sg_for_cpu;
+       ia64_mv_dma_sync_single_for_device *dma_sync_single_for_device;
+       ia64_mv_dma_sync_sg_for_device *dma_sync_sg_for_device;
+       ia64_mv_dma_mapping_error *dma_mapping_error;
+       ia64_mv_dma_supported *dma_supported;
+       ia64_mv_local_vector_to_irq *local_vector_to_irq;
+       ia64_mv_pci_get_legacy_mem_t *pci_get_legacy_mem;
+       ia64_mv_pci_legacy_read_t *pci_legacy_read;
+       ia64_mv_pci_legacy_write_t *pci_legacy_write;
+       ia64_mv_inb_t *inb;
+       ia64_mv_inw_t *inw;
+       ia64_mv_inl_t *inl;
+       ia64_mv_outb_t *outb;
+       ia64_mv_outw_t *outw;
+       ia64_mv_outl_t *outl;
+       ia64_mv_mmiowb_t *mmiowb;
+       ia64_mv_readb_t *readb;
+       ia64_mv_readw_t *readw;
+       ia64_mv_readl_t *readl;
+       ia64_mv_readq_t *readq;
+       ia64_mv_readb_relaxed_t *readb_relaxed;
+       ia64_mv_readw_relaxed_t *readw_relaxed;
+       ia64_mv_readl_relaxed_t *readl_relaxed;
+       ia64_mv_readq_relaxed_t *readq_relaxed;
+} __attribute__((__aligned__(16))); /* align attrib? see above comment */
+
+#define MACHVEC_INIT(name)                     \
+{                                              \
+       #name,                                  \
+       platform_setup,                         \
+       platform_cpu_init,                      \
+       platform_irq_init,                      \
+       platform_send_ipi,                      \
+       platform_timer_interrupt,               \
+       platform_global_tlb_purge,              \
+       platform_tlb_migrate_finish,            \
+       platform_dma_init,                      \
+       platform_dma_alloc_coherent,            \
+       platform_dma_free_coherent,             \
+       platform_dma_map_single,                \
+       platform_dma_unmap_single,              \
+       platform_dma_map_sg,                    \
+       platform_dma_unmap_sg,                  \
+       platform_dma_sync_single_for_cpu,       \
+       platform_dma_sync_sg_for_cpu,           \
+       platform_dma_sync_single_for_device,    \
+       platform_dma_sync_sg_for_device,        \
+       platform_dma_mapping_error,                     \
+       platform_dma_supported,                 \
+       platform_local_vector_to_irq,           \
+       platform_pci_get_legacy_mem,            \
+       platform_pci_legacy_read,               \
+       platform_pci_legacy_write,              \
+       platform_inb,                           \
+       platform_inw,                           \
+       platform_inl,                           \
+       platform_outb,                          \
+       platform_outw,                          \
+       platform_outl,                          \
+       platform_mmiowb,                        \
+       platform_readb,                         \
+       platform_readw,                         \
+       platform_readl,                         \
+       platform_readq,                         \
+       platform_readb_relaxed,                 \
+       platform_readw_relaxed,                 \
+       platform_readl_relaxed,                 \
+       platform_readq_relaxed,                 \
+}
+
+extern struct ia64_machine_vector ia64_mv;
+extern void machvec_init (const char *name);
+
+# else
+#  error Unknown configuration.  Update asm-ia64/machvec.h.
+# endif /* CONFIG_IA64_GENERIC */
+
+/*
+ * Declare default routines which aren't declared anywhere else:
+ */
+extern ia64_mv_dma_init                        swiotlb_init;
+extern ia64_mv_dma_alloc_coherent      swiotlb_alloc_coherent;
+extern ia64_mv_dma_free_coherent       swiotlb_free_coherent;
+extern ia64_mv_dma_map_single          swiotlb_map_single;
+extern ia64_mv_dma_unmap_single                swiotlb_unmap_single;
+extern ia64_mv_dma_map_sg              swiotlb_map_sg;
+extern ia64_mv_dma_unmap_sg            swiotlb_unmap_sg;
+extern ia64_mv_dma_sync_single_for_cpu swiotlb_sync_single_for_cpu;
+extern ia64_mv_dma_sync_sg_for_cpu     swiotlb_sync_sg_for_cpu;
+extern ia64_mv_dma_sync_single_for_device swiotlb_sync_single_for_device;
+extern ia64_mv_dma_sync_sg_for_device  swiotlb_sync_sg_for_device;
+extern ia64_mv_dma_mapping_error       swiotlb_dma_mapping_error;
+extern ia64_mv_dma_supported           swiotlb_dma_supported;
+
+/*
+ * Define default versions so we can extend machvec for new platforms without having
+ * to update the machvec files for all existing platforms.
+ */
+#ifndef platform_setup
+# define platform_setup                        machvec_setup
+#endif
+#ifndef platform_cpu_init
+# define platform_cpu_init             machvec_noop
+#endif
+#ifndef platform_irq_init
+# define platform_irq_init             machvec_noop
+#endif
+
+#ifndef platform_send_ipi
+# define platform_send_ipi             ia64_send_ipi   /* default to architected version */
+#endif
+#ifndef platform_timer_interrupt
+# define platform_timer_interrupt      machvec_timer_interrupt
+#endif
+#ifndef platform_global_tlb_purge
+# define platform_global_tlb_purge     ia64_global_tlb_purge /* default to architected version */
+#endif
+#ifndef platform_tlb_migrate_finish
+# define platform_tlb_migrate_finish   machvec_noop_mm
+#endif
+#ifndef platform_dma_init
+# define platform_dma_init             swiotlb_init
+#endif
+#ifndef platform_dma_alloc_coherent
+# define platform_dma_alloc_coherent   swiotlb_alloc_coherent
+#endif
+#ifndef platform_dma_free_coherent
+# define platform_dma_free_coherent    swiotlb_free_coherent
+#endif
+#ifndef platform_dma_map_single
+# define platform_dma_map_single       swiotlb_map_single
+#endif
+#ifndef platform_dma_unmap_single
+# define platform_dma_unmap_single     swiotlb_unmap_single
+#endif
+#ifndef platform_dma_map_sg
+# define platform_dma_map_sg           swiotlb_map_sg
+#endif
+#ifndef platform_dma_unmap_sg
+# define platform_dma_unmap_sg         swiotlb_unmap_sg
+#endif
+#ifndef platform_dma_sync_single_for_cpu
+# define platform_dma_sync_single_for_cpu      swiotlb_sync_single_for_cpu
+#endif
+#ifndef platform_dma_sync_sg_for_cpu
+# define platform_dma_sync_sg_for_cpu          swiotlb_sync_sg_for_cpu
+#endif
+#ifndef platform_dma_sync_single_for_device
+# define platform_dma_sync_single_for_device   swiotlb_sync_single_for_device
+#endif
+#ifndef platform_dma_sync_sg_for_device
+# define platform_dma_sync_sg_for_device       swiotlb_sync_sg_for_device
+#endif
+#ifndef platform_dma_mapping_error
+# define platform_dma_mapping_error            swiotlb_dma_mapping_error
+#endif
+#ifndef platform_dma_supported
+# define  platform_dma_supported       swiotlb_dma_supported
+#endif
+#ifndef platform_local_vector_to_irq
+# define platform_local_vector_to_irq  __ia64_local_vector_to_irq
+#endif
+#ifndef platform_pci_get_legacy_mem
+# define platform_pci_get_legacy_mem   ia64_pci_get_legacy_mem
+#endif
+#ifndef platform_pci_legacy_read
+# define platform_pci_legacy_read      ia64_pci_legacy_read
+#endif
+#ifndef platform_pci_legacy_write
+# define platform_pci_legacy_write     ia64_pci_legacy_write
+#endif
+#ifndef platform_inb
+# define platform_inb          __ia64_inb
+#endif
+#ifndef platform_inw
+# define platform_inw          __ia64_inw
+#endif
+#ifndef platform_inl
+# define platform_inl          __ia64_inl
+#endif
+#ifndef platform_outb
+# define platform_outb         __ia64_outb
+#endif
+#ifndef platform_outw
+# define platform_outw         __ia64_outw
+#endif
+#ifndef platform_outl
+# define platform_outl         __ia64_outl
+#endif
+#ifndef platform_mmiowb
+# define platform_mmiowb       __ia64_mmiowb
+#endif
+#ifndef platform_readb
+# define platform_readb                __ia64_readb
+#endif
+#ifndef platform_readw
+# define platform_readw                __ia64_readw
+#endif
+#ifndef platform_readl
+# define platform_readl                __ia64_readl
+#endif
+#ifndef platform_readq
+# define platform_readq                __ia64_readq
+#endif
+#ifndef platform_readb_relaxed
+# define platform_readb_relaxed        __ia64_readb_relaxed
+#endif
+#ifndef platform_readw_relaxed
+# define platform_readw_relaxed        __ia64_readw_relaxed
+#endif
+#ifndef platform_readl_relaxed
+# define platform_readl_relaxed        __ia64_readl_relaxed
+#endif
+#ifndef platform_readq_relaxed
+# define platform_readq_relaxed        __ia64_readq_relaxed
+#endif
+
+#endif /* _ASM_IA64_MACHVEC_H */
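The #ifndef block at the end of machvec.h is the extension mechanism: a platform header only defines the platform_* macros it overrides and inherits the swiotlb/architected defaults for the rest, while a CONFIG_IA64_GENERIC build routes every macro through the ia64_mv structure filled in by MACHVEC_INIT. A hypothetical header for a new platform, purely to illustrate how little must be supplied (every "foo" name is invented):

/* machvec_foo.h -- hypothetical platform, for illustration only. */
#ifndef _ASM_IA64_MACHVEC_FOO_h
#define _ASM_IA64_MACHVEC_FOO_h

extern ia64_mv_setup_t                 foo_setup;
extern ia64_mv_global_tlb_purge_t      foo_global_tlb_purge;

#define platform_name                  "foo"
#define platform_setup                 foo_setup
#define platform_global_tlb_purge      foo_global_tlb_purge
/* Everything left undefined here (DMA ops, in/out, readX, ...) picks up the
 * #ifndef defaults from machvec.h above. */

#endif /* _ASM_IA64_MACHVEC_FOO_h */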
diff -r 46c44b5e6a1b -r 80c5b5914b79 xen/include/asm-ia64/linux-xen/asm/machvec_dig.h
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-ia64/linux-xen/asm/machvec_dig.h  Wed Dec 20 14:55:02 2006 -0700
@@ -0,0 +1,46 @@
+#ifndef _ASM_IA64_MACHVEC_DIG_h
+#define _ASM_IA64_MACHVEC_DIG_h
+
+extern ia64_mv_setup_t dig_setup;
+
+/*
+ * This stuff has dual use!
+ *
+ * For a generic kernel, the macros are used to initialize the
+ * platform's machvec structure.  When compiling a non-generic kernel,
+ * the macros are used directly.
+ */
+#define platform_name          "dig"
+#ifdef XEN
+/*
+ * All the World is a PC .... yay! yay! yay!
+ */
+extern ia64_mv_setup_t hpsim_setup;
+#define platform_setup                         hpsim_setup
+
+#define platform_dma_init                      machvec_noop
+#define platform_dma_alloc_coherent            machvec_noop_dma_alloc_coherent
+#define platform_dma_free_coherent             machvec_noop_dma_free_coherent
+#define platform_dma_map_single                        machvec_noop_dma_map_single
+#define platform_dma_unmap_single              machvec_noop_dma_unmap_single
+#define platform_dma_map_sg                    machvec_noop_dma_map_sg
+#define platform_dma_unmap_sg                  machvec_noop_dma_unmap_sg
+#define platform_dma_sync_single_for_cpu       \
+       machvec_noop_dma_sync_single_for_cpu
+#define platform_dma_sync_sg_for_cpu           \
+       machvec_noop_dma_sync_sg_for_cpu
+#define platform_dma_sync_single_for_device    \
+       machvec_noop_dma_sync_single_for_device
+#define platform_dma_sync_sg_for_device                \
+       machvec_noop_dma_sync_sg_for_device
+#define platform_dma_mapping_error             machvec_noop_dma_mapping_error
+#define platform_dma_supported                 machvec_noop_dma_supported
+
+#define platform_pci_get_legacy_mem            machvec_noop_pci_get_legacy_mem
+#define platform_pci_legacy_read               machvec_noop_pci_legacy_read
+#define platform_pci_legacy_write              machvec_noop_pci_legacy_write
+#else
+#define platform_setup         dig_setup
+#endif
+
+#endif /* _ASM_IA64_MACHVEC_DIG_h */
diff -r 46c44b5e6a1b -r 80c5b5914b79 xen/include/asm-ia64/linux-xen/asm/machvec_hpzx1.h
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-ia64/linux-xen/asm/machvec_hpzx1.h        Wed Dec 20 14:55:02 2006 -0700
@@ -0,0 +1,66 @@
+#ifndef _ASM_IA64_MACHVEC_HPZX1_h
+#define _ASM_IA64_MACHVEC_HPZX1_h
+
+extern ia64_mv_setup_t                 dig_setup;
+extern ia64_mv_dma_alloc_coherent      sba_alloc_coherent;
+extern ia64_mv_dma_free_coherent       sba_free_coherent;
+extern ia64_mv_dma_map_single          sba_map_single;
+extern ia64_mv_dma_unmap_single                sba_unmap_single;
+extern ia64_mv_dma_map_sg              sba_map_sg;
+extern ia64_mv_dma_unmap_sg            sba_unmap_sg;
+extern ia64_mv_dma_supported           sba_dma_supported;
+extern ia64_mv_dma_mapping_error       sba_dma_mapping_error;
+
+/*
+ * This stuff has dual use!
+ *
+ * For a generic kernel, the macros are used to initialize the
+ * platform's machvec structure.  When compiling a non-generic kernel,
+ * the macros are used directly.
+ */
+#define platform_name                          "hpzx1"
+#ifdef XEN
+extern ia64_mv_setup_t hpsim_setup;
+extern ia64_mv_irq_init_t hpsim_irq_init;
+#define platform_setup                         hpsim_setup
+#define platform_irq_init                      hpsim_irq_init
+
+#define platform_dma_init                      machvec_noop
+#define platform_dma_alloc_coherent            machvec_noop_dma_alloc_coherent
+#define platform_dma_free_coherent             machvec_noop_dma_free_coherent
+#define platform_dma_map_single                        machvec_noop_dma_map_single
+#define platform_dma_unmap_single              machvec_noop_dma_unmap_single
+#define platform_dma_map_sg                    machvec_noop_dma_map_sg
+#define platform_dma_unmap_sg                  machvec_noop_dma_unmap_sg
+#define platform_dma_sync_single_for_cpu       \
+       machvec_noop_dma_sync_single_for_cpu
+#define platform_dma_sync_sg_for_cpu           \
+       machvec_noop_dma_sync_sg_for_cpu
+#define platform_dma_sync_single_for_device    \
+       machvec_noop_dma_sync_single_for_device
+#define platform_dma_sync_sg_for_device                \
+       machvec_noop_dma_sync_sg_for_device
+#define platform_dma_mapping_error             machvec_noop_dma_mapping_error
+#define platform_dma_supported                 machvec_noop_dma_supported
+
+#define platform_pci_get_legacy_mem            machvec_noop_pci_get_legacy_mem
+#define platform_pci_legacy_read               machvec_noop_pci_legacy_read
+#define platform_pci_legacy_write              machvec_noop_pci_legacy_write
+#else
+#define platform_setup                         dig_setup
+#define platform_dma_init                      machvec_noop
+#define platform_dma_alloc_coherent            sba_alloc_coherent
+#define platform_dma_free_coherent             sba_free_coherent
+#define platform_dma_map_single                        sba_map_single
+#define platform_dma_unmap_single              sba_unmap_single
+#define platform_dma_map_sg                    sba_map_sg
+#define platform_dma_unmap_sg                  sba_unmap_sg
+#define platform_dma_sync_single_for_cpu       machvec_dma_sync_single
+#define platform_dma_sync_sg_for_cpu           machvec_dma_sync_sg
+#define platform_dma_sync_single_for_device    machvec_dma_sync_single
+#define platform_dma_sync_sg_for_device                machvec_dma_sync_sg
+#define platform_dma_supported                 sba_dma_supported
+#define platform_dma_mapping_error             sba_dma_mapping_error
+#endif
+
+#endif /* _ASM_IA64_MACHVEC_HPZX1_h */
diff -r 46c44b5e6a1b -r 80c5b5914b79 xen/include/asm-ia64/linux-xen/asm/machvec_sn2.h
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-ia64/linux-xen/asm/machvec_sn2.h  Wed Dec 20 14:55:02 2006 -0700
@@ -0,0 +1,166 @@
+/*
+ * Copyright (c) 2002-2003,2006 Silicon Graphics, Inc.  All Rights Reserved.
+ * 
+ * This program is free software; you can redistribute it and/or modify it 
+ * under the terms of version 2 of the GNU General Public License 
+ * as published by the Free Software Foundation.
+ * 
+ * This program is distributed in the hope that it would be useful, but 
+ * WITHOUT ANY WARRANTY; without even the implied warranty of 
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
+ * 
+ * Further, this software is distributed without any warranty that it is 
+ * free of the rightful claim of any third person regarding infringement 
+ * or the like.  Any license provided herein, whether implied or 
+ * otherwise, applies only to this software file.  Patent licenses, if 
+ * any, provided herein do not apply to combinations of this program with 
+ * other software, or any other product whatsoever.
+ * 
+ * You should have received a copy of the GNU General Public 
+ * License along with this program; if not, write the Free Software 
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ * 
+ * For further information regarding this notice, see: 
+ * 
+ * http://oss.sgi.com/projects/GenInfo/NoticeExplan
+ */
+
+#ifndef _ASM_IA64_MACHVEC_SN2_H
+#define _ASM_IA64_MACHVEC_SN2_H
+
+extern ia64_mv_setup_t sn_setup;
+extern ia64_mv_cpu_init_t sn_cpu_init;
+extern ia64_mv_irq_init_t sn_irq_init;
+extern ia64_mv_send_ipi_t sn2_send_IPI;
+extern ia64_mv_timer_interrupt_t sn_timer_interrupt;
+extern ia64_mv_global_tlb_purge_t sn2_global_tlb_purge;
+extern ia64_mv_tlb_migrate_finish_t    sn_tlb_migrate_finish;
+extern ia64_mv_local_vector_to_irq sn_local_vector_to_irq;
+extern ia64_mv_pci_get_legacy_mem_t sn_pci_get_legacy_mem;
+extern ia64_mv_pci_legacy_read_t sn_pci_legacy_read;
+extern ia64_mv_pci_legacy_write_t sn_pci_legacy_write;
+extern ia64_mv_inb_t __sn_inb;
+extern ia64_mv_inw_t __sn_inw;
+extern ia64_mv_inl_t __sn_inl;
+extern ia64_mv_outb_t __sn_outb;
+extern ia64_mv_outw_t __sn_outw;
+extern ia64_mv_outl_t __sn_outl;
+extern ia64_mv_mmiowb_t __sn_mmiowb;
+extern ia64_mv_readb_t __sn_readb;
+extern ia64_mv_readw_t __sn_readw;
+extern ia64_mv_readl_t __sn_readl;
+extern ia64_mv_readq_t __sn_readq;
+extern ia64_mv_readb_t __sn_readb_relaxed;
+extern ia64_mv_readw_t __sn_readw_relaxed;
+extern ia64_mv_readl_t __sn_readl_relaxed;
+extern ia64_mv_readq_t __sn_readq_relaxed;
+extern ia64_mv_dma_alloc_coherent      sn_dma_alloc_coherent;
+extern ia64_mv_dma_free_coherent       sn_dma_free_coherent;
+extern ia64_mv_dma_map_single          sn_dma_map_single;
+extern ia64_mv_dma_unmap_single                sn_dma_unmap_single;
+extern ia64_mv_dma_map_sg              sn_dma_map_sg;
+extern ia64_mv_dma_unmap_sg            sn_dma_unmap_sg;
+extern ia64_mv_dma_sync_single_for_cpu sn_dma_sync_single_for_cpu;
+extern ia64_mv_dma_sync_sg_for_cpu     sn_dma_sync_sg_for_cpu;
+extern ia64_mv_dma_sync_single_for_device sn_dma_sync_single_for_device;
+extern ia64_mv_dma_sync_sg_for_device  sn_dma_sync_sg_for_device;
+extern ia64_mv_dma_mapping_error       sn_dma_mapping_error;
+extern ia64_mv_dma_supported           sn_dma_supported;
+#ifndef XEN
+extern ia64_mv_migrate_t               sn_migrate;
+extern ia64_mv_setup_msi_irq_t         sn_setup_msi_irq;
+extern ia64_mv_teardown_msi_irq_t      sn_teardown_msi_irq;
+#endif
+
+
+/*
+ * This stuff has dual use!
+ *
+ * For a generic kernel, the macros are used to initialize the
+ * platform's machvec structure.  When compiling a non-generic kernel,
+ * the macros are used directly.
+ */
+#define platform_name                  "sn2"
+#define platform_setup                 sn_setup
+#define platform_cpu_init              sn_cpu_init
+#define platform_irq_init              sn_irq_init
+#define platform_send_ipi              sn2_send_IPI
+#ifndef XEN
+#define platform_timer_interrupt       sn_timer_interrupt
+#endif
+#define platform_global_tlb_purge       sn2_global_tlb_purge
+#ifndef XEN
+#define platform_tlb_migrate_finish    sn_tlb_migrate_finish
+#endif
+#define platform_pci_fixup             sn_pci_fixup
+#define platform_inb                   __sn_inb
+#define platform_inw                   __sn_inw
+#define platform_inl                   __sn_inl
+#define platform_outb                  __sn_outb
+#define platform_outw                  __sn_outw
+#define platform_outl                  __sn_outl
+#define platform_mmiowb                        __sn_mmiowb
+#define platform_readb                 __sn_readb
+#define platform_readw                 __sn_readw
+#define platform_readl                 __sn_readl
+#define platform_readq                 __sn_readq
+#define platform_readb_relaxed         __sn_readb_relaxed
+#define platform_readw_relaxed         __sn_readw_relaxed
+#define platform_readl_relaxed         __sn_readl_relaxed
+#define platform_readq_relaxed         __sn_readq_relaxed
+#define platform_local_vector_to_irq   sn_local_vector_to_irq
+#ifdef XEN
+#define platform_pci_get_legacy_mem    machvec_noop_pci_get_legacy_mem
+#define platform_pci_legacy_read       machvec_noop_pci_legacy_read
+#define platform_pci_legacy_write      machvec_noop_pci_legacy_write
+#else
+#define platform_pci_get_legacy_mem    sn_pci_get_legacy_mem
+#define platform_pci_legacy_read       sn_pci_legacy_read
+#define platform_pci_legacy_write      sn_pci_legacy_write
+#endif
+#define platform_dma_init              machvec_noop
+#ifdef XEN
+#define platform_dma_alloc_coherent    machvec_noop_dma_alloc_coherent
+#define platform_dma_free_coherent     machvec_noop_dma_free_coherent
+#define platform_dma_map_single                machvec_noop_dma_map_single
+#define platform_dma_unmap_single      machvec_noop_dma_unmap_single
+#define platform_dma_map_sg            machvec_noop_dma_map_sg
+#define platform_dma_unmap_sg          machvec_noop_dma_unmap_sg
+#define platform_dma_sync_single_for_cpu       \
+       machvec_noop_dma_sync_single_for_cpu
+#define platform_dma_sync_sg_for_cpu           \
+       machvec_noop_dma_sync_sg_for_cpu
+#define platform_dma_sync_single_for_device    \
+       machvec_noop_dma_sync_single_for_device
+#define platform_dma_sync_sg_for_device        machvec_noop_dma_sync_sg_for_device
+#define platform_dma_mapping_error     machvec_noop_dma_mapping_error
+#define platform_dma_supported         machvec_noop_dma_supported
+#else
+#define platform_dma_alloc_coherent    sn_dma_alloc_coherent
+#define platform_dma_free_coherent     sn_dma_free_coherent
+#define platform_dma_map_single                sn_dma_map_single
+#define platform_dma_unmap_single      sn_dma_unmap_single
+#define platform_dma_map_sg            sn_dma_map_sg
+#define platform_dma_unmap_sg          sn_dma_unmap_sg
+#define platform_dma_sync_single_for_cpu sn_dma_sync_single_for_cpu
+#define platform_dma_sync_sg_for_cpu   sn_dma_sync_sg_for_cpu
+#define platform_dma_sync_single_for_device sn_dma_sync_single_for_device
+#define platform_dma_sync_sg_for_device        sn_dma_sync_sg_for_device
+#define platform_dma_mapping_error             sn_dma_mapping_error
+#define platform_dma_supported         sn_dma_supported
+#define platform_migrate               sn_migrate
+#endif
+
+#ifndef XEN
+#ifdef CONFIG_PCI_MSI
+#define platform_setup_msi_irq         sn_setup_msi_irq
+#define platform_teardown_msi_irq      sn_teardown_msi_irq
+#else
+#define platform_setup_msi_irq         ((ia64_mv_setup_msi_irq_t*)NULL)
+#define platform_teardown_msi_irq      ((ia64_mv_teardown_msi_irq_t*)NULL)
+#endif
+#endif
+
+#include <asm/sn/io.h>
+
+#endif /* _ASM_IA64_MACHVEC_SN2_H */
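On the Xen side this header is what makes the vhpt.c change earlier in the patch meaningful: with CONFIG_IA64_GENERIC now set in config.h, platform_global_tlb_purge dispatches through ia64_mv and reaches sn2_global_tlb_purge on SN2 hardware, while other machine vectors fall back to the architected ia64_global_tlb_purge. A minimal caller sketch (the wrapper name is illustrative):

/* Sketch: the same call the vhpt.c hunks now make.  A generic build resolves
 * it at boot via ia64_mv.global_tlb_purge; an SN2-only build binds the macro
 * directly to sn2_global_tlb_purge. */
static inline void example_purge_range(u64 vadr, u64 addr_range)
{
        platform_global_tlb_purge(vadr, vadr + addr_range, PAGE_SHIFT);
}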
diff -r 46c44b5e6a1b -r 80c5b5914b79 xen/include/asm-ia64/linux-xen/asm/page.h
--- a/xen/include/asm-ia64/linux-xen/asm/page.h Wed Dec 20 08:53:42 2006 -0700
+++ b/xen/include/asm-ia64/linux-xen/asm/page.h Wed Dec 20 14:55:02 2006 -0700
@@ -11,6 +11,16 @@
 
 #include <asm/intrinsics.h>
 #include <asm/types.h>
+
+#ifdef XEN  /* This will go away with newer upstream */
+#define RGN_SHIFT      61
+#define RGN_BASE(r)    (r << RGN_SHIFT)
+#define RGN_BITS       RGN_BASE(-1)
+#define RGN_HPAGE      REGION_HPAGE
+#ifndef CONFIG_HUGETLB_PAGE
+# define REGION_HPAGE  (4UL)
+#endif
+#endif
 
 /*
  * PAGE_SHIFT determines the actual kernel page size.
diff -r 46c44b5e6a1b -r 80c5b5914b79 xen/include/asm-ia64/linux-xen/asm/pci.h
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-ia64/linux-xen/asm/pci.h  Wed Dec 20 14:55:02 2006 -0700
@@ -0,0 +1,185 @@
+#ifndef _ASM_IA64_PCI_H
+#define _ASM_IA64_PCI_H
+
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#ifdef XEN
+#include <linux/ioport.h>
+#endif
+
+#include <asm/io.h>
+#ifndef XEN
+#include <asm/scatterlist.h>
+#endif
+
+/*
+ * Can be used to override the logic in pci_scan_bus for skipping already-configured bus
+ * numbers - to be used for buggy BIOSes or architectures with incomplete PCI setup by the
+ * loader.
+ */
+#define pcibios_assign_all_busses()     0
+#define pcibios_scan_all_fns(a, b)     0
+
+#define PCIBIOS_MIN_IO         0x1000
+#define PCIBIOS_MIN_MEM                0x10000000
+
+void pcibios_config_init(void);
+
+struct pci_dev;
+
+/*
+ * PCI_DMA_BUS_IS_PHYS should be set to 1 if there is _necessarily_ a direct correspondence
+ * between device bus addresses and CPU physical addresses.  Platforms with a hardware I/O
+ * MMU _must_ turn this off to suppress the bounce buffer handling code in the block and
+ * network device layers.  Platforms with separate bus address spaces _must_ turn this off
+ * and provide a device DMA mapping implementation that takes care of the necessary
+ * address translation.
+ *
+ * For now, the ia64 platforms which may have separate/multiple bus address spaces all
+ * have I/O MMUs which support the merging of physically discontiguous buffers, so we can
+ * use that as the sole factor to determine the setting of PCI_DMA_BUS_IS_PHYS.
+ */
+extern unsigned long ia64_max_iommu_merge_mask;
+#define PCI_DMA_BUS_IS_PHYS    (ia64_max_iommu_merge_mask == ~0UL)
+
+static inline void
+pcibios_set_master (struct pci_dev *dev)
+{
+       /* No special bus mastering setup handling */
+}
+
+static inline void
+pcibios_penalize_isa_irq (int irq, int active)
+{
+       /* We don't do dynamic PCI IRQ allocation */
+}
+
+#define HAVE_ARCH_PCI_MWI 1
+extern int pcibios_prep_mwi (struct pci_dev *);
+
+#ifndef XEN
+#include <asm-generic/pci-dma-compat.h>
+#endif
+
+/* pci_unmap_{single,page} is not a nop, thus... */
+#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)      \
+       dma_addr_t ADDR_NAME;
+#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)                \
+       __u32 LEN_NAME;
+#define pci_unmap_addr(PTR, ADDR_NAME)                 \
+       ((PTR)->ADDR_NAME)
+#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL)                \
+       (((PTR)->ADDR_NAME) = (VAL))
+#define pci_unmap_len(PTR, LEN_NAME)                   \
+       ((PTR)->LEN_NAME)
+#define pci_unmap_len_set(PTR, LEN_NAME, VAL)          \
+       (((PTR)->LEN_NAME) = (VAL))
+
+/* The ia64 platform always supports 64-bit addressing. */
+#define pci_dac_dma_supported(pci_dev, mask)           (1)
+#define pci_dac_page_to_dma(dev,pg,off,dir)            ((dma_addr_t) page_to_bus(pg) + (off))
+#define pci_dac_dma_to_page(dev,dma_addr)              (virt_to_page(bus_to_virt(dma_addr)))
+#define pci_dac_dma_to_offset(dev,dma_addr)            offset_in_page(dma_addr)
+#define pci_dac_dma_sync_single_for_cpu(dev,dma_addr,len,dir)  do { } while (0)
+#define pci_dac_dma_sync_single_for_device(dev,dma_addr,len,dir)       do { mb(); } while (0)
+
+#define sg_dma_len(sg)         ((sg)->dma_length)
+#define sg_dma_address(sg)     ((sg)->dma_address)
+
+#ifdef CONFIG_PCI
+static inline void pci_dma_burst_advice(struct pci_dev *pdev,
+                                       enum pci_dma_burst_strategy *strat,
+                                       unsigned long *strategy_parameter)
+{
+       unsigned long cacheline_size;
+       u8 byte;
+
+       pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &byte);
+       if (byte == 0)
+               cacheline_size = 1024;
+       else
+               cacheline_size = (int) byte * 4;
+
+       *strat = PCI_DMA_BURST_MULTIPLE;
+       *strategy_parameter = cacheline_size;
+}
+#endif
+
+#define HAVE_PCI_MMAP
+extern int pci_mmap_page_range (struct pci_dev *dev, struct vm_area_struct *vma,
+                               enum pci_mmap_state mmap_state, int write_combine);
+#define HAVE_PCI_LEGACY
+extern int pci_mmap_legacy_page_range(struct pci_bus *bus,
+                                     struct vm_area_struct *vma);
+#ifndef XEN
+extern ssize_t pci_read_legacy_io(struct kobject *kobj, char *buf, loff_t off,
+                                 size_t count);
+extern ssize_t pci_write_legacy_io(struct kobject *kobj, char *buf, loff_t off,
+                                  size_t count);
+extern int pci_mmap_legacy_mem(struct kobject *kobj,
+                              struct bin_attribute *attr,
+                              struct vm_area_struct *vma);
+#endif
+
+#define pci_get_legacy_mem platform_pci_get_legacy_mem
+#define pci_legacy_read platform_pci_legacy_read
+#define pci_legacy_write platform_pci_legacy_write
+
+struct pci_window {
+       struct resource resource;
+       u64 offset;
+};
+
+struct pci_controller {
+       void *acpi_handle;
+       void *iommu;
+       int segment;
+       int node;               /* nearest node with memory or -1 for global allocation */
+
+       unsigned int windows;
+       struct pci_window *window;
+
+       void *platform_data;
+};
+
+#define PCI_CONTROLLER(busdev) ((struct pci_controller *) busdev->sysdata)
+#define pci_domain_nr(busdev)    (PCI_CONTROLLER(busdev)->segment)
+
+extern struct pci_ops pci_root_ops;
+
+static inline int pci_proc_domain(struct pci_bus *bus)
+{
+       return (pci_domain_nr(bus) != 0);
+}
+
+static inline void pcibios_add_platform_entries(struct pci_dev *dev)
+{
+}
+
+extern void pcibios_resource_to_bus(struct pci_dev *dev,
+               struct pci_bus_region *region, struct resource *res);
+
+extern void pcibios_bus_to_resource(struct pci_dev *dev,
+               struct resource *res, struct pci_bus_region *region);
+
+#ifndef XEN
+static inline struct resource *
+pcibios_select_root(struct pci_dev *pdev, struct resource *res)
+{
+       struct resource *root = NULL;
+
+       if (res->flags & IORESOURCE_IO)
+               root = &ioport_resource;
+       if (res->flags & IORESOURCE_MEM)
+               root = &iomem_resource;
+
+       return root;
+}
+#endif
+
+#define pcibios_scan_all_fns(a, b)     0
+
+#endif /* _ASM_IA64_PCI_H */
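For reference, the pci_unmap_* bookkeeping macros above are consumed by drivers
that need to remember the DMA handle and length between map and unmap.  A
minimal, hypothetical sketch (struct and function names are illustrative and
not part of this patch) of the usual idiom:

        struct my_rx_desc {
                void *buf;
                DECLARE_PCI_UNMAP_ADDR(mapping)
                DECLARE_PCI_UNMAP_LEN(len)
        };

        /* Map a receive buffer and stash the handle/length for later unmap. */
        static void my_rx_map(struct pci_dev *pdev, struct my_rx_desc *d, size_t sz)
        {
                dma_addr_t dma = pci_map_single(pdev, d->buf, sz, PCI_DMA_FROMDEVICE);

                pci_unmap_addr_set(d, mapping, dma);
                pci_unmap_len_set(d, len, sz);
        }

        /* Undo the mapping using the values recorded above. */
        static void my_rx_unmap(struct pci_dev *pdev, struct my_rx_desc *d)
        {
                pci_unmap_single(pdev, pci_unmap_addr(d, mapping),
                                 pci_unmap_len(d, len), PCI_DMA_FROMDEVICE);
        }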
diff -r 46c44b5e6a1b -r 80c5b5914b79 xen/include/asm-ia64/linux-xen/asm/sn/README.origin
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-ia64/linux-xen/asm/sn/README.origin       Wed Dec 20 14:55:02 2006 -0700
@@ -0,0 +1,16 @@
+# Source files in this directory are near-identical copies of linux-2.6.19
+# files:
+
+# NOTE: ALL changes to these files should be clearly marked
+# (e.g. with #ifdef XEN or XEN in a comment) so that they can be
+# easily updated to future versions of the corresponding Linux files.
+
+addrs.h                        -> linux/include/asm-ia64/sn/addrs.h
+arch.h                 -> linux/include/asm-ia64/sn/arch.h
+hubdev.h               -> linux/arch/ia64/sn/include/xtalk/hubdev.h
+intr.h                 -> linux/include/asm-ia64/sn/intr.h
+io.h                   -> linux/include/asm-ia64/sn/io.h
+nodepda.h              -> linux/include/asm-ia64/sn/nodepda.h
+pcibr_provider.h       -> linux/include/asm-ia64/sn/pcibr_provider.h
+rw_mmr.h               -> linux/include/asm-ia64/sn/rw_mmr.h
+types.h                        -> linux/include/asm-ia64/sn/types.h
diff -r 46c44b5e6a1b -r 80c5b5914b79 xen/include/asm-ia64/linux-xen/asm/sn/addrs.h
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-ia64/linux-xen/asm/sn/addrs.h     Wed Dec 20 14:55:02 2006 -0700
@@ -0,0 +1,299 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 1992-1999,2001-2005 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#ifndef _ASM_IA64_SN_ADDRS_H
+#define _ASM_IA64_SN_ADDRS_H
+
+#include <asm/percpu.h>
+#include <asm/sn/types.h>
+#include <asm/sn/arch.h>
+#include <asm/sn/pda.h>
+
+/*
+ *  Memory/SHUB Address Format:
+ *  +-+---------+--+--------------+
+ *  |0|  NASID  |AS| NodeOffset   |
+ *  +-+---------+--+--------------+
+ *
+ *  NASID: (low NASID bit is 0) Memory and SHUB MMRs
+ *   AS: 2-bit Address Space Identifier. Used only if low NASID bit is 0
+ *     00: Local Resources and MMR space
+ *           Top bit of NodeOffset
+ *               0: Local resources space
+ *                  node id:
+ *                        0: IA64/NT compatibility space
+ *                        2: Local MMR Space
+ *                        4: Local memory, regardless of local node id
+ *               1: Global MMR space
+ *     01: GET space.
+ *     10: AMO space.
+ *     11: Cacheable memory space.
+ *
+ *   NodeOffset: byte offset
+ *
+ *
+ *  TIO address format:
+ *  +-+----------+--+--------------+
+ *  |0|  NASID   |AS| Nodeoffset   |
+ *  +-+----------+--+--------------+
+ *
+ *  NASID: (low NASID bit is 1) TIO
+ *   AS: 2-bit Chiplet Identifier
+ *     00: TIO LB (Indicates TIO MMR access.)
+ *     01: TIO ICE (indicates coretalk space access.)
+ * 
+ *   NodeOffset: top bit must be set.
+ *
+ *
+ * Note that in both of the above address formats, the low
+ * NASID bit indicates if the reference is to the SHUB or TIO MMRs.
+ */
+
+
+/*
+ * Define basic shift & mask constants for manipulating NASIDs and AS values.
+ */
+#define NASID_BITMASK          (sn_hub_info->nasid_bitmask)
+#define NASID_SHIFT            (sn_hub_info->nasid_shift)
+#define AS_SHIFT               (sn_hub_info->as_shift)
+#define AS_BITMASK             0x3UL
+
+#define NASID_MASK              ((u64)NASID_BITMASK << NASID_SHIFT)
+#define AS_MASK                        ((u64)AS_BITMASK << AS_SHIFT)
+
+
+/*
+ * AS values. These are the same on both SHUB1 & SHUB2.
+ */
+#define AS_GET_VAL             1UL
+#define AS_AMO_VAL             2UL
+#define AS_CAC_VAL             3UL
+#define AS_GET_SPACE           (AS_GET_VAL << AS_SHIFT)
+#define AS_AMO_SPACE           (AS_AMO_VAL << AS_SHIFT)
+#define AS_CAC_SPACE           (AS_CAC_VAL << AS_SHIFT)
+
+
+/* 
+ * Virtual Mode Local & Global MMR space.  
+ */
+#define SH1_LOCAL_MMR_OFFSET   0x8000000000UL
+#define SH2_LOCAL_MMR_OFFSET   0x0200000000UL
+#define LOCAL_MMR_OFFSET       (is_shub2() ? SH2_LOCAL_MMR_OFFSET : SH1_LOCAL_MMR_OFFSET)
+#define LOCAL_MMR_SPACE                (__IA64_UNCACHED_OFFSET | LOCAL_MMR_OFFSET)
+#define LOCAL_PHYS_MMR_SPACE   (RGN_BASE(RGN_HPAGE) | LOCAL_MMR_OFFSET)
+
+#define SH1_GLOBAL_MMR_OFFSET  0x0800000000UL
+#define SH2_GLOBAL_MMR_OFFSET  0x0300000000UL
+#define GLOBAL_MMR_OFFSET      (is_shub2() ? SH2_GLOBAL_MMR_OFFSET : SH1_GLOBAL_MMR_OFFSET)
+#define GLOBAL_MMR_SPACE       (__IA64_UNCACHED_OFFSET | GLOBAL_MMR_OFFSET)
+
+/*
+ * Physical mode addresses
+ */
+#define GLOBAL_PHYS_MMR_SPACE  (RGN_BASE(RGN_HPAGE) | GLOBAL_MMR_OFFSET)
+
+
+/*
+ * Clear region & AS bits.
+ */
+#define TO_PHYS_MASK           (~(RGN_BITS | AS_MASK))
+
+
+/*
+ * Misc NASID manipulation.
+ */
+#define NASID_SPACE(n)         ((u64)(n) << NASID_SHIFT)
+#define REMOTE_ADDR(n,a)       (NASID_SPACE(n) | (a))
+#define NODE_OFFSET(x)         ((x) & (NODE_ADDRSPACE_SIZE - 1))
+#define NODE_ADDRSPACE_SIZE     (1UL << AS_SHIFT)
+#define NASID_GET(x)           (int) (((u64) (x) >> NASID_SHIFT) & NASID_BITMASK)
+#define LOCAL_MMR_ADDR(a)      (LOCAL_MMR_SPACE | (a))
+#define GLOBAL_MMR_ADDR(n,a)   (GLOBAL_MMR_SPACE | REMOTE_ADDR(n,a))
+#define GLOBAL_MMR_PHYS_ADDR(n,a) (GLOBAL_PHYS_MMR_SPACE | REMOTE_ADDR(n,a))
+#define GLOBAL_CAC_ADDR(n,a)   (CAC_BASE | REMOTE_ADDR(n,a))
+#define CHANGE_NASID(n,x)      ((void *)(((u64)(x) & ~NASID_MASK) | NASID_SPACE(n)))
+#define IS_TIO_NASID(n)                ((n) & 1)
+
+
+/* non-II mmr's start at top of big window space (4G) */
+#define BWIN_TOP               0x0000000100000000UL
+
+/*
+ * general address defines
+ */
+#define CAC_BASE               (PAGE_OFFSET | AS_CAC_SPACE)
+#define AMO_BASE               (__IA64_UNCACHED_OFFSET | AS_AMO_SPACE)
+#define AMO_PHYS_BASE          (RGN_BASE(RGN_HPAGE) | AS_AMO_SPACE)
+#define GET_BASE               (PAGE_OFFSET | AS_GET_SPACE)
+
+/*
+ * Convert Memory addresses between various addressing modes.
+ */
+#define TO_PHYS(x)             (TO_PHYS_MASK & (x))
+#define TO_CAC(x)              (CAC_BASE     | TO_PHYS(x))
+#if defined(CONFIG_SGI_SN) || defined(XEN)
+#define TO_AMO(x)              (AMO_BASE     | TO_PHYS(x))
+#define TO_GET(x)              (GET_BASE     | TO_PHYS(x))
+#else
+#define TO_AMO(x)              ({ BUG(); x; })
+#define TO_GET(x)              ({ BUG(); x; })
+#endif
+
+/*
+ * Convert from processor physical address to II/TIO physical address:
+ *     II - squeeze out the AS bits
+ *     TIO- requires a chiplet id in bits 38-39.  For DMA to memory,
+ *           the chiplet id is zero.  If we implement TIO-TIO dma, we might need
+ *           to insert a chiplet id into this macro.  However, it is our belief
+ *           right now that this chiplet id will be ICE, which is also zero.
+ */
+#define SH1_TIO_PHYS_TO_DMA(x)                                                 \
+       ((((u64)(NASID_GET(x))) << 40) | NODE_OFFSET(x))
+
+#define SH2_NETWORK_BANK_OFFSET(x)                                     \
+        ((u64)(x) & ((1UL << (sn_hub_info->nasid_shift - 4)) -1))
+
+#define SH2_NETWORK_BANK_SELECT(x)                                     \
+        ((((u64)(x) & (0x3UL << (sn_hub_info->nasid_shift - 4)))       \
+               >> (sn_hub_info->nasid_shift - 4)) << 36)
+
+#define SH2_NETWORK_ADDRESS(x)                                                 \
+       (SH2_NETWORK_BANK_OFFSET(x) | SH2_NETWORK_BANK_SELECT(x))
+
+#define SH2_TIO_PHYS_TO_DMA(x)                                                 \
+        (((u64)(NASID_GET(x)) << 40) |         SH2_NETWORK_ADDRESS(x))
+
+#define PHYS_TO_TIODMA(x)                                              \
+       (is_shub1() ? SH1_TIO_PHYS_TO_DMA(x) : SH2_TIO_PHYS_TO_DMA(x))
+
+#define PHYS_TO_DMA(x)                                                 \
+       ((((u64)(x) & NASID_MASK) >> 2) | NODE_OFFSET(x))
+
+
+/*
+ * Macros to test for address type.
+ */
+#define IS_AMO_ADDRESS(x)      (((u64)(x) & (RGN_BITS | AS_MASK)) == AMO_BASE)
+#define IS_AMO_PHYS_ADDRESS(x) (((u64)(x) & (RGN_BITS | AS_MASK)) == AMO_PHYS_BASE)
+
+
+/*
+ * The following definitions pertain to the IO special address
+ * space.  They define the location of the big and little windows
+ * of any given node.
+ */
+#define BWIN_SIZE_BITS                 29      /* big window size: 512M */
+#define TIO_BWIN_SIZE_BITS             30      /* big window size: 1G */
+#define NODE_SWIN_BASE(n, w)           ((w == 0) ? NODE_BWIN_BASE((n), SWIN0_BIGWIN) \
+               : RAW_NODE_SWIN_BASE(n, w))
+#define TIO_SWIN_BASE(n, w)            (TIO_IO_BASE(n) + \
+                                           ((u64) (w) << TIO_SWIN_SIZE_BITS))
+#define NODE_IO_BASE(n)                        (GLOBAL_MMR_SPACE | NASID_SPACE(n))
+#define TIO_IO_BASE(n)                  (__IA64_UNCACHED_OFFSET | NASID_SPACE(n))
+#define BWIN_SIZE                      (1UL << BWIN_SIZE_BITS)
+#define NODE_BWIN_BASE0(n)             (NODE_IO_BASE(n) + BWIN_SIZE)
+#define NODE_BWIN_BASE(n, w)           (NODE_BWIN_BASE0(n) + ((u64) (w) << BWIN_SIZE_BITS))
+#define RAW_NODE_SWIN_BASE(n, w)       (NODE_IO_BASE(n) + ((u64) (w) << SWIN_SIZE_BITS))
+#define BWIN_WIDGET_MASK               0x7
+#define BWIN_WINDOWNUM(x)              (((x) >> BWIN_SIZE_BITS) & BWIN_WIDGET_MASK)
+#define SH1_IS_BIG_WINDOW_ADDR(x)      ((x) & BWIN_TOP)
+
+#define TIO_BWIN_WINDOW_SELECT_MASK    0x7
+#define TIO_BWIN_WINDOWNUM(x)          (((x) >> TIO_BWIN_SIZE_BITS) & TIO_BWIN_WINDOW_SELECT_MASK)
+
+#define TIO_HWIN_SHIFT_BITS            33
+#define TIO_HWIN(x)                    (NODE_OFFSET(x) >> TIO_HWIN_SHIFT_BITS)
+
+/*
+ * The following definitions pertain to the IO special address
+ * space.  They define the location of the big and little windows
+ * of any given node.
+ */
+
+#define SWIN_SIZE_BITS                 24
+#define        SWIN_WIDGET_MASK                0xF
+
+#define TIO_SWIN_SIZE_BITS             28
+#define TIO_SWIN_SIZE                  (1UL << TIO_SWIN_SIZE_BITS)
+#define TIO_SWIN_WIDGET_MASK           0x3
+
+/*
+ * Convert smallwindow address to xtalk address.
+ *
+ * 'addr' can be physical or virtual address, but will be converted
+ * to Xtalk address in the range 0 -> SWINZ_SIZEMASK
+ */
+#define        SWIN_WIDGETNUM(x)               (((x)  >> SWIN_SIZE_BITS) & SWIN_WIDGET_MASK)
+#define TIO_SWIN_WIDGETNUM(x)          (((x)  >> TIO_SWIN_SIZE_BITS) & TIO_SWIN_WIDGET_MASK)
+
+
+/*
+ * The following macros produce the correct base virtual address for
+ * the hub registers. The REMOTE_HUB_* macro produce
+ * the address for the specified hub's registers.  The intent is
+ * that the appropriate PI, MD, NI, or II register would be substituted
+ * for x.
+ *
+ *   WARNING:
+ *     When certain Hub chip workarounds are defined, it's not sufficient
+ *     to dereference the *_HUB_ADDR() macros.  You should instead use
+ *     HUB_L() and HUB_S() if you must deal with pointers to hub registers.
+ *     Otherwise, the recommended approach is to use *_HUB_L() and *_HUB_S().
+ *     They're always safe.
+ */
+/* Shub1 TIO & MMR addressing macros */
+#define SH1_TIO_IOSPACE_ADDR(n,x)                                      \
+       GLOBAL_MMR_ADDR(n,x)
+
+#define SH1_REMOTE_BWIN_MMR(n,x)                                       \
+       GLOBAL_MMR_ADDR(n,x)
+
+#define SH1_REMOTE_SWIN_MMR(n,x)                                       \
+       (NODE_SWIN_BASE(n,1) + 0x800000UL + (x))
+
+#define SH1_REMOTE_MMR(n,x)                                            \
+       (SH1_IS_BIG_WINDOW_ADDR(x) ? SH1_REMOTE_BWIN_MMR(n,x) :         \
+               SH1_REMOTE_SWIN_MMR(n,x))
+
+/* Shub1 TIO & MMR addressing macros */
+#define SH2_TIO_IOSPACE_ADDR(n,x)                                      \
+       ((__IA64_UNCACHED_OFFSET | REMOTE_ADDR(n,x) | 1UL << (NASID_SHIFT - 2)))
+
+#define SH2_REMOTE_MMR(n,x)                                            \
+       GLOBAL_MMR_ADDR(n,x)
+
+
+/* TIO & MMR addressing macros that work on both shub1 & shub2 */
+#define TIO_IOSPACE_ADDR(n,x)                                          \
+       ((u64 *)(is_shub1() ? SH1_TIO_IOSPACE_ADDR(n,x) :               \
+                SH2_TIO_IOSPACE_ADDR(n,x)))
+
+#define SH_REMOTE_MMR(n,x)                                             \
+       (is_shub1() ? SH1_REMOTE_MMR(n,x) : SH2_REMOTE_MMR(n,x))
+
+#define REMOTE_HUB_ADDR(n,x)                                           \
+       (IS_TIO_NASID(n) ?  ((volatile u64*)TIO_IOSPACE_ADDR(n,x)) :    \
+        ((volatile u64*)SH_REMOTE_MMR(n,x)))
+
+
+#define HUB_L(x)                       (*((volatile typeof(*x) *)x))
+#define        HUB_S(x,d)                      (*((volatile typeof(*x) *)x) = (d))
+
+#define REMOTE_HUB_L(n, a)             HUB_L(REMOTE_HUB_ADDR((n), (a)))
+#define REMOTE_HUB_S(n, a, d)          HUB_S(REMOTE_HUB_ADDR((n), (a)), (d))
+
+/*
+ * Coretalk address breakdown
+ */
+#define CTALK_NASID_SHFT               40
+#define CTALK_NASID_MASK               (0x3FFFULL << CTALK_NASID_SHFT)
+#define CTALK_CID_SHFT                 38
+#define CTALK_CID_MASK                 (0x3ULL << CTALK_CID_SHFT)
+#define CTALK_NODE_OFFSET              0x3FFFFFFFFF
+
+#endif /* _ASM_IA64_SN_ADDRS_H */
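As a hedged illustration of how the NASID/MMR address macros above compose (the
function name and register offset are placeholders, not part of this patch):

        /* Illustrative only: read an MMR on a remote node.  REMOTE_HUB_ADDR()
         * picks the TIO or SHUB MMR space from the low nasid bit and folds the
         * nasid into the global virtual address; HUB_L() performs the volatile
         * load.  REMOTE_HUB_L() is the one-step shorthand used here. */
        static u64 example_read_remote_mmr(int nasid)
        {
                const u64 reg_offset = 0x110000UL;      /* hypothetical offset */

                return REMOTE_HUB_L(nasid, reg_offset);
        }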
diff -r 46c44b5e6a1b -r 80c5b5914b79 xen/include/asm-ia64/linux-xen/asm/sn/arch.h
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-ia64/linux-xen/asm/sn/arch.h      Wed Dec 20 14:55:02 2006 -0700
@@ -0,0 +1,92 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * SGI specific setup.
+ *
+ * Copyright (C) 1995-1997,1999,2001-2005 Silicon Graphics, Inc.  All rights reserved.
+ * Copyright (C) 1999 Ralf Baechle (ralf@xxxxxxx)
+ */
+#ifndef _ASM_IA64_SN_ARCH_H
+#define _ASM_IA64_SN_ARCH_H
+
+#ifndef XEN
+#include <linux/numa.h>
+#include <asm/types.h>
+#include <asm/percpu.h>
+#include <asm/sn/types.h>
+#endif
+#include <asm/sn/sn_cpuid.h>
+
+/*
+ * This is the maximum number of NUMALINK nodes that can be part of a single
+ * SSI kernel. This number includes C-bricks, M-bricks, and TIOs. Nodes in
+ * remote partitions are NOT included in this number.
+ * The number of compact nodes cannot exceed the size of a coherency domain.
+ * The purpose of this define is to specify a node count that includes
+ * all C/M/TIO nodes in an SSI system.
+ *
+ * SGI systems can currently support up to 256 C/M nodes plus additional TIO nodes.
+ *
+ *     Note: ACPI20 has an architectural limit of 256 nodes. When we upgrade
+ *     to ACPI3.0, this limit will be removed. The notion of "compact nodes"
+ *     should be deleted and TIOs should be included in MAX_NUMNODES.
+ */
+#define MAX_TIO_NODES          MAX_NUMNODES
+#define MAX_COMPACT_NODES      (MAX_NUMNODES + MAX_TIO_NODES)
+
+/*
+ * Maximum number of nodes in all partitions and in all coherency domains.
+ * This is the total number of nodes accessible in the numalink fabric. It
+ * includes all C & M bricks, plus all TIOs.
+ *
+ * This value is also the value of the maximum number of NASIDs in the numalink
+ * fabric.
+ */
+#define MAX_NUMALINK_NODES     16384
+
+/*
+ * The following defines attributes of the HUB chip. These attributes are
+ * frequently referenced. They are kept in the per-cpu data areas of each cpu.
+ * They are kept together in a struct to minimize cache misses.
+ */
+struct sn_hub_info_s {
+       u8 shub2;
+       u8 nasid_shift;
+       u8 as_shift;
+       u8 shub_1_1_found;
+       u16 nasid_bitmask;
+};
+DECLARE_PER_CPU(struct sn_hub_info_s, __sn_hub_info);
+#define sn_hub_info    (&__get_cpu_var(__sn_hub_info))
+#ifndef XEN
+#define is_shub2()     (sn_hub_info->shub2)
+#define is_shub1()     (sn_hub_info->shub2 == 0)
+#else
+#define is_shub2()     0
+#define is_shub1()     1
+#endif
+
+/*
+ * Use this macro to test if shub 1.1 workarounds (WARs) should be enabled
+ */
+#define enable_shub_wars_1_1() (sn_hub_info->shub_1_1_found)
+
+
+/*
+ * Compact node ID to nasid mappings kept in the per-cpu data areas of each
+ * cpu.
+ */
+DECLARE_PER_CPU(short, __sn_cnodeid_to_nasid[MAX_COMPACT_NODES]);
+#define sn_cnodeid_to_nasid    (&__get_cpu_var(__sn_cnodeid_to_nasid[0]))
+
+#ifndef XEN
+extern u8 sn_partition_id;
+extern u8 sn_system_size;
+extern u8 sn_sharing_domain_size;
+extern u8 sn_region_size;
+
+extern void sn_flush_all_caches(long addr, long bytes);
+#endif
+#endif /* _ASM_IA64_SN_ARCH_H */
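A small hedged sketch of how the per-cpu hub attributes above are typically
consumed (the function name is hypothetical and not part of this patch):

        /* Illustrative only: recover the NASID from a physical address using
         * the shift and mask published in this cpu's sn_hub_info. */
        static int example_paddr_to_nasid(unsigned long paddr)
        {
                return (int)((paddr >> sn_hub_info->nasid_shift) &
                             sn_hub_info->nasid_bitmask);
        }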
diff -r 46c44b5e6a1b -r 80c5b5914b79 xen/include/asm-ia64/linux-xen/asm/sn/hubdev.h
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-ia64/linux-xen/asm/sn/hubdev.h    Wed Dec 20 14:55:02 2006 -0700
@@ -0,0 +1,95 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000-2005 Silicon Graphics, Inc. All rights reserved.
+ */
+#ifndef _ASM_IA64_SN_XTALK_HUBDEV_H
+#define _ASM_IA64_SN_XTALK_HUBDEV_H
+
+#ifndef XEN
+#include "xtalk/xwidgetdev.h"
+#else
+#include <asm/sn/xwidgetdev.h>
+#endif
+
+#define HUB_WIDGET_ID_MAX 0xf
+#define DEV_PER_WIDGET (2*2*8)
+#define IIO_ITTE_WIDGET_BITS    4       /* size of widget field */
+#define IIO_ITTE_WIDGET_MASK    ((1<<IIO_ITTE_WIDGET_BITS)-1)
+#define IIO_ITTE_WIDGET_SHIFT   8
+
+#define IIO_ITTE_WIDGET(itte)  \
+       (((itte) >> IIO_ITTE_WIDGET_SHIFT) & IIO_ITTE_WIDGET_MASK)
+
+/*
+ * Use the top big window as a surrogate for the first small window
+ */
+#define SWIN0_BIGWIN            HUB_NUM_BIG_WINDOW
+#define IIO_NUM_ITTES   7
+#define HUB_NUM_BIG_WINDOW      (IIO_NUM_ITTES - 1)
+
+/* This struct is shared between the PROM and the kernel.
+ * Changes to this struct will require corresponding changes to the kernel.
+ */
+struct sn_flush_device_common {
+       int sfdl_bus;
+       int sfdl_slot;
+       int sfdl_pin;
+       struct common_bar_list {
+               unsigned long start;
+               unsigned long end;
+       } sfdl_bar_list[6];
+       unsigned long sfdl_force_int_addr;
+       unsigned long sfdl_flush_value;
+       volatile unsigned long *sfdl_flush_addr;
+       u32 sfdl_persistent_busnum;
+       u32 sfdl_persistent_segment;
+       struct pcibus_info *sfdl_pcibus_info;
+};
+
+/* This struct is kernel only and is not used by the PROM */
+struct sn_flush_device_kernel {
+       spinlock_t sfdl_flush_lock;
+       struct sn_flush_device_common *common;
+};
+
+/* 01/16/06 This struct is the old PROM/kernel struct and needs to be included
+ * for older official PROMs to function on the new kernel base.  This struct
+ * will be removed when the next official PROM release occurs. */
+
+struct sn_flush_device_war {
+       struct sn_flush_device_common common;
+       u32 filler; /* older PROMs expect the default size of a spinlock_t */
+};
+
+/*
+ * **widget_p - Used as an array[wid_num][device] of sn_flush_device_kernel.
+ */
+struct sn_flush_nasid_entry  {
+       struct sn_flush_device_kernel **widget_p; // Used as an array of wid_num
+       u64 iio_itte[8];
+};
+
+struct hubdev_info {
+       geoid_t                         hdi_geoid;
+       short                           hdi_nasid;
+       short                           hdi_peer_nasid;   /* Dual Porting Peer */
+
+       struct sn_flush_nasid_entry     hdi_flush_nasid_list;
+       struct xwidget_info             hdi_xwidget_info[HUB_WIDGET_ID_MAX + 1];
+
+
+       void                            *hdi_nodepda;
+       void                            *hdi_node_vertex;
+       u32                             max_segment_number;
+       u32                             max_pcibus_number;
+};
+
+extern void hubdev_init_node(nodepda_t *, cnodeid_t);
+extern void hub_error_init(struct hubdev_info *);
+extern void ice_error_init(struct hubdev_info *);
+
+
+#endif /* _ASM_IA64_SN_XTALK_HUBDEV_H */
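For context, a hedged one-liner showing how the ITTE helper above is meant to
be used (the function name is hypothetical):

        /* Illustrative only: extract the widget number from a stored ITTE
         * entry in a node's flush list. */
        static int example_itte_widget(struct sn_flush_nasid_entry *e, int i)
        {
                return IIO_ITTE_WIDGET(e->iio_itte[i]);
        }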
diff -r 46c44b5e6a1b -r 80c5b5914b79 xen/include/asm-ia64/linux-xen/asm/sn/intr.h
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-ia64/linux-xen/asm/sn/intr.h      Wed Dec 20 14:55:02 2006 -0700
@@ -0,0 +1,73 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000-2006 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#ifndef _ASM_IA64_SN_INTR_H
+#define _ASM_IA64_SN_INTR_H
+
+#ifndef XEN
+#include <linux/rcupdate.h>
+#else
+#include <linux/list.h>
+#endif
+#include <asm/sn/types.h>
+
+#define SGI_UART_VECTOR                0xe9
+
+/* Reserved IRQs : Note, not to exceed IA64_SN2_FIRST_DEVICE_VECTOR */
+#define SGI_XPC_ACTIVATE       0x30
+#define SGI_II_ERROR           0x31
+#define SGI_XBOW_ERROR         0x32
+#define SGI_PCIASIC_ERROR      0x33
+#define SGI_ACPI_SCI_INT       0x34
+#define SGI_TIOCA_ERROR                0x35
+#define SGI_TIO_ERROR          0x36
+#define SGI_TIOCX_ERROR                0x37
+#define SGI_MMTIMER_VECTOR     0x38
+#define SGI_XPC_NOTIFY         0xe7
+
+#define IA64_SN2_FIRST_DEVICE_VECTOR   0x3c
+#define IA64_SN2_LAST_DEVICE_VECTOR    0xe6
+
+#define SN2_IRQ_RESERVED       0x1
+#define SN2_IRQ_CONNECTED      0x2
+#define SN2_IRQ_SHARED         0x4
+
+// The SN PROM irq struct
+struct sn_irq_info {
+       struct sn_irq_info *irq_next;   /* deprecated DO NOT USE     */
+       short           irq_nasid;      /* Nasid IRQ is assigned to  */
+       int             irq_slice;      /* slice IRQ is assigned to  */
+       int             irq_cpuid;      /* kernel logical cpuid      */
+       int             irq_irq;        /* the IRQ number */
+       int             irq_int_bit;    /* Bridge interrupt pin */
+                                       /* <0 means MSI */
+       u64     irq_xtalkaddr;  /* xtalkaddr IRQ is sent to  */
+       int             irq_bridge_type;/* pciio asic type (pciio.h) */
+       void           *irq_bridge;     /* bridge generating irq     */
+       void           *irq_pciioinfo;  /* associated pciio_info_t   */
+       int             irq_last_intr;  /* For Shub lb lost intr WAR */
+       int             irq_cookie;     /* unique cookie             */
+       int             irq_flags;      /* flags */
+       int             irq_share_cnt;  /* num devices sharing IRQ   */
+       struct list_head        list;   /* list of sn_irq_info structs */
+#ifndef XEN
+       struct rcu_head         rcu;    /* rcu callback list */
+#endif
+};
+
+extern void sn_send_IPI_phys(int, long, int, int);
+extern u64 sn_intr_alloc(nasid_t, int,
+                             struct sn_irq_info *,
+                             int, nasid_t, int);
+extern void sn_intr_free(nasid_t, int, struct sn_irq_info *);
+extern struct sn_irq_info *sn_retarget_vector(struct sn_irq_info *, nasid_t, int);
+extern struct list_head **sn_irq_lh;
+
+#define CPU_VECTOR_TO_IRQ(cpuid,vector) (vector)
+
+#endif /* _ASM_IA64_SN_INTR_H */
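As a hedged illustration of the SN2_IRQ_* flag bits above (the function name is
hypothetical, not part of this patch):

        /* Illustrative only: report whether a PROM irq descriptor is both
         * connected and shared. */
        static int example_irq_is_shared(struct sn_irq_info *info)
        {
                int want = SN2_IRQ_CONNECTED | SN2_IRQ_SHARED;

                return (info->irq_flags & want) == want;
        }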
diff -r 46c44b5e6a1b -r 80c5b5914b79 xen/include/asm-ia64/linux-xen/asm/sn/io.h
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-ia64/linux-xen/asm/sn/io.h        Wed Dec 20 14:55:02 2006 -0700
@@ -0,0 +1,281 @@
+/* 
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2000-2004 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#ifndef _ASM_SN_IO_H
+#define _ASM_SN_IO_H
+#include <linux/compiler.h>
+#include <asm/intrinsics.h>
+
+extern void * sn_io_addr(unsigned long port) __attribute_const__; /* Forward definition */
+extern void __sn_mmiowb(void); /* Forward definition */
+
+extern int num_cnodes;
+
+#define __sn_mf_a()   ia64_mfa()
+
+#ifdef XEN
+/*
+ * Xen doesn't deal with any PCI devices directly, it's all handled in dom0
+ */
+#define sn_dma_flush(foo)              do {} while(0)
+#else
+extern void sn_dma_flush(unsigned long);
+#endif
+
+#define __sn_inb ___sn_inb
+#define __sn_inw ___sn_inw
+#define __sn_inl ___sn_inl
+#define __sn_outb ___sn_outb
+#define __sn_outw ___sn_outw
+#define __sn_outl ___sn_outl
+#define __sn_readb ___sn_readb
+#define __sn_readw ___sn_readw
+#define __sn_readl ___sn_readl
+#define __sn_readq ___sn_readq
+#define __sn_readb_relaxed ___sn_readb_relaxed
+#define __sn_readw_relaxed ___sn_readw_relaxed
+#define __sn_readl_relaxed ___sn_readl_relaxed
+#define __sn_readq_relaxed ___sn_readq_relaxed
+
+/*
+ * Convenience macros for setting/clearing bits using the above accessors
+ */
+
+#define __sn_setq_relaxed(addr, val) \
+       writeq((__sn_readq_relaxed(addr) | (val)), (addr))
+#define __sn_clrq_relaxed(addr, val) \
+       writeq((__sn_readq_relaxed(addr) & ~(val)), (addr))
+
+/*
+ * The following routines are SN Platform specific, called when
+ * a reference is made to the inX/outX set of macros.  The SN Platform
+ * inX set of macros ensures that posted DMA writes on the
+ * Bridge are flushed.
+ *
+ * The routines should be self-explanatory.
+ */
+
+static inline unsigned int
+___sn_inb (unsigned long port)
+{
+       volatile unsigned char *addr;
+       unsigned char ret = -1;
+
+       if ((addr = sn_io_addr(port))) {
+               ret = *addr;
+               __sn_mf_a();
+               sn_dma_flush((unsigned long)addr);
+       }
+       return ret;
+}
+
+static inline unsigned int
+___sn_inw (unsigned long port)
+{
+       volatile unsigned short *addr;
+       unsigned short ret = -1;
+
+       if ((addr = sn_io_addr(port))) {
+               ret = *addr;
+               __sn_mf_a();
+               sn_dma_flush((unsigned long)addr);
+       }
+       return ret;
+}
+
+static inline unsigned int
+___sn_inl (unsigned long port)
+{
+       volatile unsigned int *addr;
+       unsigned int ret = -1;
+
+       if ((addr = sn_io_addr(port))) {
+               ret = *addr;
+               __sn_mf_a();
+               sn_dma_flush((unsigned long)addr);
+       }
+       return ret;
+}
+
+static inline void
+___sn_outb (unsigned char val, unsigned long port)
+{
+       volatile unsigned char *addr;
+
+       if ((addr = sn_io_addr(port))) {
+               *addr = val;
+               __sn_mmiowb();
+       }
+}
+
+static inline void
+___sn_outw (unsigned short val, unsigned long port)
+{
+       volatile unsigned short *addr;
+
+       if ((addr = sn_io_addr(port))) {
+               *addr = val;
+               __sn_mmiowb();
+       }
+}
+
+static inline void
+___sn_outl (unsigned int val, unsigned long port)
+{
+       volatile unsigned int *addr;
+
+       if ((addr = sn_io_addr(port))) {
+               *addr = val;
+               __sn_mmiowb();
+       }
+}
+
+/*
+ * The following routines are SN Platform specific, called when
+ * a reference is made to the readX/writeX set of macros.  The SN Platform
+ * readX set of macros ensures that posted DMA writes on the
+ * Bridge are flushed.
+ *
+ * The routines should be self-explanatory.
+ */
+
+static inline unsigned char
+___sn_readb (const volatile void __iomem *addr)
+{
+       unsigned char val;
+
+       val = *(volatile unsigned char __force *)addr;
+       __sn_mf_a();
+       sn_dma_flush((unsigned long)addr);
+        return val;
+}
+
+static inline unsigned short
+___sn_readw (const volatile void __iomem *addr)
+{
+       unsigned short val;
+
+       val = *(volatile unsigned short __force *)addr;
+       __sn_mf_a();
+       sn_dma_flush((unsigned long)addr);
+        return val;
+}
+
+static inline unsigned int
+___sn_readl (const volatile void __iomem *addr)
+{
+       unsigned int val;
+
+       val = *(volatile unsigned int __force *)addr;
+       __sn_mf_a();
+       sn_dma_flush((unsigned long)addr);
+        return val;
+}
+
+static inline unsigned long
+___sn_readq (const volatile void __iomem *addr)
+{
+       unsigned long val;
+
+       val = *(volatile unsigned long __force *)addr;
+       __sn_mf_a();
+       sn_dma_flush((unsigned long)addr);
+        return val;
+}
+
+/*
+ * For generic and SN2 kernels, we have a set of fast access
+ * PIO macros. These macros are provided on SN Platform
+ * because the normal inX and readX macros perform the
+ * additional task of flushing posted DMA requests on the Bridge.
+ *
+ * These routines should be self-explanatory.
+ */
+
+static inline unsigned int
+sn_inb_fast (unsigned long port)
+{
+       volatile unsigned char *addr = (unsigned char *)port;
+       unsigned char ret;
+
+       ret = *addr;
+       __sn_mf_a();
+       return ret;
+}
+
+static inline unsigned int
+sn_inw_fast (unsigned long port)
+{
+       volatile unsigned short *addr = (unsigned short *)port;
+       unsigned short ret;
+
+       ret = *addr;
+       __sn_mf_a();
+       return ret;
+}
+
+static inline unsigned int
+sn_inl_fast (unsigned long port)
+{
+       volatile unsigned int *addr = (unsigned int *)port;
+       unsigned int ret;
+
+       ret = *addr;
+       __sn_mf_a();
+       return ret;
+}
+
+static inline unsigned char
+___sn_readb_relaxed (const volatile void __iomem *addr)
+{
+       return *(volatile unsigned char __force *)addr;
+}
+
+static inline unsigned short
+___sn_readw_relaxed (const volatile void __iomem *addr)
+{
+       return *(volatile unsigned short __force *)addr;
+}
+
+static inline unsigned int
+___sn_readl_relaxed (const volatile void __iomem *addr)
+{
+       return *(volatile unsigned int __force *) addr;
+}
+
+static inline unsigned long
+___sn_readq_relaxed (const volatile void __iomem *addr)
+{
+       return *(volatile unsigned long __force *) addr;
+}
+
+struct pci_dev;
+
+static inline int
+sn_pci_set_vchan(struct pci_dev *pci_dev, unsigned long *addr, int vchan)
+{
+
+       if (vchan > 1) {
+               return -1;
+       }
+
+       if (!(*addr >> 32))     /* Using a mask here would be cleaner */
+               return 0;       /* but this generates better code */
+
+       if (vchan == 1) {
+               /* Set Bit 57 */
+               *addr |= (1UL << 57);
+       } else {
+               /* Clear Bit 57 */
+               *addr &= ~(1UL << 57);
+       }
+
+       return 0;
+}
+
+#endif /* _ASM_SN_IO_H */
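A minimal hedged sketch of the set/clear convenience macros above (the function
name, register pointer, and mask are placeholders, not part of this patch):

        /* Illustrative only: set a bit in an MMR and then clear it again using
         * the relaxed read-modify-write helpers. */
        static void example_toggle_mmr_bit(void __iomem *reg, u64 mask)
        {
                __sn_setq_relaxed(reg, mask);   /* OR the mask in */
                __sn_clrq_relaxed(reg, mask);   /* clear it again */
        }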
diff -r 46c44b5e6a1b -r 80c5b5914b79 xen/include/asm-ia64/linux-xen/asm/sn/nodepda.h
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-ia64/linux-xen/asm/sn/nodepda.h   Wed Dec 20 14:55:02 2006 -0700
@@ -0,0 +1,87 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000-2005 Silicon Graphics, Inc. All rights reserved.
+ */
+#ifndef _ASM_IA64_SN_NODEPDA_H
+#define _ASM_IA64_SN_NODEPDA_H
+
+
+#include <asm/semaphore.h>
+#include <asm/irq.h>
+#include <asm/sn/arch.h>
+#include <asm/sn/intr.h>
+#ifndef XEN
+#include <asm/sn/bte.h>
+#endif
+
+/*
+ * NUMA Node-Specific Data structures are defined in this file.
+ * In particular, this is the location of the node PDA.
+ * A pointer to the right node PDA is saved in each CPU PDA.
+ */
+
+/*
+ * Node-specific data structure.
+ *
+ * One of these structures is allocated on each node of a NUMA system.
+ *
+ * This structure provides a convenient way of keeping together 
+ * all per-node data structures. 
+ */
+struct phys_cpuid {
+       short                   nasid;
+       char                    subnode;
+       char                    slice;
+};
+
+struct nodepda_s {
+       void            *pdinfo;        /* Platform-dependent per-node info */
+
+#ifndef XEN
+       /*
+        * The BTEs on this node are shared by the local cpus
+        */
+       struct bteinfo_s        bte_if[MAX_BTES_PER_NODE];      /* Virtual Interface */
+       struct timer_list       bte_recovery_timer;
+       spinlock_t              bte_recovery_lock;
+#endif
+
+       /* 
+        * Array of pointers to the nodepdas for each node.
+        */
+       struct nodepda_s        *pernode_pdaindr[MAX_COMPACT_NODES]; 
+
+       /*
+        * Array of physical cpu identifiers. Indexed by cpuid.
+        */
+       struct phys_cpuid       phys_cpuid[NR_CPUS];
+       spinlock_t              ptc_lock ____cacheline_aligned_in_smp;
+};
+
+typedef struct nodepda_s nodepda_t;
+
+/*
+ * Access Functions for node PDA.
+ * Since there is one nodepda for each node, we need a convenient mechanism
+ * to access these nodepdas without cluttering code with #ifdefs.
+ * The next set of definitions provides this.
+ * Routines are expected to use 
+ *
+ *     sn_nodepda   - to access node PDA for the node on which code is running
+ *     NODEPDA(cnodeid)   - to access node PDA for cnodeid
+ */
+
+DECLARE_PER_CPU(struct nodepda_s *, __sn_nodepda);
+#define sn_nodepda             (__get_cpu_var(__sn_nodepda))
+#define        NODEPDA(cnodeid)        (sn_nodepda->pernode_pdaindr[cnodeid])
+
+/*
+ * Check if given a compact node id the corresponding node has all the
+ * cpus disabled. 
+ */
+#define is_headless_node(cnodeid)      (nr_cpus_node(cnodeid) == 0)
+
+#endif /* _ASM_IA64_SN_NODEPDA_H */
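As a hedged example of the nodepda accessors above (the function name is
hypothetical, not part of this patch):

        /* Illustrative only: look up a compact node's PDA and read the
         * physical-cpu table entry for a given kernel cpuid. */
        static short example_cpu_nasid(int cnodeid, int cpuid)
        {
                struct nodepda_s *npda = NODEPDA(cnodeid);

                return npda->phys_cpuid[cpuid].nasid;
        }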
diff -r 46c44b5e6a1b -r 80c5b5914b79 xen/include/asm-ia64/linux-xen/asm/sn/pcibr_provider.h
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-ia64/linux-xen/asm/sn/pcibr_provider.h    Wed Dec 20 14:55:02 2006 -0700
@@ -0,0 +1,153 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992-1997,2000-2006 Silicon Graphics, Inc. All rights reserved.
+ */
+#ifndef _ASM_IA64_SN_PCI_PCIBR_PROVIDER_H
+#define _ASM_IA64_SN_PCI_PCIBR_PROVIDER_H
+
+#ifdef XEN
+#include <linux/spinlock.h>
+#include <linux/pci.h>
+#endif
+#include <asm/sn/intr.h>
+#include <asm/sn/pcibus_provider_defs.h>
+
+/* Workarounds */
+#define PV907516 (1 << 1) /* TIOCP: Don't write the write buffer flush reg */
+
+#define BUSTYPE_MASK                    0x1
+
+/* Macros given a pcibus structure */
+#define IS_PCIX(ps)     ((ps)->pbi_bridge_mode & BUSTYPE_MASK)
+#define IS_PCI_BRIDGE_ASIC(asic) (asic == PCIIO_ASIC_TYPE_PIC || \
+                asic == PCIIO_ASIC_TYPE_TIOCP)
+#define IS_PIC_SOFT(ps)     (ps->pbi_bridge_type == PCIBR_BRIDGETYPE_PIC)
+
+
+/*
+ * The different PCI Bridge types supported on the SGI Altix platforms
+ */
+#define PCIBR_BRIDGETYPE_UNKNOWN       -1
+#define PCIBR_BRIDGETYPE_PIC            2
+#define PCIBR_BRIDGETYPE_TIOCP          3
+
+/*
+ * Bridge 64bit Direct Map Attributes
+ */
+#define PCI64_ATTR_PREF                 (1ull << 59)
+#define PCI64_ATTR_PREC                 (1ull << 58)
+#define PCI64_ATTR_VIRTUAL              (1ull << 57)
+#define PCI64_ATTR_BAR                  (1ull << 56)
+#define PCI64_ATTR_SWAP                 (1ull << 55)
+#define PCI64_ATTR_VIRTUAL1             (1ull << 54)
+
+#define PCI32_LOCAL_BASE                0
+#define PCI32_MAPPED_BASE               0x40000000
+#define PCI32_DIRECT_BASE               0x80000000
+
+#define IS_PCI32_MAPPED(x)              ((u64)(x) < PCI32_DIRECT_BASE && \
+                                         (u64)(x) >= PCI32_MAPPED_BASE)
+#define IS_PCI32_DIRECT(x)              ((u64)(x) >= PCI32_MAPPED_BASE)
+
+
+/*
+ * Bridge PMU Address Translation Entry Attributes
+ */
+#define PCI32_ATE_V                     (0x1 << 0)
+#define PCI32_ATE_CO                    (0x1 << 1)
+#define PCI32_ATE_PREC                  (0x1 << 2)
+#define PCI32_ATE_MSI                   (0x1 << 2)
+#define PCI32_ATE_PREF                  (0x1 << 3)
+#define PCI32_ATE_BAR                   (0x1 << 4)
+#define PCI32_ATE_ADDR_SHFT             12
+
+#define MINIMAL_ATES_REQUIRED(addr, size) \
+       (IOPG(IOPGOFF(addr) + (size) - 1) == IOPG((size) - 1))
+
+#define MINIMAL_ATE_FLAG(addr, size) \
+       (MINIMAL_ATES_REQUIRED((u64)addr, size) ? 1 : 0)
+
+/* bit 29 of the pci address is the SWAP bit */
+#define ATE_SWAPSHIFT                   29
+#define ATE_SWAP_ON(x)                  ((x) |= (1 << ATE_SWAPSHIFT))
+#define ATE_SWAP_OFF(x)                 ((x) &= ~(1 << ATE_SWAPSHIFT))
+
+/*
+ * I/O page size
+ */
+#if PAGE_SIZE < 16384
+#define IOPFNSHIFT                      12      /* 4K per mapped page */
+#else
+#define IOPFNSHIFT                      14      /* 16K per mapped page */
+#endif
+
+#define IOPGSIZE                        (1 << IOPFNSHIFT)
+#define IOPG(x)                         ((x) >> IOPFNSHIFT)
+#define IOPGOFF(x)                      ((x) & (IOPGSIZE-1))
+
+#define PCIBR_DEV_SWAP_DIR              (1ull << 19)
+#define PCIBR_CTRL_PAGE_SIZE            (0x1 << 21)
+
+/*
+ * PMU resources.
+ */
+struct ate_resource{
+       u64 *ate;
+       u64 num_ate;
+       u64 lowest_free_index;
+};
+
+struct pcibus_info {
+       struct pcibus_bussoft   pbi_buscommon;   /* common header */
+       u32                pbi_moduleid;
+       short                   pbi_bridge_type;
+       short                   pbi_bridge_mode;
+
+       struct ate_resource     pbi_int_ate_resource;
+       u64                pbi_int_ate_size;
+
+       u64                pbi_dir_xbase;
+       char                    pbi_hub_xid;
+
+       u64                pbi_devreg[8];
+
+       u32             pbi_valid_devices;
+       u32             pbi_enabled_devices;
+
+       spinlock_t              pbi_lock;
+};
+
+extern int  pcibr_init_provider(void);
+extern void *pcibr_bus_fixup(struct pcibus_bussoft *, struct pci_controller *);
+extern dma_addr_t pcibr_dma_map(struct pci_dev *, unsigned long, size_t, int type);
+extern dma_addr_t pcibr_dma_map_consistent(struct pci_dev *, unsigned long, size_t, int type);
+extern void pcibr_dma_unmap(struct pci_dev *, dma_addr_t, int);
+
+/*
+ * prototypes for the bridge asic register access routines in pcibr_reg.c
+ */
+extern void             pcireg_control_bit_clr(struct pcibus_info *, u64);
+extern void             pcireg_control_bit_set(struct pcibus_info *, u64);
+extern u64         pcireg_tflush_get(struct pcibus_info *);
+extern u64         pcireg_intr_status_get(struct pcibus_info *);
+extern void             pcireg_intr_enable_bit_clr(struct pcibus_info *, u64);
+extern void             pcireg_intr_enable_bit_set(struct pcibus_info *, u64);
+extern void             pcireg_intr_addr_addr_set(struct pcibus_info *, int, u64);
+extern void             pcireg_force_intr_set(struct pcibus_info *, int);
+extern u64         pcireg_wrb_flush_get(struct pcibus_info *, int);
+extern void             pcireg_int_ate_set(struct pcibus_info *, int, u64);
+extern u64 __iomem *   pcireg_int_ate_addr(struct pcibus_info *, int);
+extern void            pcibr_force_interrupt(struct sn_irq_info *sn_irq_info);
+extern void            pcibr_change_devices_irq(struct sn_irq_info *sn_irq_info);
+extern int             pcibr_ate_alloc(struct pcibus_info *, int);
+extern void            pcibr_ate_free(struct pcibus_info *, int);
+extern void            ate_write(struct pcibus_info *, int, int, u64);
+extern int sal_pcibr_slot_enable(struct pcibus_info *soft, int device,
+                                void *resp);
+extern int sal_pcibr_slot_disable(struct pcibus_info *soft, int device,
+                                 int action, void *resp);
+extern u16 sn_ioboard_to_pci_bus(struct pci_bus *pci_bus);
+#endif
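A hedged sketch of the I/O page helpers above (the function name is made up,
not part of this patch):

        /* Illustrative only: report whether a DMA request of 'size' bytes
         * starting at 'addr' can be covered by the minimal number of ATEs,
         * i.e. it does not spill into an extra mapped I/O page. */
        static int example_needs_minimal_ates(u64 addr, u64 size)
        {
                return MINIMAL_ATES_REQUIRED(addr, size);
        }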
diff -r 46c44b5e6a1b -r 80c5b5914b79 xen/include/asm-ia64/linux-xen/asm/sn/rw_mmr.h
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-ia64/linux-xen/asm/sn/rw_mmr.h    Wed Dec 20 14:55:02 2006 -0700
@@ -0,0 +1,32 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2002-2006 Silicon Graphics, Inc.  All Rights Reserved.
+ */
+#ifndef _ASM_IA64_SN_RW_MMR_H
+#define _ASM_IA64_SN_RW_MMR_H
+
+
+/*
+ * This file declares routines that access MMRs via uncached physical addresses.
+ *     pio_phys_read_mmr  - read an MMR
+ *     pio_phys_write_mmr - write an MMR
+ *     pio_atomic_phys_write_mmrs - atomically write 1 or 2 MMRs with psr.ic=0
+ *             The second MMR will be skipped if its address is NULL
+ *
+ * Addresses passed to these routines should be uncached physical addresses,
+ * i.e., 0x80000....
+ */
+
+
+extern long pio_phys_read_mmr(volatile long *mmr); 
+extern void pio_phys_write_mmr(volatile long *mmr, long val);
+#ifndef XEN
+extern void pio_atomic_phys_write_mmrs(volatile long *mmr1, long val1, volatile long *mmr2, long val2);
+#else
+extern void pio_atomic_phys_write_mmrs(volatile unsigned long *mmr1, long val1, volatile unsigned long *mmr2, long val2);
+#endif
+
+#endif /* _ASM_IA64_SN_RW_MMR_H */
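As a hedged usage note for the routines above (the wrapper name and address are
illustrative, not part of this patch):

        /* Illustrative only: write an MMR through its uncached physical
         * address and read the value back. */
        static long example_write_then_read(volatile long *uc_phys_mmr, long val)
        {
                pio_phys_write_mmr(uc_phys_mmr, val);
                return pio_phys_read_mmr(uc_phys_mmr);
        }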
diff -r 46c44b5e6a1b -r 80c5b5914b79 xen/include/asm-ia64/linux-xen/asm/sn/sn_sal.h
--- a/xen/include/asm-ia64/linux-xen/asm/sn/sn_sal.h    Wed Dec 20 08:53:42 2006 -0700
+++ /dev/null   Thu Jan 01 00:00:00 1970 +0000
@@ -1,994 +0,0 @@
-#ifndef _ASM_IA64_SN_SN_SAL_H
-#define _ASM_IA64_SN_SN_SAL_H
-
-/*
- * System Abstraction Layer definitions for IA64
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (c) 2000-2004 Silicon Graphics, Inc.  All rights reserved.
- */
-
-
-#include <linux/config.h>
-#include <asm/sal.h>
-#include <asm/sn/sn_cpuid.h>
-#include <asm/sn/arch.h>
-#include <asm/sn/geo.h>
-#include <asm/sn/nodepda.h>
-
-// SGI Specific Calls
-#define  SN_SAL_POD_MODE                           0x02000001
-#define  SN_SAL_SYSTEM_RESET                       0x02000002
-#define  SN_SAL_PROBE                              0x02000003
-#define  SN_SAL_GET_MASTER_NASID                   0x02000004
-#define         SN_SAL_GET_KLCONFIG_ADDR                  0x02000005
-#define  SN_SAL_LOG_CE                            0x02000006
-#define  SN_SAL_REGISTER_CE                       0x02000007
-#define  SN_SAL_GET_PARTITION_ADDR                0x02000009
-#define  SN_SAL_XP_ADDR_REGION                    0x0200000f
-#define  SN_SAL_NO_FAULT_ZONE_VIRTUAL             0x02000010
-#define  SN_SAL_NO_FAULT_ZONE_PHYSICAL            0x02000011
-#define  SN_SAL_PRINT_ERROR                       0x02000012
-#define  SN_SAL_SET_ERROR_HANDLING_FEATURES       0x0200001a   // reentrant
-#define  SN_SAL_GET_FIT_COMPT                     0x0200001b   // reentrant
-#define  SN_SAL_GET_HUB_INFO                       0x0200001c
-#define  SN_SAL_GET_SAPIC_INFO                     0x0200001d
-#define  SN_SAL_CONSOLE_PUTC                       0x02000021
-#define  SN_SAL_CONSOLE_GETC                       0x02000022
-#define  SN_SAL_CONSOLE_PUTS                       0x02000023
-#define  SN_SAL_CONSOLE_GETS                       0x02000024
-#define  SN_SAL_CONSOLE_GETS_TIMEOUT               0x02000025
-#define  SN_SAL_CONSOLE_POLL                       0x02000026
-#define  SN_SAL_CONSOLE_INTR                       0x02000027
-#define  SN_SAL_CONSOLE_PUTB                      0x02000028
-#define  SN_SAL_CONSOLE_XMIT_CHARS                0x0200002a
-#define  SN_SAL_CONSOLE_READC                     0x0200002b
-#define  SN_SAL_SYSCTL_MODID_GET                  0x02000031
-#define  SN_SAL_SYSCTL_GET                         0x02000032
-#define  SN_SAL_SYSCTL_IOBRICK_MODULE_GET          0x02000033
-#define  SN_SAL_SYSCTL_IO_PORTSPEED_GET            0x02000035
-#define  SN_SAL_SYSCTL_SLAB_GET                    0x02000036
-#define  SN_SAL_BUS_CONFIG                        0x02000037
-#define  SN_SAL_SYS_SERIAL_GET                    0x02000038
-#define  SN_SAL_PARTITION_SERIAL_GET              0x02000039
-#define  SN_SAL_SYSCTL_PARTITION_GET              0x0200003a
-#define  SN_SAL_SYSTEM_POWER_DOWN                 0x0200003b
-#define  SN_SAL_GET_MASTER_BASEIO_NASID                   0x0200003c
-#define  SN_SAL_COHERENCE                          0x0200003d
-#define  SN_SAL_MEMPROTECT                         0x0200003e
-#define  SN_SAL_SYSCTL_FRU_CAPTURE                0x0200003f
-
-#define  SN_SAL_SYSCTL_IOBRICK_PCI_OP             0x02000042   // reentrant
-#define         SN_SAL_IROUTER_OP                         0x02000043
-#define  SN_SAL_IOIF_INTERRUPT                    0x0200004a
-#define  SN_SAL_HWPERF_OP                         0x02000050   // lock
-#define  SN_SAL_IOIF_ERROR_INTERRUPT              0x02000051
-
-#define  SN_SAL_IOIF_SLOT_ENABLE                  0x02000053
-#define  SN_SAL_IOIF_SLOT_DISABLE                 0x02000054
-#define  SN_SAL_IOIF_GET_HUBDEV_INFO              0x02000055
-#define  SN_SAL_IOIF_GET_PCIBUS_INFO              0x02000056
-#define  SN_SAL_IOIF_GET_PCIDEV_INFO              0x02000057
-#define  SN_SAL_IOIF_GET_WIDGET_DMAFLUSH_LIST     0x02000058
-
-#define SN_SAL_HUB_ERROR_INTERRUPT                0x02000060
-
-
-/*
- * Service-specific constants
- */
-
-/* Console interrupt manipulation */
-       /* action codes */
-#define SAL_CONSOLE_INTR_OFF    0       /* turn the interrupt off */
-#define SAL_CONSOLE_INTR_ON     1       /* turn the interrupt on */
-#define SAL_CONSOLE_INTR_STATUS 2      /* retrieve the interrupt status */
-       /* interrupt specification & status return codes */
-#define SAL_CONSOLE_INTR_XMIT  1       /* output interrupt */
-#define SAL_CONSOLE_INTR_RECV  2       /* input interrupt */
-
-/* interrupt handling */
-#define SAL_INTR_ALLOC         1
-#define SAL_INTR_FREE          2
-
-/*
- * IRouter (i.e. generalized system controller) operations
- */
-#define SAL_IROUTER_OPEN       0       /* open a subchannel */
-#define SAL_IROUTER_CLOSE      1       /* close a subchannel */
-#define SAL_IROUTER_SEND       2       /* send part of an IRouter packet */
-#define SAL_IROUTER_RECV       3       /* receive part of an IRouter packet */
-#define SAL_IROUTER_INTR_STATUS        4       /* check the interrupt status for
-                                        * an open subchannel
-                                        */
-#define SAL_IROUTER_INTR_ON    5       /* enable an interrupt */
-#define SAL_IROUTER_INTR_OFF   6       /* disable an interrupt */
-#define SAL_IROUTER_INIT       7       /* initialize IRouter driver */
-
-/* IRouter interrupt mask bits */
-#define SAL_IROUTER_INTR_XMIT  SAL_CONSOLE_INTR_XMIT
-#define SAL_IROUTER_INTR_RECV  SAL_CONSOLE_INTR_RECV
-
-
-/*
- * SAL Error Codes
- */
-#define SALRET_MORE_PASSES     1
-#define SALRET_OK              0
-#define SALRET_NOT_IMPLEMENTED (-1)
-#define SALRET_INVALID_ARG     (-2)
-#define SALRET_ERROR           (-3)
-
-
-#ifndef XEN
-/**
- * sn_sal_rev_major - get the major SGI SAL revision number
- *
- * The SGI PROM stores its version in sal_[ab]_rev_(major|minor).
- * This routine simply extracts the major value from the
- * @ia64_sal_systab structure constructed by ia64_sal_init().
- */
-static inline int
-sn_sal_rev_major(void)
-{
-       struct ia64_sal_systab *systab = efi.sal_systab;
-
-       return (int)systab->sal_b_rev_major;
-}
-
-/**
- * sn_sal_rev_minor - get the minor SGI SAL revision number
- *
- * The SGI PROM stores its version in sal_[ab]_rev_(major|minor).
- * This routine simply extracts the minor value from the
- * @ia64_sal_systab structure constructed by ia64_sal_init().
- */
-static inline int
-sn_sal_rev_minor(void)
-{
-       struct ia64_sal_systab *systab = efi.sal_systab;
-       
-       return (int)systab->sal_b_rev_minor;
-}
-
-/*
- * Specify the minimum PROM revision required for this kernel.
- * Note that they're stored in hex format...
- */
-#define SN_SAL_MIN_MAJOR       0x4  /* SN2 kernels need at least PROM 4.0 */
-#define SN_SAL_MIN_MINOR       0x0
-
-/*
- * Returns the master console nasid, if the call fails, return an illegal
- * value.
- */
-static inline u64
-ia64_sn_get_console_nasid(void)
-{
-       struct ia64_sal_retval ret_stuff;
-
-       ret_stuff.status = 0;
-       ret_stuff.v0 = 0;
-       ret_stuff.v1 = 0;
-       ret_stuff.v2 = 0;
-       SAL_CALL(ret_stuff, SN_SAL_GET_MASTER_NASID, 0, 0, 0, 0, 0, 0, 0);
-
-       if (ret_stuff.status < 0)
-               return ret_stuff.status;
-
-       /* Master console nasid is in 'v0' */
-       return ret_stuff.v0;
-}
-
-/*
- * Returns the master baseio nasid, if the call fails, return an illegal
- * value.
- */
-static inline u64
-ia64_sn_get_master_baseio_nasid(void)
-{
-       struct ia64_sal_retval ret_stuff;
-
-       ret_stuff.status = 0;
-       ret_stuff.v0 = 0;
-       ret_stuff.v1 = 0;
-       ret_stuff.v2 = 0;
-       SAL_CALL(ret_stuff, SN_SAL_GET_MASTER_BASEIO_NASID, 0, 0, 0, 0, 0, 0, 0);
-
-       if (ret_stuff.status < 0)
-               return ret_stuff.status;
-
-       /* Master baseio nasid is in 'v0' */
-       return ret_stuff.v0;
-}
-
-static inline char *
-ia64_sn_get_klconfig_addr(nasid_t nasid)
-{
-       struct ia64_sal_retval ret_stuff;
-       int cnodeid;
-
-       cnodeid = nasid_to_cnodeid(nasid);
-       ret_stuff.status = 0;
-       ret_stuff.v0 = 0;
-       ret_stuff.v1 = 0;
-       ret_stuff.v2 = 0;
-       SAL_CALL(ret_stuff, SN_SAL_GET_KLCONFIG_ADDR, (u64)nasid, 0, 0, 0, 0, 0, 0);
-
-       /*
-        * We should panic if a valid cnode nasid does not produce
-        * a klconfig address.
-        */
-       if (ret_stuff.status != 0) {
-               panic("ia64_sn_get_klconfig_addr: Returned error %lx\n", ret_stuff.status);
-       }
-       return ret_stuff.v0 ? __va(ret_stuff.v0) : NULL;
-}
-#endif /* !XEN */
-
-/*
- * Returns the next console character.
- */
-static inline u64
-ia64_sn_console_getc(int *ch)
-{
-       struct ia64_sal_retval ret_stuff;
-
-       ret_stuff.status = 0;
-       ret_stuff.v0 = 0;
-       ret_stuff.v1 = 0;
-       ret_stuff.v2 = 0;
-       SAL_CALL_NOLOCK(ret_stuff, SN_SAL_CONSOLE_GETC, 0, 0, 0, 0, 0, 0, 0);
-
-       /* character is in 'v0' */
-       *ch = (int)ret_stuff.v0;
-
-       return ret_stuff.status;
-}
-
-/*
- * Read a character from the SAL console device, after a previous interrupt
- * or poll operation has indicated that a character is available
- * to be read.
- */
-static inline u64
-ia64_sn_console_readc(void)
-{
-       struct ia64_sal_retval ret_stuff;
-
-       ret_stuff.status = 0;
-       ret_stuff.v0 = 0;
-       ret_stuff.v1 = 0;
-       ret_stuff.v2 = 0;
-       SAL_CALL_NOLOCK(ret_stuff, SN_SAL_CONSOLE_READC, 0, 0, 0, 0, 0, 0, 0);
-
-       /* character is in 'v0' */
-       return ret_stuff.v0;
-}
-
-/*
- * Sends the given character to the console.
- */
-static inline u64
-ia64_sn_console_putc(char ch)
-{
-       struct ia64_sal_retval ret_stuff;
-
-       ret_stuff.status = 0;
-       ret_stuff.v0 = 0;
-       ret_stuff.v1 = 0;
-       ret_stuff.v2 = 0;
-       SAL_CALL_NOLOCK(ret_stuff, SN_SAL_CONSOLE_PUTC, (uint64_t)ch, 0, 0, 0, 0, 0, 0);
-
-       return ret_stuff.status;
-}
-
-/*
- * Sends the given buffer to the console.
- */
-static inline u64
-ia64_sn_console_putb(const char *buf, int len)
-{
-       struct ia64_sal_retval ret_stuff;
-
-       ret_stuff.status = 0;
-       ret_stuff.v0 = 0; 
-       ret_stuff.v1 = 0;
-       ret_stuff.v2 = 0;
-       SAL_CALL_NOLOCK(ret_stuff, SN_SAL_CONSOLE_PUTB, (uint64_t)buf, (uint64_t)len, 0, 0, 0, 0, 0);
-
-       if ( ret_stuff.status == 0 ) {
-               return ret_stuff.v0;
-       }
-       return (u64)0;
-}
-
-#ifndef XEN
-/*
- * Print a platform error record
- */
-static inline u64
-ia64_sn_plat_specific_err_print(int (*hook)(const char*, ...), char *rec)
-{
-       struct ia64_sal_retval ret_stuff;
-
-       ret_stuff.status = 0;
-       ret_stuff.v0 = 0;
-       ret_stuff.v1 = 0;
-       ret_stuff.v2 = 0;
-       SAL_CALL_REENTRANT(ret_stuff, SN_SAL_PRINT_ERROR, (uint64_t)hook, (uint64_t)rec, 0, 0, 0, 0, 0);
-
-       return ret_stuff.status;
-}
-
-/*
- * Check for Platform errors
- */
-static inline u64
-ia64_sn_plat_cpei_handler(void)
-{
-       struct ia64_sal_retval ret_stuff;
-
-       ret_stuff.status = 0;
-       ret_stuff.v0 = 0;
-       ret_stuff.v1 = 0;
-       ret_stuff.v2 = 0;
-       SAL_CALL_NOLOCK(ret_stuff, SN_SAL_LOG_CE, 0, 0, 0, 0, 0, 0, 0);
-
-       return ret_stuff.status;
-}
-
-/*
- * Checks for console input.
- */
-static inline u64
-ia64_sn_console_check(int *result)
-{
-       struct ia64_sal_retval ret_stuff;
-
-       ret_stuff.status = 0;
-       ret_stuff.v0 = 0;
-       ret_stuff.v1 = 0;
-       ret_stuff.v2 = 0;
-       SAL_CALL_NOLOCK(ret_stuff, SN_SAL_CONSOLE_POLL, 0, 0, 0, 0, 0, 0, 0);
-
-       /* result is in 'v0' */
-       *result = (int)ret_stuff.v0;
-
-       return ret_stuff.status;
-}
-
-/*
- * Checks console interrupt status
- */
-static inline u64
-ia64_sn_console_intr_status(void)
-{
-       struct ia64_sal_retval ret_stuff;
-
-       ret_stuff.status = 0;
-       ret_stuff.v0 = 0;
-       ret_stuff.v1 = 0;
-       ret_stuff.v2 = 0;
-       SAL_CALL_NOLOCK(ret_stuff, SN_SAL_CONSOLE_INTR, 
-                0, SAL_CONSOLE_INTR_STATUS,
-                0, 0, 0, 0, 0);
-
-       if (ret_stuff.status == 0) {
-           return ret_stuff.v0;
-       }
-       
-       return 0;
-}
-
-/*
- * Enable an interrupt on the SAL console device.
- */
-static inline void
-ia64_sn_console_intr_enable(uint64_t intr)
-{
-       struct ia64_sal_retval ret_stuff;
-
-       ret_stuff.status = 0;
-       ret_stuff.v0 = 0;
-       ret_stuff.v1 = 0;
-       ret_stuff.v2 = 0;
-       SAL_CALL_NOLOCK(ret_stuff, SN_SAL_CONSOLE_INTR, 
-                intr, SAL_CONSOLE_INTR_ON,
-                0, 0, 0, 0, 0);
-}
-
-/*
- * Disable an interrupt on the SAL console device.
- */
-static inline void
-ia64_sn_console_intr_disable(uint64_t intr)
-{
-       struct ia64_sal_retval ret_stuff;
-
-       ret_stuff.status = 0;
-       ret_stuff.v0 = 0;
-       ret_stuff.v1 = 0;
-       ret_stuff.v2 = 0;
-       SAL_CALL_NOLOCK(ret_stuff, SN_SAL_CONSOLE_INTR, 
-                intr, SAL_CONSOLE_INTR_OFF,
-                0, 0, 0, 0, 0);
-}
-
-/*
- * Sends a character buffer to the console asynchronously.
- */
-static inline u64
-ia64_sn_console_xmit_chars(char *buf, int len)
-{
-       struct ia64_sal_retval ret_stuff;
-
-       ret_stuff.status = 0;
-       ret_stuff.v0 = 0;
-       ret_stuff.v1 = 0;
-       ret_stuff.v2 = 0;
-       SAL_CALL_NOLOCK(ret_stuff, SN_SAL_CONSOLE_XMIT_CHARS,
-                (uint64_t)buf, (uint64_t)len,
-                0, 0, 0, 0, 0);
-
-       if (ret_stuff.status == 0) {
-           return ret_stuff.v0;
-       }
-
-       return 0;
-}
-
-/*
- * Returns the iobrick module Id
- */
-static inline u64
-ia64_sn_sysctl_iobrick_module_get(nasid_t nasid, int *result)
-{
-       struct ia64_sal_retval ret_stuff;
-
-       ret_stuff.status = 0;
-       ret_stuff.v0 = 0;
-       ret_stuff.v1 = 0;
-       ret_stuff.v2 = 0;
-       SAL_CALL_NOLOCK(ret_stuff, SN_SAL_SYSCTL_IOBRICK_MODULE_GET, nasid, 0, 0, 0, 0, 0, 0);
-
-       /* result is in 'v0' */
-       *result = (int)ret_stuff.v0;
-
-       return ret_stuff.status;
-}
-
-/**
- * ia64_sn_pod_mode - call the SN_SAL_POD_MODE function
- *
- * SN_SAL_POD_MODE actually takes an argument, but it's always
- * 0 when we call it from the kernel, so we don't have to expose
- * it to the caller.
- */
-static inline u64
-ia64_sn_pod_mode(void)
-{
-       struct ia64_sal_retval isrv;
-       SAL_CALL(isrv, SN_SAL_POD_MODE, 0, 0, 0, 0, 0, 0, 0);
-       if (isrv.status)
-               return 0;
-       return isrv.v0;
-}
-
-/**
- * ia64_sn_probe_mem - read from memory safely
- * @addr: address to probe
- * @size: number of bytes to read (1,2,4,8)
- * @data_ptr: address to store value read by probe (-1 returned if probe fails)
- *
- * Call into the SAL to do a memory read.  If the read generates a machine
- * check, this routine will recover gracefully and return -1 to the caller.
- * @addr is usually a kernel virtual address in uncached space (i.e. the
- * address starts with 0xc), but if called in physical mode, @addr should
- * be a physical address.
- *
- * Return values:
- *  0 - probe successful
- *  1 - probe failed (generated MCA)
- *  2 - Bad arg
- * <0 - PAL error
- */
-static inline u64
-ia64_sn_probe_mem(long addr, long size, void *data_ptr)
-{
-       struct ia64_sal_retval isrv;
-
-       SAL_CALL(isrv, SN_SAL_PROBE, addr, size, 0, 0, 0, 0, 0);
-
-       if (data_ptr) {
-               switch (size) {
-               case 1:
-                       *((u8*)data_ptr) = (u8)isrv.v0;
-                       break;
-               case 2:
-                       *((u16*)data_ptr) = (u16)isrv.v0;
-                       break;
-               case 4:
-                       *((u32*)data_ptr) = (u32)isrv.v0;
-                       break;
-               case 8:
-                       *((u64*)data_ptr) = (u64)isrv.v0;
-                       break;
-               default:
-                       isrv.status = 2;
-               }
-       }
-       return isrv.status;
-}
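A minimal usage sketch of the probe interface above, assuming the surrounding
sn_sal.h declarations are in scope; probe_addr and the printk text are
illustrative only:

    u64 status;
    u32 val;

    /* probe_addr is a caller-supplied uncached kernel virtual address */
    status = ia64_sn_probe_mem(probe_addr, 4, &val);
    if (status == 0)
            printk("probe read 0x%x\n", val);
    else if (status == 1)
            printk("probe generated an MCA\n");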
-
-/*
- * Retrieve the system serial number as an ASCII string.
- */
-static inline u64
-ia64_sn_sys_serial_get(char *buf)
-{
-       struct ia64_sal_retval ret_stuff;
-       SAL_CALL_NOLOCK(ret_stuff, SN_SAL_SYS_SERIAL_GET, buf, 0, 0, 0, 0, 0, 0);
-       return ret_stuff.status;
-}
-
-extern char sn_system_serial_number_string[];
-extern u64 sn_partition_serial_number;
-
-static inline char *
-sn_system_serial_number(void) {
-       if (sn_system_serial_number_string[0]) {
-               return(sn_system_serial_number_string);
-       } else {
-               ia64_sn_sys_serial_get(sn_system_serial_number_string);
-               return(sn_system_serial_number_string);
-       }
-}
-       
-
-/*
- * Returns a unique id number for this system and partition (suitable for
- * use with license managers), based in part on the system serial number.
- */
-static inline u64
-ia64_sn_partition_serial_get(void)
-{
-       struct ia64_sal_retval ret_stuff;
-       SAL_CALL(ret_stuff, SN_SAL_PARTITION_SERIAL_GET, 0, 0, 0, 0, 0, 0, 0);
-       if (ret_stuff.status != 0)
-           return 0;
-       return ret_stuff.v0;
-}
-
-static inline u64
-sn_partition_serial_number_val(void) {
-       if (sn_partition_serial_number) {
-               return(sn_partition_serial_number);
-       } else {
-               return(sn_partition_serial_number = ia64_sn_partition_serial_get());
-       }
-}
-
-/*
- * Returns the partition id of the nasid passed in as an argument,
- * or INVALID_PARTID if the partition id cannot be retrieved.
- */
-static inline partid_t
-ia64_sn_sysctl_partition_get(nasid_t nasid)
-{
-       struct ia64_sal_retval ret_stuff;
-       SAL_CALL(ret_stuff, SN_SAL_SYSCTL_PARTITION_GET, nasid,
-                0, 0, 0, 0, 0, 0);
-       if (ret_stuff.status != 0)
-           return INVALID_PARTID;
-       return ((partid_t)ret_stuff.v0);
-}
-
-/*
- * Returns the partition id of the current processor.
- */
-
-extern partid_t sn_partid;
-
-static inline partid_t
-sn_local_partid(void) {
-       if (sn_partid < 0) {
-               return (sn_partid = ia64_sn_sysctl_partition_get(cpuid_to_nasid(smp_processor_id())));
-       } else {
-               return sn_partid;
-       }
-}
-
-/*
- * Register or unregister a physical address range being referenced across
- * a partition boundary for which certain SAL errors should be scanned for,
- * cleaned up and ignored.  This is of value for kernel partitioning code only.
- * Values for the operation argument:
- *     1 = register this address range with SAL
- *     0 = unregister this address range with SAL
- * 
- * SAL maintains a reference count on an address range in case it is registered
- * multiple times.
- * 
- * On success, returns the reference count of the address range after the SAL
- * call has performed the current registration/unregistration.  Returns a
- * negative value if an error occurred.
- */
-static inline int
-sn_register_xp_addr_region(u64 paddr, u64 len, int operation)
-{
-       struct ia64_sal_retval ret_stuff;
-       SAL_CALL(ret_stuff, SN_SAL_XP_ADDR_REGION, paddr, len, (u64)operation,
-                0, 0, 0, 0);
-       return ret_stuff.status;
-}
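A hedged sketch of the register/unregister pairing described above (paddr and
len are caller-supplied; SAL keeps a reference count, so each successful
registration should eventually be balanced by an unregistration):

    int refs;

    refs = sn_register_xp_addr_region(paddr, len, 1);   /* 1 = register */
    if (refs < 0)
            return refs;                                /* negative value = SAL error */
    /* ... cross-partition references to [paddr, paddr+len) ... */
    sn_register_xp_addr_region(paddr, len, 0);          /* 0 = unregister */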
-
-/*
- * Register or unregister an instruction range for which SAL errors should
- * be ignored.  If an error occurs while in the registered range, SAL jumps
- * to return_addr after ignoring the error.  Values for the operation argument:
- *     1 = register this instruction range with SAL
- *     0 = unregister this instruction range with SAL
- *
- * Returns 0 on success, or a negative value if an error occurred.
- */
-static inline int
-sn_register_nofault_code(u64 start_addr, u64 end_addr, u64 return_addr,
-                        int virtual, int operation)
-{
-       struct ia64_sal_retval ret_stuff;
-       u64 call;
-       if (virtual) {
-               call = SN_SAL_NO_FAULT_ZONE_VIRTUAL;
-       } else {
-               call = SN_SAL_NO_FAULT_ZONE_PHYSICAL;
-       }
-       SAL_CALL(ret_stuff, call, start_addr, end_addr, return_addr, (u64)1,
-                0, 0, 0);
-       return ret_stuff.status;
-}
-
-/*
- * Change or query the coherence domain for this partition. Each cpu-based
- * nasid is represented by a bit in an array of 64-bit words:
- *      0 = not in this partition's coherency domain
- *      1 = in this partition's coherency domain
- *
- * It is not possible for the local system's nasids to be removed from
- * the coherency domain.  Purpose of the domain arguments:
- *      new_domain = set the coherence domain to the given nasids
- *      old_domain = return the current coherence domain
- *
- * Returns 0 on success, or a negative value if an error occurred.
- */
-static inline int
-sn_change_coherence(u64 *new_domain, u64 *old_domain)
-{
-       struct ia64_sal_retval ret_stuff;
-       SAL_CALL(ret_stuff, SN_SAL_COHERENCE, new_domain, old_domain, 0, 0,
-                0, 0, 0);
-       return ret_stuff.status;
-}
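Since the domain arguments are bitmaps of nasids packed into 64-bit words, a
caller might build them as in this sketch (the array length of 4 words is an
arbitrary assumption, enough for 256 nasids):

    u64 new_domain[4] = { 0 }, old_domain[4] = { 0 };

    /* add 'nasid' to the coherence domain being requested */
    new_domain[nasid / 64] |= 1UL << (nasid % 64);
    if (sn_change_coherence(new_domain, old_domain) < 0)
            printk("coherence domain update failed\n");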
-
-/*
- * Change memory access protections for a physical address range.
- * nasid_array is not used on Altix, but may be in future architectures.
- * Available memory protection access classes are defined after the function.
- */
-static inline int
-sn_change_memprotect(u64 paddr, u64 len, u64 perms, u64 *nasid_array)
-{
-       struct ia64_sal_retval ret_stuff;
-       int cnodeid;
-       unsigned long irq_flags;
-
-       cnodeid = nasid_to_cnodeid(get_node_number(paddr));
-       // spin_lock(&NODEPDA(cnodeid)->bist_lock);
-       local_irq_save(irq_flags);
-       SAL_CALL_NOLOCK(ret_stuff, SN_SAL_MEMPROTECT, paddr, len, nasid_array,
-                perms, 0, 0, 0);
-       local_irq_restore(irq_flags);
-       // spin_unlock(&NODEPDA(cnodeid)->bist_lock);
-       return ret_stuff.status;
-}
-#define SN_MEMPROT_ACCESS_CLASS_0              0x14a080
-#define SN_MEMPROT_ACCESS_CLASS_1              0x2520c2
-#define SN_MEMPROT_ACCESS_CLASS_2              0x14a1ca
-#define SN_MEMPROT_ACCESS_CLASS_3              0x14a290
-#define SN_MEMPROT_ACCESS_CLASS_6              0x084080
-#define SN_MEMPROT_ACCESS_CLASS_7              0x021080
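A sketch of how the access classes above might be applied; since nasid_array
is unused on Altix, passing NULL for it is assumed to be acceptable here:

    int rc;

    rc = sn_change_memprotect(paddr, len, SN_MEMPROT_ACCESS_CLASS_0, NULL);
    if (rc < 0)
            printk("memprotect change failed: %d\n", rc);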
-
-/*
- * Turns off system power.
- */
-static inline void
-ia64_sn_power_down(void)
-{
-       struct ia64_sal_retval ret_stuff;
-       SAL_CALL(ret_stuff, SN_SAL_SYSTEM_POWER_DOWN, 0, 0, 0, 0, 0, 0, 0);
-       while(1);
-       /* never returns */
-}
-
-/**
- * ia64_sn_fru_capture - tell the system controller to capture hw state
- *
- * This routine will call the SAL which will tell the system controller(s)
- * to capture hw mmr information from each SHub in the system.
- */
-static inline u64
-ia64_sn_fru_capture(void)
-{
-        struct ia64_sal_retval isrv;
-        SAL_CALL(isrv, SN_SAL_SYSCTL_FRU_CAPTURE, 0, 0, 0, 0, 0, 0, 0);
-        if (isrv.status)
-                return 0;
-        return isrv.v0;
-}
-
-/*
- * Performs an operation on a PCI bus or slot -- power up, power down
- * or reset.
- */
-static inline u64
-ia64_sn_sysctl_iobrick_pci_op(nasid_t n, u64 connection_type, 
-                             u64 bus, char slot, 
-                             u64 action)
-{
-       struct ia64_sal_retval rv = {0, 0, 0, 0};
-
-       SAL_CALL_NOLOCK(rv, SN_SAL_SYSCTL_IOBRICK_PCI_OP, connection_type, n, action,
-                bus, (u64) slot, 0, 0);
-       if (rv.status)
-               return rv.v0;
-       return 0;
-}
-
-
-/*
- * Open a subchannel for sending arbitrary data to the system
- * controller network via the system controller device associated with
- * 'nasid'.  Return the subchannel number or a negative error code.
- */
-static inline int
-ia64_sn_irtr_open(nasid_t nasid)
-{
-       struct ia64_sal_retval rv;
-       SAL_CALL_REENTRANT(rv, SN_SAL_IROUTER_OP, SAL_IROUTER_OPEN, nasid,
-                          0, 0, 0, 0, 0);
-       return (int) rv.v0;
-}
-
-/*
- * Close system controller subchannel 'subch' previously opened on 'nasid'.
- */
-static inline int
-ia64_sn_irtr_close(nasid_t nasid, int subch)
-{
-       struct ia64_sal_retval rv;
-       SAL_CALL_REENTRANT(rv, SN_SAL_IROUTER_OP, SAL_IROUTER_CLOSE,
-                          (u64) nasid, (u64) subch, 0, 0, 0, 0);
-       return (int) rv.status;
-}
-
-/*
- * Read data from system controller associated with 'nasid' on
- * subchannel 'subch'.  The buffer to be filled is pointed to by
- * 'buf', and its capacity is in the integer pointed to by 'len'.  The
- * referent of 'len' is set to the number of bytes read by the SAL
- * call.  The return value is either SALRET_OK (for bytes read) or
- * SALRET_ERROR (for error or "no data available").
- */
-static inline int
-ia64_sn_irtr_recv(nasid_t nasid, int subch, char *buf, int *len)
-{
-       struct ia64_sal_retval rv;
-       SAL_CALL_REENTRANT(rv, SN_SAL_IROUTER_OP, SAL_IROUTER_RECV,
-                          (u64) nasid, (u64) subch, (u64) buf, (u64) len,
-                          0, 0);
-       return (int) rv.status;
-}
-
-/*
- * Write data to the system controller network via the system
- * controller associated with 'nasid' on subchannel 'subch'.  The
- * buffer to be written out is pointed to by 'buf', and 'len' is the
- * number of bytes to be written.  The return value is either the
- * number of bytes written (which could be zero) or a negative error
- * code.
- */
-static inline int
-ia64_sn_irtr_send(nasid_t nasid, int subch, char *buf, int len)
-{
-       struct ia64_sal_retval rv;
-       SAL_CALL_REENTRANT(rv, SN_SAL_IROUTER_OP, SAL_IROUTER_SEND,
-                          (u64) nasid, (u64) subch, (u64) buf, (u64) len,
-                          0, 0);
-       return (int) rv.v0;
-}
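Taken together, the subchannel calls above suggest the following
open/send/close flow (buf and len are caller-supplied; error handling is kept
minimal for the sketch):

    int subch, sent;

    subch = ia64_sn_irtr_open(nasid);
    if (subch < 0)
            return subch;                                   /* negative subchannel = error code */
    sent = ia64_sn_irtr_send(nasid, subch, buf, len);       /* bytes written, or negative error */
    ia64_sn_irtr_close(nasid, subch);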
-
-/*
- * Check whether any interrupts are pending for the system controller
- * associated with 'nasid' and its subchannel 'subch'.  The return
- * value is a mask of pending interrupts (SAL_IROUTER_INTR_XMIT and/or
- * SAL_IROUTER_INTR_RECV).
- */
-static inline int
-ia64_sn_irtr_intr(nasid_t nasid, int subch)
-{
-       struct ia64_sal_retval rv;
-       SAL_CALL_REENTRANT(rv, SN_SAL_IROUTER_OP, SAL_IROUTER_INTR_STATUS,
-                          (u64) nasid, (u64) subch, 0, 0, 0, 0);
-       return (int) rv.v0;
-}
-
-/*
- * Enable the interrupt indicated by the intr parameter (either
- * SAL_IROUTER_INTR_XMIT or SAL_IROUTER_INTR_RECV).
- */
-static inline int
-ia64_sn_irtr_intr_enable(nasid_t nasid, int subch, u64 intr)
-{
-       struct ia64_sal_retval rv;
-       SAL_CALL_REENTRANT(rv, SN_SAL_IROUTER_OP, SAL_IROUTER_INTR_ON,
-                          (u64) nasid, (u64) subch, intr, 0, 0, 0);
-       return (int) rv.v0;
-}
-
-/*
- * Disable the interrupt indicated by the intr parameter (either
- * SAL_IROUTER_INTR_XMIT or SAL_IROUTER_INTR_RECV).
- */
-static inline int
-ia64_sn_irtr_intr_disable(nasid_t nasid, int subch, u64 intr)
-{
-       struct ia64_sal_retval rv;
-       SAL_CALL_REENTRANT(rv, SN_SAL_IROUTER_OP, SAL_IROUTER_INTR_OFF,
-                          (u64) nasid, (u64) subch, intr, 0, 0, 0);
-       return (int) rv.v0;
-}
-
-/**
- * ia64_sn_get_fit_compt - read a FIT entry from the PROM header
- * @nasid: NASID of node to read
- * @index: FIT entry index to be retrieved (0..n)
- * @fitentry: 16 byte buffer where FIT entry will be stored.
- * @banbuf: optional buffer for retrieving banner
- * @banlen: length of banner buffer
- *
- * Access to the physical PROM chips needs to be serialized since reads and
- * writes can't occur at the same time, so we need to call into the SAL when
- * we want to look at the FIT entries on the chips.
- *
- * Returns:
- *     %SALRET_OK if ok
- *     %SALRET_INVALID_ARG if index too big
- *     %SALRET_NOT_IMPLEMENTED if running on older PROM
- *     ??? if nasid invalid OR banner buffer not large enough
- */
-static inline int
-ia64_sn_get_fit_compt(u64 nasid, u64 index, void *fitentry, void *banbuf,
-                     u64 banlen)
-{
-       struct ia64_sal_retval rv;
-       SAL_CALL_NOLOCK(rv, SN_SAL_GET_FIT_COMPT, nasid, index, fitentry,
-                       banbuf, banlen, 0, 0);
-       return (int) rv.status;
-}
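Given the return values documented above, FIT entries could be enumerated
roughly as follows; passing NULL/0 for the optional banner buffer is an
assumption:

    unsigned char fit[16];      /* 16-byte FIT entry buffer */
    u64 index;
    int rc;

    for (index = 0; ; index++) {
            rc = ia64_sn_get_fit_compt(nasid, index, fit, NULL, 0);
            if (rc != SALRET_OK)
                    break;      /* SALRET_INVALID_ARG once index runs past the table */
            /* ... decode fit ... */
    }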
-
-/*
- * Initialize the SAL components of the system controller
- * communication driver; specifically pass in a sizable buffer that
- * can be used for allocation of subchannel queues as new subchannels
- * are opened.  "buf" points to the buffer, and "len" specifies its
- * length.
- */
-static inline int
-ia64_sn_irtr_init(nasid_t nasid, void *buf, int len)
-{
-       struct ia64_sal_retval rv;
-       SAL_CALL_REENTRANT(rv, SN_SAL_IROUTER_OP, SAL_IROUTER_INIT,
-                          (u64) nasid, (u64) buf, (u64) len, 0, 0, 0);
-       return (int) rv.status;
-}
-
-/*
- * Returns the nasid, subnode & slice corresponding to a SAPIC ID
- *
- *  In:
- *     arg0 - SN_SAL_GET_SAPIC_INFO
- *     arg1 - sapicid (lid >> 16) 
- *  Out:
- *     v0 - nasid
- *     v1 - subnode
- *     v2 - slice
- */
-static inline u64
-ia64_sn_get_sapic_info(int sapicid, int *nasid, int *subnode, int *slice)
-{
-       struct ia64_sal_retval ret_stuff;
-
-       ret_stuff.status = 0;
-       ret_stuff.v0 = 0;
-       ret_stuff.v1 = 0;
-       ret_stuff.v2 = 0;
-       SAL_CALL_NOLOCK(ret_stuff, SN_SAL_GET_SAPIC_INFO, sapicid, 0, 0, 0, 0, 0, 0);
-
-/***** BEGIN HACK - temp til old proms no longer supported ********/
-       if (ret_stuff.status == SALRET_NOT_IMPLEMENTED) {
-               if (nasid) *nasid = sapicid & 0xfff;
-               if (subnode) *subnode = (sapicid >> 13) & 1;
-               if (slice) *slice = (sapicid >> 12) & 3;
-               return 0;
-       }
-/***** END HACK *******/
-
-       if (ret_stuff.status < 0)
-               return ret_stuff.status;
-
-       if (nasid) *nasid = (int) ret_stuff.v0;
-       if (subnode) *subnode = (int) ret_stuff.v1;
-       if (slice) *slice = (int) ret_stuff.v2;
-       return 0;
-}
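A short usage sketch of the lookup above; the sapicid would normally be
derived from the processor's LID register as noted in the comment:

    int nasid, subnode, slice;

    if (ia64_sn_get_sapic_info(sapicid, &nasid, &subnode, &slice) == 0)
            printk("sapicid 0x%x -> nasid %d, subnode %d, slice %d\n",
                   sapicid, nasid, subnode, slice);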
- 
-/*
- * Returns information about the HUB/SHUB.
- *  In:
- *     arg0 - SN_SAL_GET_HUB_INFO
- *     arg1 - 0 (other values reserved for future use)
- *  Out:
- *     v0 - shub type (0=shub1, 1=shub2)
- *     v1 - nasid mask (e.g., 0x7ff for an 11-bit nasid)
- *     v2 - bit position of low nasid bit
- */
-static inline u64
-ia64_sn_get_hub_info(int fc, u64 *arg1, u64 *arg2, u64 *arg3)
-{
-       struct ia64_sal_retval ret_stuff;
-
-       ret_stuff.status = 0;
-       ret_stuff.v0 = 0;
-       ret_stuff.v1 = 0;
-       ret_stuff.v2 = 0;
-       SAL_CALL_NOLOCK(ret_stuff, SN_SAL_GET_HUB_INFO, fc, 0, 0, 0, 0, 0, 0);
-
-/***** BEGIN HACK - temp til old proms no longer supported ********/
-       if (ret_stuff.status == SALRET_NOT_IMPLEMENTED) {
-               if (arg1) *arg1 = 0;
-               if (arg2) *arg2 = 0x7ff;
-               if (arg3) *arg3 = 38;
-               return 0;
-       }
-/***** END HACK *******/
-
-       if (ret_stuff.status < 0)
-               return ret_stuff.status;
-
-       if (arg1) *arg1 = ret_stuff.v0;
-       if (arg2) *arg2 = ret_stuff.v1;
-       if (arg3) *arg3 = ret_stuff.v2;
-       return 0;
-}
- 
-/*
- * This is the access point to the Altix PROM hardware performance
- * and status monitoring interface. For info on using this, see
- * include/asm-ia64/sn/sn2/sn_hwperf.h
- */
-static inline int
-ia64_sn_hwperf_op(nasid_t nasid, u64 opcode, u64 a0, u64 a1, u64 a2,
-                  u64 a3, u64 a4, int *v0)
-{
-       struct ia64_sal_retval rv;
-       SAL_CALL_NOLOCK(rv, SN_SAL_HWPERF_OP, (u64)nasid,
-               opcode, a0, a1, a2, a3, a4);
-       if (v0)
-               *v0 = (int) rv.v0;
-       return (int) rv.status;
-}
-#endif /* !XEN */
-#endif /* _ASM_IA64_SN_SN_SAL_H */
diff -r 46c44b5e6a1b -r 80c5b5914b79 xen/include/asm-ia64/linux-xen/asm/sn/types.h
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-ia64/linux-xen/asm/sn/types.h     Wed Dec 20 14:55:02 2006 -0700
@@ -0,0 +1,28 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1999,2001-2003 Silicon Graphics, Inc.  All Rights Reserved.
+ * Copyright (C) 1999 by Ralf Baechle
+ */
+#ifndef _ASM_IA64_SN_TYPES_H
+#define _ASM_IA64_SN_TYPES_H
+
+#include <linux/types.h>
+
+typedef unsigned long  cpuid_t;
+typedef signed short   nasid_t;        /* node id in numa-as-id space */
+typedef signed char    partid_t;       /* partition ID type */
+typedef unsigned int    moduleid_t;     /* user-visible module number type */
+typedef unsigned int    cmoduleid_t;    /* kernel compact module id type */
+typedef unsigned char  slotid_t;       /* slot (blade) within module */
+typedef unsigned char  slabid_t;       /* slab (asic) within slot */
+typedef u64 nic_t;
+typedef unsigned long iopaddr_t;
+#ifndef XEN
+typedef unsigned long paddr_t;
+#endif
+typedef short cnodeid_t;
+
+#endif /* _ASM_IA64_SN_TYPES_H */
diff -r 46c44b5e6a1b -r 80c5b5914b79 xen/include/asm-ia64/linux-xen/asm/system.h
--- a/xen/include/asm-ia64/linux-xen/asm/system.h       Wed Dec 20 08:53:42 2006 -0700
+++ b/xen/include/asm-ia64/linux-xen/asm/system.h       Wed Dec 20 14:55:02 2006 -0700
@@ -190,6 +190,7 @@ do {                                                       \
 #ifdef XEN
 #define local_irq_is_enabled() (!irqs_disabled())
 extern struct vcpu *ia64_switch_to(struct vcpu *next_task);
+#define ia64_platform_is(x) (strcmp(x, platform_name) == 0)
 #else
 #ifdef __KERNEL__
 
diff -r 46c44b5e6a1b -r 80c5b5914b79 xen/include/asm-ia64/linux-xen/asm/types.h
--- a/xen/include/asm-ia64/linux-xen/asm/types.h        Wed Dec 20 08:53:42 2006 -0700
+++ b/xen/include/asm-ia64/linux-xen/asm/types.h        Wed Dec 20 14:55:02 2006 -0700
@@ -74,6 +74,14 @@ typedef unsigned short kmem_bufctl_t;
 
 #ifdef XEN
 #include <asm/xentypes.h>
+
+#ifndef __ASSEMBLY__
+typedef unsigned int gfp_t;
+typedef u64 resource_size_t;
+typedef u32 dev_t;
+typedef unsigned int mode_t;
+#define THIS_MODULE    NULL
+#endif
 #endif
 
 #endif /* _ASM_IA64_TYPES_H */
diff -r 46c44b5e6a1b -r 80c5b5914b79 xen/include/asm-ia64/linux-xen/linux/README.origin
--- a/xen/include/asm-ia64/linux-xen/linux/README.origin        Wed Dec 20 08:53:42 2006 -0700
+++ b/xen/include/asm-ia64/linux-xen/linux/README.origin        Wed Dec 20 14:55:02 2006 -0700
@@ -12,3 +12,8 @@ interrupt.h           -> linux/include/linux/int
 
 # The files below are from Linux-2.6.16.33
 oprofile.h             -> linux/include/linux/oprofile.h
+
+# The files below are from Linux-2.6.19
+pci.h                  -> linux/include/linux/pci.h
+kobject.h              -> linux/include/linux/kobject.h
+device.h               -> linux/include/linux/device.h
diff -r 46c44b5e6a1b -r 80c5b5914b79 xen/include/asm-ia64/linux-xen/linux/device.h
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-ia64/linux-xen/linux/device.h     Wed Dec 20 14:55:02 2006 -0700
@@ -0,0 +1,489 @@
+/*
+ * device.h - generic, centralized driver model
+ *
+ * Copyright (c) 2001-2003 Patrick Mochel <mochel@xxxxxxxx>
+ *
+ * This file is released under the GPLv2
+ *
+ * See Documentation/driver-model/ for more information.
+ */
+
+#ifndef _DEVICE_H_
+#define _DEVICE_H_
+
+#include <linux/ioport.h>
+#include <linux/kobject.h>
+#include <linux/klist.h>
+#include <linux/list.h>
+#include <linux/compiler.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/pm.h>
+#include <asm/semaphore.h>
+#include <asm/atomic.h>
+
+#define DEVICE_NAME_SIZE       50
+#define DEVICE_NAME_HALF       __stringify(20) /* Less than half to accommodate slop */
+#define DEVICE_ID_SIZE         32
+#define BUS_ID_SIZE            KOBJ_NAME_LEN
+
+
+struct device;
+struct device_driver;
+struct class;
+struct class_device;
+
+struct bus_type {
+       const char              * name;
+
+       struct subsystem        subsys;
+       struct kset             drivers;
+       struct kset             devices;
+       struct klist            klist_devices;
+       struct klist            klist_drivers;
+
+       struct bus_attribute    * bus_attrs;
+       struct device_attribute * dev_attrs;
+       struct driver_attribute * drv_attrs;
+
+       int             (*match)(struct device * dev, struct device_driver * drv);
+       int             (*uevent)(struct device *dev, char **envp,
+                                 int num_envp, char *buffer, int buffer_size);
+       int             (*probe)(struct device * dev);
+       int             (*remove)(struct device * dev);
+       void            (*shutdown)(struct device * dev);
+
+       int (*suspend)(struct device * dev, pm_message_t state);
+       int (*suspend_late)(struct device * dev, pm_message_t state);
+       int (*resume_early)(struct device * dev);
+       int (*resume)(struct device * dev);
+};
+
+extern int __must_check bus_register(struct bus_type * bus);
+extern void bus_unregister(struct bus_type * bus);
+
+extern int __must_check bus_rescan_devices(struct bus_type * bus);
+
+/* iterator helpers for buses */
+
+int bus_for_each_dev(struct bus_type * bus, struct device * start, void * data,
+                    int (*fn)(struct device *, void *));
+struct device * bus_find_device(struct bus_type *bus, struct device *start,
+                               void *data, int (*match)(struct device *, void *));
+
+int __must_check bus_for_each_drv(struct bus_type *bus,
+               struct device_driver *start, void *data,
+               int (*fn)(struct device_driver *, void *));
+
+/* driverfs interface for exporting bus attributes */
+
+struct bus_attribute {
+#ifndef XEN
+       struct attribute        attr;
+#endif
+       ssize_t (*show)(struct bus_type *, char * buf);
+       ssize_t (*store)(struct bus_type *, const char * buf, size_t count);
+};
+
+#define BUS_ATTR(_name,_mode,_show,_store)     \
+struct bus_attribute bus_attr_##_name = __ATTR(_name,_mode,_show,_store)
+
+extern int __must_check bus_create_file(struct bus_type *,
+                                       struct bus_attribute *);
+extern void bus_remove_file(struct bus_type *, struct bus_attribute *);
+
+struct device_driver {
+       const char              * name;
+       struct bus_type         * bus;
+
+       struct completion       unloaded;
+       struct kobject          kobj;
+       struct klist            klist_devices;
+       struct klist_node       knode_bus;
+
+       struct module           * owner;
+
+       int     (*probe)        (struct device * dev);
+       int     (*remove)       (struct device * dev);
+       void    (*shutdown)     (struct device * dev);
+       int     (*suspend)      (struct device * dev, pm_message_t state);
+       int     (*resume)       (struct device * dev);
+
+       unsigned int multithread_probe:1;
+};
+
+
+extern int __must_check driver_register(struct device_driver * drv);
+extern void driver_unregister(struct device_driver * drv);
+
+extern struct device_driver * get_driver(struct device_driver * drv);
+extern void put_driver(struct device_driver * drv);
+extern struct device_driver *driver_find(const char *name, struct bus_type *bus);
+extern int driver_probe_done(void);
+
+/* driverfs interface for exporting driver attributes */
+
+struct driver_attribute {
+#ifndef XEN
+       struct attribute        attr;
+#endif
+       ssize_t (*show)(struct device_driver *, char * buf);
+       ssize_t (*store)(struct device_driver *, const char * buf, size_t count);
+};
+
+#define DRIVER_ATTR(_name,_mode,_show,_store)  \
+struct driver_attribute driver_attr_##_name = __ATTR(_name,_mode,_show,_store)
+
+extern int __must_check driver_create_file(struct device_driver *,
+                                       struct driver_attribute *);
+extern void driver_remove_file(struct device_driver *, struct driver_attribute *);
+
+extern int __must_check driver_for_each_device(struct device_driver * drv,
+               struct device *start, void *data,
+               int (*fn)(struct device *, void *));
+struct device * driver_find_device(struct device_driver *drv,
+                                  struct device *start, void *data,
+                                  int (*match)(struct device *, void *));
+
+/*
+ * device classes
+ */
+struct class {
+       const char              * name;
+       struct module           * owner;
+
+       struct subsystem        subsys;
+       struct list_head        children;
+       struct list_head        devices;
+       struct list_head        interfaces;
+#ifdef XEN
+       spinlock_t              sem;
+#else
+       struct semaphore        sem;    /* locks both the children and interfaces lists */
+#endif
+
+       struct kobject          *virtual_dir;
+
+       struct class_attribute          * class_attrs;
+       struct class_device_attribute   * class_dev_attrs;
+       struct device_attribute         * dev_attrs;
+
+       int     (*uevent)(struct class_device *dev, char **envp,
+                          int num_envp, char *buffer, int buffer_size);
+       int     (*dev_uevent)(struct device *dev, char **envp, int num_envp,
+                               char *buffer, int buffer_size);
+
+       void    (*release)(struct class_device *dev);
+       void    (*class_release)(struct class *class);
+       void    (*dev_release)(struct device *dev);
+
+       int     (*suspend)(struct device *, pm_message_t state);
+       int     (*resume)(struct device *);
+};
+
+extern int __must_check class_register(struct class *);
+extern void class_unregister(struct class *);
+
+
+struct class_attribute {
+#ifndef XEN
+       struct attribute        attr;
+#endif
+       ssize_t (*show)(struct class *, char * buf);
+       ssize_t (*store)(struct class *, const char * buf, size_t count);
+};
+
+#define CLASS_ATTR(_name,_mode,_show,_store)                   \
+struct class_attribute class_attr_##_name = __ATTR(_name,_mode,_show,_store) 
+
+extern int __must_check class_create_file(struct class *,
+                                       const struct class_attribute *);
+extern void class_remove_file(struct class *, const struct class_attribute *);
+
+struct class_device_attribute {
+#ifndef XEN
+       struct attribute        attr;
+#endif
+       ssize_t (*show)(struct class_device *, char * buf);
+       ssize_t (*store)(struct class_device *, const char * buf, size_t count);
+};
+
+#define CLASS_DEVICE_ATTR(_name,_mode,_show,_store)            \
+struct class_device_attribute class_device_attr_##_name =      \
+       __ATTR(_name,_mode,_show,_store)
+
+extern int __must_check class_device_create_file(struct class_device *,
+                                   const struct class_device_attribute *);
+
+/**
+ * struct class_device - class devices
+ * @class: pointer to the parent class for this class device.  This is required.
+ * @devt: for internal use by the driver core only.
+ * @node: for internal use by the driver core only.
+ * @kobj: for internal use by the driver core only.
+ * @devt_attr: for internal use by the driver core only.
+ * @groups: optional additional groups to be created
+ * @dev: if set, a symlink to the struct device is created in the sysfs
+ * directory for this struct class device.
+ * @class_data: pointer to whatever you want to store here for this struct
+ * class_device.  Use class_get_devdata() and class_set_devdata() to get and
+ * set this pointer.
+ * @parent: pointer to a struct class_device that is the parent of this struct
+ * class_device.  If NULL, this class_device will show up at the root of the
+ * struct class in sysfs (which is probably what you want to have happen.)
+ * @release: pointer to a release function for this struct class_device.  If
+ * set, this will be called instead of the class specific release function.
+ * Only use this if you want to override the default release function, like
+ * when you are nesting class_device structures.
+ * @uevent: pointer to a uevent function for this struct class_device.  If
+ * set, this will be called instead of the class specific uevent function.
+ * Only use this if you want to override the default uevent function, like
+ * when you are nesting class_device structures.
+ */
+struct class_device {
+       struct list_head        node;
+
+       struct kobject          kobj;
+       struct class            * class;        /* required */
+       dev_t                   devt;           /* dev_t, creates the sysfs "dev" */
+       struct class_device_attribute *devt_attr;
+       struct class_device_attribute uevent_attr;
+       struct device           * dev;          /* not necessary, but nice to have */
+       void                    * class_data;   /* class-specific data */
+       struct class_device     *parent;        /* parent of this child device, if there is one */
+       struct attribute_group  ** groups;      /* optional groups */
+
+       void    (*release)(struct class_device *dev);
+       int     (*uevent)(struct class_device *dev, char **envp,
+                          int num_envp, char *buffer, int buffer_size);
+       char    class_id[BUS_ID_SIZE];  /* unique to this class */
+};
+
+static inline void *
+class_get_devdata (struct class_device *dev)
+{
+       return dev->class_data;
+}
+
+static inline void
+class_set_devdata (struct class_device *dev, void *data)
+{
+       dev->class_data = data;
+}
+
+
+extern int __must_check class_device_register(struct class_device *);
+extern void class_device_unregister(struct class_device *);
+extern void class_device_initialize(struct class_device *);
+extern int __must_check class_device_add(struct class_device *);
+extern void class_device_del(struct class_device *);
+
+extern int class_device_rename(struct class_device *, char *);
+
+extern struct class_device * class_device_get(struct class_device *);
+extern void class_device_put(struct class_device *);
+
+extern void class_device_remove_file(struct class_device *, 
+                                    const struct class_device_attribute *);
+extern int __must_check class_device_create_bin_file(struct class_device *,
+                                       struct bin_attribute *);
+extern void class_device_remove_bin_file(struct class_device *,
+                                        struct bin_attribute *);
+
+struct class_interface {
+       struct list_head        node;
+       struct class            *class;
+
+       int (*add)      (struct class_device *, struct class_interface *);
+       void (*remove)  (struct class_device *, struct class_interface *);
+       int (*add_dev)          (struct device *, struct class_interface *);
+       void (*remove_dev)      (struct device *, struct class_interface *);
+};
+
+extern int __must_check class_interface_register(struct class_interface *);
+extern void class_interface_unregister(struct class_interface *);
+
+extern struct class *class_create(struct module *owner, const char *name);
+extern void class_destroy(struct class *cls);
+extern struct class_device *class_device_create(struct class *cls,
+                                               struct class_device *parent,
+                                               dev_t devt,
+                                               struct device *device,
+                                               const char *fmt, ...)
+                                       __attribute__((format(printf,5,6)));
+extern void class_device_destroy(struct class *cls, dev_t devt);
+
+/* interface for exporting device attributes */
+struct device_attribute {
+       struct attribute        attr;
+       ssize_t (*show)(struct device *dev, struct device_attribute *attr,
+                       char *buf);
+       ssize_t (*store)(struct device *dev, struct device_attribute *attr,
+                        const char *buf, size_t count);
+};
+
+#define DEVICE_ATTR(_name,_mode,_show,_store) \
+struct device_attribute dev_attr_##_name = __ATTR(_name,_mode,_show,_store)
+
+extern int __must_check device_create_file(struct device *device,
+                                       struct device_attribute * entry);
+extern void device_remove_file(struct device * dev, struct device_attribute * attr);
+extern int __must_check device_create_bin_file(struct device *dev,
+                                              struct bin_attribute *attr);
+extern void device_remove_bin_file(struct device *dev,
+                                  struct bin_attribute *attr);
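As a sketch of how the attribute interface above is typically used; the
attribute name, the 0444 mode, and the show body are illustrative only:

    static ssize_t state_show(struct device *dev, struct device_attribute *attr,
                              char *buf)
    {
            /* report whether the device has been registered */
            return sprintf(buf, "%d\n", device_is_registered(dev));
    }
    static DEVICE_ATTR(state, 0444, state_show, NULL);

    /* later, e.g. in a probe routine: device_create_file(dev, &dev_attr_state); */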
+struct device {
+       struct klist            klist_children;
+       struct klist_node       knode_parent;           /* node in sibling list */
+       struct klist_node       knode_driver;
+       struct klist_node       knode_bus;
+       struct device   * parent;
+
+       struct kobject kobj;
+       char    bus_id[BUS_ID_SIZE];    /* position on parent bus */
+       unsigned                is_registered:1;
+       struct device_attribute uevent_attr;
+       struct device_attribute *devt_attr;
+
+#ifdef XEN
+       spinlock_t              sem;
+#else
+       struct semaphore        sem;    /* semaphore to synchronize calls to
+                                        * its driver.
+                                        */
+#endif
+
+       struct bus_type * bus;          /* type of bus device is on */
+       struct device_driver *driver;   /* which driver has allocated this
+                                          device */
+       void            *driver_data;   /* data private to the driver */
+       void            *platform_data; /* Platform specific data, device
+                                          core doesn't touch it */
+       void            *firmware_data; /* Firmware specific data (e.g. ACPI,
+                                          BIOS data),reserved for device core*/
+       struct dev_pm_info      power;
+
+       u64             *dma_mask;      /* dma mask (if dma'able device) */
+       u64             coherent_dma_mask;/* Like dma_mask, but for
+                                            alloc_coherent mappings as
+                                            not all hardware supports
+                                            64 bit addresses for consistent
+                                            allocations such as descriptors. */
+
+       struct list_head        dma_pools;      /* dma pools (if dma'ble) */
+
+       struct dma_coherent_mem *dma_mem; /* internal for coherent mem
+                                            override */
+
+       /* class_device migration path */
+       struct list_head        node;
+       struct class            *class;         /* optional*/
+       dev_t                   devt;           /* dev_t, creates the sysfs "dev" */
+       struct attribute_group  **groups;       /* optional groups */
+
+       void    (*release)(struct device * dev);
+};
+
+static inline void *
+dev_get_drvdata (struct device *dev)
+{
+       return dev->driver_data;
+}
+
+static inline void
+dev_set_drvdata (struct device *dev, void *data)
+{
+       dev->driver_data = data;
+}
+
+static inline int device_is_registered(struct device *dev)
+{
+       return dev->is_registered;
+}
+
+/*
+ * High level routines for use by the bus drivers
+ */
+extern int __must_check device_register(struct device * dev);
+extern void device_unregister(struct device * dev);
+extern void device_initialize(struct device * dev);
+extern int __must_check device_add(struct device * dev);
+extern void device_del(struct device * dev);
+extern int device_for_each_child(struct device *, void *,
+                    int (*fn)(struct device *, void *));
+extern int device_rename(struct device *dev, char *new_name);
+
+/*
+ * Manual binding of a device to driver. See drivers/base/bus.c
+ * for information on use.
+ */
+extern int __must_check device_bind_driver(struct device *dev);
+extern void device_release_driver(struct device * dev);
+extern int  __must_check device_attach(struct device * dev);
+extern int __must_check driver_attach(struct device_driver *drv);
+extern int __must_check device_reprobe(struct device *dev);
+
+/*
+ * Easy functions for dynamically creating devices on the fly
+ */
+extern struct device *device_create(struct class *cls, struct device *parent,
+                                   dev_t devt, const char *fmt, ...)
+                                   __attribute__((format(printf,4,5)));
+extern void device_destroy(struct class *cls, dev_t devt);
+
+extern int virtual_device_parent(struct device *dev);
+
+/*
+ * Platform "fixup" functions - allow the platform to have their say
+ * about devices and actions that the general device layer doesn't
+ * know about.
+ */
+/* Notify platform of device discovery */
+extern int (*platform_notify)(struct device * dev);
+
+extern int (*platform_notify_remove)(struct device * dev);
+
+
+/**
+ * get_device - atomically increment the reference count for the device.
+ *
+ */
+extern struct device * get_device(struct device * dev);
+extern void put_device(struct device * dev);
+
+
+/* drivers/base/power/shutdown.c */
+extern void device_shutdown(void);
+
+
+/* drivers/base/firmware.c */
+extern int __must_check firmware_register(struct subsystem *);
+extern void firmware_unregister(struct subsystem *);
+
+/* debugging and troubleshooting/diagnostic helpers. */
+extern const char *dev_driver_string(struct device *dev);
+#define dev_printk(level, dev, format, arg...) \
+       printk(level "%s %s: " format , dev_driver_string(dev) , (dev)->bus_id , ## arg)
+
+#ifdef DEBUG
+#define dev_dbg(dev, format, arg...)           \
+       dev_printk(KERN_DEBUG , dev , format , ## arg)
+#else
+#define dev_dbg(dev, format, arg...) do { (void)(dev); } while (0)
+#endif
+
+#define dev_err(dev, format, arg...)           \
+       dev_printk(KERN_ERR , dev , format , ## arg)
+#define dev_info(dev, format, arg...)          \
+       dev_printk(KERN_INFO , dev , format , ## arg)
+#define dev_warn(dev, format, arg...)          \
+       dev_printk(KERN_WARNING , dev , format , ## arg)
+#define dev_notice(dev, format, arg...)                \
+       dev_printk(KERN_NOTICE , dev , format , ## arg)
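For example, a driver holding a struct device pointer could log through these
helpers as follows (the messages are illustrative):

    dev_info(dev, "device %s initialized\n", dev->bus_id);
    dev_err(dev, "reset failed\n");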
+
+/* Create alias, so I can be autoloaded. */
+#define MODULE_ALIAS_CHARDEV(major,minor) \
+       MODULE_ALIAS("char-major-" __stringify(major) "-" __stringify(minor))
+#define MODULE_ALIAS_CHARDEV_MAJOR(major) \
+       MODULE_ALIAS("char-major-" __stringify(major) "-*")
+#endif /* _DEVICE_H_ */
diff -r 46c44b5e6a1b -r 80c5b5914b79 xen/include/asm-ia64/linux-xen/linux/kobject.h
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-ia64/linux-xen/linux/kobject.h    Wed Dec 20 14:55:02 2006 -0700
@@ -0,0 +1,286 @@
+/*
+ * kobject.h - generic kernel object infrastructure.
+ *
+ * Copyright (c) 2002-2003     Patrick Mochel
+ * Copyright (c) 2002-2003     Open Source Development Labs
+ *
+ * This file is released under the GPLv2.
+ *
+ * 
+ * Please read Documentation/kobject.txt before using the kobject
+ * interface, ESPECIALLY the parts about reference counts and object
+ * destructors. 
+ */
+
+#ifndef _KOBJECT_H_
+#define _KOBJECT_H_
+
+#ifdef __KERNEL__
+
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/sysfs.h>
+#include <linux/compiler.h>
+#include <linux/spinlock.h>
+#include <linux/rwsem.h>
+#include <linux/kref.h>
+#include <linux/kernel.h>
+#include <linux/wait.h>
+#include <asm/atomic.h>
+
+#define KOBJ_NAME_LEN                  20
+#define UEVENT_HELPER_PATH_LEN         256
+
+/* path to the userspace helper executed on an event */
+extern char uevent_helper[];
+
+/* counter to tag the uevent, read only except for the kobject core */
+extern u64 uevent_seqnum;
+
+/* the actions here must match the proper string in lib/kobject_uevent.c */
+typedef int __bitwise kobject_action_t;
+enum kobject_action {
+       KOBJ_ADD        = (__force kobject_action_t) 0x01,     /* exclusive to core */
+       KOBJ_REMOVE     = (__force kobject_action_t) 0x02,     /* exclusive to core */
+       KOBJ_CHANGE     = (__force kobject_action_t) 0x03,     /* device state change */
+       KOBJ_MOUNT      = (__force kobject_action_t) 0x04,     /* mount event for block devices (broken) */
+       KOBJ_UMOUNT     = (__force kobject_action_t) 0x05,     /* umount event for block devices (broken) */
+       KOBJ_OFFLINE    = (__force kobject_action_t) 0x06,     /* device offline */
+       KOBJ_ONLINE     = (__force kobject_action_t) 0x07,     /* device online */
+};
+
+struct kobject {
+       const char              * k_name;
+       char                    name[KOBJ_NAME_LEN];
+       struct kref             kref;
+       struct list_head        entry;
+       struct kobject          * parent;
+       struct kset             * kset;
+       struct kobj_type        * ktype;
+       struct dentry           * dentry;
+       wait_queue_head_t       poll;
+};
+
+extern int kobject_set_name(struct kobject *, const char *, ...)
+       __attribute__((format(printf,2,3)));
+
+static inline const char * kobject_name(const struct kobject * kobj)
+{
+       return kobj->k_name;
+}
+
+extern void kobject_init(struct kobject *);
+extern void kobject_cleanup(struct kobject *);
+
+extern int __must_check kobject_add(struct kobject *);
+extern void kobject_del(struct kobject *);
+
+extern int __must_check kobject_rename(struct kobject *, const char *new_name);
+
+extern int __must_check kobject_register(struct kobject *);
+extern void kobject_unregister(struct kobject *);
+
+extern struct kobject * kobject_get(struct kobject *);
+extern void kobject_put(struct kobject *);
+
+extern struct kobject *kobject_add_dir(struct kobject *, const char *);
+
+extern char * kobject_get_path(struct kobject *, gfp_t);
+
+struct kobj_type {
+       void (*release)(struct kobject *);
+       struct sysfs_ops        * sysfs_ops;
+       struct attribute        ** default_attrs;
+};
+
+
+/**
+ *     kset - a set of kobjects of a specific type, belonging
+ *     to a specific subsystem.
+ *
+ *     All kobjects of a kset should be embedded in an identical 
+ *     type. This type may have a descriptor, which the kset points
+ *     to. This allows there to exist sets of objects of the same
+ *     type in different subsystems.
+ *
+ *     A subsystem does not have to be a list of only one type 
+ *     of object; multiple ksets can belong to one subsystem. All 
+ *     ksets of a subsystem share the subsystem's lock.
+ *
+ *     Each kset can support specific event variables; it can
+ *     suppress the event generation or add subsystem-specific
+ *     variables carried with the event.
+ */
+struct kset_uevent_ops {
+       int (*filter)(struct kset *kset, struct kobject *kobj);
+       const char *(*name)(struct kset *kset, struct kobject *kobj);
+       int (*uevent)(struct kset *kset, struct kobject *kobj, char **envp,
+                       int num_envp, char *buffer, int buffer_size);
+};
+
+struct kset {
+       struct subsystem        * subsys;
+       struct kobj_type        * ktype;
+       struct list_head        list;
+       spinlock_t              list_lock;
+       struct kobject          kobj;
+       struct kset_uevent_ops  * uevent_ops;
+};
+
+
+extern void kset_init(struct kset * k);
+extern int __must_check kset_add(struct kset * k);
+extern int __must_check kset_register(struct kset * k);
+extern void kset_unregister(struct kset * k);
+
+static inline struct kset * to_kset(struct kobject * kobj)
+{
+       return kobj ? container_of(kobj,struct kset,kobj) : NULL;
+}
+
+static inline struct kset * kset_get(struct kset * k)
+{
+       return k ? to_kset(kobject_get(&k->kobj)) : NULL;
+}
+
+static inline void kset_put(struct kset * k)
+{
+       kobject_put(&k->kobj);
+}
+
+static inline struct kobj_type * get_ktype(struct kobject * k)
+{
+       if (k->kset && k->kset->ktype)
+               return k->kset->ktype;
+       else 
+               return k->ktype;
+}
+
+extern struct kobject * kset_find_obj(struct kset *, const char *);
+
+
+/**
+ * Use this when initializing an embedded kset with no other 
+ * fields to initialize.
+ */
+#define set_kset_name(str)     .kset = { .kobj = { .name = str } }
+
+
+
+struct subsystem {
+       struct kset             kset;
+#ifndef XEN
+       struct rw_semaphore     rwsem;
+#endif
+};
+
+#define decl_subsys(_name,_type,_uevent_ops) \
+struct subsystem _name##_subsys = { \
+       .kset = { \
+               .kobj = { .name = __stringify(_name) }, \
+               .ktype = _type, \
+               .uevent_ops =_uevent_ops, \
+       } \
+}
+#define decl_subsys_name(_varname,_name,_type,_uevent_ops) \
+struct subsystem _varname##_subsys = { \
+       .kset = { \
+               .kobj = { .name = __stringify(_name) }, \
+               .ktype = _type, \
+               .uevent_ops =_uevent_ops, \
+       } \
+}
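A hedged sketch of declaring and registering a subsystem with the macro above;
the "widget" name is made up and the NULL ktype/uevent_ops are an assumption:

    static decl_subsys(widget, NULL, NULL);     /* defines widget_subsys */

    /* at init time: */
    if (subsystem_register(&widget_subsys))
            printk("widget subsystem registration failed\n");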
+
+/* The global /sys/kernel/ subsystem for people to chain off of */
+extern struct subsystem kernel_subsys;
+/* The global /sys/hypervisor/ subsystem  */
+extern struct subsystem hypervisor_subsys;
+
+/**
+ * Helpers for setting the kset of registered objects.
+ * Often, a registered object belongs to a kset embedded in a 
+ * subsystem. These do no magic, just make the resulting code
+ * easier to follow. 
+ */
+
+/**
+ *     kobj_set_kset_s(obj,subsys) - set kset for embedded kobject.
+ *     @obj:           ptr to some object type.
+ *     @subsys:        a subsystem object (not a ptr).
+ *
+ *     Can be used for any object type with an embedded ->kobj.
+ */
+
+#define kobj_set_kset_s(obj,subsys) \
+       (obj)->kobj.kset = &(subsys).kset
+
+/**
+ *     kset_set_kset_s(obj,subsys) - set kset for embedded kset.
+ *     @obj:           ptr to some object type.
+ *     @subsys:        a subsystem object (not a ptr).
+ *
+ *     Can be used for any object type with an embedded ->kset.
+ *     Sets the kset of @obj's  embedded kobject (via its embedded
+ *     kset) to @subsys.kset. This makes @obj a member of that 
+ *     kset.
+ */
+
+#define kset_set_kset_s(obj,subsys) \
+       (obj)->kset.kobj.kset = &(subsys).kset
+
+/**
+ *     subsys_set_kset(obj,subsys) - set kset for subsystem
+ *     @obj:           ptr to some object type.
+ *     @subsys:        a subsystem object (not a ptr).
+ *
+ *     Can be used for any object type with an embedded ->subsys.
+ *     Sets the kset of @obj's kobject to @subsys.kset. This makes
+ *     the object a member of that kset.
+ */
+
+#define subsys_set_kset(obj,_subsys) \
+       (obj)->subsys.kset.kobj.kset = &(_subsys).kset
+
+extern void subsystem_init(struct subsystem *);
+extern int __must_check subsystem_register(struct subsystem *);
+extern void subsystem_unregister(struct subsystem *);
+
+static inline struct subsystem * subsys_get(struct subsystem * s)
+{
+       return s ? container_of(kset_get(&s->kset),struct subsystem,kset) : NULL;
+}
+
+static inline void subsys_put(struct subsystem * s)
+{
+       kset_put(&s->kset);
+}
+
+struct subsys_attribute {
+#ifndef XEN
+       struct attribute attr;
+#endif
+       ssize_t (*show)(struct subsystem *, char *);
+       ssize_t (*store)(struct subsystem *, const char *, size_t); 
+};
+
+extern int __must_check subsys_create_file(struct subsystem * ,
+                                       struct subsys_attribute *);
+
+#if defined(CONFIG_HOTPLUG)
+void kobject_uevent(struct kobject *kobj, enum kobject_action action);
+
+int add_uevent_var(char **envp, int num_envp, int *cur_index,
+                       char *buffer, int buffer_size, int *cur_len,
+                       const char *format, ...)
+       __attribute__((format (printf, 7, 8)));
+#else
+static inline void kobject_uevent(struct kobject *kobj, enum kobject_action action) { }
+
+static inline int add_uevent_var(char **envp, int num_envp, int *cur_index,
+                                     char *buffer, int buffer_size, int *cur_len,
+                                     const char *format, ...)
+{ return 0; }
+#endif
+
+#endif /* __KERNEL__ */
+#endif /* _KOBJECT_H_ */
diff -r 46c44b5e6a1b -r 80c5b5914b79 xen/include/asm-ia64/linux-xen/linux/pci.h
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-ia64/linux-xen/linux/pci.h        Wed Dec 20 14:55:02 2006 -0700
@@ -0,0 +1,820 @@
+/*
+ *     pci.h
+ *
+ *     PCI defines and function prototypes
+ *     Copyright 1994, Drew Eckhardt
+ *     Copyright 1997--1999 Martin Mares <mj@xxxxxx>
+ *
+ *     For more information, please consult the following manuals (look at
+ *     http://www.pcisig.com/ for how to get them):
+ *
+ *     PCI BIOS Specification
+ *     PCI Local Bus Specification
+ *     PCI to PCI Bridge Specification
+ *     PCI System Design Guide
+ */
+
+#ifndef LINUX_PCI_H
+#define LINUX_PCI_H
+
+/* Include the pci register defines */
+#include <linux/pci_regs.h>
+
+/* Include the ID list */
+#include <linux/pci_ids.h>
+#ifdef XEN
+#include <asm/processor.h>
+#endif
+
+/*
+ * The PCI interface treats multi-function devices as independent
+ * devices.  The slot/function address of each device is encoded
+ * in a single byte as follows:
+ *
+ *     7:3 = slot
+ *     2:0 = function
+ */
+#define PCI_DEVFN(slot,func)   ((((slot) & 0x1f) << 3) | ((func) & 0x07))
+#define PCI_SLOT(devfn)                (((devfn) >> 3) & 0x1f)
+#define PCI_FUNC(devfn)                ((devfn) & 0x07)
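A quick worked example of the encoding above: PCI_DEVFN(3, 2) gives
((3 & 0x1f) << 3) | (2 & 0x07) = 0x1a, and PCI_SLOT(0x1a) / PCI_FUNC(0x1a)
recover 3 and 2 respectively.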
+
+/* Ioctls for /proc/bus/pci/X/Y nodes. */
+#define PCIIOC_BASE            ('P' << 24 | 'C' << 16 | 'I' << 8)
+#define PCIIOC_CONTROLLER      (PCIIOC_BASE | 0x00)    /* Get controller for PCI device. */
+#define PCIIOC_MMAP_IS_IO      (PCIIOC_BASE | 0x01)    /* Set mmap state to I/O space. */
+#define PCIIOC_MMAP_IS_MEM     (PCIIOC_BASE | 0x02)    /* Set mmap state to MEM space. */
+#define PCIIOC_WRITE_COMBINE   (PCIIOC_BASE | 0x03)    /* Enable/disable write-combining. */
+
+#ifdef __KERNEL__
+
+#include <linux/mod_devicetable.h>
+
+#include <linux/types.h>
+#include <linux/ioport.h>
+#include <linux/list.h>
+#include <linux/compiler.h>
+#include <linux/errno.h>
+#include <linux/device.h>
+
+/* File state for mmap()s on /proc/bus/pci/X/Y */
+enum pci_mmap_state {
+       pci_mmap_io,
+       pci_mmap_mem
+};
+
+/* This defines the direction arg to the DMA mapping routines. */
+#define PCI_DMA_BIDIRECTIONAL  0
+#define PCI_DMA_TODEVICE       1
+#define PCI_DMA_FROMDEVICE     2
+#define PCI_DMA_NONE           3
+
+#define DEVICE_COUNT_COMPATIBLE        4
+#define DEVICE_COUNT_RESOURCE  12
+
+typedef int __bitwise pci_power_t;
+
+#define PCI_D0         ((pci_power_t __force) 0)
+#define PCI_D1         ((pci_power_t __force) 1)
+#define PCI_D2         ((pci_power_t __force) 2)
+#define PCI_D3hot      ((pci_power_t __force) 3)
+#define PCI_D3cold     ((pci_power_t __force) 4)
+#define PCI_UNKNOWN    ((pci_power_t __force) 5)
+#define PCI_POWER_ERROR        ((pci_power_t __force) -1)
+
+/** The pci_channel state describes connectivity between the CPU and
+ *  the pci device.  If some PCI bus between here and the pci device
+ *  has crashed or locked up, this info is reflected here.
+ */
+typedef unsigned int __bitwise pci_channel_state_t;
+
+enum pci_channel_state {
+       /* I/O channel is in normal state */
+       pci_channel_io_normal = (__force pci_channel_state_t) 1,
+
+       /* I/O to channel is blocked */
+       pci_channel_io_frozen = (__force pci_channel_state_t) 2,
+
+       /* PCI card is dead */
+       pci_channel_io_perm_failure = (__force pci_channel_state_t) 3,
+};
+
+typedef unsigned short __bitwise pci_bus_flags_t;
+enum pci_bus_flags {
+       PCI_BUS_FLAGS_NO_MSI = (__force pci_bus_flags_t) 1,
+};
+
+struct pci_cap_saved_state {
+       struct hlist_node next;
+       char cap_nr;
+       u32 data[0];
+};
+
+/*
+ * The pci_dev structure is used to describe PCI devices.
+ */
+struct pci_dev {
+       struct list_head global_list;   /* node in list of all PCI devices */
+       struct list_head bus_list;      /* node in per-bus list */
+       struct pci_bus  *bus;           /* bus this device is on */
+       struct pci_bus  *subordinate;   /* bus this device bridges to */
+
+       void            *sysdata;       /* hook for sys-specific extension */
+       struct proc_dir_entry *procent; /* device entry in /proc/bus/pci */
+
+       unsigned int    devfn;          /* encoded device & function index */
+       unsigned short  vendor;
+       unsigned short  device;
+       unsigned short  subsystem_vendor;
+       unsigned short  subsystem_device;
+       unsigned int    class;          /* 3 bytes: (base,sub,prog-if) */
+       u8              hdr_type;       /* PCI header type (`multi' flag masked out) */
+       u8              rom_base_reg;   /* which config register controls the ROM */
+       u8              pin;            /* which interrupt pin this device uses */
+
+       struct pci_driver *driver;      /* which driver has allocated this device */
+       u64             dma_mask;       /* Mask of the bits of bus address this
+                                          device implements.  Normally this is
+                                          0xffffffff.  You only need to change
+                                          this if your device has broken DMA
+                                          or supports 64-bit transfers.  */
+
+       pci_power_t     current_state;  /* Current operating state. In ACPI-speak,
+                                          this is D0-D3, D0 being fully functional,
+                                          and D3 being off. */
+
+       pci_channel_state_t error_state;        /* current connectivity state */
+       struct  device  dev;            /* Generic device interface */
+
+       /* device is compatible with these IDs */
+       unsigned short vendor_compatible[DEVICE_COUNT_COMPATIBLE];
+       unsigned short device_compatible[DEVICE_COUNT_COMPATIBLE];
+
+       int             cfg_size;       /* Size of configuration space */
+
+       /*
+        * Instead of touching interrupt line and base address registers
+        * directly, use the values stored here. They might be different!
+        */
+       unsigned int    irq;
+       struct resource resource[DEVICE_COUNT_RESOURCE]; /* I/O and memory regions + expansion ROMs */
+
+       /* These fields are used by common fixups */
+       unsigned int    transparent:1;  /* Transparent PCI bridge */
+       unsigned int    multifunction:1;/* Part of multi-function device */
+       /* keep track of device state */
+       unsigned int    is_enabled:1;   /* pci_enable_device has been called */
+       unsigned int    is_busmaster:1; /* device is busmaster */
+       unsigned int    no_msi:1;       /* device may not use msi */
+       unsigned int    no_d1d2:1;   /* only allow d0 or d3 */
+       unsigned int    block_ucfg_access:1;    /* userspace config space access is blocked */
+       unsigned int    broken_parity_status:1; /* Device generates false positive parity */
+       unsigned int    msi_enabled:1;
+       unsigned int    msix_enabled:1;
+
+       u32             saved_config_space[16]; /* config space saved at suspend time */
+       struct hlist_head saved_cap_space;
+       struct bin_attribute *rom_attr; /* attribute descriptor for sysfs ROM entry */
+       int rom_attr_enabled;           /* has display of the rom attribute been enabled? */
+       struct bin_attribute *res_attr[DEVICE_COUNT_RESOURCE]; /* sysfs file for resources */
+};
+
+#define pci_dev_g(n) list_entry(n, struct pci_dev, global_list)
+#define pci_dev_b(n) list_entry(n, struct pci_dev, bus_list)
+#define        to_pci_dev(n) container_of(n, struct pci_dev, dev)
+#define for_each_pci_dev(d) while ((d = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, d)) != NULL)
+
+static inline struct pci_cap_saved_state *pci_find_saved_cap(
+       struct pci_dev *pci_dev,char cap)
+{
+       struct pci_cap_saved_state *tmp;
+       struct hlist_node *pos;
+
+       hlist_for_each_entry(tmp, pos, &pci_dev->saved_cap_space, next) {
+               if (tmp->cap_nr == cap)
+                       return tmp;
+       }
+       return NULL;
+}
+
+static inline void pci_add_saved_cap(struct pci_dev *pci_dev,
+       struct pci_cap_saved_state *new_cap)
+{
+       hlist_add_head(&new_cap->next, &pci_dev->saved_cap_space);
+}
+
+static inline void pci_remove_saved_cap(struct pci_cap_saved_state *cap)
+{
+       hlist_del(&cap->next);
+}
+
+/*
+ *  For PCI devices, the region numbers are assigned this way:
+ *
+ *     0-5     standard PCI regions
+ *     6       expansion ROM
+ *     7-10    bridges: address space assigned to buses behind the bridge
+ */
+
+#define PCI_ROM_RESOURCE       6
+#define PCI_BRIDGE_RESOURCES   7
+#define PCI_NUM_RESOURCES      11
+
+#ifndef PCI_BUS_NUM_RESOURCES
+#define PCI_BUS_NUM_RESOURCES  8
+#endif
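
As a quick illustration of the region numbering described above, a driver might walk a device's standard BARs and expansion ROM roughly as follows (a sketch; the helper name and printk format are hypothetical):

static void show_regions(struct pci_dev *dev)   /* hypothetical helper */
{
        int i;

        /* Regions 0-5 are the standard BARs; PCI_ROM_RESOURCE (6) is the ROM. */
        for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
                struct resource *res = &dev->resource[i];

                if (!res->flags)        /* BAR not implemented */
                        continue;
                printk(KERN_DEBUG "region %d: %lx-%lx flags %lx\n", i,
                       (unsigned long)res->start, (unsigned long)res->end,
                       res->flags);
        }
}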
+
+#define PCI_REGION_FLAG_MASK   0x0fU   /* These bits of resource flags tell us the PCI region flags */
+
+struct pci_bus {
+       struct list_head node;          /* node in list of buses */
+       struct pci_bus  *parent;        /* parent bus this bridge is on */
+       struct list_head children;      /* list of child buses */
+       struct list_head devices;       /* list of devices on this bus */
+       struct pci_dev  *self;          /* bridge device as seen by parent */
+       struct resource *resource[PCI_BUS_NUM_RESOURCES];
+                                       /* address space routed to this bus */
+
+       struct pci_ops  *ops;           /* configuration access functions */
+       void            *sysdata;       /* hook for sys-specific extension */
+       struct proc_dir_entry *procdir; /* directory entry in /proc/bus/pci */
+
+       unsigned char   number;         /* bus number */
+       unsigned char   primary;        /* number of primary bridge */
+       unsigned char   secondary;      /* number of secondary bridge */
+       unsigned char   subordinate;    /* max number of subordinate buses */
+
+       char            name[48];
+
+       unsigned short  bridge_ctl;     /* manage NO_ISA/FBB/et al behaviors */
+       pci_bus_flags_t bus_flags;      /* Inherited by child busses */
+       struct device           *bridge;
+       struct class_device     class_dev;
+       struct bin_attribute    *legacy_io; /* legacy I/O for this bus */
+       struct bin_attribute    *legacy_mem; /* legacy mem */
+};
+
+#define pci_bus_b(n)   list_entry(n, struct pci_bus, node)
+#define to_pci_bus(n)  container_of(n, struct pci_bus, class_dev)
+
+/*
+ * Error values that may be returned by PCI functions.
+ */
+#define PCIBIOS_SUCCESSFUL             0x00
+#define PCIBIOS_FUNC_NOT_SUPPORTED     0x81
+#define PCIBIOS_BAD_VENDOR_ID          0x83
+#define PCIBIOS_DEVICE_NOT_FOUND       0x86
+#define PCIBIOS_BAD_REGISTER_NUMBER    0x87
+#define PCIBIOS_SET_FAILED             0x88
+#define PCIBIOS_BUFFER_TOO_SMALL       0x89
+
+/* Low-level architecture-dependent routines */
+
+struct pci_ops {
+       int (*read)(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val);
+       int (*write)(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val);
+};
+
+struct pci_raw_ops {
+       int (*read)(unsigned int domain, unsigned int bus, unsigned int devfn,
+                   int reg, int len, u32 *val);
+       int (*write)(unsigned int domain, unsigned int bus, unsigned int devfn,
+                    int reg, int len, u32 val);
+};
+
+extern struct pci_raw_ops *raw_pci_ops;
+
+struct pci_bus_region {
+       unsigned long start;
+       unsigned long end;
+};
+
+struct pci_dynids {
+       spinlock_t lock;            /* protects list, index */
+       struct list_head list;      /* for IDs added at runtime */
+       unsigned int use_driver_data:1; /* pci_driver->driver_data is used */
+};
+
+/* ---------------------------------------------------------------- */
+/** PCI Error Recovery System (PCI-ERS).  If a PCI device driver provides
+ *  a set of callbacks in struct pci_error_handlers, then that device driver
+ *  will be notified of PCI bus errors, and will be driven to recovery
+ *  when an error occurs.
+ */
+
+typedef unsigned int __bitwise pci_ers_result_t;
+
+enum pci_ers_result {
+       /* no result/none/not supported in device driver */
+       PCI_ERS_RESULT_NONE = (__force pci_ers_result_t) 1,
+
+       /* Device driver can recover without slot reset */
+       PCI_ERS_RESULT_CAN_RECOVER = (__force pci_ers_result_t) 2,
+
+       /* Device driver wants slot to be reset. */
+       PCI_ERS_RESULT_NEED_RESET = (__force pci_ers_result_t) 3,
+
+       /* Device has completely failed, is unrecoverable */
+       PCI_ERS_RESULT_DISCONNECT = (__force pci_ers_result_t) 4,
+
+       /* Device driver is fully recovered and operational */
+       PCI_ERS_RESULT_RECOVERED = (__force pci_ers_result_t) 5,
+};
+
+/* PCI bus error event callbacks */
+struct pci_error_handlers
+{
+       /* PCI bus error detected on this device */
+       pci_ers_result_t (*error_detected)(struct pci_dev *dev,
+                             enum pci_channel_state error);
+
+       /* MMIO has been re-enabled, but not DMA */
+       pci_ers_result_t (*mmio_enabled)(struct pci_dev *dev);
+
+       /* PCI Express link has been reset */
+       pci_ers_result_t (*link_reset)(struct pci_dev *dev);
+
+       /* PCI slot has been reset */
+       pci_ers_result_t (*slot_reset)(struct pci_dev *dev);
+
+       /* Device driver may resume normal operations */
+       void (*resume)(struct pci_dev *dev);
+};
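
For illustration, a driver that opts into this recovery machinery might provide a table along these lines (the mydrv_* names are hypothetical):

static pci_ers_result_t mydrv_error_detected(struct pci_dev *dev,
                                             enum pci_channel_state state)
{
        /* Quiesce the hardware and ask the core for a slot reset. */
        return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t mydrv_slot_reset(struct pci_dev *dev)
{
        /* Re-initialise the device after the reset completed. */
        return PCI_ERS_RESULT_RECOVERED;
}

static struct pci_error_handlers mydrv_err_handler = {
        .error_detected = mydrv_error_detected,
        .slot_reset     = mydrv_slot_reset,
};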
+
+/* ---------------------------------------------------------------- */
+
+struct module;
+struct pci_driver {
+       struct list_head node;
+       char *name;
+       const struct pci_device_id *id_table;   /* must be non-NULL for probe to be called */
+       int  (*probe)  (struct pci_dev *dev, const struct pci_device_id *id);   /* New device inserted */
+       void (*remove) (struct pci_dev *dev);   /* Device removed (NULL if not a hot-plug capable driver) */
+       int  (*suspend) (struct pci_dev *dev, pm_message_t state);      /* Device suspended */
+       int  (*suspend_late) (struct pci_dev *dev, pm_message_t state);
+       int  (*resume_early) (struct pci_dev *dev);
+       int  (*resume) (struct pci_dev *dev);                   /* Device woken up */
+       int  (*enable_wake) (struct pci_dev *dev, pci_power_t state, int enable);   /* Enable wake event */
+       void (*shutdown) (struct pci_dev *dev);
+
+       struct pci_error_handlers *err_handler;
+       struct device_driver    driver;
+       struct pci_dynids dynids;
+
+       int multithread_probe;
+};
+
+#define        to_pci_driver(drv) container_of(drv,struct pci_driver, driver)
+
+/**
+ * PCI_DEVICE - macro used to describe a specific pci device
+ * @vend: the 16 bit PCI Vendor ID
+ * @dev: the 16 bit PCI Device ID
+ *
+ * This macro is used to create a struct pci_device_id that matches a
+ * specific device.  The subvendor and subdevice fields will be set to
+ * PCI_ANY_ID.
+ */
+#define PCI_DEVICE(vend,dev) \
+       .vendor = (vend), .device = (dev), \
+       .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID
+
+/**
+ * PCI_DEVICE_CLASS - macro used to describe a specific pci device class
+ * @dev_class: the class, subclass, prog-if triple for this device
+ * @dev_class_mask: the class mask for this device
+ *
+ * This macro is used to create a struct pci_device_id that matches a
+ * specific PCI class.  The vendor, device, subvendor, and subdevice
+ * fields will be set to PCI_ANY_ID.
+ */
+#define PCI_DEVICE_CLASS(dev_class,dev_class_mask) \
+       .class = (dev_class), .class_mask = (dev_class_mask), \
+       .vendor = PCI_ANY_ID, .device = PCI_ANY_ID, \
+       .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID
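
For example, a match table combining both macros could look like this (the vendor/device numbers are placeholders):

static const struct pci_device_id mydrv_id_table[] = {
        { PCI_DEVICE(0x8086, 0x1234) },                              /* one specific device */
        { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_IDE << 8, 0xffff00) }, /* any IDE controller */
        { 0, }                                                       /* terminating entry */
};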
+
+/*
+ * pci_module_init is obsolete, this stays here till we fix up all usages of it
+ * in the tree.
+ */
+#define pci_module_init        pci_register_driver
+
+/* these external functions are only available when PCI support is enabled */
+#ifdef CONFIG_PCI
+
+extern struct bus_type pci_bus_type;
+
+/* Do NOT directly access these two variables, unless you are arch specific pci
+ * code, or pci core code. */
+extern struct list_head pci_root_buses;        /* list of all known PCI buses */
+extern struct list_head pci_devices;   /* list of all devices */
+
+void pcibios_fixup_bus(struct pci_bus *);
+int __must_check pcibios_enable_device(struct pci_dev *, int mask);
+char *pcibios_setup (char *str);
+
+/* Used only when drivers/pci/setup.c is used */
+void pcibios_align_resource(void *, struct resource *, resource_size_t,
+                               resource_size_t);
+void pcibios_update_irq(struct pci_dev *, int irq);
+
+/* Generic PCI functions used internally */
+
+extern struct pci_bus *pci_find_bus(int domain, int busnr);
+void pci_bus_add_devices(struct pci_bus *bus);
+struct pci_bus *pci_scan_bus_parented(struct device *parent, int bus, struct pci_ops *ops, void *sysdata);
+static inline struct pci_bus *pci_scan_bus(int bus, struct pci_ops *ops, void *sysdata)
+{
+       struct pci_bus *root_bus;
+       root_bus = pci_scan_bus_parented(NULL, bus, ops, sysdata);
+       if (root_bus)
+               pci_bus_add_devices(root_bus);
+       return root_bus;
+}
+struct pci_bus *pci_create_bus(struct device *parent, int bus, struct pci_ops *ops, void *sysdata);
+struct pci_bus * pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev, int busnr);
+int pci_scan_slot(struct pci_bus *bus, int devfn);
+struct pci_dev * pci_scan_single_device(struct pci_bus *bus, int devfn);
+void pci_device_add(struct pci_dev *dev, struct pci_bus *bus);
+unsigned int pci_scan_child_bus(struct pci_bus *bus);
+int __must_check pci_bus_add_device(struct pci_dev *dev);
+void pci_read_bridge_bases(struct pci_bus *child);
+struct resource *pci_find_parent_resource(const struct pci_dev *dev, struct resource *res);
+int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge);
+extern struct pci_dev *pci_dev_get(struct pci_dev *dev);
+extern void pci_dev_put(struct pci_dev *dev);
+extern void pci_remove_bus(struct pci_bus *b);
+extern void pci_remove_bus_device(struct pci_dev *dev);
+extern void pci_stop_bus_device(struct pci_dev *dev);
+void pci_setup_cardbus(struct pci_bus *bus);
+extern void pci_sort_breadthfirst(void);
+
+/* Generic PCI functions exported to card drivers */
+
+struct pci_dev *pci_find_device (unsigned int vendor, unsigned int device, const struct pci_dev *from);
+struct pci_dev *pci_find_device_reverse (unsigned int vendor, unsigned int device, const struct pci_dev *from);
+struct pci_dev *pci_find_slot (unsigned int bus, unsigned int devfn);
+int pci_find_capability (struct pci_dev *dev, int cap);
+int pci_find_next_capability (struct pci_dev *dev, u8 pos, int cap);
+int pci_find_ext_capability (struct pci_dev *dev, int cap);
+struct pci_bus *pci_find_next_bus(const struct pci_bus *from);
+
+struct pci_dev *pci_get_device(unsigned int vendor, unsigned int device,
+                               struct pci_dev *from);
+struct pci_dev *pci_get_device_reverse(unsigned int vendor, unsigned int device,
+                               struct pci_dev *from);
+
+struct pci_dev *pci_get_subsys (unsigned int vendor, unsigned int device,
+                               unsigned int ss_vendor, unsigned int ss_device,
+                               struct pci_dev *from);
+struct pci_dev *pci_get_slot (struct pci_bus *bus, unsigned int devfn);
+struct pci_dev *pci_get_bus_and_slot (unsigned int bus, unsigned int devfn);
+struct pci_dev *pci_get_class (unsigned int class, struct pci_dev *from);
+int pci_dev_present(const struct pci_device_id *ids);
+
+int pci_bus_read_config_byte (struct pci_bus *bus, unsigned int devfn, int where, u8 *val);
+int pci_bus_read_config_word (struct pci_bus *bus, unsigned int devfn, int where, u16 *val);
+int pci_bus_read_config_dword (struct pci_bus *bus, unsigned int devfn, int where, u32 *val);
+int pci_bus_write_config_byte (struct pci_bus *bus, unsigned int devfn, int where, u8 val);
+int pci_bus_write_config_word (struct pci_bus *bus, unsigned int devfn, int where, u16 val);
+int pci_bus_write_config_dword (struct pci_bus *bus, unsigned int devfn, int where, u32 val);
+
+static inline int pci_read_config_byte(struct pci_dev *dev, int where, u8 *val)
+{
+       return pci_bus_read_config_byte (dev->bus, dev->devfn, where, val);
+}
+static inline int pci_read_config_word(struct pci_dev *dev, int where, u16 *val)
+{
+       return pci_bus_read_config_word (dev->bus, dev->devfn, where, val);
+}
+static inline int pci_read_config_dword(struct pci_dev *dev, int where, u32 *val)
+{
+       return pci_bus_read_config_dword (dev->bus, dev->devfn, where, val);
+}
+static inline int pci_write_config_byte(struct pci_dev *dev, int where, u8 val)
+{
+       return pci_bus_write_config_byte (dev->bus, dev->devfn, where, val);
+}
+static inline int pci_write_config_word(struct pci_dev *dev, int where, u16 val)
+{
+       return pci_bus_write_config_word (dev->bus, dev->devfn, where, val);
+}
+static inline int pci_write_config_dword(struct pci_dev *dev, int where, u32 val)
+{
+       return pci_bus_write_config_dword (dev->bus, dev->devfn, where, val);
+}
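
A minimal usage sketch for the wrappers above (error checking elided; the helper name is hypothetical):

static void mydrv_dump_ids(struct pci_dev *dev)
{
        u16 vendor, device;
        u8 revision;

        pci_read_config_word(dev, PCI_VENDOR_ID, &vendor);
        pci_read_config_word(dev, PCI_DEVICE_ID, &device);
        pci_read_config_byte(dev, PCI_REVISION_ID, &revision);
        printk(KERN_INFO "PCI device %04x:%04x, revision %02x\n",
               vendor, device, revision);
}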
+
+int __must_check pci_enable_device(struct pci_dev *dev);
+int __must_check pci_enable_device_bars(struct pci_dev *dev, int mask);
+void pci_disable_device(struct pci_dev *dev);
+void pci_set_master(struct pci_dev *dev);
+#define HAVE_PCI_SET_MWI
+int __must_check pci_set_mwi(struct pci_dev *dev);
+void pci_clear_mwi(struct pci_dev *dev);
+void pci_intx(struct pci_dev *dev, int enable);
+int pci_set_dma_mask(struct pci_dev *dev, u64 mask);
+int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask);
+void pci_update_resource(struct pci_dev *dev, struct resource *res, int resno);
+int __must_check pci_assign_resource(struct pci_dev *dev, int i);
+int __must_check pci_assign_resource_fixed(struct pci_dev *dev, int i);
+void pci_restore_bars(struct pci_dev *dev);
+
+/* ROM control related routines */
+void __iomem __must_check *pci_map_rom(struct pci_dev *pdev, size_t *size);
+void __iomem __must_check *pci_map_rom_copy(struct pci_dev *pdev, size_t *size);
+void pci_unmap_rom(struct pci_dev *pdev, void __iomem *rom);
+void pci_remove_rom(struct pci_dev *pdev);
+
+/* Power management related routines */
+int pci_save_state(struct pci_dev *dev);
+int pci_restore_state(struct pci_dev *dev);
+int pci_set_power_state(struct pci_dev *dev, pci_power_t state);
+pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state);
+int pci_enable_wake(struct pci_dev *dev, pci_power_t state, int enable);
+
+/* Helper functions for low-level code (drivers/pci/setup-[bus,res].c) */
+void pci_bus_assign_resources(struct pci_bus *bus);
+void pci_bus_size_bridges(struct pci_bus *bus);
+int pci_claim_resource(struct pci_dev *, int);
+void pci_assign_unassigned_resources(void);
+void pdev_enable_device(struct pci_dev *);
+void pdev_sort_resources(struct pci_dev *, struct resource_list *);
+void pci_fixup_irqs(u8 (*)(struct pci_dev *, u8 *),
+                   int (*)(struct pci_dev *, u8, u8));
+#define HAVE_PCI_REQ_REGIONS   2
+int __must_check pci_request_regions(struct pci_dev *, const char *);
+void pci_release_regions(struct pci_dev *);
+int __must_check pci_request_region(struct pci_dev *, int, const char *);
+void pci_release_region(struct pci_dev *, int);
+
+/* drivers/pci/bus.c */
+int __must_check pci_bus_alloc_resource(struct pci_bus *bus,
+                       struct resource *res, resource_size_t size,
+                       resource_size_t align, resource_size_t min,
+                       unsigned int type_mask,
+                       void (*alignf)(void *, struct resource *,
+                               resource_size_t, resource_size_t),
+                       void *alignf_data);
+void pci_enable_bridges(struct pci_bus *bus);
+
+/* Proper probing supporting hot-pluggable devices */
+int __must_check __pci_register_driver(struct pci_driver *, struct module *);
+static inline int __must_check pci_register_driver(struct pci_driver *driver)
+{
+       return __pci_register_driver(driver, THIS_MODULE);
+}
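
Putting the pieces together, a skeletal registration might look like this (all mydrv_* symbols are hypothetical; the id table is the one sketched earlier):

static int mydrv_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
        int rc = pci_enable_device(dev);

        if (rc)
                return rc;
        pci_set_master(dev);
        return 0;
}

static void mydrv_remove(struct pci_dev *dev)
{
        pci_disable_device(dev);
}

static struct pci_driver mydrv_driver = {
        .name     = "mydrv",
        .id_table = mydrv_id_table,
        .probe    = mydrv_probe,
        .remove   = mydrv_remove,
};

static int __init mydrv_init(void)
{
        return pci_register_driver(&mydrv_driver);
}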
+
+void pci_unregister_driver(struct pci_driver *);
+void pci_remove_behind_bridge(struct pci_dev *);
+struct pci_driver *pci_dev_driver(const struct pci_dev *);
+const struct pci_device_id *pci_match_device(struct pci_driver *drv, struct pci_dev *dev);
+const struct pci_device_id *pci_match_id(const struct pci_device_id *ids, struct pci_dev *dev);
+int pci_scan_bridge(struct pci_bus *bus, struct pci_dev * dev, int max, int pass);
+
+void pci_walk_bus(struct pci_bus *top, void (*cb)(struct pci_dev *, void *),
+                 void *userdata);
+int pci_cfg_space_size(struct pci_dev *dev);
+unsigned char pci_bus_max_busnr(struct pci_bus* bus);
+
+/* kmem_cache style wrapper around pci_alloc_consistent() */
+
+#include <linux/dmapool.h>
+
+#define        pci_pool dma_pool
+#define pci_pool_create(name, pdev, size, align, allocation) \
+               dma_pool_create(name, &pdev->dev, size, align, allocation)
+#define        pci_pool_destroy(pool) dma_pool_destroy(pool)
+#define        pci_pool_alloc(pool, flags, handle) dma_pool_alloc(pool, flags, handle)
+#define        pci_pool_free(pool, vaddr, addr) dma_pool_free(pool, vaddr, addr)
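
Usage mirrors the underlying dma_pool API, for example (names and sizes are illustrative only):

static int mydrv_pool_demo(struct pci_dev *pdev)
{
        struct pci_pool *pool;
        dma_addr_t handle;
        void *buf;

        pool = pci_pool_create("mydrv_desc", pdev, 64, 8, 0);   /* 64-byte blocks */
        if (!pool)
                return -ENOMEM;
        buf = pci_pool_alloc(pool, GFP_KERNEL, &handle);
        if (!buf) {
                pci_pool_destroy(pool);
                return -ENOMEM;
        }
        /* ... hand buf/handle to the device ... */
        pci_pool_free(pool, buf, handle);
        pci_pool_destroy(pool);
        return 0;
}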
+
+enum pci_dma_burst_strategy {
+       PCI_DMA_BURST_INFINITY, /* make bursts as large as possible,
+                                  strategy_parameter is N/A */
+       PCI_DMA_BURST_BOUNDARY, /* disconnect at every strategy_parameter
+                                  byte boundaries */
+       PCI_DMA_BURST_MULTIPLE, /* disconnect at some multiple of
+                                  strategy_parameter byte boundaries */
+};
+
+#if defined(CONFIG_ISA) || defined(CONFIG_EISA)
+extern struct pci_dev *isa_bridge;
+#endif
+
+struct msix_entry {
+       u16     vector; /* kernel uses to write allocated vector */
+       u16     entry;  /* driver uses to specify entry, OS writes */
+};
+
+
+#ifndef CONFIG_PCI_MSI
+static inline void pci_scan_msi_device(struct pci_dev *dev) {}
+static inline int pci_enable_msi(struct pci_dev *dev) {return -1;}
+static inline void pci_disable_msi(struct pci_dev *dev) {}
+static inline int pci_enable_msix(struct pci_dev* dev,
+       struct msix_entry *entries, int nvec) {return -1;}
+static inline void pci_disable_msix(struct pci_dev *dev) {}
+static inline void msi_remove_pci_irq_vectors(struct pci_dev *dev) {}
+#else
+extern void pci_scan_msi_device(struct pci_dev *dev);
+extern int pci_enable_msi(struct pci_dev *dev);
+extern void pci_disable_msi(struct pci_dev *dev);
+extern int pci_enable_msix(struct pci_dev* dev,
+       struct msix_entry *entries, int nvec);
+extern void pci_disable_msix(struct pci_dev *dev);
+extern void msi_remove_pci_irq_vectors(struct pci_dev *dev);
+#endif
+
+#ifdef CONFIG_HT_IRQ
+/* The functions a driver should call */
+int  ht_create_irq(struct pci_dev *dev, int idx);
+void ht_destroy_irq(unsigned int irq);
+#endif /* CONFIG_HT_IRQ */
+
+extern void pci_block_user_cfg_access(struct pci_dev *dev);
+extern void pci_unblock_user_cfg_access(struct pci_dev *dev);
+
+/*
+ * PCI domain support.  Sometimes called PCI segment (eg by ACPI),
+ * a PCI domain is defined to be a set of PCI busses which share
+ * configuration space.
+ */
+#ifndef CONFIG_PCI_DOMAINS
+static inline int pci_domain_nr(struct pci_bus *bus) { return 0; }
+static inline int pci_proc_domain(struct pci_bus *bus)
+{
+       return 0;
+}
+#endif
+
+#else /* CONFIG_PCI is not enabled */
+
+/*
+ *  If the system does not have PCI, clearly these return errors.  Define
+ *  these as simple inline functions to avoid hair in drivers.
+ */
+
+#define _PCI_NOP(o,s,t) \
+       static inline int pci_##o##_config_##s (struct pci_dev *dev, int where, t val) \
+               { return PCIBIOS_FUNC_NOT_SUPPORTED; }
+#define _PCI_NOP_ALL(o,x)      _PCI_NOP(o,byte,u8 x) \
+                               _PCI_NOP(o,word,u16 x) \
+                               _PCI_NOP(o,dword,u32 x)
+_PCI_NOP_ALL(read, *)
+_PCI_NOP_ALL(write,)
+
+static inline struct pci_dev *pci_find_device(unsigned int vendor, unsigned int device, const struct pci_dev *from)
+{ return NULL; }
+
+static inline struct pci_dev *pci_find_slot(unsigned int bus, unsigned int devfn)
+{ return NULL; }
+
+static inline struct pci_dev *pci_get_device(unsigned int vendor,
+                               unsigned int device, struct pci_dev *from)
+{ return NULL; }
+
+static inline struct pci_dev *pci_get_device_reverse(unsigned int vendor,
+                               unsigned int device, struct pci_dev *from)
+{ return NULL; }
+
+static inline struct pci_dev *pci_get_subsys (unsigned int vendor, unsigned int device,
+unsigned int ss_vendor, unsigned int ss_device, struct pci_dev *from)
+{ return NULL; }
+
+static inline struct pci_dev *pci_get_class(unsigned int class, struct pci_dev *from)
+{ return NULL; }
+
+#define pci_dev_present(ids)   (0)
+#define pci_dev_put(dev)       do { } while (0)
+
+static inline void pci_set_master(struct pci_dev *dev) { }
+static inline int pci_enable_device(struct pci_dev *dev) { return -EIO; }
+static inline void pci_disable_device(struct pci_dev *dev) { }
+static inline int pci_set_dma_mask(struct pci_dev *dev, u64 mask) { return -EIO; }
+static inline int pci_assign_resource(struct pci_dev *dev, int i) { return -EBUSY;}
+static inline int __pci_register_driver(struct pci_driver *drv, struct module *owner) { return 0;}
+static inline int pci_register_driver(struct pci_driver *drv) { return 0;}
+static inline void pci_unregister_driver(struct pci_driver *drv) { }
+static inline int pci_find_capability (struct pci_dev *dev, int cap) {return 0; }
+static inline int pci_find_next_capability (struct pci_dev *dev, u8 post, int cap) { return 0; }
+static inline int pci_find_ext_capability (struct pci_dev *dev, int cap) {return 0; }
+static inline const struct pci_device_id *pci_match_device(const struct pci_device_id *ids, const struct pci_dev *dev) { return NULL; }
+
+/* Power management related routines */
+static inline int pci_save_state(struct pci_dev *dev) { return 0; }
+static inline int pci_restore_state(struct pci_dev *dev) { return 0; }
+static inline int pci_set_power_state(struct pci_dev *dev, pci_power_t state) { return 0; }
+static inline pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state) { return PCI_D0; }
+static inline int pci_enable_wake(struct pci_dev *dev, pci_power_t state, int enable) { return 0; }
+
+#define        isa_bridge      ((struct pci_dev *)NULL)
+
+#define pci_dma_burst_advice(pdev, strat, strategy_parameter) do { } while (0)
+
+static inline void pci_block_user_cfg_access(struct pci_dev *dev) { }
+static inline void pci_unblock_user_cfg_access(struct pci_dev *dev) { }
+
+#endif /* CONFIG_PCI */
+
+/* Include architecture-dependent settings and functions */
+
+#include <asm/pci.h>
+
+/* these helpers provide future and backwards compatibility
+ * for accessing popular PCI BAR info */
+#define pci_resource_start(dev,bar)   ((dev)->resource[(bar)].start)
+#define pci_resource_end(dev,bar)     ((dev)->resource[(bar)].end)
+#define pci_resource_flags(dev,bar)   ((dev)->resource[(bar)].flags)
+#define pci_resource_len(dev,bar) \
+       ((pci_resource_start((dev),(bar)) == 0 &&       \
+         pci_resource_end((dev),(bar)) ==              \
+         pci_resource_start((dev),(bar))) ? 0 :        \
+                                                       \
+        (pci_resource_end((dev),(bar)) -               \
+         pci_resource_start((dev),(bar)) + 1))
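
For instance, mapping BAR 0 with these helpers might look like this (a sketch; the helper name is hypothetical and error handling is minimal):

static void __iomem *mydrv_map_bar0(struct pci_dev *pdev)
{
        if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM))
                return NULL;            /* BAR 0 is not a memory BAR */
        return ioremap(pci_resource_start(pdev, 0),
                       pci_resource_len(pdev, 0));
}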
+
+/* Similar to the helpers above, these manipulate per-pci_dev
+ * driver-specific data.  They are really just a wrapper around
+ * the generic device structure functions of these calls.
+ */
+static inline void *pci_get_drvdata (struct pci_dev *pdev)
+{
+       return dev_get_drvdata(&pdev->dev);
+}
+
+static inline void pci_set_drvdata (struct pci_dev *pdev, void *data)
+{
+       dev_set_drvdata(&pdev->dev, data);
+}
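
Typically probe() allocates a per-device structure and stashes it here, and remove() fetches it back (mydrv_priv and the helpers are hypothetical):

struct mydrv_priv {
        void __iomem *regs;
};

static int mydrv_attach(struct pci_dev *pdev)
{
        struct mydrv_priv *priv = kmalloc(sizeof(*priv), GFP_KERNEL);

        if (!priv)
                return -ENOMEM;
        pci_set_drvdata(pdev, priv);
        return 0;
}

static void mydrv_detach(struct pci_dev *pdev)
{
        struct mydrv_priv *priv = pci_get_drvdata(pdev);

        kfree(priv);
        pci_set_drvdata(pdev, NULL);
}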
+
+/* If you want to know what to call your pci_dev, ask this function.
+ * Again, it's a wrapper around the generic device.
+ */
+static inline char *pci_name(struct pci_dev *pdev)
+{
+       return pdev->dev.bus_id;
+}
+
+
+/* Some archs don't want to expose struct resource to userland as-is
+ * in sysfs and /proc
+ */
+#ifndef HAVE_ARCH_PCI_RESOURCE_TO_USER
+static inline void pci_resource_to_user(const struct pci_dev *dev, int bar,
+                const struct resource *rsrc, resource_size_t *start,
+               resource_size_t *end)
+{
+       *start = rsrc->start;
+       *end = rsrc->end;
+}
+#endif /* HAVE_ARCH_PCI_RESOURCE_TO_USER */
+
+
+/*
+ *  The world is not perfect and supplies us with broken PCI devices.
+ *  For at least a part of these bugs we need a work-around, so both
+ *  generic (drivers/pci/quirks.c) and per-architecture code can define
+ *  fixup hooks to be called for particular buggy devices.
+ */
+
+struct pci_fixup {
+       u16 vendor, device;     /* You can use PCI_ANY_ID here of course */
+       void (*hook)(struct pci_dev *dev);
+};
+
+enum pci_fixup_pass {
+       pci_fixup_early,        /* Before probing BARs */
+       pci_fixup_header,       /* After reading configuration header */
+       pci_fixup_final,        /* Final phase of device fixups */
+       pci_fixup_enable,       /* pci_enable_device() time */
+};
+
+/* Anonymous variables would be nice... */
+#define DECLARE_PCI_FIXUP_SECTION(section, name, vendor, device, hook) \
+       static const struct pci_fixup __pci_fixup_##name __attribute_used__ \
+       __attribute__((__section__(#section))) = { vendor, device, hook };
+#define DECLARE_PCI_FIXUP_EARLY(vendor, device, hook)                  \
+       DECLARE_PCI_FIXUP_SECTION(.pci_fixup_early,                     \
+                       vendor##device##hook, vendor, device, hook)
+#define DECLARE_PCI_FIXUP_HEADER(vendor, device, hook)                 \
+       DECLARE_PCI_FIXUP_SECTION(.pci_fixup_header,                    \
+                       vendor##device##hook, vendor, device, hook)
+#define DECLARE_PCI_FIXUP_FINAL(vendor, device, hook)                  \
+       DECLARE_PCI_FIXUP_SECTION(.pci_fixup_final,                     \
+                       vendor##device##hook, vendor, device, hook)
+#define DECLARE_PCI_FIXUP_ENABLE(vendor, device, hook)                 \
+       DECLARE_PCI_FIXUP_SECTION(.pci_fixup_enable,                    \
+                       vendor##device##hook, vendor, device, hook)
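
A typical quirk pairs one of these macros with a small hook, e.g. (the vendor/device values and the workaround itself are hypothetical):

static void quirk_example_latency(struct pci_dev *dev)
{
        /* Pretend this device needs a larger latency timer. */
        pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0x40);
}
DECLARE_PCI_FIXUP_FINAL(0x10de, 0x0123, quirk_example_latency);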
+
+
+void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev);
+
+extern int pci_pci_problems;
+#define PCIPCI_FAIL            1       /* No PCI PCI DMA */
+#define PCIPCI_TRITON          2
+#define PCIPCI_NATOMA          4
+#define PCIPCI_VIAETBF         8
+#define PCIPCI_VSFX            16
+#define PCIPCI_ALIMAGIK                32      /* Need low latency setting */
+#define PCIAGP_FAIL            64      /* No PCI to AGP DMA */
+
+#endif /* __KERNEL__ */
+#endif /* LINUX_PCI_H */
diff -r 46c44b5e6a1b -r 80c5b5914b79 xen/include/asm-ia64/linux/README.origin
--- a/xen/include/asm-ia64/linux/README.origin  Wed Dec 20 08:53:42 2006 -0700
+++ b/xen/include/asm-ia64/linux/README.origin  Wed Dec 20 14:55:02 2006 -0700
@@ -24,3 +24,15 @@ timex.h                      -> linux/include/linux/timex.h
 timex.h                        -> linux/include/linux/timex.h
 topology.h             -> linux/include/linux/topology.h
 wait.h                 -> linux/include/linux/wait.h
+
+# The files below are from Linux-2.6.19
+completion.h           -> linux/include/linux/completion.h
+ioport.h               -> linux/include/linux/ioport.h
+klist.h                        -> linux/include/linux/klist.h
+kref.h                 -> linux/include/linux/kref.h
+mod_devicetable.h      -> linux/include/linux/mod_devicetable.h
+pci_ids.h              -> linux/include/linux/pci_ids.h
+pci_regs.h             -> linux/include/linux/pci_regs.h
+pm.h                   -> linux/include/linux/pm.h
+sysfs.h                        -> linux/include/linux/sysfs.h
+
diff -r 46c44b5e6a1b -r 80c5b5914b79 xen/include/asm-ia64/linux/asm/README.origin
--- a/xen/include/asm-ia64/linux/asm/README.origin      Wed Dec 20 08:53:42 2006 -0700
+++ b/xen/include/asm-ia64/linux/asm/README.origin      Wed Dec 20 14:55:02 2006 -0700
@@ -21,7 +21,6 @@ ioctl.h                       -> linux/include/asm-ia64/ioct
 ioctl.h                        -> linux/include/asm-ia64/ioctl.h
 irq.h                  -> linux/include/asm-ia64/irq.h
 linkage.h              -> linux/include/asm-ia64/linkage.h
-machvec.h              -> linux/include/asm-ia64/machvec.h
 machvec_hpsim.h                -> linux/include/asm-ia64/machvec_hpsim.h
 mca.h                  -> linux/include/asm-ia64/mca.h
 nodedata.h             -> linux/include/asm-ia64/nodedate.h
@@ -41,3 +40,6 @@ unaligned.h           -> linux/include/asm-ia64/u
 unaligned.h            -> linux/include/asm-ia64/unaligned.h
 unistd.h               -> linux/include/asm-ia64/unistd.h
 unwind.h               -> linux/include/asm-ia64/unwind.h
+
+# The files below are from Linux-2.6.19
+machvec_init.h         -> linux/include/asm-ia64/machvec_init.h
diff -r 46c44b5e6a1b -r 80c5b5914b79 xen/include/asm-ia64/linux/asm/machvec.h
--- a/xen/include/asm-ia64/linux/asm/machvec.h  Wed Dec 20 08:53:42 2006 -0700
+++ /dev/null   Thu Jan 01 00:00:00 1970 +0000
@@ -1,390 +0,0 @@
-/*
- * Machine vector for IA-64.
- *
- * Copyright (C) 1999 Silicon Graphics, Inc.
- * Copyright (C) Srinivasa Thirumalachar <sprasad@xxxxxxxxxxxx>
- * Copyright (C) Vijay Chander <vijay@xxxxxxxxxxxx>
- * Copyright (C) 1999-2001, 2003-2004 Hewlett-Packard Co.
- *     David Mosberger-Tang <davidm@xxxxxxxxxx>
- */
-#ifndef _ASM_IA64_MACHVEC_H
-#define _ASM_IA64_MACHVEC_H
-
-#include <linux/config.h>
-#include <linux/types.h>
-
-/* forward declarations: */
-struct device;
-struct pt_regs;
-struct scatterlist;
-struct page;
-struct mm_struct;
-struct pci_bus;
-
-typedef void ia64_mv_setup_t (char **);
-typedef void ia64_mv_cpu_init_t (void);
-typedef void ia64_mv_irq_init_t (void);
-typedef void ia64_mv_send_ipi_t (int, int, int, int);
-typedef void ia64_mv_timer_interrupt_t (int, void *, struct pt_regs *);
-typedef void ia64_mv_global_tlb_purge_t (unsigned long, unsigned long, unsigned long);
-typedef void ia64_mv_tlb_migrate_finish_t (struct mm_struct *);
-typedef unsigned int ia64_mv_local_vector_to_irq (u8);
-typedef char *ia64_mv_pci_get_legacy_mem_t (struct pci_bus *);
-typedef int ia64_mv_pci_legacy_read_t (struct pci_bus *, u16 port, u32 *val,
-                                      u8 size);
-typedef int ia64_mv_pci_legacy_write_t (struct pci_bus *, u16 port, u32 val,
-                                       u8 size);
-
-/* DMA-mapping interface: */
-typedef void ia64_mv_dma_init (void);
-typedef void *ia64_mv_dma_alloc_coherent (struct device *, size_t, dma_addr_t *, int);
-typedef void ia64_mv_dma_free_coherent (struct device *, size_t, void *, dma_addr_t);
-typedef dma_addr_t ia64_mv_dma_map_single (struct device *, void *, size_t, int);
-typedef void ia64_mv_dma_unmap_single (struct device *, dma_addr_t, size_t, int);
-typedef int ia64_mv_dma_map_sg (struct device *, struct scatterlist *, int, int);
-typedef void ia64_mv_dma_unmap_sg (struct device *, struct scatterlist *, int, int);
-typedef void ia64_mv_dma_sync_single_for_cpu (struct device *, dma_addr_t, size_t, int);
-typedef void ia64_mv_dma_sync_sg_for_cpu (struct device *, struct scatterlist *, int, int);
-typedef void ia64_mv_dma_sync_single_for_device (struct device *, dma_addr_t, size_t, int);
-typedef void ia64_mv_dma_sync_sg_for_device (struct device *, struct scatterlist *, int, int);
-typedef int ia64_mv_dma_mapping_error (dma_addr_t dma_addr);
-typedef int ia64_mv_dma_supported (struct device *, u64);
-
-/*
- * WARNING: The legacy I/O space is _architected_.  Platforms are
- * expected to follow this architected model (see Section 10.7 in the
- * IA-64 Architecture Software Developer's Manual).  Unfortunately,
- * some broken machines do not follow that model, which is why we have
- * to make the inX/outX operations part of the machine vector.
- * Platform designers should follow the architected model whenever
- * possible.
- */
-typedef unsigned int ia64_mv_inb_t (unsigned long);
-typedef unsigned int ia64_mv_inw_t (unsigned long);
-typedef unsigned int ia64_mv_inl_t (unsigned long);
-typedef void ia64_mv_outb_t (unsigned char, unsigned long);
-typedef void ia64_mv_outw_t (unsigned short, unsigned long);
-typedef void ia64_mv_outl_t (unsigned int, unsigned long);
-typedef void ia64_mv_mmiowb_t (void);
-typedef unsigned char ia64_mv_readb_t (const volatile void __iomem *);
-typedef unsigned short ia64_mv_readw_t (const volatile void __iomem *);
-typedef unsigned int ia64_mv_readl_t (const volatile void __iomem *);
-typedef unsigned long ia64_mv_readq_t (const volatile void __iomem *);
-typedef unsigned char ia64_mv_readb_relaxed_t (const volatile void __iomem *);
-typedef unsigned short ia64_mv_readw_relaxed_t (const volatile void __iomem *);
-typedef unsigned int ia64_mv_readl_relaxed_t (const volatile void __iomem *);
-typedef unsigned long ia64_mv_readq_relaxed_t (const volatile void __iomem *);
-
-static inline void
-machvec_noop (void)
-{
-}
-
-static inline void
-machvec_noop_mm (struct mm_struct *mm)
-{
-}
-
-extern void machvec_setup (char **);
-extern void machvec_timer_interrupt (int, void *, struct pt_regs *);
-extern void machvec_dma_sync_single (struct device *, dma_addr_t, size_t, int);
-extern void machvec_dma_sync_sg (struct device *, struct scatterlist *, int, int);
-extern void machvec_tlb_migrate_finish (struct mm_struct *);
-
-# if defined (CONFIG_IA64_HP_SIM)
-#  include <asm/machvec_hpsim.h>
-# elif defined (CONFIG_IA64_DIG)
-#  include <asm/machvec_dig.h>
-# elif defined (CONFIG_IA64_HP_ZX1)
-#  include <asm/machvec_hpzx1.h>
-# elif defined (CONFIG_IA64_HP_ZX1_SWIOTLB)
-#  include <asm/machvec_hpzx1_swiotlb.h>
-# elif defined (CONFIG_IA64_SGI_SN2)
-#  include <asm/machvec_sn2.h>
-# elif defined (CONFIG_IA64_GENERIC)
-
-# ifdef MACHVEC_PLATFORM_HEADER
-#  include MACHVEC_PLATFORM_HEADER
-# else
-#  define platform_name                ia64_mv.name
-#  define platform_setup       ia64_mv.setup
-#  define platform_cpu_init    ia64_mv.cpu_init
-#  define platform_irq_init    ia64_mv.irq_init
-#  define platform_send_ipi    ia64_mv.send_ipi
-#  define platform_timer_interrupt     ia64_mv.timer_interrupt
-#  define platform_global_tlb_purge    ia64_mv.global_tlb_purge
-#  define platform_tlb_migrate_finish  ia64_mv.tlb_migrate_finish
-#  define platform_dma_init            ia64_mv.dma_init
-#  define platform_dma_alloc_coherent  ia64_mv.dma_alloc_coherent
-#  define platform_dma_free_coherent   ia64_mv.dma_free_coherent
-#  define platform_dma_map_single      ia64_mv.dma_map_single
-#  define platform_dma_unmap_single    ia64_mv.dma_unmap_single
-#  define platform_dma_map_sg          ia64_mv.dma_map_sg
-#  define platform_dma_unmap_sg                ia64_mv.dma_unmap_sg
-#  define platform_dma_sync_single_for_cpu ia64_mv.dma_sync_single_for_cpu
-#  define platform_dma_sync_sg_for_cpu ia64_mv.dma_sync_sg_for_cpu
-#  define platform_dma_sync_single_for_device ia64_mv.dma_sync_single_for_device
-#  define platform_dma_sync_sg_for_device ia64_mv.dma_sync_sg_for_device
-#  define platform_dma_mapping_error           ia64_mv.dma_mapping_error
-#  define platform_dma_supported       ia64_mv.dma_supported
-#  define platform_local_vector_to_irq ia64_mv.local_vector_to_irq
-#  define platform_pci_get_legacy_mem  ia64_mv.pci_get_legacy_mem
-#  define platform_pci_legacy_read     ia64_mv.pci_legacy_read
-#  define platform_pci_legacy_write    ia64_mv.pci_legacy_write
-#  define platform_inb         ia64_mv.inb
-#  define platform_inw         ia64_mv.inw
-#  define platform_inl         ia64_mv.inl
-#  define platform_outb                ia64_mv.outb
-#  define platform_outw                ia64_mv.outw
-#  define platform_outl                ia64_mv.outl
-#  define platform_mmiowb      ia64_mv.mmiowb
-#  define platform_readb        ia64_mv.readb
-#  define platform_readw        ia64_mv.readw
-#  define platform_readl        ia64_mv.readl
-#  define platform_readq        ia64_mv.readq
-#  define platform_readb_relaxed        ia64_mv.readb_relaxed
-#  define platform_readw_relaxed        ia64_mv.readw_relaxed
-#  define platform_readl_relaxed        ia64_mv.readl_relaxed
-#  define platform_readq_relaxed        ia64_mv.readq_relaxed
-# endif
-
-/* __attribute__((__aligned__(16))) is required to make size of the
- * structure multiple of 16 bytes.
- * This will fillup the holes created because of section 3.3.1 in
- * Software Conventions guide.
- */
-struct ia64_machine_vector {
-       const char *name;
-       ia64_mv_setup_t *setup;
-       ia64_mv_cpu_init_t *cpu_init;
-       ia64_mv_irq_init_t *irq_init;
-       ia64_mv_send_ipi_t *send_ipi;
-       ia64_mv_timer_interrupt_t *timer_interrupt;
-       ia64_mv_global_tlb_purge_t *global_tlb_purge;
-       ia64_mv_tlb_migrate_finish_t *tlb_migrate_finish;
-       ia64_mv_dma_init *dma_init;
-       ia64_mv_dma_alloc_coherent *dma_alloc_coherent;
-       ia64_mv_dma_free_coherent *dma_free_coherent;
-       ia64_mv_dma_map_single *dma_map_single;
-       ia64_mv_dma_unmap_single *dma_unmap_single;
-       ia64_mv_dma_map_sg *dma_map_sg;
-       ia64_mv_dma_unmap_sg *dma_unmap_sg;
-       ia64_mv_dma_sync_single_for_cpu *dma_sync_single_for_cpu;
-       ia64_mv_dma_sync_sg_for_cpu *dma_sync_sg_for_cpu;
-       ia64_mv_dma_sync_single_for_device *dma_sync_single_for_device;
-       ia64_mv_dma_sync_sg_for_device *dma_sync_sg_for_device;
-       ia64_mv_dma_mapping_error *dma_mapping_error;
-       ia64_mv_dma_supported *dma_supported;
-       ia64_mv_local_vector_to_irq *local_vector_to_irq;
-       ia64_mv_pci_get_legacy_mem_t *pci_get_legacy_mem;
-       ia64_mv_pci_legacy_read_t *pci_legacy_read;
-       ia64_mv_pci_legacy_write_t *pci_legacy_write;
-       ia64_mv_inb_t *inb;
-       ia64_mv_inw_t *inw;
-       ia64_mv_inl_t *inl;
-       ia64_mv_outb_t *outb;
-       ia64_mv_outw_t *outw;
-       ia64_mv_outl_t *outl;
-       ia64_mv_mmiowb_t *mmiowb;
-       ia64_mv_readb_t *readb;
-       ia64_mv_readw_t *readw;
-       ia64_mv_readl_t *readl;
-       ia64_mv_readq_t *readq;
-       ia64_mv_readb_relaxed_t *readb_relaxed;
-       ia64_mv_readw_relaxed_t *readw_relaxed;
-       ia64_mv_readl_relaxed_t *readl_relaxed;
-       ia64_mv_readq_relaxed_t *readq_relaxed;
-} __attribute__((__aligned__(16))); /* align attrib? see above comment */
-
-#define MACHVEC_INIT(name)                     \
-{                                              \
-       #name,                                  \
-       platform_setup,                         \
-       platform_cpu_init,                      \
-       platform_irq_init,                      \
-       platform_send_ipi,                      \
-       platform_timer_interrupt,               \
-       platform_global_tlb_purge,              \
-       platform_tlb_migrate_finish,            \
-       platform_dma_init,                      \
-       platform_dma_alloc_coherent,            \
-       platform_dma_free_coherent,             \
-       platform_dma_map_single,                \
-       platform_dma_unmap_single,              \
-       platform_dma_map_sg,                    \
-       platform_dma_unmap_sg,                  \
-       platform_dma_sync_single_for_cpu,       \
-       platform_dma_sync_sg_for_cpu,           \
-       platform_dma_sync_single_for_device,    \
-       platform_dma_sync_sg_for_device,        \
-       platform_dma_mapping_error,                     \
-       platform_dma_supported,                 \
-       platform_local_vector_to_irq,           \
-       platform_pci_get_legacy_mem,            \
-       platform_pci_legacy_read,               \
-       platform_pci_legacy_write,              \
-       platform_inb,                           \
-       platform_inw,                           \
-       platform_inl,                           \
-       platform_outb,                          \
-       platform_outw,                          \
-       platform_outl,                          \
-       platform_mmiowb,                        \
-       platform_readb,                         \
-       platform_readw,                         \
-       platform_readl,                         \
-       platform_readq,                         \
-       platform_readb_relaxed,                 \
-       platform_readw_relaxed,                 \
-       platform_readl_relaxed,                 \
-       platform_readq_relaxed,                 \
-}
-
-extern struct ia64_machine_vector ia64_mv;
-extern void machvec_init (const char *name);
-
-# else
-#  error Unknown configuration.  Update asm-ia64/machvec.h.
-# endif /* CONFIG_IA64_GENERIC */
-
-/*
- * Declare default routines which aren't declared anywhere else:
- */
-extern ia64_mv_dma_init                        swiotlb_init;
-extern ia64_mv_dma_alloc_coherent      swiotlb_alloc_coherent;
-extern ia64_mv_dma_free_coherent       swiotlb_free_coherent;
-extern ia64_mv_dma_map_single          swiotlb_map_single;
-extern ia64_mv_dma_unmap_single                swiotlb_unmap_single;
-extern ia64_mv_dma_map_sg              swiotlb_map_sg;
-extern ia64_mv_dma_unmap_sg            swiotlb_unmap_sg;
-extern ia64_mv_dma_sync_single_for_cpu swiotlb_sync_single_for_cpu;
-extern ia64_mv_dma_sync_sg_for_cpu     swiotlb_sync_sg_for_cpu;
-extern ia64_mv_dma_sync_single_for_device swiotlb_sync_single_for_device;
-extern ia64_mv_dma_sync_sg_for_device  swiotlb_sync_sg_for_device;
-extern ia64_mv_dma_mapping_error       swiotlb_dma_mapping_error;
-extern ia64_mv_dma_supported           swiotlb_dma_supported;
-
-/*
- * Define default versions so we can extend machvec for new platforms without having
- * to update the machvec files for all existing platforms.
- */
-#ifndef platform_setup
-# define platform_setup                        machvec_setup
-#endif
-#ifndef platform_cpu_init
-# define platform_cpu_init             machvec_noop
-#endif
-#ifndef platform_irq_init
-# define platform_irq_init             machvec_noop
-#endif
-
-#ifndef platform_send_ipi
-# define platform_send_ipi             ia64_send_ipi   /* default to architected version */
-#endif
-#ifndef platform_timer_interrupt
-# define platform_timer_interrupt      machvec_timer_interrupt
-#endif
-#ifndef platform_global_tlb_purge
-# define platform_global_tlb_purge     ia64_global_tlb_purge /* default to architected version */
-#endif
-#ifndef platform_tlb_migrate_finish
-# define platform_tlb_migrate_finish   machvec_noop_mm
-#endif
-#ifndef platform_dma_init
-# define platform_dma_init             swiotlb_init
-#endif
-#ifndef platform_dma_alloc_coherent
-# define platform_dma_alloc_coherent   swiotlb_alloc_coherent
-#endif
-#ifndef platform_dma_free_coherent
-# define platform_dma_free_coherent    swiotlb_free_coherent
-#endif
-#ifndef platform_dma_map_single
-# define platform_dma_map_single       swiotlb_map_single
-#endif
-#ifndef platform_dma_unmap_single
-# define platform_dma_unmap_single     swiotlb_unmap_single
-#endif
-#ifndef platform_dma_map_sg
-# define platform_dma_map_sg           swiotlb_map_sg
-#endif
-#ifndef platform_dma_unmap_sg
-# define platform_dma_unmap_sg         swiotlb_unmap_sg
-#endif
-#ifndef platform_dma_sync_single_for_cpu
-# define platform_dma_sync_single_for_cpu      swiotlb_sync_single_for_cpu
-#endif
-#ifndef platform_dma_sync_sg_for_cpu
-# define platform_dma_sync_sg_for_cpu          swiotlb_sync_sg_for_cpu
-#endif
-#ifndef platform_dma_sync_single_for_device
-# define platform_dma_sync_single_for_device   swiotlb_sync_single_for_device
-#endif
-#ifndef platform_dma_sync_sg_for_device
-# define platform_dma_sync_sg_for_device       swiotlb_sync_sg_for_device
-#endif
-#ifndef platform_dma_mapping_error
-# define platform_dma_mapping_error            swiotlb_dma_mapping_error
-#endif
-#ifndef platform_dma_supported
-# define  platform_dma_supported       swiotlb_dma_supported
-#endif
-#ifndef platform_local_vector_to_irq
-# define platform_local_vector_to_irq  __ia64_local_vector_to_irq
-#endif
-#ifndef platform_pci_get_legacy_mem
-# define platform_pci_get_legacy_mem   ia64_pci_get_legacy_mem
-#endif
-#ifndef platform_pci_legacy_read
-# define platform_pci_legacy_read      ia64_pci_legacy_read
-#endif
-#ifndef platform_pci_legacy_write
-# define platform_pci_legacy_write     ia64_pci_legacy_write
-#endif
-#ifndef platform_inb
-# define platform_inb          __ia64_inb
-#endif
-#ifndef platform_inw
-# define platform_inw          __ia64_inw
-#endif
-#ifndef platform_inl
-# define platform_inl          __ia64_inl
-#endif
-#ifndef platform_outb
-# define platform_outb         __ia64_outb
-#endif
-#ifndef platform_outw
-# define platform_outw         __ia64_outw
-#endif
-#ifndef platform_outl
-# define platform_outl         __ia64_outl
-#endif
-#ifndef platform_mmiowb
-# define platform_mmiowb       __ia64_mmiowb
-#endif
-#ifndef platform_readb
-# define platform_readb                __ia64_readb
-#endif
-#ifndef platform_readw
-# define platform_readw                __ia64_readw
-#endif
-#ifndef platform_readl
-# define platform_readl                __ia64_readl
-#endif
-#ifndef platform_readq
-# define platform_readq                __ia64_readq
-#endif
-#ifndef platform_readb_relaxed
-# define platform_readb_relaxed        __ia64_readb_relaxed
-#endif
-#ifndef platform_readw_relaxed
-# define platform_readw_relaxed        __ia64_readw_relaxed
-#endif
-#ifndef platform_readl_relaxed
-# define platform_readl_relaxed        __ia64_readl_relaxed
-#endif
-#ifndef platform_readq_relaxed
-# define platform_readq_relaxed        __ia64_readq_relaxed
-#endif
-
-#endif /* _ASM_IA64_MACHVEC_H */
diff -r 46c44b5e6a1b -r 80c5b5914b79 xen/include/asm-ia64/linux/asm/machvec_init.h
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-ia64/linux/asm/machvec_init.h     Wed Dec 20 14:55:02 2006 -0700
@@ -0,0 +1,32 @@
+#include <asm/machvec.h>
+
+extern ia64_mv_send_ipi_t ia64_send_ipi;
+extern ia64_mv_global_tlb_purge_t ia64_global_tlb_purge;
+extern ia64_mv_local_vector_to_irq __ia64_local_vector_to_irq;
+extern ia64_mv_pci_get_legacy_mem_t ia64_pci_get_legacy_mem;
+extern ia64_mv_pci_legacy_read_t ia64_pci_legacy_read;
+extern ia64_mv_pci_legacy_write_t ia64_pci_legacy_write;
+
+extern ia64_mv_inb_t __ia64_inb;
+extern ia64_mv_inw_t __ia64_inw;
+extern ia64_mv_inl_t __ia64_inl;
+extern ia64_mv_outb_t __ia64_outb;
+extern ia64_mv_outw_t __ia64_outw;
+extern ia64_mv_outl_t __ia64_outl;
+extern ia64_mv_mmiowb_t __ia64_mmiowb;
+extern ia64_mv_readb_t __ia64_readb;
+extern ia64_mv_readw_t __ia64_readw;
+extern ia64_mv_readl_t __ia64_readl;
+extern ia64_mv_readq_t __ia64_readq;
+extern ia64_mv_readb_t __ia64_readb_relaxed;
+extern ia64_mv_readw_t __ia64_readw_relaxed;
+extern ia64_mv_readl_t __ia64_readl_relaxed;
+extern ia64_mv_readq_t __ia64_readq_relaxed;
+
+#define MACHVEC_HELPER(name)                                                                   \
+ struct ia64_machine_vector machvec_##name __attribute__ ((unused, __section__ (".machvec")))  \
+       = MACHVEC_INIT(name);
+
+#define MACHVEC_DEFINE(name)   MACHVEC_HELPER(name)
+
+MACHVEC_DEFINE(MACHVEC_PLATFORM_NAME)
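
A platform's machvec.c then only needs to name itself before pulling in this header; the dig variant, for instance, boils down to something like the following (a sketch, not a verbatim copy of the file added elsewhere in this changeset):

#define MACHVEC_PLATFORM_NAME           dig
#define MACHVEC_PLATFORM_HEADER         <asm/machvec_dig.h>
#include <asm/machvec_init.h>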
diff -r 46c44b5e6a1b -r 80c5b5914b79 xen/include/asm-ia64/linux/asm/pci.h
--- a/xen/include/asm-ia64/linux/asm/pci.h      Wed Dec 20 08:53:42 2006 -0700
+++ /dev/null   Thu Jan 01 00:00:00 1970 +0000
@@ -1,161 +0,0 @@
-#ifndef _ASM_IA64_PCI_H
-#define _ASM_IA64_PCI_H
-
-#include <linux/mm.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-#include <linux/string.h>
-#include <linux/types.h>
-
-#include <asm/io.h>
-#include <asm/scatterlist.h>
-
-/*
- * Can be used to override the logic in pci_scan_bus for skipping already-configured bus
- * numbers - to be used for buggy BIOSes or architectures with incomplete PCI setup by the
- * loader.
- */
-#define pcibios_assign_all_busses()     0
-#define pcibios_scan_all_fns(a, b)     0
-
-#define PCIBIOS_MIN_IO         0x1000
-#define PCIBIOS_MIN_MEM                0x10000000
-
-void pcibios_config_init(void);
-
-struct pci_dev;
-
-/*
- * PCI_DMA_BUS_IS_PHYS should be set to 1 if there is _necessarily_ a direct correspondence
- * between device bus addresses and CPU physical addresses.  Platforms with a hardware I/O
- * MMU _must_ turn this off to suppress the bounce buffer handling code in the block and
- * network device layers.  Platforms with separate bus address spaces _must_ turn this off
- * and provide a device DMA mapping implementation that takes care of the necessary
- * address translation.
- *
- * For now, the ia64 platforms which may have separate/multiple bus address spaces all
- * have I/O MMUs which support the merging of physically discontiguous buffers, so we can
- * use that as the sole factor to determine the setting of PCI_DMA_BUS_IS_PHYS.
- */
-extern unsigned long ia64_max_iommu_merge_mask;
-#define PCI_DMA_BUS_IS_PHYS    (ia64_max_iommu_merge_mask == ~0UL)
-
-static inline void
-pcibios_set_master (struct pci_dev *dev)
-{
-       /* No special bus mastering setup handling */
-}
-
-static inline void
-pcibios_penalize_isa_irq (int irq, int active)
-{
-       /* We don't do dynamic PCI IRQ allocation */
-}
-
-#define HAVE_ARCH_PCI_MWI 1
-extern int pcibios_prep_mwi (struct pci_dev *);
-
-#include <asm-generic/pci-dma-compat.h>
-
-/* pci_unmap_{single,page} is not a nop, thus... */
-#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)      \
-       dma_addr_t ADDR_NAME;
-#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)                \
-       __u32 LEN_NAME;
-#define pci_unmap_addr(PTR, ADDR_NAME)                 \
-       ((PTR)->ADDR_NAME)
-#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL)                \
-       (((PTR)->ADDR_NAME) = (VAL))
-#define pci_unmap_len(PTR, LEN_NAME)                   \
-       ((PTR)->LEN_NAME)
-#define pci_unmap_len_set(PTR, LEN_NAME, VAL)          \
-       (((PTR)->LEN_NAME) = (VAL))
-
-/* The ia64 platform always supports 64-bit addressing. */
-#define pci_dac_dma_supported(pci_dev, mask)           (1)
-#define pci_dac_page_to_dma(dev,pg,off,dir)            ((dma_addr_t) page_to_bus(pg) + (off))
-#define pci_dac_dma_to_page(dev,dma_addr)              (virt_to_page(bus_to_virt(dma_addr)))
-#define pci_dac_dma_to_offset(dev,dma_addr)            offset_in_page(dma_addr)
-#define pci_dac_dma_sync_single_for_cpu(dev,dma_addr,len,dir)  do { } while (0)
-#define pci_dac_dma_sync_single_for_device(dev,dma_addr,len,dir)       do { mb(); } while (0)
-
-#define sg_dma_len(sg)         ((sg)->dma_length)
-#define sg_dma_address(sg)     ((sg)->dma_address)
-
-#ifdef CONFIG_PCI
-static inline void pci_dma_burst_advice(struct pci_dev *pdev,
-                                       enum pci_dma_burst_strategy *strat,
-                                       unsigned long *strategy_parameter)
-{
-       unsigned long cacheline_size;
-       u8 byte;
-
-       pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &byte);
-       if (byte == 0)
-               cacheline_size = 1024;
-       else
-               cacheline_size = (int) byte * 4;
-
-       *strat = PCI_DMA_BURST_MULTIPLE;
-       *strategy_parameter = cacheline_size;
-}
-#endif
-
-#define HAVE_PCI_MMAP
-extern int pci_mmap_page_range (struct pci_dev *dev, struct vm_area_struct *vma,
-                               enum pci_mmap_state mmap_state, int write_combine);
-#define HAVE_PCI_LEGACY
-extern int pci_mmap_legacy_page_range(struct pci_bus *bus,
-                                     struct vm_area_struct *vma);
-extern ssize_t pci_read_legacy_io(struct kobject *kobj, char *buf, loff_t off,
-                                 size_t count);
-extern ssize_t pci_write_legacy_io(struct kobject *kobj, char *buf, loff_t off,
-                                  size_t count);
-extern int pci_mmap_legacy_mem(struct kobject *kobj,
-                              struct bin_attribute *attr,
-                              struct vm_area_struct *vma);
-
-#define pci_get_legacy_mem platform_pci_get_legacy_mem
-#define pci_legacy_read platform_pci_legacy_read
-#define pci_legacy_write platform_pci_legacy_write
-
-struct pci_window {
-       struct resource resource;
-       u64 offset;
-};
-
-struct pci_controller {
-       void *acpi_handle;
-       void *iommu;
-       int segment;
-       int node;               /* nearest node with memory or -1 for global allocation */
-
-       unsigned int windows;
-       struct pci_window *window;
-
-       void *platform_data;
-};
-
-#define PCI_CONTROLLER(busdev) ((struct pci_controller *) busdev->sysdata)
-#define pci_domain_nr(busdev)    (PCI_CONTROLLER(busdev)->segment)
-
-extern struct pci_ops pci_root_ops;
-
-static inline int pci_proc_domain(struct pci_bus *bus)
-{
-       return (pci_domain_nr(bus) != 0);
-}
-
-static inline void pcibios_add_platform_entries(struct pci_dev *dev)
-{
-}
-
-extern void pcibios_resource_to_bus(struct pci_dev *dev,
-               struct pci_bus_region *region, struct resource *res);
-
-extern void pcibios_bus_to_resource(struct pci_dev *dev,
-               struct resource *res, struct pci_bus_region *region);
-
-#define pcibios_scan_all_fns(a, b)     0
-
-#endif /* _ASM_IA64_PCI_H */
diff -r 46c44b5e6a1b -r 80c5b5914b79 xen/include/asm-ia64/linux/asm/sn/README.origin
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-ia64/linux/asm/sn/README.origin   Wed Dec 20 14:55:02 2006 -0700
@@ -0,0 +1,24 @@
+# Source files in this directory are identical copies of linux-2.6.19 files:
+# 
+# NOTE: DO NOT commit changes to these files!   If a file
+# needs to be changed, move it to ../linux-xen and follow
+# the instructions in the README there.
+
+geo.h                  -> linux/include/asm-ia64/sn/geo.h
+klconfig.h             -> linux/include/asm-ia64/sn/klconfig.h
+l1.h                   -> linux/include/asm-ia64/sn/l1.h
+leds.h                 -> linux/include/asm-ia64/sn/leds.h
+module.h               -> linux/include/asm-ia64/sn/module.h
+pcibus_provider_defs.h -> linux/include/asm-ia64/sn/pcibus_provider_defs.h
+pcidev.h               -> linux/include/asm-ia64/sn/pcidev.h
+pda.h                  -> linux/include/asm-ia64/sn/pda.h
+pic.h                  -> linux/include/asm-ia64/sn/pic.h
+shub_mmr.h             -> linux/include/asm-ia64/sn/shub_mmr.h
+shubio.h               -> linux/include/asm-ia64/sn/shubio.h
+simulator.h            -> linux/include/asm-ia64/sn/simulator.h
+sn_cpuid.h             -> linux/include/asm-ia64/sn/sn_cpuid.h
+sn_feature_sets.h      -> linux/include/asm-ia64/sn/sn_feature_sets.h
+sn_sal.h               -> linux/include/asm-ia64/sn/sn_sal.h
+tiocp.h                        -> linux/include/asm-ia64/sn/tiocp.h
+xbow.h                 -> linux/arch/ia64/sn/include/xtalk/xbow.h
+xwidgetdev.h           -> linux/arch/ia64/sn/include/xtalk/xwidgetdev.h
diff -r 46c44b5e6a1b -r 80c5b5914b79 xen/include/asm-ia64/linux/asm/sn/geo.h
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-ia64/linux/asm/sn/geo.h   Wed Dec 20 14:55:02 2006 -0700
@@ -0,0 +1,132 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000-2005 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#ifndef _ASM_IA64_SN_GEO_H
+#define _ASM_IA64_SN_GEO_H
+
+/* The geoid_t implementation below is based loosely on the pcfg_t
+   implementation in sys/SN/promcfg.h. */
+
+/* Type declarations */
+
+/* Size of a geoid_t structure (must be before decl. of geoid_u) */
+#define GEOID_SIZE     8       /* Would 16 be better?  The size can
+                                  be different on different platforms. */
+
+#define MAX_SLOTS      0xf     /* slots per module */
+#define MAX_SLABS      0xf     /* slabs per slot */
+
+typedef unsigned char  geo_type_t;
+
+/* Fields common to all substructures */
+typedef struct geo_common_s {
+    moduleid_t module;         /* The module (box) this h/w lives in */
+    geo_type_t type;           /* What type of h/w is named by this geoid_t */
+    slabid_t   slab:4;         /* slab (ASIC), 0 .. 15 within slot */
+    slotid_t   slot:4;         /* slot (Blade), 0 .. 15 within module */
+} geo_common_t;
+
+/* Additional fields for particular types of hardware */
+typedef struct geo_node_s {
+    geo_common_t       common;         /* No additional fields needed */
+} geo_node_t;
+
+typedef struct geo_rtr_s {
+    geo_common_t       common;         /* No additional fields needed */
+} geo_rtr_t;
+
+typedef struct geo_iocntl_s {
+    geo_common_t       common;         /* No additional fields needed */
+} geo_iocntl_t;
+
+typedef struct geo_pcicard_s {
+    geo_iocntl_t       common;
+    char               bus;    /* Bus/widget number */
+    char               slot;   /* PCI slot number */
+} geo_pcicard_t;
+
+/* Subcomponents of a node */
+typedef struct geo_cpu_s {
+    geo_node_t node;
+    char       slice;          /* Which CPU on the node */
+} geo_cpu_t;
+
+typedef struct geo_mem_s {
+    geo_node_t node;
+    char       membus;         /* The memory bus on the node */
+    char       memslot;        /* The memory slot on the bus */
+} geo_mem_t;
+
+
+typedef union geoid_u {
+    geo_common_t       common;
+    geo_node_t         node;
+    geo_iocntl_t       iocntl;
+    geo_pcicard_t      pcicard;
+    geo_rtr_t          rtr;
+    geo_cpu_t          cpu;
+    geo_mem_t          mem;
+    char               padsize[GEOID_SIZE];
+} geoid_t;
+
+
+/* Preprocessor macros */
+
+#define GEO_MAX_LEN    48      /* max. formatted length, plus some pad:
+                                  module/001c07/slab/5/node/memory/2/slot/4 */
+
+/* Values for geo_type_t */
+#define GEO_TYPE_INVALID       0
+#define GEO_TYPE_MODULE                1
+#define GEO_TYPE_NODE          2
+#define GEO_TYPE_RTR           3
+#define GEO_TYPE_IOCNTL                4
+#define GEO_TYPE_IOCARD                5
+#define GEO_TYPE_CPU           6
+#define GEO_TYPE_MEM           7
+#define GEO_TYPE_MAX           (GEO_TYPE_MEM+1)
+
+/* Parameter for hwcfg_format_geoid_compt() */
+#define GEO_COMPT_MODULE       1
+#define GEO_COMPT_SLAB         2
+#define GEO_COMPT_IOBUS                3
+#define GEO_COMPT_IOSLOT       4
+#define GEO_COMPT_CPU          5
+#define GEO_COMPT_MEMBUS       6
+#define GEO_COMPT_MEMSLOT      7
+
+#define GEO_INVALID_STR                "<invalid>"
+
+#define INVALID_NASID           ((nasid_t)-1)
+#define INVALID_CNODEID         ((cnodeid_t)-1)
+#define INVALID_PNODEID         ((pnodeid_t)-1)
+#define INVALID_SLAB            (slabid_t)-1
+#define INVALID_SLOT            (slotid_t)-1
+#define INVALID_MODULE          ((moduleid_t)-1)
+
+static inline slabid_t geo_slab(geoid_t g)
+{
+       return (g.common.type == GEO_TYPE_INVALID) ?
+               INVALID_SLAB : g.common.slab;
+}
+
+static inline slotid_t geo_slot(geoid_t g)
+{
+       return (g.common.type == GEO_TYPE_INVALID) ?
+               INVALID_SLOT : g.common.slot;
+}
+
+static inline moduleid_t geo_module(geoid_t g)
+{
+       return (g.common.type == GEO_TYPE_INVALID) ?
+               INVALID_MODULE : g.common.module;
+}
+
+extern geoid_t cnodeid_get_geoid(cnodeid_t cnode);
+
+#endif /* _ASM_IA64_SN_GEO_H */
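
As context for geo.h above: a geoid_t packs a module id, a 4-bit slab and a 4-bit slot into GEOID_SIZE bytes, and the inline accessors fall back to the INVALID_* sentinels when the type field is GEO_TYPE_INVALID. A minimal usage sketch, assuming the moduleid_t/slabid_t/slotid_t typedefs and printk() from the surrounding SN/Xen headers; the helper name and output format are illustrative, not part of this patch:

    /* Illustrative only: report where a compact node lives physically. */
    static void print_node_location(cnodeid_t cnode)
    {
            geoid_t g = cnodeid_get_geoid(cnode);

            if (geo_module(g) == INVALID_MODULE) {
                    printk("cnode %d: %s\n", (int)cnode, GEO_INVALID_STR);
                    return;
            }
            printk("cnode %d: module %d, slot %d, slab %d\n", (int)cnode,
                   (int)geo_module(g), (int)geo_slot(g), (int)geo_slab(g));
    }
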
diff -r 46c44b5e6a1b -r 80c5b5914b79 xen/include/asm-ia64/linux/asm/sn/klconfig.h
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-ia64/linux/asm/sn/klconfig.h      Wed Dec 20 14:55:02 2006 -0700
@@ -0,0 +1,246 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Derived from IRIX <sys/SN/klconfig.h>.
+ *
+ * Copyright (C) 1992-1997,1999,2001-2004 Silicon Graphics, Inc.  All Rights Reserved.
+ * Copyright (C) 1999 by Ralf Baechle
+ */
+#ifndef _ASM_IA64_SN_KLCONFIG_H
+#define _ASM_IA64_SN_KLCONFIG_H
+
+/*
+ * The KLCONFIG structures store info about the various BOARDs found
+ * during Hardware Discovery. In addition, it stores info about the
+ * components found on the BOARDs.
+ */
+
+typedef s32 klconf_off_t;
+
+
+/* Functions/macros needed to use this structure */
+
+typedef struct kl_config_hdr {
+       char            pad[20];
+       klconf_off_t    ch_board_info;  /* the link list of boards */
+       char            pad0[88];
+} kl_config_hdr_t;
+
+
+#define NODE_OFFSET_TO_LBOARD(nasid,off)        (lboard_t*)(GLOBAL_CAC_ADDR((nasid), (off)))
+
+/*
+ * The KLCONFIG area is organized as a LINKED LIST of BOARDs. A BOARD
+ * can be either 'LOCAL' or 'REMOTE'. LOCAL means it is attached to 
+ * the LOCAL/current NODE. REMOTE means it is attached to a different
+ * node.(TBD - Need a way to treat ROUTER boards.)
+ *
+ * There are 2 different structures to represent these boards -
+ * lboard - Local board, rboard - remote board. These 2 structures
+ * can be arbitrarily mixed in the LINKED LIST of BOARDs. (Refer
+ * Figure below). The first byte of the rboard or lboard structure
+ * is used to find out its type - no unions are used.
+ * If it is a lboard, then the config info of this board will be found
+ * on the local node. (LOCAL NODE BASE + offset value gives pointer to 
+ * the structure.
+ * If it is a rboard, the local structure contains the node number
+ * and the offset of the beginning of the LINKED LIST on the remote node.
+ * The details of the hardware on a remote node can be built locally,
+ * if required, by reading the LINKED LIST on the remote node and 
+ * ignoring all the rboards on that node.
+ *
+ * The local node uses the REMOTE NODE NUMBER + OFFSET to point to the 
+ * First board info on the remote node. The remote node list is 
+ * traversed as the local list, using the REMOTE BASE ADDRESS and not
+ * the local base address and ignoring all rboard values.
+ *
+ * 
+ KLCONFIG
+
+ +------------+      +------------+      +------------+      +------------+
+ |  lboard    |  +-->|   lboard   |  +-->|   rboard   |  +-->|   lboard   |
+ +------------+  |   +------------+  |   +------------+  |   +------------+
+ | board info |  |   | board info |  |   |errinfo,bptr|  |   | board info |
+ +------------+  |   +------------+  |   +------------+  |   +------------+
+ | offset     |--+   |  offset    |--+   |  offset    |--+   |offset=NULL |
+ +------------+      +------------+      +------------+      +------------+
+
+
+ +------------+
+ | board info |
+ +------------+       +--------------------------------+
+ | compt 1    |------>| type, rev, diaginfo, size ...  |  (CPU)
+ +------------+       +--------------------------------+
+ | compt 2    |--+
+ +------------+  |    +--------------------------------+
+ |  ...       |  +--->| type, rev, diaginfo, size ...  |  (MEM_BANK)
+ +------------+       +--------------------------------+
+ | errinfo    |--+
+ +------------+  |    +--------------------------------+
+                 +--->|r/l brd errinfo,compt err flags |
+                      +--------------------------------+
+
+ *
+ * Each BOARD consists of COMPONENTs and the BOARD structure has 
+ * pointers (offsets) to its COMPONENT structure.
+ * The COMPONENT structure has version info, size and speed info, revision,
+ * error info and the NIC info. This structure can accommodate any
+ * BOARD with arbitrary COMPONENT composition.
+ *
+ * The ERRORINFO part of each BOARD has error information
+ * that describes errors about the BOARD itself. It also has flags to
+ * indicate the COMPONENT(s) on the board that have errors. The error 
+ * information specific to the COMPONENT is present in the respective 
+ * COMPONENT structure.
+ *
+ * The ERRORINFO structure is also treated like a COMPONENT, ie. the 
+ * BOARD has pointers(offset) to the ERRORINFO structure. The rboard
+ * structure also has a pointer to the ERRORINFO structure. This is 
+ * the place to store ERRORINFO about a REMOTE NODE, if the HUB on
+ * that NODE is not working or if the REMOTE MEMORY is BAD. In cases where 
+ * only the CPU of the REMOTE NODE is disabled, the ERRORINFO pointer can
+ * be a NODE NUMBER, REMOTE OFFSET combination, pointing to error info 
+ * which is present on the REMOTE NODE.(TBD)
+ * REMOTE ERRINFO can be stored on any of the nearest nodes 
+ * or on all the nearest nodes.(TBD)
+ * Like BOARD structures, REMOTE ERRINFO structures can be built locally
+ * using the rboard errinfo pointer.
+ *
+ * In order to get useful information from this Data organization, a set of
+ * interface routines are provided (TBD). The important thing to remember while
+ * manipulating the structures, is that, the NODE number information should
+ * be used. If the NODE is non-zero (remote) then each offset should
+ * be added to the REMOTE BASE ADDR else it should be added to the LOCAL BASE ADDR.
+ * This includes offsets for BOARDS, COMPONENTS and ERRORINFO.
+ * 
+ * Note that these structures do not provide much info about connectivity.
+ * That info will be part of HWGRAPH, which is an extension of the cfg_t
+ * data structure. (ref IP27prom/cfg.h) It has to be extended to include
+ * the IO part of the Network(TBD).
+ *
+ * The data structures below define the above concepts.
+ */
+
+
+/*
+ * BOARD classes
+ */
+
+#define KLCLASS_MASK   0xf0   
+#define KLCLASS_NONE   0x00
+#define KLCLASS_NODE   0x10             /* CPU, Memory and HUB board */
+#define KLCLASS_CPU    KLCLASS_NODE    
+#define KLCLASS_IO     0x20             /* BaseIO, 4 ch SCSI, ethernet, FDDI 
+                                           and the non-graphics widget boards */
+#define KLCLASS_ROUTER 0x30             /* Router board */
+#define KLCLASS_MIDPLANE 0x40            /* We need to treat this as a board
+                                            so that we can record error info */
+#define KLCLASS_IOBRICK        0x70            /* IP35 iobrick */
+#define KLCLASS_MAX    8               /* Bump this if a new CLASS is added */
+
+#define KLCLASS(_x) ((_x) & KLCLASS_MASK)
+
+
+/*
+ * board types
+ */
+
+#define KLTYPE_MASK    0x0f
+#define KLTYPE(_x)      ((_x) & KLTYPE_MASK)
+
+#define KLTYPE_SNIA    (KLCLASS_CPU | 0x1)
+#define KLTYPE_TIO     (KLCLASS_CPU | 0x2)
+
+#define KLTYPE_ROUTER     (KLCLASS_ROUTER | 0x1)
+#define KLTYPE_META_ROUTER (KLCLASS_ROUTER | 0x3)
+#define KLTYPE_REPEATER_ROUTER (KLCLASS_ROUTER | 0x4)
+
+#define KLTYPE_IOBRICK_XBOW    (KLCLASS_MIDPLANE | 0x2)
+
+#define KLTYPE_IOBRICK         (KLCLASS_IOBRICK | 0x0)
+#define KLTYPE_NBRICK          (KLCLASS_IOBRICK | 0x4)
+#define KLTYPE_PXBRICK         (KLCLASS_IOBRICK | 0x6)
+#define KLTYPE_IXBRICK         (KLCLASS_IOBRICK | 0x7)
+#define KLTYPE_CGBRICK         (KLCLASS_IOBRICK | 0x8)
+#define KLTYPE_OPUSBRICK       (KLCLASS_IOBRICK | 0x9)
+#define KLTYPE_SABRICK          (KLCLASS_IOBRICK | 0xa)
+#define KLTYPE_IABRICK         (KLCLASS_IOBRICK | 0xb)
+#define KLTYPE_PABRICK          (KLCLASS_IOBRICK | 0xc)
+#define KLTYPE_GABRICK         (KLCLASS_IOBRICK | 0xd)
+
+
+/* 
+ * board structures
+ */
+
+#define MAX_COMPTS_PER_BRD 24
+
+typedef struct lboard_s {
+       klconf_off_t    brd_next_any;     /* Next BOARD */
+       unsigned char   struct_type;      /* type of structure, local or remote */
+       unsigned char   brd_type;         /* type+class */
+       unsigned char   brd_sversion;     /* version of this structure */
+        unsigned char  brd_brevision;    /* board revision */
+        unsigned char  brd_promver;      /* board prom version, if any */
+       unsigned char   brd_flags;        /* Enabled, Disabled etc */
+       unsigned char   brd_slot;         /* slot number */
+       unsigned short  brd_debugsw;      /* Debug switches */
+       geoid_t         brd_geoid;        /* geo id */
+       partid_t        brd_partition;    /* Partition number */
+        unsigned short         brd_diagval;      /* diagnostic value */
+        unsigned short         brd_diagparm;     /* diagnostic parameter */
+        unsigned char  brd_inventory;    /* inventory history */
+        unsigned char  brd_numcompts;    /* Number of components */
+        nic_t          brd_nic;          /* Number in CAN */
+       nasid_t         brd_nasid;        /* passed parameter */
+       klconf_off_t    brd_compts[MAX_COMPTS_PER_BRD]; /* pointers to COMPONENTS */
+       klconf_off_t    brd_errinfo;      /* Board's error information */
+       struct lboard_s *brd_parent;      /* Logical parent for this brd */
+       char            pad0[4];
+       unsigned char   brd_confidence;   /* confidence that the board is bad */
+       nasid_t         brd_owner;        /* who owns this board */
+       unsigned char   brd_nic_flags;    /* To handle 8 more NICs */
+       char            pad1[24];         /* future expansion */
+       char            brd_name[32];
+       nasid_t         brd_next_same_host; /* host of next brd w/same nasid */
+       klconf_off_t    brd_next_same;    /* Next BOARD with same nasid */
+} lboard_t;
+
+/*
+ * Generic info structure. This stores common info about a 
+ * component.
+ */
+ 
+typedef struct klinfo_s {                  /* Generic info */
+        unsigned char   struct_type;       /* type of this structure */
+        unsigned char   struct_version;    /* version of this structure */
+        unsigned char   flags;            /* Enabled, disabled etc */
+        unsigned char   revision;         /* component revision */
+        unsigned short  diagval;          /* result of diagnostics */
+        unsigned short  diagparm;         /* diagnostic parameter */
+        unsigned char   inventory;        /* previous inventory status */
+        unsigned short  partid;                   /* widget part number */
+       nic_t           nic;              /* Must be aligned properly */
+        unsigned char   physid;           /* physical id of component */
+        unsigned int    virtid;           /* virtual id as seen by system */
+       unsigned char   widid;            /* Widget id - if applicable */
+       nasid_t         nasid;            /* node number - from parent */
+       char            pad1;             /* pad out structure. */
+       char            pad2;             /* pad out structure. */
+       void            *data;
+        klconf_off_t   errinfo;          /* component specific errors */
+        unsigned short  pad3;             /* pci fields have moved over to */
+        unsigned short  pad4;             /* klbri_t */
+} klinfo_t ;
+
+
+static inline lboard_t *find_lboard_next(lboard_t * brd)
+{
+       if (brd && brd->brd_next_any)
+               return NODE_OFFSET_TO_LBOARD(NASID_GET(brd), brd->brd_next_any);
+        return NULL;
+}
+
+#endif /* _ASM_IA64_SN_KLCONFIG_H */
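
The long comment in klconfig.h above describes the KLCONFIG area as a linked list of board records reached through offsets, with find_lboard_next() returning NULL at the end of the list. A hedged sketch of how that list might be traversed using only the helpers shown here; the starting lboard_t pointer is assumed to come from the platform setup code, and the counting helper itself is illustrative:

    /* Illustrative only: count NODE-class boards reachable from 'first'. */
    static int count_node_boards(lboard_t *first)
    {
            lboard_t *brd;
            int n = 0;

            for (brd = first; brd != NULL; brd = find_lboard_next(brd))
                    if (KLCLASS(brd->brd_type) == KLCLASS_NODE)
                            n++;
            return n;
    }
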
diff -r 46c44b5e6a1b -r 80c5b5914b79 xen/include/asm-ia64/linux/asm/sn/l1.h
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-ia64/linux/asm/sn/l1.h    Wed Dec 20 14:55:02 2006 -0700
@@ -0,0 +1,51 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992-1997,2000-2004 Silicon Graphics, Inc.  All Rights Reserved.
+ */
+
+#ifndef _ASM_IA64_SN_L1_H
+#define _ASM_IA64_SN_L1_H
+
+/* brick type response codes */
+#define L1_BRICKTYPE_PX         0x23            /* # */
+#define L1_BRICKTYPE_PE         0x25            /* % */
+#define L1_BRICKTYPE_N_p0       0x26            /* & */
+#define L1_BRICKTYPE_IP45       0x34            /* 4 */
+#define L1_BRICKTYPE_IP41       0x35            /* 5 */
+#define L1_BRICKTYPE_TWISTER    0x36            /* 6 */ /* IP53 & ROUTER */
+#define L1_BRICKTYPE_IX         0x3d            /* = */
+#define L1_BRICKTYPE_IP34       0x61            /* a */
+#define L1_BRICKTYPE_GA                0x62            /* b */
+#define L1_BRICKTYPE_C          0x63            /* c */
+#define L1_BRICKTYPE_OPUS_TIO  0x66            /* f */
+#define L1_BRICKTYPE_I          0x69            /* i */
+#define L1_BRICKTYPE_N          0x6e            /* n */
+#define L1_BRICKTYPE_OPUS       0x6f           /* o */
+#define L1_BRICKTYPE_P          0x70            /* p */
+#define L1_BRICKTYPE_R          0x72            /* r */
+#define L1_BRICKTYPE_CHI_CG     0x76            /* v */
+#define L1_BRICKTYPE_X          0x78            /* x */
+#define L1_BRICKTYPE_X2         0x79            /* y */
+#define L1_BRICKTYPE_SA                0x5e            /* ^ */
+#define L1_BRICKTYPE_PA                0x6a            /* j */
+#define L1_BRICKTYPE_IA                0x6b            /* k */
+#define L1_BRICKTYPE_ATHENA    0x2b            /* + */
+#define L1_BRICKTYPE_DAYTONA   0x7a            /* z */
+#define L1_BRICKTYPE_1932      0x2c            /* . */
+#define L1_BRICKTYPE_191010    0x2e            /* , */
+
+/* board type response codes */
+#define L1_BOARDTYPE_IP69       0x0100          /* CA */
+#define L1_BOARDTYPE_IP63       0x0200          /* CB */
+#define L1_BOARDTYPE_BASEIO     0x0300          /* IB */
+#define L1_BOARDTYPE_PCIE2SLOT  0x0400          /* IC */
+#define L1_BOARDTYPE_PCIX3SLOT  0x0500          /* ID */
+#define L1_BOARDTYPE_PCIXPCIE4SLOT 0x0600       /* IE */
+#define L1_BOARDTYPE_ABACUS     0x0700          /* AB */
+#define L1_BOARDTYPE_DAYTONA    0x0800          /* AD */
+#define L1_BOARDTYPE_INVAL      (-1)            /* invalid brick type */
+
+#endif /* _ASM_IA64_SN_L1_H */
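
The L1_BRICKTYPE_* values above are simply the ASCII codes of the characters noted in the trailing comments, so a brick type returned by the L1 system controller can be printed directly as a character. A tiny illustrative helper, not part of this patch:

    /* Illustrative only: L1 brick-type codes are printable ASCII. */
    static inline char bricktype_to_char(int bricktype)
    {
            return (char)bricktype;         /* e.g. L1_BRICKTYPE_N == 'n' */
    }
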
diff -r 46c44b5e6a1b -r 80c5b5914b79 xen/include/asm-ia64/linux/asm/sn/leds.h
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-ia64/linux/asm/sn/leds.h  Wed Dec 20 14:55:02 2006 -0700
@@ -0,0 +1,33 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ * Copyright (C) 2000-2004 Silicon Graphics, Inc. All rights reserved.
+ */
+#ifndef _ASM_IA64_SN_LEDS_H
+#define _ASM_IA64_SN_LEDS_H
+
+#include <asm/sn/addrs.h>
+#include <asm/sn/pda.h>
+#include <asm/sn/shub_mmr.h>
+
+#define LED0           (LOCAL_MMR_ADDR(SH_REAL_JUNK_BUS_LED0))
+#define LED_CPU_SHIFT  16
+
+#define LED_CPU_HEARTBEAT      0x01
+#define LED_CPU_ACTIVITY       0x02
+#define LED_ALWAYS_SET         0x00
+
+/*
+ * Basic macros for flashing the LEDS on an SGI SN.
+ */
+
+static __inline__ void
+set_led_bits(u8 value, u8 mask)
+{
+       pda->led_state = (pda->led_state & ~mask) | (value & mask);
+       *pda->led_address = (short) pda->led_state;
+}
+
+#endif /* _ASM_IA64_SN_LEDS_H */
+
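
set_led_bits() in leds.h above performs a read-modify-write of the cached per-CPU led_state in the PDA and then mirrors the result to the memory-mapped LED register. A minimal sketch of how a heartbeat blink could be layered on top of it; the toggle helper and its static flag are illustrative, and real code would want per-CPU state:

    /* Illustrative only: flip this CPU's heartbeat LED bit. */
    static inline void toggle_cpu_heartbeat_led(void)
    {
            static u8 on;   /* single flag for illustration; not per-CPU safe */

            on = !on;
            set_led_bits(on ? LED_CPU_HEARTBEAT : 0, LED_CPU_HEARTBEAT);
    }
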
diff -r 46c44b5e6a1b -r 80c5b5914b79 xen/include/asm-ia64/linux/asm/sn/module.h
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-ia64/linux/asm/sn/module.h        Wed Dec 20 14:55:02 2006 -0700
@@ -0,0 +1,127 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000-2004 Silicon Graphics, Inc. All rights reserved.
+ */
+#ifndef _ASM_IA64_SN_MODULE_H
+#define _ASM_IA64_SN_MODULE_H
+
+/* parameter for format_module_id() */
+#define MODULE_FORMAT_BRIEF    1
+#define MODULE_FORMAT_LONG     2
+#define MODULE_FORMAT_LCD      3
+
+/*
+ *     Module id format
+ *
+ *     31-16   Rack ID (encoded class, group, number - 16-bit unsigned int)
+ *      15-8   Brick type (8-bit ascii character)
+ *       7-0   Bay (brick position in rack (0-63) - 8-bit unsigned int)
+ *
+ */
+
+/*
+ * Macros for getting the brick type
+ */
+#define MODULE_BTYPE_MASK      0xff00
+#define MODULE_BTYPE_SHFT      8
+#define MODULE_GET_BTYPE(_m)   (((_m) & MODULE_BTYPE_MASK) >> MODULE_BTYPE_SHFT)
+#define MODULE_BT_TO_CHAR(_b)  ((char)(_b))
+#define MODULE_GET_BTCHAR(_m)  (MODULE_BT_TO_CHAR(MODULE_GET_BTYPE(_m)))
+
+/*
+ * Macros for getting the rack ID.
+ */
+#define MODULE_RACK_MASK       0xffff0000
+#define MODULE_RACK_SHFT       16
+#define MODULE_GET_RACK(_m)    (((_m) & MODULE_RACK_MASK) >> MODULE_RACK_SHFT)
+
+/*
+ * Macros for getting the brick position
+ */
+#define MODULE_BPOS_MASK       0x00ff
+#define MODULE_BPOS_SHFT       0
+#define MODULE_GET_BPOS(_m)    (((_m) & MODULE_BPOS_MASK) >> MODULE_BPOS_SHFT)
+
+/*
+ * Macros for encoding and decoding rack IDs
+ * A rack number consists of three parts:
+ *   class (0==CPU/mixed, 1==I/O), group, number
+ *
+ * Rack number is stored just as it is displayed on the screen:
+ * a 3-decimal-digit number.
+ */
+#define RACK_CLASS_DVDR         100
+#define RACK_GROUP_DVDR         10
+#define RACK_NUM_DVDR           1
+
+#define RACK_CREATE_RACKID(_c, _g, _n)  ((_c) * RACK_CLASS_DVDR +       \
+        (_g) * RACK_GROUP_DVDR + (_n) * RACK_NUM_DVDR)
+
+#define RACK_GET_CLASS(_r)              ((_r) / RACK_CLASS_DVDR)
+#define RACK_GET_GROUP(_r)              (((_r) - RACK_GET_CLASS(_r) *   \
+            RACK_CLASS_DVDR) / RACK_GROUP_DVDR)
+#define RACK_GET_NUM(_r)                (((_r) - RACK_GET_CLASS(_r) *   \
+            RACK_CLASS_DVDR - RACK_GET_GROUP(_r) *      \
+            RACK_GROUP_DVDR) / RACK_NUM_DVDR)
+
+/*
+ * Macros for encoding and decoding rack IDs
+ * A rack number consists of three parts:
+ *   class      1 bit, 0==CPU/mixed, 1==I/O
+ *   group      2 bits for CPU/mixed, 3 bits for I/O
+ *   number     3 bits for CPU/mixed, 2 bits for I/O (1 based)
+ */
+#define RACK_GROUP_BITS(_r)     (RACK_GET_CLASS(_r) ? 3 : 2)
+#define RACK_NUM_BITS(_r)       (RACK_GET_CLASS(_r) ? 2 : 3)
+
+#define RACK_CLASS_MASK(_r)     0x20
+#define RACK_CLASS_SHFT(_r)     5
+#define RACK_ADD_CLASS(_r, _c)  \
+        ((_r) |= (_c) << RACK_CLASS_SHFT(_r) & RACK_CLASS_MASK(_r))
+
+#define RACK_GROUP_SHFT(_r)     RACK_NUM_BITS(_r)
+#define RACK_GROUP_MASK(_r)     \
+        ( (((unsigned)1<<RACK_GROUP_BITS(_r)) - 1) << RACK_GROUP_SHFT(_r) )
+#define RACK_ADD_GROUP(_r, _g)  \
+        ((_r) |= (_g) << RACK_GROUP_SHFT(_r) & RACK_GROUP_MASK(_r))
+
+#define RACK_NUM_SHFT(_r)       0
+#define RACK_NUM_MASK(_r)       \
+        ( (((unsigned)1<<RACK_NUM_BITS(_r)) - 1) << RACK_NUM_SHFT(_r) )
+#define RACK_ADD_NUM(_r, _n)    \
+        ((_r) |= ((_n) - 1) << RACK_NUM_SHFT(_r) & RACK_NUM_MASK(_r))
+
+
+/*
+ * Brick type definitions
+ */
+#define MAX_BRICK_TYPES         256 /* brick type is stored as uchar */
+
+extern char brick_types[];
+
+#define MODULE_CBRICK           0
+#define MODULE_RBRICK           1
+#define MODULE_IBRICK           2
+#define MODULE_KBRICK           3
+#define MODULE_XBRICK           4
+#define MODULE_DBRICK           5
+#define MODULE_PBRICK           6
+#define MODULE_NBRICK           7
+#define MODULE_PEBRICK          8
+#define MODULE_PXBRICK          9
+#define MODULE_IXBRICK          10
+#define MODULE_CGBRICK         11
+#define MODULE_OPUSBRICK        12
+#define MODULE_SABRICK         13      /* TIO BringUp Brick */
+#define MODULE_IABRICK         14
+#define MODULE_PABRICK         15
+#define MODULE_GABRICK         16
+#define MODULE_OPUS_TIO                17      /* OPUS TIO Riser */
+
+extern char brick_types[];
+extern void format_module_id(char *, moduleid_t, int);
+
+#endif /* _ASM_IA64_SN_MODULE_H */
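
Putting the module.h macros above together: a module id encodes the rack in bits 31-16, the brick type (an ASCII character) in bits 15-8 and the bay in bits 7-0, and the rack value itself is the three-decimal-digit class/group/number shown on the chassis. A hedged decoding sketch using only the macros defined above; the printing helper is illustrative and assumes printk():

    /* Illustrative only: break a moduleid_t into its displayed parts. */
    static void print_module_id(moduleid_t m)
    {
            int rack = (int)MODULE_GET_RACK(m);

            printk("rack %03d (class %d, group %d, num %d), brick '%c', bay %d\n",
                   rack, (int)RACK_GET_CLASS(rack), (int)RACK_GET_GROUP(rack),
                   (int)RACK_GET_NUM(rack), MODULE_GET_BTCHAR(m),
                   (int)MODULE_GET_BPOS(m));
    }
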
diff -r 46c44b5e6a1b -r 80c5b5914b79 xen/include/asm-ia64/linux/asm/sn/pcibus_provider_defs.h
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-ia64/linux/asm/sn/pcibus_provider_defs.h  Wed Dec 20 14:55:02 2006 -0700
@@ -0,0 +1,68 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000-2005 Silicon Graphics, Inc. All rights reserved.
+ */
+#ifndef _ASM_IA64_SN_PCI_PCIBUS_PROVIDER_H
+#define _ASM_IA64_SN_PCI_PCIBUS_PROVIDER_H
+
+/*
+ * SN pci asic types.  Do not ever renumber these or reuse values.  The
+ * values must agree with what prom thinks they are.
+ */
+
+#define PCIIO_ASIC_TYPE_UNKNOWN        0
+#define PCIIO_ASIC_TYPE_PPB    1
+#define PCIIO_ASIC_TYPE_PIC    2
+#define PCIIO_ASIC_TYPE_TIOCP  3
+#define PCIIO_ASIC_TYPE_TIOCA  4
+#define PCIIO_ASIC_TYPE_TIOCE  5
+
+#define PCIIO_ASIC_MAX_TYPES   6
+
+/*
+ * Common pciio bus provider data.  There should be one of these as the
+ * first field in any pciio based provider soft structure (e.g. pcibr_soft
+ * tioca_soft, etc).
+ */
+
+struct pcibus_bussoft {
+       u32             bs_asic_type;   /* chipset type */
+       u32             bs_xid;         /* xwidget id */
+       u32             bs_persist_busnum; /* Persistent Bus Number */
+       u32             bs_persist_segment; /* Segment Number */
+       u64             bs_legacy_io;   /* legacy io pio addr */
+       u64             bs_legacy_mem;  /* legacy mem pio addr */
+       u64             bs_base;        /* widget base */
+       struct xwidget_info     *bs_xwidget_info;
+};
+
+struct pci_controller;
+/*
+ * SN pci bus indirection
+ */
+
+struct sn_pcibus_provider {
+       dma_addr_t      (*dma_map)(struct pci_dev *, unsigned long, size_t, int flags);
+       dma_addr_t      (*dma_map_consistent)(struct pci_dev *, unsigned long, size_t, int flags);
+       void            (*dma_unmap)(struct pci_dev *, dma_addr_t, int);
+       void *          (*bus_fixup)(struct pcibus_bussoft *, struct pci_controller *);
+       void            (*force_interrupt)(struct sn_irq_info *);
+       void            (*target_interrupt)(struct sn_irq_info *);
+};
+
+/*
+ * Flags used by the map interfaces
+ * bits 3:0 specifies format of passed in address
+ * bit  4   specifies that address is to be used for MSI

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
