[Xen-changelog] merge?

# HG changeset patch
# User cl349@xxxxxxxxxxxxxxxxxxxx
# Node ID d760699356fd26b89fef5d3e7d8f8cc12de8e454
# Parent  d7b79cac9ea9b1fdb18f673b99db9fa629f5b712
# Parent  67a530b01542cd4c715ed401c404e73b2eceec82
merge?

diff -r d7b79cac9ea9 -r d760699356fd .hgignore
--- a/.hgignore Tue Aug 23 17:32:44 2005
+++ b/.hgignore Tue Aug 23 17:33:11 2005
@@ -147,6 +147,7 @@
 ^tools/xcs/xcsdump$
 ^tools/xcutils/xc_restore$
 ^tools/xcutils/xc_save$
+^tools/xenstat/xentop/xentop$
 ^tools/xenstore/testsuite/tmp/.*$
 ^tools/xenstore/xen$
 ^tools/xenstore/xenstored$
diff -r d7b79cac9ea9 -r d760699356fd linux-2.6-xen-sparse/arch/xen/configs/xen0_defconfig_x86_64
--- a/linux-2.6-xen-sparse/arch/xen/configs/xen0_defconfig_x86_64       Tue Aug 23 17:32:44 2005
+++ b/linux-2.6-xen-sparse/arch/xen/configs/xen0_defconfig_x86_64       Tue Aug 23 17:33:11 2005
@@ -807,7 +807,107 @@
 #
 CONFIG_USB_ARCH_HAS_HCD=y
 CONFIG_USB_ARCH_HAS_OHCI=y
-# CONFIG_USB is not set
+CONFIG_USB=y
+# CONFIG_USB_DEBUG is not set
+
+#
+# Miscellaneous USB options
+#
+# CONFIG_USB_DEVICEFS is not set
+# CONFIG_USB_BANDWIDTH is not set
+# CONFIG_USB_DYNAMIC_MINORS is not set
+# CONFIG_USB_OTG is not set
+
+#
+# USB Host Controller Drivers
+#
+# CONFIG_USB_EHCI_HCD is not set
+CONFIG_USB_OHCI_HCD=y
+# CONFIG_USB_OHCI_BIG_ENDIAN is not set
+CONFIG_USB_OHCI_LITTLE_ENDIAN=y
+CONFIG_USB_UHCI_HCD=y
+# CONFIG_USB_SL811_HCD is not set
+
+#
+# USB Device Class drivers
+#
+# CONFIG_USB_BLUETOOTH_TTY is not set
+# CONFIG_USB_ACM is not set
+# CONFIG_USB_PRINTER is not set
+
+#
+# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support' may also be needed; see USB_STORAGE Help for more information
+#
+# CONFIG_USB_STORAGE is not set
+
+#
+# USB Input Devices
+#
+CONFIG_USB_HID=y
+CONFIG_USB_HIDINPUT=y
+# CONFIG_HID_FF is not set
+# CONFIG_USB_HIDDEV is not set
+# CONFIG_USB_AIPTEK is not set
+# CONFIG_USB_WACOM is not set
+# CONFIG_USB_KBTAB is not set
+# CONFIG_USB_POWERMATE is not set
+# CONFIG_USB_MTOUCH is not set
+# CONFIG_USB_EGALAX is not set
+# CONFIG_USB_XPAD is not set
+# CONFIG_USB_ATI_REMOTE is not set
+
+#
+# USB Imaging devices
+#
+# CONFIG_USB_MDC800 is not set
+# CONFIG_USB_MICROTEK is not set
+
+#
+# USB Multimedia devices
+#
+# CONFIG_USB_DABUSB is not set
+
+#
+# Video4Linux support is needed for USB Multimedia device support
+#
+
+#
+# USB Network Adapters
+#
+# CONFIG_USB_CATC is not set
+# CONFIG_USB_KAWETH is not set
+# CONFIG_USB_PEGASUS is not set
+# CONFIG_USB_RTL8150 is not set
+# CONFIG_USB_USBNET is not set
+CONFIG_USB_MON=y
+
+#
+# USB port drivers
+#
+
+#
+# USB Serial Converter support
+#
+# CONFIG_USB_SERIAL is not set
+
+#
+# USB Miscellaneous drivers
+#
+# CONFIG_USB_EMI62 is not set
+# CONFIG_USB_EMI26 is not set
+# CONFIG_USB_AUERSWALD is not set
+# CONFIG_USB_RIO500 is not set
+# CONFIG_USB_LEGOTOWER is not set
+# CONFIG_USB_LCD is not set
+# CONFIG_USB_LED is not set
+# CONFIG_USB_CYTHERM is not set
+# CONFIG_USB_PHIDGETKIT is not set
+# CONFIG_USB_PHIDGETSERVO is not set
+# CONFIG_USB_IDMOUSE is not set
+
+#
+# USB ATM/DSL drivers
+#
 
 #
 # USB Gadget Support
diff -r d7b79cac9ea9 -r d760699356fd linux-2.6-xen-sparse/arch/xen/i386/kernel/setup.c
--- a/linux-2.6-xen-sparse/arch/xen/i386/kernel/setup.c Tue Aug 23 17:32:44 2005
+++ b/linux-2.6-xen-sparse/arch/xen/i386/kernel/setup.c Tue Aug 23 17:33:11 2005
@@ -1575,19 +1575,20 @@
        /* Make sure we have a correctly sized P->M table. */
        if (max_pfn != xen_start_info.nr_pages) {
                phys_to_machine_mapping = alloc_bootmem_low_pages(
-                       max_pfn * sizeof(unsigned long));
+                       max_pfn * sizeof(unsigned int));
 
                if (max_pfn > xen_start_info.nr_pages) {
                        /* set to INVALID_P2M_ENTRY */
                        memset(phys_to_machine_mapping, ~0,
-                               max_pfn * sizeof(unsigned long));
+                               max_pfn * sizeof(unsigned int));
                        memcpy(phys_to_machine_mapping,
-                               (unsigned long *)xen_start_info.mfn_list,
-                               xen_start_info.nr_pages * sizeof(unsigned long));
+                               (unsigned int *)xen_start_info.mfn_list,
+                               xen_start_info.nr_pages * sizeof(unsigned int));
                } else {
                        memcpy(phys_to_machine_mapping,
-                               (unsigned long *)xen_start_info.mfn_list,
-                               max_pfn * sizeof(unsigned long));
+                               (unsigned int *)xen_start_info.mfn_list,
+                               max_pfn * sizeof(unsigned int));
+                       /* N.B. below relies on sizeof(int) == sizeof(long). */
                        if (HYPERVISOR_dom_mem_op(
                                MEMOP_decrease_reservation,
                                (unsigned long *)xen_start_info.mfn_list + max_pfn,
@@ -1597,11 +1598,11 @@
                free_bootmem(
                        __pa(xen_start_info.mfn_list), 
                        PFN_PHYS(PFN_UP(xen_start_info.nr_pages *
-                       sizeof(unsigned long))));
+                       sizeof(unsigned int))));
        }
 
        pfn_to_mfn_frame_list = alloc_bootmem_low_pages(PAGE_SIZE);
-       for ( i=0, j=0; i < max_pfn; i+=(PAGE_SIZE/sizeof(unsigned long)), j++ )
+       for ( i=0, j=0; i < max_pfn; i+=(PAGE_SIZE/sizeof(unsigned int)), j++ )
        {       
             pfn_to_mfn_frame_list[j] = 
                  virt_to_mfn(&phys_to_machine_mapping[i]);
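
The setup.c hunk above switches every size calculation for the phys-to-machine (P2M) table to 32-bit entries, so the allocation, the memset, the copies from xen_start_info.mfn_list and the frame-list stride all have to agree on sizeof(unsigned int). A minimal standalone sketch (plain userspace C, names invented for illustration) of how the frame-list stride follows from the entry width:

/* Simplified sketch, not kernel code: shows why the frame-list loop steps
 * by PAGE_SIZE / sizeof(entry) once P2M entries are treated as 32-bit. */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE 4096UL

int main(void)
{
    unsigned long max_pfn = 1UL << 18;          /* example: 1 GiB of 4 KiB pages */
    unsigned long entries_per_frame = PAGE_SIZE / sizeof(uint32_t);
    unsigned long frames = (max_pfn + entries_per_frame - 1) / entries_per_frame;

    /* With 32-bit entries, each page of the P2M table covers twice as many
     * PFNs as it would with unsigned long entries on a 64-bit build. */
    printf("P2M entries per frame: %lu, frames needed: %lu\n",
           entries_per_frame, frames);
    return 0;
}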
diff -r d7b79cac9ea9 -r d760699356fd linux-2.6-xen-sparse/arch/xen/i386/mm/fault.c
--- a/linux-2.6-xen-sparse/arch/xen/i386/mm/fault.c     Tue Aug 23 17:32:44 2005
+++ b/linux-2.6-xen-sparse/arch/xen/i386/mm/fault.c     Tue Aug 23 17:33:11 2005
@@ -281,7 +281,7 @@
        siginfo_t info;
 
        /* Set the "privileged fault" bit to something sane. */
-       error_code &= 3;
+       error_code &= ~4;
        error_code |= (regs->xcs & 2) << 1;
        if (regs->eflags & X86_EFLAGS_VM)
                error_code |= 4;
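
The fault.c change narrows the error-code fixup: instead of masking off everything above the low two bits, it now clears only bit 2 (the user/supervisor bit) and recomputes it from the guest's saved CS and the VM86 flag, preserving any higher bits. A small hedged sketch of the same bit manipulation with example values:

/* Sketch only: mimics the error-code fixup with hypothetical register values. */
#include <stdio.h>

#define X86_EFLAGS_VM 0x00020000

int main(void)
{
    unsigned long error_code = 0x5;     /* example value from the hypervisor */
    unsigned long xcs = 0x0823;         /* example saved CS; low bits hold the RPL */
    unsigned long eflags = 0;

    error_code &= ~4;                   /* drop only the stale "user fault" bit */
    error_code |= (xcs & 2) << 1;       /* set it again when the CS RPL is 2 or 3 */
    if (eflags & X86_EFLAGS_VM)
        error_code |= 4;                /* vm86 mode always counts as user */

    printf("fixed-up error_code = %#lx\n", error_code);
    return 0;
}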
diff -r d7b79cac9ea9 -r d760699356fd linux-2.6-xen-sparse/arch/xen/i386/mm/init.c
--- a/linux-2.6-xen-sparse/arch/xen/i386/mm/init.c      Tue Aug 23 17:32:44 2005
+++ b/linux-2.6-xen-sparse/arch/xen/i386/mm/init.c      Tue Aug 23 17:33:11 2005
@@ -348,9 +348,12 @@
 {
        unsigned long vaddr;
        pgd_t *pgd_base = (pgd_t *)xen_start_info.pt_base;
+       int i;
 
        swapper_pg_dir = pgd_base;
        init_mm.pgd    = pgd_base;
+       for (i = 0; i < NR_CPUS; i++)
+               per_cpu(cur_pgd, i) = pgd_base;
 
        /* Enable PSE if available */
        if (cpu_has_pse) {
diff -r d7b79cac9ea9 -r d760699356fd linux-2.6-xen-sparse/arch/xen/i386/mm/ioremap.c
--- a/linux-2.6-xen-sparse/arch/xen/i386/mm/ioremap.c   Tue Aug 23 17:32:44 2005
+++ b/linux-2.6-xen-sparse/arch/xen/i386/mm/ioremap.c   Tue Aug 23 17:33:11 2005
@@ -36,6 +36,8 @@
 {
 }
 
+#ifdef __i386__
+
 void __init *bt_ioremap(unsigned long phys_addr, unsigned long size)
 {
        return NULL;
@@ -44,6 +46,8 @@
 void __init bt_iounmap(void *addr, unsigned long size)
 {
 }
+
+#endif /* __i386__ */
 
 #else
 
@@ -58,7 +62,7 @@
        extern unsigned long max_low_pfn;
        unsigned long mfn = address >> PAGE_SHIFT;
        unsigned long pfn = mfn_to_pfn(mfn);
-       return ((pfn < max_low_pfn) && (pfn_to_mfn(pfn) == mfn));
+       return ((pfn < max_low_pfn) && (phys_to_machine_mapping[pfn] == mfn));
 }
 
 /*
@@ -126,10 +130,12 @@
                return NULL;
        area->phys_addr = phys_addr;
        addr = (void __iomem *) area->addr;
+       flags |= _PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED;
+#ifdef __x86_64__
+       flags |= _PAGE_USER;
+#endif
        if (direct_remap_area_pages(&init_mm, (unsigned long) addr, phys_addr,
-                                   size, __pgprot(_PAGE_PRESENT | _PAGE_RW |
-                                                  _PAGE_DIRTY | _PAGE_ACCESSED
-                                                  | flags), domid)) {
+                                   size, __pgprot(flags), domid)) {
                vunmap((void __force *) addr);
                return NULL;
        }
@@ -218,6 +224,8 @@
        kfree(p); 
 }
 
+#ifdef __i386__
+
 void __init *bt_ioremap(unsigned long phys_addr, unsigned long size)
 {
        unsigned long offset, last_addr;
@@ -288,6 +296,8 @@
                --nrpages;
        }
 }
+
+#endif /* __i386__ */
 
 #endif /* CONFIG_XEN_PHYSDEV_ACCESS */
 
@@ -346,7 +356,7 @@
                 * Fill in the machine address: PTE ptr is done later by
                 * __direct_remap_area_pages(). 
                 */
-               v->val = (machine_addr & PAGE_MASK) | pgprot_val(prot);
+               v->val = pte_val_ma(pfn_pte_ma(machine_addr >> PAGE_SHIFT, prot));
 
                machine_addr += PAGE_SIZE;
                address += PAGE_SIZE; 
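
In the i386 ioremap.c hunk the page-protection flags are assembled up front and the machine PTE is built with pte_val_ma(pfn_pte_ma(...)) rather than OR-ing the masked machine address with pgprot_val(), which keeps the construction valid when the PTE is wider than an unsigned long (PAE) and lets x86_64 add _PAGE_USER conditionally. A simplified sketch with hypothetical helper names:

/* Illustrative sketch with hypothetical helpers: building a machine PTE from
 * a frame number and protection bits, rather than OR-ing a masked address,
 * keeps the arithmetic correct when PTEs are 64-bit (PAE). */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12

typedef struct { uint64_t pte; } pte_t;             /* assumed 64-bit PTE */

static pte_t pfn_pte_ma(uint64_t mfn, uint64_t prot)
{
    pte_t p = { (mfn << PAGE_SHIFT) | prot };
    return p;
}

int main(void)
{
    uint64_t mfn  = 0x123456789ULL;                  /* machine frame above 4 GiB */
    uint64_t prot = 0x63;                            /* PRESENT|RW|ACCESSED|DIRTY */
    pte_t v = pfn_pte_ma(mfn, prot);
    printf("machine PTE = %#llx\n", (unsigned long long)v.pte);
    return 0;
}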
diff -r d7b79cac9ea9 -r d760699356fd linux-2.6-xen-sparse/arch/xen/x86_64/kernel/Makefile
--- a/linux-2.6-xen-sparse/arch/xen/x86_64/kernel/Makefile      Tue Aug 23 17:32:44 2005
+++ b/linux-2.6-xen-sparse/arch/xen/x86_64/kernel/Makefile      Tue Aug 23 17:33:11 2005
@@ -44,7 +44,7 @@
 
 c-obj-$(CONFIG_MODULES)                += module.o
 
-#obj-y                         += topology.o
+obj-y                          += topology.o
 c-obj-y                                += intel_cacheinfo.o
 
 bootflag-y                     += ../../../i386/kernel/bootflag.o
diff -r d7b79cac9ea9 -r d760699356fd linux-2.6-xen-sparse/arch/xen/x86_64/kernel/setup.c
--- a/linux-2.6-xen-sparse/arch/xen/x86_64/kernel/setup.c       Tue Aug 23 17:32:44 2005
+++ b/linux-2.6-xen-sparse/arch/xen/x86_64/kernel/setup.c       Tue Aug 23 17:33:11 2005
@@ -778,21 +778,21 @@
                /* Make sure we have a large enough P->M table. */
                if (end_pfn > xen_start_info.nr_pages) {
                        phys_to_machine_mapping = alloc_bootmem(
-                               max_pfn * sizeof(unsigned long));
+                               max_pfn * sizeof(u32));
                        memset(phys_to_machine_mapping, ~0,
-                              max_pfn * sizeof(unsigned long));
+                              max_pfn * sizeof(u32));
                        memcpy(phys_to_machine_mapping,
-                              (unsigned long *)xen_start_info.mfn_list,
-                              xen_start_info.nr_pages * sizeof(unsigned long));
+                              (u32 *)xen_start_info.mfn_list,
+                              xen_start_info.nr_pages * sizeof(u32));
                        free_bootmem(
                                __pa(xen_start_info.mfn_list), 
                                PFN_PHYS(PFN_UP(xen_start_info.nr_pages *
-                                               sizeof(unsigned long))));
+                                               sizeof(u32))));
                }
 
                pfn_to_mfn_frame_list = alloc_bootmem(PAGE_SIZE);
 
-               for ( i=0, j=0; i < end_pfn; i+=(PAGE_SIZE/sizeof(unsigned long)), j++ )
+               for ( i=0, j=0; i < end_pfn; i+=(PAGE_SIZE/sizeof(u32)), j++ )
                {       
                        pfn_to_mfn_frame_list[j] = 
                                virt_to_mfn(&phys_to_machine_mapping[i]);
diff -r d7b79cac9ea9 -r d760699356fd linux-2.6-xen-sparse/arch/xen/x86_64/mm/Makefile
--- a/linux-2.6-xen-sparse/arch/xen/x86_64/mm/Makefile  Tue Aug 23 17:32:44 2005
+++ b/linux-2.6-xen-sparse/arch/xen/x86_64/mm/Makefile  Tue Aug 23 17:33:11 2005
@@ -6,10 +6,10 @@
 
 CFLAGS += -Iarch/$(XENARCH)/mm
 
-obj-y  := init.o fault.o ioremap.o pageattr.o
+obj-y  := init.o fault.o pageattr.o
 c-obj-y        := extable.o
 
-i386-obj-y := hypervisor.o
+i386-obj-y := hypervisor.o ioremap.o
 
 #obj-y  := init.o fault.o ioremap.o extable.o pageattr.o
 #c-obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
diff -r d7b79cac9ea9 -r d760699356fd linux-2.6-xen-sparse/arch/xen/x86_64/mm/init.c
--- a/linux-2.6-xen-sparse/arch/xen/x86_64/mm/init.c    Tue Aug 23 17:32:44 2005
+++ b/linux-2.6-xen-sparse/arch/xen/x86_64/mm/init.c    Tue Aug 23 17:33:11 2005
@@ -559,6 +559,11 @@
 
 void __init xen_init_pt(void)
 {
+       int i;
+
+       for (i = 0; i < NR_CPUS; i++)
+               per_cpu(cur_pgd, i) = init_mm.pgd;
+
        memcpy((void *)init_level4_pgt, 
               (void *)xen_start_info.pt_base, PAGE_SIZE);
 
diff -r d7b79cac9ea9 -r d760699356fd linux-2.6-xen-sparse/drivers/xen/privcmd/privcmd.c
--- a/linux-2.6-xen-sparse/drivers/xen/privcmd/privcmd.c        Tue Aug 23 17:32:44 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/privcmd/privcmd.c        Tue Aug 23 17:33:11 2005
@@ -167,7 +167,7 @@
             if (ret)
                 goto batch_err;
 
-            u.val = (mfn << PAGE_SHIFT) | pgprot_val(vma->vm_page_prot);
+            u.val = pte_val_ma(pfn_pte_ma(mfn, vma->vm_page_prot));
             u.ptr = ptep;
 
             if ( unlikely(HYPERVISOR_mmu_update(&u, 1, NULL, m.dom) < 0) )
diff -r d7b79cac9ea9 -r d760699356fd linux-2.6-xen-sparse/include/asm-xen/asm-i386/page.h
--- a/linux-2.6-xen-sparse/include/asm-xen/asm-i386/page.h      Tue Aug 23 17:32:44 2005
+++ b/linux-2.6-xen-sparse/include/asm-xen/asm-i386/page.h      Tue Aug 23 17:33:11 2005
@@ -60,9 +60,13 @@
 #define copy_user_page(to, from, vaddr, pg)    copy_page(to, from)
 
 /**** MACHINE <-> PHYSICAL CONVERSION MACROS ****/
+#define INVALID_P2M_ENTRY      (~0U)
+#define FOREIGN_FRAME(m)       ((m) | 0x80000000U)
 extern unsigned int *phys_to_machine_mapping;
-#define pfn_to_mfn(_pfn) ((unsigned long)(phys_to_machine_mapping[(_pfn)]))
-#define mfn_to_pfn(_mfn) ((unsigned long)(machine_to_phys_mapping[(_mfn)]))
+#define pfn_to_mfn(pfn)        \
+((unsigned long)phys_to_machine_mapping[(unsigned int)(pfn)] & 0x7FFFFFFFUL)
+#define mfn_to_pfn(mfn)        \
+((unsigned long)machine_to_phys_mapping[(unsigned int)(mfn)])
 
 /* Definitions for machine and pseudophysical addresses. */
 #ifdef CONFIG_X86_PAE
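
With this page.h change, INVALID_P2M_ENTRY and FOREIGN_FRAME() move next to the P2M declarations and pfn_to_mfn() masks off the foreign marker (the top bit of the 32-bit entry), while validity checks such as pte_pfn() compare the raw entry so foreign frames still fail the round trip. A standalone sketch of that convention; the table contents below are invented purely for illustration:

/* Standalone sketch of the FOREIGN_FRAME convention used by this patch;
 * the P2M contents here are invented purely for illustration. */
#include <stdio.h>
#include <stdint.h>

#define INVALID_P2M_ENTRY    (~0U)
#define FOREIGN_FRAME(m)     ((m) | 0x80000000U)
#define PFN_TO_MFN(p2m, pfn) ((unsigned long)((p2m)[pfn]) & 0x7FFFFFFFUL)

int main(void)
{
    uint32_t p2m[4] = { 0x1000, FOREIGN_FRAME(0x2000), INVALID_P2M_ENTRY, 0x3000 };

    /* Normal use masks the marker bit away... */
    printf("pfn 1 -> mfn %#lx\n", PFN_TO_MFN(p2m, 1));

    /* ...but a validity check that compares the raw entry still rejects it,
     * which is exactly what pte_pfn() relies on. */
    unsigned long mfn = 0x2000;
    printf("raw entry matches mfn? %s\n", (p2m[1] == mfn) ? "yes" : "no");
    return 0;
}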
diff -r d7b79cac9ea9 -r d760699356fd linux-2.6-xen-sparse/include/asm-xen/asm-i386/pgtable-2level.h
--- a/linux-2.6-xen-sparse/include/asm-xen/asm-i386/pgtable-2level.h    Tue Aug 23 17:32:44 2005
+++ b/linux-2.6-xen-sparse/include/asm-xen/asm-i386/pgtable-2level.h    Tue Aug 23 17:33:11 2005
@@ -63,17 +63,15 @@
  * 
  * NB2. When deliberately mapping foreign pages into the p2m table, you *must*
  *      use FOREIGN_FRAME(). This will cause pte_pfn() to choke on it, as we
- *      require. In all the cases we care about, the high bit gets shifted out
- *      (e.g., phys_to_machine()) so behaviour there is correct.
+ *      require. In all the cases we care about, the FOREIGN_FRAME bit is
+ *      masked (e.g., pfn_to_mfn()) so behaviour there is correct.
  */
-#define INVALID_P2M_ENTRY (~0U)
-#define FOREIGN_FRAME(_m) ((_m) | (1UL<<((sizeof(unsigned long)*8)-1)))
 #define pte_mfn(_pte) ((_pte).pte_low >> PAGE_SHIFT)
 #define pte_pfn(_pte)                                                  \
 ({                                                                     \
        unsigned long mfn = pte_mfn(_pte);                              \
        unsigned long pfn = mfn_to_pfn(mfn);                            \
-       if ((pfn >= max_mapnr) || (pfn_to_mfn(pfn) != mfn))             \
+       if ((pfn >= max_mapnr) || (phys_to_machine_mapping[pfn] != mfn))\
                pfn = max_mapnr; /* special: force !pfn_valid() */      \
        pfn;                                                            \
 })
diff -r d7b79cac9ea9 -r d760699356fd linux-2.6-xen-sparse/include/asm-xen/asm-i386/pgtable-3level.h
--- a/linux-2.6-xen-sparse/include/asm-xen/asm-i386/pgtable-3level.h    Tue Aug 23 17:32:44 2005
+++ b/linux-2.6-xen-sparse/include/asm-xen/asm-i386/pgtable-3level.h    Tue Aug 23 17:33:11 2005
@@ -150,15 +150,13 @@
        return !pte.pte_low && !pte.pte_high;
 }
 
-#define INVALID_P2M_ENTRY (~0U)
-#define FOREIGN_FRAME(_m) ((_m) | (1UL<<((sizeof(unsigned long)*8)-1)))
 #define pte_mfn(_pte) ( ((_pte).pte_low >> PAGE_SHIFT) |\
                        (((_pte).pte_high & 0xfff) << (32-PAGE_SHIFT)) )
 #define pte_pfn(_pte)                                                  \
 ({                                                                     \
        unsigned long mfn = pte_mfn(_pte);                              \
        unsigned long pfn = mfn_to_pfn(mfn);                            \
-       if ((pfn >= max_mapnr) || (pfn_to_mfn(pfn) != mfn))             \
+       if ((pfn >= max_mapnr) || (phys_to_machine_mapping[pfn] != mfn))\
                pfn = max_mapnr; /* special: force !pfn_valid() */      \
        pfn;                                                            \
 })
diff -r d7b79cac9ea9 -r d760699356fd linux-2.6-xen-sparse/include/asm-xen/asm-x86_64/page.h
--- a/linux-2.6-xen-sparse/include/asm-xen/asm-x86_64/page.h    Tue Aug 23 17:32:44 2005
+++ b/linux-2.6-xen-sparse/include/asm-xen/asm-x86_64/page.h    Tue Aug 23 17:33:11 2005
@@ -62,9 +62,13 @@
 #define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
 
 /**** MACHINE <-> PHYSICAL CONVERSION MACROS ****/
+#define INVALID_P2M_ENTRY      (~0U)
+#define FOREIGN_FRAME(m)       ((m) | 0x80000000U)
 extern u32 *phys_to_machine_mapping;
-#define pfn_to_mfn(_pfn) ((unsigned long) phys_to_machine_mapping[(unsigned int)(_pfn)])
-#define mfn_to_pfn(_mfn) ((unsigned long) machine_to_phys_mapping[(unsigned int)(_mfn)])
+#define pfn_to_mfn(pfn)        \
+((unsigned long)phys_to_machine_mapping[(unsigned int)(pfn)] & 0x7FFFFFFFUL)
+#define mfn_to_pfn(mfn)        \
+((unsigned long)machine_to_phys_mapping[(unsigned int)(mfn)])
 
 /* Definitions for machine and pseudophysical addresses. */
 typedef unsigned long paddr_t;
diff -r d7b79cac9ea9 -r d760699356fd linux-2.6-xen-sparse/include/asm-xen/asm-x86_64/pgtable.h
--- a/linux-2.6-xen-sparse/include/asm-xen/asm-x86_64/pgtable.h Tue Aug 23 17:32:44 2005
+++ b/linux-2.6-xen-sparse/include/asm-xen/asm-x86_64/pgtable.h Tue Aug 23 17:33:11 2005
@@ -300,17 +300,15 @@
  * 
  * NB2. When deliberately mapping foreign pages into the p2m table, you *must*
  *      use FOREIGN_FRAME(). This will cause pte_pfn() to choke on it, as we
- *      require. In all the cases we care about, the high bit gets shifted out
- *      (e.g., phys_to_machine()) so behaviour there is correct.
- */
-#define INVALID_P2M_ENTRY (~0U)
-#define FOREIGN_FRAME(_m) ((_m) | (1UL<<((sizeof(unsigned long)*8)-1)))
+ *      require. In all the cases we care about, the FOREIGN_FRAME bit is
+ *      masked (e.g., pfn_to_mfn()) so behaviour there is correct.
+ */
 #define pte_mfn(_pte) (((_pte).pte & PTE_MASK) >> PAGE_SHIFT)
 #define pte_pfn(_pte)                                                  \
 ({                                                                     \
        unsigned long mfn = pte_mfn(_pte);                              \
        unsigned pfn = mfn_to_pfn(mfn);                                 \
-       if ((pfn >= max_mapnr) || (pfn_to_mfn(pfn) != mfn))             \
+       if ((pfn >= max_mapnr) || (phys_to_machine_mapping[pfn] != mfn))\
                pfn = max_mapnr; /* special: force !pfn_valid() */      \
        pfn;                                                            \
 })
diff -r d7b79cac9ea9 -r d760699356fd tools/xenstat/xentop/Makefile
--- a/tools/xenstat/xentop/Makefile     Tue Aug 23 17:32:44 2005
+++ b/tools/xenstat/xentop/Makefile     Tue Aug 23 17:33:11 2005
@@ -28,7 +28,7 @@
 
 CFLAGS += -DGCC_PRINTF -Wall -Werror -I$(XEN_LIBXENSTAT)
 LDFLAGS += -L$(XEN_LIBXENSTAT)
-LDLIBS += -lxenstat -lcurses
+LDLIBS += -lxenstat -lncurses
 
 all: xentop
 
diff -r d7b79cac9ea9 -r d760699356fd xen/arch/x86/io_apic.c
--- a/xen/arch/x86/io_apic.c    Tue Aug 23 17:32:44 2005
+++ b/xen/arch/x86/io_apic.c    Tue Aug 23 17:33:11 2005
@@ -1751,8 +1751,30 @@
     
     pin = (address - 0x10) >> 1;
 
+    *(u32 *)&rte = val;
     rte.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
-    *(int *)&rte = val;
+
+    /*
+     * What about weird destination types?
+     *  SMI:    Ignore? Ought to be set up by the BIOS.
+     *  NMI:    Ignore? Watchdog functionality is Xen's concern.
+     *  INIT:   Definitely ignore: probably a guest OS bug.
+     *  ExtINT: Ignore? Linux only asserts this at start of day.
+     * For now, print a message and return an error. We can fix up on demand.
+     */
+    if ( rte.delivery_mode > dest_LowestPrio )
+    {
+        printk("ERROR: Attempt to write weird IOAPIC destination mode!\n");
+        printk("       APIC=%d/%d, lo-reg=%x\n", apicid, pin, val);
+        return -EINVAL;
+    }
+
+    /*
+     * The guest does not know physical APIC arrangement (flat vs. cluster).
+     * Apply genapic conventions for this platform.
+     */
+    rte.delivery_mode = INT_DELIVERY_MODE;
+    rte.dest_mode     = INT_DEST_MODE;
 
     if ( rte.vector >= FIRST_DEVICE_VECTOR )
     {
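
The io_apic.c hunk reorders the RTE write path: the guest-supplied low word is copied into the RTE first, the delivery mode is then validated (anything beyond lowest-priority is rejected for now), and finally the hypervisor imposes its own delivery and destination conventions. A simplified userspace sketch of that validate-then-override flow; the bitfield layout and constants here are assumptions made only for this illustration:

/* Simplified sketch of the validate-then-override flow; the bitfield layout
 * and the constants are assumptions made for this illustration. */
#include <stdio.h>
#include <stdint.h>

struct rte_lo {
    uint32_t vector        : 8;
    uint32_t delivery_mode : 3;   /* 0=fixed, 1=lowest-prio, 2=SMI, 4=NMI, 5=INIT, 7=ExtINT */
    uint32_t dest_mode     : 1;
    uint32_t rest          : 20;
};

#define DEST_LOWEST_PRIO  1
#define XEN_DELIVERY_MODE 1       /* assumed platform default */
#define XEN_DEST_MODE     1

static int guest_write_rte_lo(uint32_t val)
{
    struct rte_lo rte;
    *(uint32_t *)&rte = val;                     /* take the guest's word first */

    if (rte.delivery_mode > DEST_LOWEST_PRIO) {  /* SMI/NMI/INIT/ExtINT: refuse */
        fprintf(stderr, "weird delivery mode %u rejected\n",
                (unsigned)rte.delivery_mode);
        return -1;
    }

    rte.delivery_mode = XEN_DELIVERY_MODE;       /* impose platform conventions */
    rte.dest_mode     = XEN_DEST_MODE;
    printf("accepted vector %u\n", (unsigned)rte.vector);
    return 0;
}

int main(void)
{
    guest_write_rte_lo(0x00000030);              /* fixed delivery, vector 0x30 */
    guest_write_rte_lo(0x00000430);              /* NMI delivery mode: rejected */
    return 0;
}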
diff -r d7b79cac9ea9 -r d760699356fd xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c Tue Aug 23 17:32:44 2005
+++ b/xen/arch/x86/mm.c Tue Aug 23 17:33:11 2005
@@ -444,7 +444,7 @@
 
     if ( unlikely(l1e_get_flags(l1e) & L1_DISALLOW_MASK) )
     {
-        MEM_LOG("Bad L1 flags %x\n", l1e_get_flags(l1e) & L1_DISALLOW_MASK);
+        MEM_LOG("Bad L1 flags %x", l1e_get_flags(l1e) & L1_DISALLOW_MASK);
         return 0;
     }
 
@@ -490,7 +490,7 @@
 
     if ( unlikely((l2e_get_flags(l2e) & L2_DISALLOW_MASK)) )
     {
-        MEM_LOG("Bad L2 flags %x\n", l2e_get_flags(l2e) & L2_DISALLOW_MASK);
+        MEM_LOG("Bad L2 flags %x", l2e_get_flags(l2e) & L2_DISALLOW_MASK);
         return 0;
     }
 
@@ -523,7 +523,7 @@
 
     if ( unlikely((l3e_get_flags(l3e) & L3_DISALLOW_MASK)) )
     {
-        MEM_LOG("Bad L3 flags %x\n", l3e_get_flags(l3e) & L3_DISALLOW_MASK);
+        MEM_LOG("Bad L3 flags %x", l3e_get_flags(l3e) & L3_DISALLOW_MASK);
         return 0;
     }
 
@@ -557,7 +557,7 @@
 
     if ( unlikely((l4e_get_flags(l4e) & L4_DISALLOW_MASK)) )
     {
-        MEM_LOG("Bad L4 flags %x\n", l4e_get_flags(l4e) & L4_DISALLOW_MASK);
+        MEM_LOG("Bad L4 flags %x", l4e_get_flags(l4e) & L4_DISALLOW_MASK);
         return 0;
     }
 
@@ -1025,7 +1025,7 @@
          unlikely(o != l1e_get_intpte(ol1e)) )
     {
         MEM_LOG("Failed to update %" PRIpte " -> %" PRIpte
-                ": saw %" PRIpte "\n",
+                ": saw %" PRIpte,
                 l1e_get_intpte(ol1e),
                 l1e_get_intpte(nl1e),
                 o);
@@ -1051,7 +1051,7 @@
     {
         if ( unlikely(l1e_get_flags(nl1e) & L1_DISALLOW_MASK) )
         {
-            MEM_LOG("Bad L1 flags %x\n",
+            MEM_LOG("Bad L1 flags %x",
                     l1e_get_flags(nl1e) & L1_DISALLOW_MASK);
             return 0;
         }
@@ -1113,7 +1113,7 @@
     {
         if ( unlikely(l2e_get_flags(nl2e) & L2_DISALLOW_MASK) )
         {
-            MEM_LOG("Bad L2 flags %x\n",
+            MEM_LOG("Bad L2 flags %x",
                     l2e_get_flags(nl2e) & L2_DISALLOW_MASK);
             return 0;
         }
@@ -1175,7 +1175,7 @@
     {
         if ( unlikely(l3e_get_flags(nl3e) & L3_DISALLOW_MASK) )
         {
-            MEM_LOG("Bad L3 flags %x\n",
+            MEM_LOG("Bad L3 flags %x",
                     l3e_get_flags(nl3e) & L3_DISALLOW_MASK);
             return 0;
         }
@@ -1237,7 +1237,7 @@
     {
         if ( unlikely(l4e_get_flags(nl4e) & L4_DISALLOW_MASK) )
         {
-            MEM_LOG("Bad L4 flags %x\n",
+            MEM_LOG("Bad L4 flags %x",
                     l4e_get_flags(nl4e) & L4_DISALLOW_MASK);
             return 0;
         }
@@ -1598,7 +1598,7 @@
             percpu_info[cpu].foreign = dom_io;
             break;
         default:
-            MEM_LOG("Dom %u cannot set foreign dom\n", d->domain_id);
+            MEM_LOG("Dom %u cannot set foreign dom", d->domain_id);
             okay = 0;
             break;
         }
@@ -1831,7 +1831,7 @@
         case MMUEXT_FLUSH_CACHE:
             if ( unlikely(!IS_CAPABLE_PHYSDEV(d)) )
             {
-                MEM_LOG("Non-physdev domain tried to FLUSH_CACHE.\n");
+                MEM_LOG("Non-physdev domain tried to FLUSH_CACHE.");
                 okay = 0;
             }
             else
@@ -1845,7 +1845,7 @@
             if ( shadow_mode_external(d) )
             {
                 MEM_LOG("ignoring SET_LDT hypercall from external "
-                        "domain %u\n", d->domain_id);
+                        "domain %u", d->domain_id);
                 okay = 0;
                 break;
             }
@@ -1916,7 +1916,7 @@
                  unlikely(IS_XEN_HEAP_FRAME(page)) )
             {
                 MEM_LOG("Transferee has no reservation headroom (%d,%d), or "
-                        "page is in Xen heap (%lx), or dom is dying (%ld).\n",
+                        "page is in Xen heap (%lx), or dom is dying (%ld).",
                         e->tot_pages, e->max_pages, op.mfn, e->domain_flags);
                 okay = 0;
                 goto reassign_fail;
@@ -1937,7 +1937,7 @@
                      unlikely(_nd != _d) )
                 {
                     MEM_LOG("Bad page values %lx: ed=%p(%u), sd=%p,"
-                            " caf=%08x, taf=%" PRtype_info "\n",
+                            " caf=%08x, taf=%" PRtype_info,
                             page_to_pfn(page), d, d->domain_id,
                             unpickle_domptr(_nd), x, page->u.inuse.type_info);
                     okay = 0;
@@ -2301,7 +2301,7 @@
     if ( ((type_info & PGT_type_mask) != PGT_l1_page_table) ||
          !get_page_type(page, type_info & (PGT_type_mask|PGT_va_mask)) )
     {
-        DPRINTK("Grant map attempted to update a non-L1 page\n");
+        MEM_LOG("Grant map attempted to update a non-L1 page");
         rc = GNTST_general_error;
         goto failed;
     }
@@ -2363,7 +2363,7 @@
     if ( ((type_info & PGT_type_mask) != PGT_l1_page_table) ||
          !get_page_type(page, type_info & (PGT_type_mask|PGT_va_mask)) )
     {
-        DPRINTK("Grant map attempted to update a non-L1 page\n");
+        MEM_LOG("Grant map attempted to update a non-L1 page");
         rc = GNTST_general_error;
         goto failed;
     }
@@ -2378,7 +2378,7 @@
     /* Check that the virtual address supplied is actually mapped to frame. */
     if ( unlikely((l1e_get_intpte(ol1e) >> PAGE_SHIFT) != frame) )
     {
-        DPRINTK("PTE entry %lx for address %lx doesn't match frame %lx\n",
+        MEM_LOG("PTE entry %lx for address %lx doesn't match frame %lx",
                 (unsigned long)l1e_get_intpte(ol1e), addr, frame);
         put_page_type(page);
         rc = GNTST_general_error;
@@ -2388,7 +2388,7 @@
     /* Delete pagetable entry. */
     if ( unlikely(__put_user(0, (intpte_t *)va)))
     {
-        DPRINTK("Cannot delete PTE entry at %p.\n", va);
+        MEM_LOG("Cannot delete PTE entry at %p", va);
         put_page_type(page);
         rc = GNTST_general_error;
         goto failed;
@@ -2452,7 +2452,7 @@
 
     if ( unlikely(__get_user(ol1e.l1, &pl1e->l1) != 0) )
     {
-        DPRINTK("Could not find PTE entry for address %lx\n", addr);
+        MEM_LOG("Could not find PTE entry for address %lx", addr);
         return GNTST_general_error;
     }
 
@@ -2462,7 +2462,7 @@
      */
     if ( unlikely(l1e_get_pfn(ol1e) != frame) )
     {
-        DPRINTK("PTE entry %lx for address %lx doesn't match frame %lx\n",
+        MEM_LOG("PTE entry %lx for address %lx doesn't match frame %lx",
                 l1e_get_pfn(ol1e), addr, frame);
         return GNTST_general_error;
     }
@@ -2470,7 +2470,7 @@
     /* Delete pagetable entry. */
     if ( unlikely(__put_user(0, &pl1e->l1)) )
     {
-        DPRINTK("Cannot delete PTE entry at %p.\n", (unsigned long *)pl1e);
+        MEM_LOG("Cannot delete PTE entry at %p", (unsigned long *)pl1e);
         return GNTST_general_error;
     }
     
@@ -2930,7 +2930,7 @@
 
         if ( unlikely(!get_page_from_l1e(nl1e, d)) )
         {
-            MEM_LOG("ptwr: Could not re-validate l1 page\n");
+            MEM_LOG("ptwr: Could not re-validate l1 page");
             /*
              * Make the remaining p.t's consistent before crashing, so the
              * reference counts are correct.
@@ -3056,7 +3056,7 @@
     /* Aligned access only, thank you. */
     if ( !access_ok(addr, bytes) || ((addr & (bytes-1)) != 0) )
     {
-        MEM_LOG("ptwr_emulate: Unaligned or bad size ptwr access (%d, %lx)\n",
+        MEM_LOG("ptwr_emulate: Unaligned or bad size ptwr access (%d, %lx)",
                 bytes, addr);
         return X86EMUL_UNHANDLEABLE;
     }
@@ -3089,7 +3089,7 @@
     if (__copy_from_user(&pte, &linear_pg_table[l1_linear_offset(addr)],
                          sizeof(pte)))
     {
-        MEM_LOG("ptwr_emulate: Cannot read thru linear_pg_table\n");
+        MEM_LOG("ptwr_emulate: Cannot read thru linear_pg_table");
         return X86EMUL_UNHANDLEABLE;
     }
 
@@ -3102,7 +3102,7 @@
          (page_get_owner(page) != d) )
     {
         MEM_LOG("ptwr_emulate: Page is mistyped or bad pte "
-                "(%lx, %" PRtype_info ")\n",
+                "(%lx, %" PRtype_info ")",
                 l1e_get_pfn(pte), page->u.inuse.type_info);
         return X86EMUL_UNHANDLEABLE;
     }
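
Most of the mm.c changes simply drop the trailing "\n" from MEM_LOG() call sites (and convert a few DPRINTKs to MEM_LOG), which only makes sense if the logging macro supplies its own newline. A hypothetical stand-in macro, sketched to show the call-site convention; the real definition lives in Xen's headers:

/* Hypothetical stand-in for MEM_LOG, sketched only to show why call sites
 * should not append their own newline; the real macro is defined in Xen. */
#include <stdio.h>

#define MEM_LOG(_f, _a...) \
    fprintf(stderr, "MEM_LOG: " _f "\n" , ## _a)

int main(void)
{
    unsigned int bad_flags = 0x180;
    MEM_LOG("Bad L1 flags %x", bad_flags);   /* newline comes from the macro */
    return 0;
}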
diff -r d7b79cac9ea9 -r d760699356fd xen/arch/x86/vmx.c
--- a/xen/arch/x86/vmx.c        Tue Aug 23 17:32:44 2005
+++ b/xen/arch/x86/vmx.c        Tue Aug 23 17:33:11 2005
@@ -1712,9 +1712,6 @@
     default:
         __vmx_bug(&regs);       /* should not happen */
     }
-
-    vmx_intr_assist(v);
-    return;
 }
 
 asmlinkage void load_cr2(void)
diff -r d7b79cac9ea9 -r d760699356fd xen/arch/x86/vmx_io.c
--- a/xen/arch/x86/vmx_io.c     Tue Aug 23 17:32:44 2005
+++ b/xen/arch/x86/vmx_io.c     Tue Aug 23 17:33:11 2005
@@ -631,12 +631,14 @@
     return ((eflags & X86_EFLAGS_IF) == 0);
 }
 
-void vmx_intr_assist(struct vcpu *v) 
+asmlinkage void vmx_intr_assist(void) 
 {
     int intr_type = 0;
-    int highest_vector = find_highest_pending_irq(v, &intr_type);
+    int highest_vector;
     unsigned long intr_fields, eflags, interruptibility, cpu_exec_control;
-
+    struct vcpu *v = current;
+
+    highest_vector = find_highest_pending_irq(v, &intr_type);
     __vmread(CPU_BASED_VM_EXEC_CONTROL, &cpu_exec_control);
 
     if (highest_vector == -1) {
@@ -712,9 +714,6 @@
 
     /* We can't resume the guest if we're waiting on I/O */
     ASSERT(!test_bit(ARCH_VMX_IO_WAIT, &d->arch.arch_vmx.flags));
-
-    /* We always check for interrupts before resuming guest */
-    vmx_intr_assist(d);
 }
 
 #endif /* CONFIG_VMX */
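
Across vmx.c, vmx_io.c and the two entry.S files, vmx_intr_assist() becomes a no-argument asmlinkage function that looks up the current vcpu itself, so the assembly resume path can call it directly before load_cr2 instead of every C caller doing it. A short sketch of that calling-convention change with illustrative names:

/* Sketch of the calling-convention change: the helper now derives its vcpu
 * from a "current" accessor instead of taking it as an argument, so an
 * assembly stub can call it with no register setup. Names are illustrative. */
#include <stdio.h>

struct vcpu { int id; };

static struct vcpu this_vcpu = { 7 };
static struct vcpu *current_vcpu(void) { return &this_vcpu; }

/* old style: void intr_assist(struct vcpu *v); every caller had to pass v */
static void intr_assist(void)            /* new style: no-argument entry point */
{
    struct vcpu *v = current_vcpu();     /* fetch the running vcpu internally */
    printf("assisting interrupts for vcpu %d\n", v->id);
}

int main(void)
{
    intr_assist();                       /* exactly what the entry.S stub does */
    return 0;
}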
diff -r d7b79cac9ea9 -r d760699356fd xen/arch/x86/x86_32/entry.S
--- a/xen/arch/x86/x86_32/entry.S       Tue Aug 23 17:32:44 2005
+++ b/xen/arch/x86/x86_32/entry.S       Tue Aug 23 17:33:11 2005
@@ -140,6 +140,7 @@
         jnz 2f
 
 /* vmx_restore_all_guest */
+        call vmx_intr_assist
         call load_cr2
         .endif
         VMX_RESTORE_ALL_NOSEGREGS
diff -r d7b79cac9ea9 -r d760699356fd xen/arch/x86/x86_32/traps.c
--- a/xen/arch/x86/x86_32/traps.c       Tue Aug 23 17:32:44 2005
+++ b/xen/arch/x86/x86_32/traps.c       Tue Aug 23 17:33:11 2005
@@ -1,5 +1,6 @@
 
 #include <xen/config.h>
+#include <xen/domain_page.h>
 #include <xen/init.h>
 #include <xen/sched.h>
 #include <xen/lib.h>
@@ -86,24 +87,33 @@
 
 void show_page_walk(unsigned long addr)
 {
-    l2_pgentry_t pmd;
-    l1_pgentry_t *pte;
-
-    if ( addr < PAGE_OFFSET )
-        return;
+    unsigned long pfn = read_cr3() >> PAGE_SHIFT;
+    intpte_t *ptab, ent;
 
     printk("Pagetable walk from %08lx:\n", addr);
-    
-    pmd = idle_pg_table_l2[l2_linear_offset(addr)];
-    printk(" L2 = %"PRIpte" %s\n", l2e_get_intpte(pmd),
-           (l2e_get_flags(pmd) & _PAGE_PSE) ? "(2/4MB)" : "");
-    if ( !(l2e_get_flags(pmd) & _PAGE_PRESENT) ||
-         (l2e_get_flags(pmd) & _PAGE_PSE) )
-        return;
-
-    pte  = __va(l2e_get_paddr(pmd));
-    pte += l1_table_offset(addr);
-    printk("  L1 = %"PRIpte"\n", l1e_get_intpte(*pte));
+
+#ifdef CONFIG_X86_PAE
+    ptab = map_domain_page(pfn);
+    ent = ptab[l3_table_offset(addr)];
+    printk(" L3 = %"PRIpte"\n", ent);
+    unmap_domain_page(ptab);
+    if ( !(ent & _PAGE_PRESENT) )
+        return;
+    pfn = ent >> PAGE_SHIFT;
+#endif
+
+    ptab = map_domain_page(pfn);
+    ent = ptab[l2_table_offset(addr)];
+    printk("  L2 = %"PRIpte" %s\n", ent, (ent & _PAGE_PSE) ? "(PSE)" : "");
+    unmap_domain_page(ptab);
+    if ( !(ent & _PAGE_PRESENT) || (ent & _PAGE_PSE) )
+        return;
+    pfn = ent >> PAGE_SHIFT;
+
+    ptab = map_domain_page(ent >> PAGE_SHIFT);
+    ent = ptab[l2_table_offset(addr)];
+    printk("   L1 = %"PRIpte"\n", ent);
+    unmap_domain_page(ptab);
 }
 
 #define DOUBLEFAULT_STACK_SIZE 1024
diff -r d7b79cac9ea9 -r d760699356fd xen/arch/x86/x86_64/entry.S
--- a/xen/arch/x86/x86_64/entry.S       Tue Aug 23 17:32:44 2005
+++ b/xen/arch/x86/x86_64/entry.S       Tue Aug 23 17:33:11 2005
@@ -233,6 +233,7 @@
         jnz  2f 
 
 /* vmx_restore_all_guest */
+        call vmx_intr_assist
         call load_cr2
         .endif
         /* 
diff -r d7b79cac9ea9 -r d760699356fd xen/include/asm-x86/vmx.h
--- a/xen/include/asm-x86/vmx.h Tue Aug 23 17:32:44 2005
+++ b/xen/include/asm-x86/vmx.h Tue Aug 23 17:33:11 2005
@@ -31,7 +31,7 @@
 extern void vmx_asm_vmexit_handler(struct cpu_user_regs);
 extern void vmx_asm_do_resume(void);
 extern void vmx_asm_do_launch(void);
-extern void vmx_intr_assist(struct vcpu *d);
+extern void vmx_intr_assist(void);
 
 extern void arch_vmx_do_launch(struct vcpu *);
 extern void arch_vmx_do_resume(struct vcpu *);
@@ -355,7 +355,7 @@
 }
 
 /* Make sure that xen intercepts any FP accesses from current */
-static inline void vmx_stts()
+static inline void vmx_stts(void)
 {
     unsigned long cr0;
 
diff -r d7b79cac9ea9 -r d760699356fd linux-2.6-xen-sparse/arch/xen/x86_64/mm/ioremap.c
--- a/linux-2.6-xen-sparse/arch/xen/x86_64/mm/ioremap.c Tue Aug 23 17:32:44 2005
+++ /dev/null   Tue Aug 23 17:33:11 2005
@@ -1,499 +0,0 @@
-/*
- * arch/x86_64/mm/ioremap.c
- *
- * Re-map IO memory to kernel address space so that we can access it.
- * This is needed for high PCI addresses that aren't mapped in the
- * 640k-1MB IO memory area on PC's
- *
- * (C) Copyright 1995 1996 Linus Torvalds
- */
-
-#include <linux/vmalloc.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <asm/io.h>
-#include <asm/fixmap.h>
-#include <asm/cacheflush.h>
-#include <asm/tlbflush.h>
-#include <asm/pgtable.h>
-#include <asm/pgalloc.h>
-
-/*
- * Reuse arch/xen/i396/mm/ioremap.c. Need to merge later
- */
-#ifndef CONFIG_XEN_PHYSDEV_ACCESS
-
-void * __ioremap(unsigned long phys_addr, unsigned long size,
-                unsigned long flags)
-{
-       return NULL;
-}
-
-void *ioremap_nocache (unsigned long phys_addr, unsigned long size)
-{
-       return NULL;
-}
-
-void iounmap(volatile void __iomem *addr)
-{
-}
-
-void __init *bt_ioremap(unsigned long phys_addr, unsigned long size)
-{
-       return NULL;
-}
-
-void __init bt_iounmap(void *addr, unsigned long size)
-{
-}
-
-#else
-
-#if defined(__i386__)
-/*
- * Does @address reside within a non-highmem page that is local to this virtual
- * machine (i.e., not an I/O page, nor a memory page belonging to another VM).
- * See the comment that accompanies pte_pfn() in pgtable-2level.h to understand
- * why this works.
- */
-static inline int is_local_lowmem(unsigned long address)
-{
-       extern unsigned long max_low_pfn;
-       unsigned long mfn = address >> PAGE_SHIFT;
-       unsigned long pfn = mfn_to_pfn(mfn);
-       return ((pfn < max_low_pfn) && (pfn_to_mfn(pfn) == mfn));
-}
-#elif defined(__x86_64__)
-/*
- * 
- */
-static inline int is_local_lowmem(unsigned long address)
-{
-        return 0;
-}
-#endif
-
-/*
- * Generic mapping function (not visible outside):
- */
-
-/*
- * Remap an arbitrary physical address space into the kernel virtual
- * address space. Needed when the kernel wants to access high addresses
- * directly.
- *
- * NOTE! We need to allow non-page-aligned mappings too: we will obviously
- * have to convert them into an offset in a page-aligned mapping, but the
- * caller shouldn't need to know that small detail.
- */
-void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
-{
-       void __iomem * addr;
-       struct vm_struct * area;
-       unsigned long offset, last_addr;
-       domid_t domid = DOMID_IO;
-
-       /* Don't allow wraparound or zero size */
-       last_addr = phys_addr + size - 1;
-       if (!size || last_addr < phys_addr)
-               return NULL;
-
-#ifdef CONFIG_XEN_PRIVILEGED_GUEST
-       /*
-        * Don't remap the low PCI/ISA area, it's always mapped..
-        */
-       if (phys_addr >= 0x0 && last_addr < 0x100000)
-               return isa_bus_to_virt(phys_addr);
-#endif
-
-       /*
-        * Don't allow anybody to remap normal RAM that we're using..
-        */
-       if (is_local_lowmem(phys_addr)) {
-               char *t_addr, *t_end;
-               struct page *page;
-
-               t_addr = bus_to_virt(phys_addr);
-               t_end = t_addr + (size - 1);
-          
-               for(page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
-                       if(!PageReserved(page))
-                               return NULL;
-
-               domid = DOMID_LOCAL;
-       }
-
-       /*
-        * Mappings have to be page-aligned
-        */
-       offset = phys_addr & ~PAGE_MASK;
-       phys_addr &= PAGE_MASK;
-       size = PAGE_ALIGN(last_addr+1) - phys_addr;
-
-       /*
-        * Ok, go for it..
-        */
-       area = get_vm_area(size, VM_IOREMAP | (flags << 20));
-       if (!area)
-               return NULL;
-       area->phys_addr = phys_addr;
-       addr = (void __iomem *) area->addr;
-       if (direct_remap_area_pages(&init_mm, (unsigned long) addr, phys_addr,
-                                   size, __pgprot(_PAGE_PRESENT | _PAGE_RW |
-                                                  _PAGE_DIRTY | _PAGE_ACCESSED
-#if defined(__x86_64__)
-                                                   | _PAGE_USER
-#endif
-                                                  | flags), domid)) {
-               vunmap((void __force *) addr);
-               return NULL;
-       }
-       return (void __iomem *) (offset + (char __iomem *)addr);
-}
-
-
-/**
- * ioremap_nocache     -   map bus memory into CPU space
- * @offset:    bus address of the memory
- * @size:      size of the resource to map
- *
- * ioremap_nocache performs a platform specific sequence of operations to
- * make bus memory CPU accessible via the readb/readw/readl/writeb/
- * writew/writel functions and the other mmio helpers. The returned
- * address is not guaranteed to be usable directly as a virtual
- * address. 
- *
- * This version of ioremap ensures that the memory is marked uncachable
- * on the CPU as well as honouring existing caching rules from things like
- * the PCI bus. Note that there are other caches and buffers on many 
- * busses. In particular driver authors should read up on PCI writes
- *
- * It's useful if some control registers are in such an area and
- * write combining or read caching is not desirable:
- * 
- * Must be freed with iounmap.
- */
-
-void __iomem *ioremap_nocache (unsigned long phys_addr, unsigned long size)
-{
-       unsigned long last_addr;
-       void __iomem *p = __ioremap(phys_addr, size, _PAGE_PCD);
-       if (!p) 
-               return p; 
-
-       /* Guaranteed to be > phys_addr, as per __ioremap() */
-       last_addr = phys_addr + size - 1;
-
-       if (is_local_lowmem(last_addr)) { 
-               struct page *ppage = virt_to_page(bus_to_virt(phys_addr));
-               unsigned long npages;
-
-               phys_addr &= PAGE_MASK;
-
-               /* This might overflow and become zero.. */
-               last_addr = PAGE_ALIGN(last_addr);
-
-               /* .. but that's ok, because modulo-2**n arithmetic will make
-               * the page-aligned "last - first" come out right.
-               */
-               npages = (last_addr - phys_addr) >> PAGE_SHIFT;
-
-               if (change_page_attr(ppage, npages, PAGE_KERNEL_NOCACHE) < 0) { 
-                       iounmap(p); 
-                       p = NULL;
-               }
-               global_flush_tlb();
-       }
-
-       return p;                                       
-}
-
-void iounmap(volatile void __iomem *addr)
-{
-       struct vm_struct *p;
-       if ((void __force *) addr <= high_memory) 
-               return; 
-#ifdef CONFIG_XEN_PRIVILEGED_GUEST
-       if ((unsigned long) addr >= fix_to_virt(FIX_ISAMAP_BEGIN))
-               return;
-#endif
-       p = remove_vm_area((void *) (PAGE_MASK & (unsigned long __force) addr));
-       if (!p) { 
-               printk("__iounmap: bad address %p\n", addr);
-               return;
-       }
-
-       if ((p->flags >> 20) && is_local_lowmem(p->phys_addr)) {
-               /* p->size includes the guard page, but cpa doesn't like that */
-               change_page_attr(virt_to_page(bus_to_virt(p->phys_addr)),
-                                (p->size - PAGE_SIZE) >> PAGE_SHIFT,
-                                PAGE_KERNEL);                           
-               global_flush_tlb();
-       } 
-       kfree(p); 
-}
-
-#if defined(__i386__)
-void __init *bt_ioremap(unsigned long phys_addr, unsigned long size)
-{
-       unsigned long offset, last_addr;
-       unsigned int nrpages;
-       enum fixed_addresses idx;
-
-       /* Don't allow wraparound or zero size */
-       last_addr = phys_addr + size - 1;
-       if (!size || last_addr < phys_addr)
-               return NULL;
-
-#ifdef CONFIG_XEN_PRIVILEGED_GUEST
-       /*
-        * Don't remap the low PCI/ISA area, it's always mapped..
-        */
-       if (phys_addr >= 0x0 && last_addr < 0x100000)
-               return isa_bus_to_virt(phys_addr);
-#endif
-
-       /*
-        * Mappings have to be page-aligned
-        */
-       offset = phys_addr & ~PAGE_MASK;
-       phys_addr &= PAGE_MASK;
-       size = PAGE_ALIGN(last_addr) - phys_addr;
-
-       /*
-        * Mappings have to fit in the FIX_BTMAP area.
-        */
-       nrpages = size >> PAGE_SHIFT;
-       if (nrpages > NR_FIX_BTMAPS)
-               return NULL;
-
-       /*
-        * Ok, go for it..
-        */
-       idx = FIX_BTMAP_BEGIN;
-       while (nrpages > 0) {
-               set_fixmap(idx, phys_addr);
-               phys_addr += PAGE_SIZE;
-               --idx;
-               --nrpages;
-       }
-       return (void*) (offset + fix_to_virt(FIX_BTMAP_BEGIN));
-}
-
-void __init bt_iounmap(void *addr, unsigned long size)
-{
-       unsigned long virt_addr;
-       unsigned long offset;
-       unsigned int nrpages;
-       enum fixed_addresses idx;
-
-       virt_addr = (unsigned long)addr;
-       if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN))
-               return;
-#ifdef CONFIG_XEN_PRIVILEGED_GUEST
-       if (virt_addr >= fix_to_virt(FIX_ISAMAP_BEGIN))
-               return;
-#endif
-       offset = virt_addr & ~PAGE_MASK;
-       nrpages = PAGE_ALIGN(offset + size - 1) >> PAGE_SHIFT;
-
-       idx = FIX_BTMAP_BEGIN;
-       while (nrpages > 0) {
-               clear_fixmap(idx);
-               --idx;
-               --nrpages;
-       }
-}
-#endif /* defined(__i386__) */
-
-#endif /* CONFIG_XEN_PHYSDEV_ACCESS */
-
-/* These hacky macros avoid phys->machine translations. */
-#define __direct_pte(x) ((pte_t) { (x) } )
-#define __direct_mk_pte(page_nr,pgprot) \
-  __direct_pte(((page_nr) << PAGE_SHIFT) | pgprot_val(pgprot))
-#define direct_mk_pte_phys(physpage, pgprot) \
-  __direct_mk_pte((physpage) >> PAGE_SHIFT, pgprot)
-
-static inline void direct_remap_area_pte(pte_t *pte, 
-                                        unsigned long address, 
-                                        unsigned long size,
-                                        mmu_update_t **v)
-{
-       unsigned long end;
-
-       address &= ~PMD_MASK;
-       end = address + size;
-       if (end > PMD_SIZE)
-               end = PMD_SIZE;
-       if (address >= end)
-               BUG();
-
-       do {
-               (*v)->ptr = virt_to_machine(pte);
-               (*v)++;
-               address += PAGE_SIZE;
-               pte++;
-       } while (address && (address < end));
-}
-
-static inline int direct_remap_area_pmd(struct mm_struct *mm,
-                                       pmd_t *pmd, 
-                                       unsigned long address, 
-                                       unsigned long size,
-                                       mmu_update_t **v)
-{
-       unsigned long end;
-
-       address &= ~PGDIR_MASK;
-       end = address + size;
-       if (end > PGDIR_SIZE)
-               end = PGDIR_SIZE;
-       if (address >= end)
-               BUG();
-       do {
-               pte_t *pte = (mm == &init_mm) ? 
-                       pte_alloc_kernel(mm, pmd, address) :
-                       pte_alloc_map(mm, pmd, address);
-               if (!pte)
-                       return -ENOMEM;
-               direct_remap_area_pte(pte, address, end - address, v);
-               pte_unmap(pte);
-               address = (address + PMD_SIZE) & PMD_MASK;
-               pmd++;
-       } while (address && (address < end));
-       return 0;
-}
- 
-int __direct_remap_area_pages(struct mm_struct *mm,
-                             unsigned long address, 
-                             unsigned long size, 
-                             mmu_update_t *v)
-{
-       pgd_t * dir;
-       unsigned long end = address + size;
-       int error;
-
-#if defined(__i386__)
-       dir = pgd_offset(mm, address);
-#elif defined (__x86_64)
-        dir = (mm == &init_mm) ?
-               pgd_offset_k(address):
-               pgd_offset(mm, address);
-#endif
-       if (address >= end)
-               BUG();
-       spin_lock(&mm->page_table_lock);
-       do {
-               pud_t *pud;
-               pmd_t *pmd;
-
-               error = -ENOMEM;
-               pud = pud_alloc(mm, dir, address);
-               if (!pud)
-                       break;
-               pmd = pmd_alloc(mm, pud, address);
-               if (!pmd)
-                       break;
-               error = 0;
-               direct_remap_area_pmd(mm, pmd, address, end - address, &v);
-               address = (address + PGDIR_SIZE) & PGDIR_MASK;
-               dir++;
-
-       } while (address && (address < end));
-       spin_unlock(&mm->page_table_lock);
-       return error;
-}
-
-
-int direct_remap_area_pages(struct mm_struct *mm,
-                           unsigned long address, 
-                           unsigned long machine_addr,
-                           unsigned long size, 
-                           pgprot_t prot,
-                           domid_t  domid)
-{
-       int i;
-       unsigned long start_address;
-#define MAX_DIRECTMAP_MMU_QUEUE 130
-       mmu_update_t u[MAX_DIRECTMAP_MMU_QUEUE], *v = u;
-
-       start_address = address;
-
-       flush_cache_all();
-
-       for (i = 0; i < size; i += PAGE_SIZE) {
-               if ((v - u) == MAX_DIRECTMAP_MMU_QUEUE) {
-                       /* Fill in the PTE pointers. */
-                       __direct_remap_area_pages(mm,
-                                                 start_address, 
-                                                 address-start_address, 
-                                                 u);
- 
-                       if (HYPERVISOR_mmu_update(u, v - u, NULL, domid) < 0)
-                               return -EFAULT;
-                       v = u;
-                       start_address = address;
-               }
-
-               /*
-                * Fill in the machine address: PTE ptr is done later by
-                * __direct_remap_area_pages(). 
-                */
-               v->val = (machine_addr & PAGE_MASK) | pgprot_val(prot);
-
-               machine_addr += PAGE_SIZE;
-               address += PAGE_SIZE; 
-               v++;
-       }
-
-       if (v != u) {
-               /* get the ptep's filled in */
-               __direct_remap_area_pages(mm,
-                                         start_address, 
-                                         address-start_address, 
-                                         u);
-               if (unlikely(HYPERVISOR_mmu_update(u, v - u, NULL, domid) < 0))
-                       return -EFAULT;
-       }
-
-       flush_tlb_all();
-
-       return 0;
-}
-
-EXPORT_SYMBOL(direct_remap_area_pages);
-
-static int lookup_pte_fn(
-    pte_t *pte, struct page *pte_page, unsigned long addr, void *data) 
-{
-    unsigned long *ptep = (unsigned long *)data;
-    if (ptep) *ptep = (pfn_to_mfn(page_to_pfn(pte_page)) << PAGE_SHIFT)
-                  | ((unsigned long)pte & ~PAGE_MASK);
-    return 0;
-}
-
-int create_lookup_pte_addr(struct mm_struct *mm, 
-                           unsigned long address,
-                           unsigned long *ptep)
-{
-    return generic_page_range(mm, address, PAGE_SIZE, lookup_pte_fn, ptep);
-}
-
-EXPORT_SYMBOL(create_lookup_pte_addr);
-
-static int noop_fn(
-    pte_t *pte, struct page *pte_page, unsigned long addr, void *data) 
-{
-    return 0;
-}
-
-int touch_pte_range(struct mm_struct *mm,
-                    unsigned long address,
-                    unsigned long size)
-{
-    return generic_page_range(mm, address, size, noop_fn, NULL);
-}
-
-EXPORT_SYMBOL(touch_pte_range);

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
