Index: 2005-12-16/include/asm-i386/mach-default/mach_page.h
===================================================================
--- /dev/null	1970-01-01 00:00:00.000000000 +0000
+++ 2005-12-16/include/asm-i386/mach-default/mach_page.h	2005-12-21 15:04:44.420002440 +0100
@@ -0,0 +1,28 @@
+#ifndef __ASM_MACH_PAGE_H
+#define __ASM_MACH_PAGE_H
+
+#ifndef __ASSEMBLY__
+
+#define alloc_zeroed_user_highpage(vma, vaddr) alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vma, vaddr)
+#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
+
+#ifdef CONFIG_X86_PAE
+#define pmd_val(x) ((x).pmd)
+#define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
+#define __pmd(x) ((pmd_t) { (x) } )
+#else
+#define pte_val(x) ((x).pte_low)
+#endif
+#define pte_val_ma pte_val
+
+#define pgd_val(x) ((x).pgd)
+
+#define __pte(x) ((pte_t) { (x) } )
+#define __pte_ma __pte
+#define __pgd(x) ((pgd_t) { (x) } )
+
+#endif /* __ASSEMBLY__ */
+
+#define MAXMEM (-__PAGE_OFFSET-__VMALLOC_RESERVE)
+
+#endif /* __ASM_MACH_PAGE_H */
Index: 2005-12-16/include/asm-i386/mach-xen/asm/page.h
===================================================================
--- 2005-12-16.orig/include/asm-i386/mach-xen/asm/page.h	2005-12-21 14:18:28.784963104 +0100
+++ /dev/null	1970-01-01 00:00:00.000000000 +0000
@@ -1,272 +0,0 @@
-#ifndef _I386_PAGE_H
-#define _I386_PAGE_H
-
-/* PAGE_SHIFT determines the page size */
-#define PAGE_SHIFT 12
-#define PAGE_SIZE (1UL << PAGE_SHIFT)
-#define PAGE_MASK (~(PAGE_SIZE-1))
-
-#define LARGE_PAGE_MASK (~(LARGE_PAGE_SIZE-1))
-#define LARGE_PAGE_SIZE (1UL << PMD_SHIFT)
-
-#ifdef __KERNEL__
-#ifndef __ASSEMBLY__
-
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-
-#define arch_free_page(_page,_order) \
-({ int foreign = PageForeign(_page); \
-   if (foreign) \
-           (PageForeignDestructor(_page))(_page); \
-   foreign; \
-})
-#define HAVE_ARCH_FREE_PAGE
-
-#ifdef CONFIG_XEN_SCRUB_PAGES
-#define scrub_pages(_p,_n) memset((void *)(_p), 0, (_n) << PAGE_SHIFT)
-#else
-#define scrub_pages(_p,_n) ((void)0)
-#endif
-
-#ifdef CONFIG_X86_USE_3DNOW
-
-#include
-
-#define clear_page(page) mmx_clear_page((void *)(page))
-#define copy_page(to,from) mmx_copy_page(to,from)
-
-#else
-
-#define alloc_zeroed_user_highpage(vma, vaddr) alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vma, vaddr)
-#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
-
-/*
- * On older X86 processors it's not a win to use MMX here it seems.
- * Maybe the K6-III ?
- */
-
-#define clear_page(page) memset((void *)(page), 0, PAGE_SIZE)
-#define copy_page(to,from) memcpy((void *)(to), (void *)(from), PAGE_SIZE)
-
-#endif
-
-#define clear_user_page(page, vaddr, pg) clear_page(page)
-#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
-
-/**** MACHINE <-> PHYSICAL CONVERSION MACROS ****/
-#define INVALID_P2M_ENTRY (~0UL)
-#define FOREIGN_FRAME(m) ((m) | (1UL<<31))
-extern unsigned long *phys_to_machine_mapping;
-#define pfn_to_mfn(pfn) \
-(phys_to_machine_mapping[(unsigned int)(pfn)] & ~(1UL<<31))
-#define phys_to_machine_mapping_valid(pfn) \
-	(phys_to_machine_mapping[pfn] != INVALID_P2M_ENTRY)
-static inline unsigned long mfn_to_pfn(unsigned long mfn)
-{
-	unsigned long pfn;
-
-	/*
-	 * The array access can fail (e.g., device space beyond end of RAM).
-	 * In such cases it doesn't matter what we return (we return garbage),
-	 * but we must handle the fault without crashing!
-	 */
-	asm (
-		"1: movl %1,%0\n"
-		"2:\n"
-		".section __ex_table,\"a\"\n"
-		" .align 4\n"
-		" .long 1b,2b\n"
-		".previous"
-		: "=r" (pfn) : "m" (machine_to_phys_mapping[mfn]) );
-
-	return pfn;
-}
-
-static inline void set_phys_to_machine(unsigned long pfn, unsigned long mfn)
-{
-	phys_to_machine_mapping[pfn] = mfn;
-}
-
-/* Definitions for machine and pseudophysical addresses. */
-#ifdef CONFIG_X86_PAE
-typedef unsigned long long paddr_t;
-typedef unsigned long long maddr_t;
-#else
-typedef unsigned long paddr_t;
-typedef unsigned long maddr_t;
-#endif
-
-static inline maddr_t phys_to_machine(paddr_t phys)
-{
-	maddr_t machine = pfn_to_mfn(phys >> PAGE_SHIFT);
-	machine = (machine << PAGE_SHIFT) | (phys & ~PAGE_MASK);
-	return machine;
-}
-static inline paddr_t machine_to_phys(maddr_t machine)
-{
-	paddr_t phys = mfn_to_pfn(machine >> PAGE_SHIFT);
-	phys = (phys << PAGE_SHIFT) | (machine & ~PAGE_MASK);
-	return phys;
-}
-
-/*
- * These are used to make use of C type-checking..
- */
-extern int nx_enabled;
-#ifdef CONFIG_X86_PAE
-extern unsigned long long __supported_pte_mask;
-typedef struct { unsigned long pte_low, pte_high; } pte_t;
-typedef struct { unsigned long long pmd; } pmd_t;
-typedef struct { unsigned long long pgd; } pgd_t;
-typedef struct { unsigned long long pgprot; } pgprot_t;
-#define __pte(x) ({ unsigned long long _x = (x); \
-    if (_x & 1) _x = phys_to_machine(_x); \
-    ((pte_t) {(unsigned long)(_x), (unsigned long)(_x>>32)}); })
-#define __pgd(x) ({ unsigned long long _x = (x); \
-    (((_x)&1) ? ((pgd_t) {phys_to_machine(_x)}) : ((pgd_t) {(_x)})); })
-#define __pmd(x) ({ unsigned long long _x = (x); \
-    (((_x)&1) ? ((pmd_t) {phys_to_machine(_x)}) : ((pmd_t) {(_x)})); })
-static inline unsigned long long pte_val(pte_t x)
-{
-	unsigned long long ret;
-
-	if (x.pte_low) {
-		ret = x.pte_low | (unsigned long long)x.pte_high << 32;
-		ret = machine_to_phys(ret) | 1;
-	} else {
-		ret = 0;
-	}
-	return ret;
-}
-static inline unsigned long long pmd_val(pmd_t x)
-{
-	unsigned long long ret = x.pmd;
-	if (ret) ret = machine_to_phys(ret) | 1;
-	return ret;
-}
-static inline unsigned long long pgd_val(pgd_t x)
-{
-	unsigned long long ret = x.pgd;
-	if (ret) ret = machine_to_phys(ret) | 1;
-	return ret;
-}
-static inline unsigned long long pte_val_ma(pte_t x)
-{
-	return (unsigned long long)x.pte_high << 32 | x.pte_low;
-}
-#define HPAGE_SHIFT 21
-#else
-typedef struct { unsigned long pte_low; } pte_t;
-typedef struct { unsigned long pgd; } pgd_t;
-typedef struct { unsigned long pgprot; } pgprot_t;
-#define boot_pte_t pte_t /* or would you rather have a typedef */
-#define pte_val(x) (((x).pte_low & 1) ? machine_to_phys((x).pte_low) : \
-		    (x).pte_low)
-#define pte_val_ma(x) ((x).pte_low)
-#define __pte(x) ({ unsigned long _x = (x); \
-    (((_x)&1) ? ((pte_t) {phys_to_machine(_x)}) : ((pte_t) {(_x)})); })
-#define __pgd(x) ({ unsigned long _x = (x); \
-    (((_x)&1) ? ((pgd_t) {phys_to_machine(_x)}) : ((pgd_t) {(_x)})); })
-static inline unsigned long pgd_val(pgd_t x)
-{
-	unsigned long ret = x.pgd;
-	if (ret) ret = machine_to_phys(ret) | 1;
-	return ret;
-}
-#define HPAGE_SHIFT 22
-#endif
-#define PTE_MASK PAGE_MASK
-
-#ifdef CONFIG_HUGETLB_PAGE
-#define HPAGE_SIZE ((1UL) << HPAGE_SHIFT)
-#define HPAGE_MASK (~(HPAGE_SIZE - 1))
-#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
-#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
-#endif
-
-#define pgprot_val(x) ((x).pgprot)
-
-#define __pte_ma(x) ((pte_t) { (x) } )
-#define __pgprot(x) ((pgprot_t) { (x) } )
-
-#endif /* !__ASSEMBLY__ */
-
-/* to align the pointer to the (next) page boundary */
-#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK)
-
-/*
- * This handles the memory map.. We could make this a config
- * option, but too many people screw it up, and too few need
- * it.
- *
- * A __PAGE_OFFSET of 0xC0000000 means that the kernel has
- * a virtual address space of one gigabyte, which limits the
- * amount of physical memory you can use to about 950MB.
- *
- * If you want more physical memory than this then see the CONFIG_HIGHMEM4G
- * and CONFIG_HIGHMEM64G options in the kernel configuration.
- */
-
-#ifndef __ASSEMBLY__
-
-/*
- * This much address space is reserved for vmalloc() and iomap()
- * as well as fixmap mappings.
- */
-extern unsigned int __VMALLOC_RESERVE;
-
-extern int sysctl_legacy_va_layout;
-
-extern int page_is_ram(unsigned long pagenr);
-
-#endif /* __ASSEMBLY__ */
-
-#ifdef __ASSEMBLY__
-#define __PAGE_OFFSET (0xC0000000)
-#define __PHYSICAL_START CONFIG_PHYSICAL_START
-#else
-#define __PAGE_OFFSET (0xC0000000UL)
-#define __PHYSICAL_START ((unsigned long)CONFIG_PHYSICAL_START)
-#endif
-#define __KERNEL_START (__PAGE_OFFSET + __PHYSICAL_START)
-
-#undef LOAD_OFFSET
-#define LOAD_OFFSET 0
-
-
-#define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET)
-#define VMALLOC_RESERVE ((unsigned long)__VMALLOC_RESERVE)
-#define MAXMEM (HYPERVISOR_VIRT_START-__PAGE_OFFSET-__VMALLOC_RESERVE)
-#define __pa(x) ((unsigned long)(x)-PAGE_OFFSET)
-#define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
-#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
-#ifdef CONFIG_FLATMEM
-#define pfn_to_page(pfn) (mem_map + (pfn))
-#define page_to_pfn(page) ((unsigned long)((page) - mem_map))
-#define pfn_valid(pfn) ((pfn) < max_mapnr)
-#endif /* CONFIG_FLATMEM */
-#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
-
-#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
-
-#define VM_DATA_DEFAULT_FLAGS \
-	(VM_READ | VM_WRITE | \
-	((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0 ) | \
-	 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
-
-/* VIRT <-> MACHINE conversion */
-#define virt_to_machine(v) (phys_to_machine(__pa(v)))
-#define virt_to_mfn(v) (pfn_to_mfn(__pa(v) >> PAGE_SHIFT))
-#define mfn_to_virt(m) (__va(mfn_to_pfn(m) << PAGE_SHIFT))
-
-#endif /* __KERNEL__ */
-
-#include
-
-#endif /* _I386_PAGE_H */
Index: 2005-12-16/include/asm-i386/mach-xen/mach_page.h
===================================================================
--- /dev/null	1970-01-01 00:00:00.000000000 +0000
+++ 2005-12-16/include/asm-i386/mach-xen/mach_page.h	2005-12-21 15:04:33.097723688 +0100
@@ -0,0 +1,94 @@
+#ifndef __ASM_MACH_PAGE_H
+#define __ASM_MACH_PAGE_H
+
+#ifndef __ASSEMBLY__
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#define arch_free_page(_page,_order) \
+({ int foreign = PageForeign(_page); \
+   if (foreign) \
+           (PageForeignDestructor(_page))(_page); \
+   foreign; \
+})
+#define HAVE_ARCH_FREE_PAGE
+
+#ifdef CONFIG_XEN_SCRUB_PAGES
+#define scrub_pages(_p,_n) memset((void *)(_p), 0, (_n) << PAGE_SHIFT)
+#else
+#define scrub_pages(_p,_n) ((void)0)
+#endif
+
+#ifndef CONFIG_X86_USE_3DNOW
+#define alloc_zeroed_user_highpage(vma, vaddr) alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vma, vaddr)
+#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
+#endif
+
+#ifdef CONFIG_X86_PAE
+#define __pte(x) ({ unsigned long long _x = (x); \
+    if (_x & 1) _x = phys_to_machine(_x); \
+    ((pte_t) {(unsigned long)(_x), (unsigned long)(_x>>32)}); })
+#define __pgd(x) ({ unsigned long long _x = (x); \
+    (((_x)&1) ? ((pgd_t) {phys_to_machine(_x)}) : ((pgd_t) {(_x)})); })
+#define __pmd(x) ({ unsigned long long _x = (x); \
+    (((_x)&1) ? ((pmd_t) {phys_to_machine(_x)}) : ((pmd_t) {(_x)})); })
+static inline unsigned long long pte_val(pte_t x)
+{
+	unsigned long long ret;
+
+	if (x.pte_low) {
+		ret = x.pte_low | (unsigned long long)x.pte_high << 32;
+		ret = machine_to_phys(ret) | 1;
+	} else {
+		ret = 0;
+	}
+	return ret;
+}
+static inline unsigned long long pmd_val(pmd_t x)
+{
+	unsigned long long ret = x.pmd;
+	if (ret) ret = machine_to_phys(ret) | 1;
+	return ret;
+}
+static inline unsigned long long pgd_val(pgd_t x)
+{
+	unsigned long long ret = x.pgd;
+	if (ret) ret = machine_to_phys(ret) | 1;
+	return ret;
+}
+static inline unsigned long long pte_val_ma(pte_t x)
+{
+	return (unsigned long long)x.pte_high << 32 | x.pte_low;
+}
+#else
+#define pte_val(x) (((x).pte_low & 1) ? machine_to_phys((x).pte_low) : \
+		    (x).pte_low)
+#define pte_val_ma(x) ((x).pte_low)
+#define __pte(x) ({ unsigned long _x = (x); \
+    (((_x)&1) ? ((pte_t) {phys_to_machine(_x)}) : ((pte_t) {(_x)})); })
+#define __pgd(x) ({ unsigned long _x = (x); \
+    (((_x)&1) ? ((pgd_t) {phys_to_machine(_x)}) : ((pgd_t) {(_x)})); })
+static inline unsigned long pgd_val(pgd_t x)
+{
+	unsigned long ret = x.pgd;
+	if (ret) ret = machine_to_phys(ret) | 1;
+	return ret;
+}
+#endif
+
+#define __pte_ma(x) ((pte_t) { (x) } )
+
+#endif /* __ASSEMBLY__ */
+
+#undef LOAD_OFFSET
+#define LOAD_OFFSET 0
+
+#define MAXMEM (HYPERVISOR_VIRT_START-__PAGE_OFFSET-__VMALLOC_RESERVE)
+
+#endif /* __ASM_MACH_PAGE_H */
Index: 2005-12-16/include/asm-i386/mach-xen/mach_xlat.h
===================================================================
--- 2005-12-16.orig/include/asm-i386/mach-xen/mach_xlat.h	2005-12-21 14:18:28.785962952 +0100
+++ 2005-12-16/include/asm-i386/mach-xen/mach_xlat.h	2005-12-21 14:18:13.000000000 +0100
@@ -47,4 +47,65 @@
 #define virt_to_bus(_x) phys_to_machine(__pa(_x))
 #define bus_to_virt(_x) __va(machine_to_phys(_x))
 
+/**** MACHINE <-> PHYSICAL CONVERSION MACROS ****/
+#define INVALID_P2M_ENTRY (~0UL)
+#define FOREIGN_FRAME(m) ((m) | (1UL<<31))
+extern unsigned long *phys_to_machine_mapping;
+#define pfn_to_mfn(pfn) \
+(phys_to_machine_mapping[(unsigned int)(pfn)] & ~(1UL<<31))
+#define phys_to_machine_mapping_valid(pfn) \
+	(phys_to_machine_mapping[pfn] != INVALID_P2M_ENTRY)
+static inline unsigned long mfn_to_pfn(unsigned long mfn)
+{
+	unsigned long pfn;
+
+	/*
+	 * The array access can fail (e.g., device space beyond end of RAM).
+	 * In such cases it doesn't matter what we return (we return garbage),
+	 * but we must handle the fault without crashing!
+	 */
+	asm (
+		"1: movl %1,%0\n"
+		"2:\n"
+		".section __ex_table,\"a\"\n"
+		" .align 4\n"
+		" .long 1b,2b\n"
+		".previous"
+		: "=r" (pfn) : "m" (machine_to_phys_mapping[mfn]) );
+
+	return pfn;
+}
+
+static inline void set_phys_to_machine(unsigned long pfn, unsigned long mfn)
+{
+	phys_to_machine_mapping[pfn] = mfn;
+}
+
+/* Definitions for machine and pseudophysical addresses. */
+#ifdef CONFIG_X86_PAE
+typedef unsigned long long paddr_t;
+typedef unsigned long long maddr_t;
+#else
+typedef unsigned long paddr_t;
+typedef unsigned long maddr_t;
+#endif
+
+static inline maddr_t phys_to_machine(paddr_t phys)
+{
+	maddr_t machine = pfn_to_mfn(phys >> PAGE_SHIFT);
+	machine = (machine << PAGE_SHIFT) | (phys & ~PAGE_MASK);
+	return machine;
+}
+static inline paddr_t machine_to_phys(maddr_t machine)
+{
+	paddr_t phys = mfn_to_pfn(machine >> PAGE_SHIFT);
+	phys = (phys << PAGE_SHIFT) | (machine & ~PAGE_MASK);
+	return phys;
+}
+
+/* VIRT <-> MACHINE conversion */
+#define virt_to_machine(v) (phys_to_machine(__pa(v)))
+#define virt_to_mfn(v) (pfn_to_mfn(__pa(v) >> PAGE_SHIFT))
+#define mfn_to_virt(m) (__va(mfn_to_pfn(m) << PAGE_SHIFT))
+
 #endif /* __ASM_MACH_XLAT_H */
Index: 2005-12-16/include/asm-i386/page.h
===================================================================
--- 2005-12-16.orig/include/asm-i386/page.h	2005-12-21 14:18:28.786962800 +0100
+++ 2005-12-16/include/asm-i386/page.h	2005-12-21 15:03:21.302638200 +0100
@@ -36,9 +36,6 @@
 #define clear_user_page(page, vaddr, pg) clear_page(page)
 #define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
 
-#define alloc_zeroed_user_highpage(vma, vaddr) alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vma, vaddr)
-#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
-
 /*
  * These are used to make use of C type-checking..
  */
@@ -49,16 +46,12 @@ typedef struct { unsigned long pte_low,
 typedef struct { unsigned long long pmd; } pmd_t;
 typedef struct { unsigned long long pgd; } pgd_t;
 typedef struct { unsigned long long pgprot; } pgprot_t;
-#define pmd_val(x) ((x).pmd)
-#define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
-#define __pmd(x) ((pmd_t) { (x) } )
 #define HPAGE_SHIFT 21
 #else
 typedef struct { unsigned long pte_low; } pte_t;
 typedef struct { unsigned long pgd; } pgd_t;
 typedef struct { unsigned long pgprot; } pgprot_t;
 #define boot_pte_t pte_t /* or would you rather have a typedef */
-#define pte_val(x) ((x).pte_low)
 #define HPAGE_SHIFT 22
 #endif
 #define PTE_MASK PAGE_MASK
@@ -70,15 +63,13 @@ typedef struct { unsigned long pgprot; }
 #define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
 #endif
 
-#define pgd_val(x) ((x).pgd)
 #define pgprot_val(x) ((x).pgprot)
-
-#define __pte(x) ((pte_t) { (x) } )
-#define __pgd(x) ((pgd_t) { (x) } )
 #define __pgprot(x) ((pgprot_t) { (x) } )
 
 #endif /* !__ASSEMBLY__ */
 
+#include
+
 /* to align the pointer to the (next) page boundary */
 #define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK)
 
@@ -121,7 +112,6 @@ extern int page_is_ram(unsigned long pag
 
 #define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET)
 #define VMALLOC_RESERVE ((unsigned long)__VMALLOC_RESERVE)
-#define MAXMEM (-__PAGE_OFFSET-__VMALLOC_RESERVE)
 #define __pa(x) ((unsigned long)(x)-PAGE_OFFSET)
 #define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
 #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
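For readers who want to see the pseudo-physical <-> machine translation that the patch consolidates into mach_xlat.h in isolation, here is a minimal, self-contained C sketch. It is not part of the patch: the tiny p2m table and its contents are made up for illustration, and the out-of-range/foreign-frame handling of the real kernel code is omitted.

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

/* Illustrative stand-in for the kernel's phys_to_machine_mapping[] table. */
static unsigned long p2m[4] = { 7, 3, 9, 2 };   /* pfn -> mfn */

static unsigned long pfn_to_mfn(unsigned long pfn)
{
	return p2m[pfn];
}

static unsigned long phys_to_machine(unsigned long phys)
{
	unsigned long machine = pfn_to_mfn(phys >> PAGE_SHIFT);

	/* Swap only the frame number; keep the offset within the page. */
	return (machine << PAGE_SHIFT) | (phys & ~PAGE_MASK);
}

int main(void)
{
	unsigned long phys = (2UL << PAGE_SHIFT) | 0x123;

	printf("pseudo-phys %#lx -> machine %#lx\n", phys, phys_to_machine(phys));
	return 0;
}

Running this prints "pseudo-phys 0x2123 -> machine 0x9123": only the frame number is remapped while the in-page offset is preserved, which is the same composition phys_to_machine()/machine_to_phys() perform above using the kernel's phys_to_machine_mapping[] and machine_to_phys_mapping[] tables.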