diff -rN -u -p old-xen-64-4/linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/pci-dma.c new-xen-64-4/linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/pci-dma.c
--- old-xen-64-4/linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/pci-dma.c	2005-05-24 17:34:54.000000000 +0000
+++ new-xen-64-4/linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/pci-dma.c	2005-06-08 03:17:21.000000000 +0000
@@ -78,7 +78,7 @@ xen_contig_memory(unsigned long vstart,
 	pud_t *pud;
 	pmd_t *pmd;
 	pte_t *pte;
-	unsigned long pfn, i, flags;
+	unsigned long mfn, i, flags;
 
 	scrub_pages(vstart, 1 << order);
 
@@ -90,28 +90,27 @@ xen_contig_memory(unsigned long vstart,
 		pud = pud_offset(pgd, (vstart + (i*PAGE_SIZE)));
 		pmd = pmd_offset(pud, (vstart + (i*PAGE_SIZE)));
 		pte = pte_offset_kernel(pmd, (vstart + (i*PAGE_SIZE)));
-		pfn = pte->pte >> PAGE_SHIFT;
+		mfn = pte_mfn(*pte);
 		xen_l1_entry_update(pte, 0);
 		phys_to_machine_mapping[(__pa(vstart)>>PAGE_SHIFT)+i] =
 			(u32)INVALID_P2M_ENTRY;
 		if (HYPERVISOR_dom_mem_op(MEMOP_decrease_reservation,
-					  &pfn, 1, 0) != 1) BUG();
+					  &mfn, 1, 0) != 1) BUG();
 	}
 	/* 2. Get a new contiguous memory extent. */
 	if (HYPERVISOR_dom_mem_op(MEMOP_increase_reservation,
-				  &pfn, 1, order) != 1) BUG();
+				  &mfn, 1, order) != 1) BUG();
 	/* 3. Map the new extent in place of old pages. */
 	for (i = 0; i < (1<<order); i++) {
 		xen_machphys_update(
-			pfn+i, (__pa(vstart)>>PAGE_SHIFT)+i);
+			mfn+i, (__pa(vstart)>>PAGE_SHIFT)+i);
 		phys_to_machine_mapping[(__pa(vstart)>>PAGE_SHIFT)+i] =
-			pfn+i;
+			mfn+i;
 	}
 	/* Flush updates through and flush the TLB. */
 	xen_tlb_flush();
diff -rN -u -p old-xen-64-4/linux-2.6.11-xen-sparse/arch/xen/x86_64/mm/hypervisor.c new-xen-64-4/linux-2.6.11-xen-sparse/arch/xen/x86_64/mm/hypervisor.c
--- old-xen-64-4/linux-2.6.11-xen-sparse/arch/xen/x86_64/mm/hypervisor.c	2005-06-06 16:39:54.000000000 +0000
+++ new-xen-64-4/linux-2.6.11-xen-sparse/arch/xen/x86_64/mm/hypervisor.c	2005-06-07 20:35:16.000000000 +0000
@@ -256,7 +276,7 @@ unsigned long allocate_empty_lowmem_regi
 		pud = pud_offset(pgd, (vstart + (i*PAGE_SIZE)));
 		pmd = pmd_offset(pud, (vstart + (i*PAGE_SIZE)));
 		pte = pte_offset_kernel(pmd, (vstart + (i*PAGE_SIZE)));
-		pfn_array[i] = pte->pte >> PAGE_SHIFT;
+		pfn_array[i] = pte_mfn(*pte);
 		xen_l1_entry_update(pte, 0);
 		phys_to_machine_mapping[(__pa(vstart)>>PAGE_SHIFT)+i] =
 			(u32)INVALID_P2M_ENTRY;
diff -rN -u -p old-xen-64-4/linux-2.6.11-xen-sparse/arch/xen/x86_64/mm/init.c new-xen-64-4/linux-2.6.11-xen-sparse/arch/xen/x86_64/mm/init.c
--- old-xen-64-4/linux-2.6.11-xen-sparse/arch/xen/x86_64/mm/init.c	2005-06-02 23:09:31.000000000 +0000
+++ new-xen-64-4/linux-2.6.11-xen-sparse/arch/xen/x86_64/mm/init.c	2005-06-07 15:39:50.000000000 +0000
@@ -395,7 +395,7 @@ unsigned long get_machine_pfn(unsigned l
 	pmd_t* pmd = pmd_offset(pud, addr);
 	pte_t *pte = pte_offset_kernel(pmd, addr);
 
-	return (pte->pte >> PAGE_SHIFT);
+	return pte_mfn(*pte);
 }
 
 #define ALIGN_TO_4K __attribute__((section(".data.page_aligned")))
diff -rN -u -p old-xen-64-4/linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/pgtable.h new-xen-64-4/linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/pgtable.h
--- old-xen-64-4/linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/pgtable.h	2005-05-28 09:20:36.000000000 +0000
+++ new-xen-64-4/linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/pgtable.h	2005-06-08 02:48:53.000000000 +0000
@@ -81,7 +81,8 @@ extern inline int pud_present(pud_t pud)
 
 #define set_pte(pteptr, pteval) xen_l1_entry_update(pteptr, (pteval).pte)
 #else
-#define set_pte(pteptr, pteval) xen_l1_entry_update(pteptr, (pteval.pte))
+#define set_pte(pteptr, pteval) xen_l1_entry_update(pteptr, \
+	(pteval).pte & __supported_pte_mask)
 #if 0
 static inline void set_pte(pte_t *dst, pte_t val)
 {
@@ -90,9 +91,23 @@ static inline void set_pte(pte_t *dst, p
 #endif
 #endif
 
-#define set_pmd(pmdptr, pmdval) xen_l2_entry_update(pmdptr, (pmdval))
-#define set_pud(pudptr, pudval) xen_l3_entry_update(pudptr, (pudval))
-#define set_pgd(pgdptr, pgdval) xen_l4_entry_update(pgdptr, (pgdval))
+static inline void set_pmd(pmd_t *ptr, pmd_t val)
+{
+	val.pmd &= __supported_pte_mask;
+	xen_l2_entry_update(ptr, val);
+}
+
+static inline void set_pud(pud_t *ptr, pud_t val)
+{
+	val.pud &= __supported_pte_mask;
+	xen_l3_entry_update(ptr, val);
+}
+
+static inline void set_pgd(pgd_t *ptr, pgd_t val)
+{
+	val.pgd &= __supported_pte_mask;
+	xen_l4_entry_update(ptr, val);
+}
 
 extern inline void pud_clear (pud_t * pud)
 {
@@ -277,9 +292,10 @@ static inline unsigned long pud_bad(pud_
  */
 #define INVALID_P2M_ENTRY	(~0UL)
 #define FOREIGN_FRAME(_m)	((_m) | (1UL<<((sizeof(unsigned long)*8)-1)))
+#define pte_mfn(_pte) (((_pte).pte & PTE_MASK) >> PAGE_SHIFT)
 #define pte_pfn(_pte) \
 ({ \
-	unsigned long mfn = (_pte).pte >> PAGE_SHIFT; \
+	unsigned long mfn = pte_mfn(_pte); \
 	unsigned pfn = mfn_to_pfn(mfn); \
 	if ((pfn >= max_mapnr) || (pfn_to_mfn(pfn) != mfn)) \
 		pfn = max_mapnr; /* special: force !pfn_valid() */ \