[Xen-merge] [PATCH 13/23] pgtable subarch headers

To: xen-merge@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-merge] [PATCH 13/23] pgtable subarch headers
From: Chris Wright <chrisw@xxxxxxxx>
Date: Mon, 08 Aug 2005 00:02:49 -0700
Delivery-date: Mon, 08 Aug 2005 07:07:13 +0000
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-merge-request@lists.xensource.com?subject=help>
List-id: xen-merge <xen-merge.lists.xensource.com>
List-post: <mailto:xen-merge@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-merge>, <mailto:xen-merge-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-merge>, <mailto:xen-merge-request@lists.xensource.com?subject=unsubscribe>
References: <20050808070236.231405000@xxxxxxxxxxxxxxxxxxxxx>
Sender: xen-merge-bounces@xxxxxxxxxxxxxxxxxxx
--- linux-2.6.12-xen0-arch.orig/include/asm-i386/pgtable.h
+++ linux-2.6.12-xen0-arch/include/asm-i386/pgtable.h
@@ -200,15 +200,6 @@ extern unsigned long long __PAGE_KERNEL,
 /* The boot page tables (all created as a single array) */
 extern unsigned long pg0[];
 
-#define pte_present(x) ((x).pte_low & (_PAGE_PRESENT | _PAGE_PROTNONE))
-#define pte_clear(mm,addr,xp)  do { set_pte_at(mm, addr, xp, __pte(0)); } while (0)
-
-#define pmd_none(x)    (!pmd_val(x))
-#define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT)
-#define pmd_clear(xp)  do { set_pmd(xp, __pmd(0)); } while (0)
-#define pmd_bad(x)      ((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
-
-
 #define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
 
 /*
@@ -237,6 +228,17 @@ static inline pte_t pte_mkdirty(pte_t pt
 static inline pte_t pte_mkyoung(pte_t pte)     { (pte).pte_low |= _PAGE_ACCESSED; return pte; }
 static inline pte_t pte_mkwrite(pte_t pte)     { (pte).pte_low |= _PAGE_RW; return pte; }
 
+#include <mach_pgtable.h>
+
+#define pte_present(x) ((x).pte_low & (_PAGE_PRESENT | _PAGE_PROTNONE))
+#define pte_clear(mm,addr,xp)  do { set_pte_at(mm, addr, xp, __pte(0)); } while (0)
+
+#define pmd_none(x)    (!pmd_val(x))
+#define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT)
+#define pmd_clear(xp)  do { set_pmd(xp, __pmd(0)); } while (0)
+#define pmd_bad(x)     mach_pmd_bad(x)
+
+
 #ifdef CONFIG_X86_PAE
 # include <asm/pgtable-3level.h>
 #else
@@ -257,10 +259,7 @@ static inline int ptep_test_and_clear_yo
        return test_and_clear_bit(_PAGE_BIT_ACCESSED, &ptep->pte_low);
 }
 
-static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
-{
-       clear_bit(_PAGE_BIT_RW, &ptep->pte_low);
-}
+#define ptep_set_wrprotect(mm,addr,ptep) mach_ptep_set_wrprotect(mm,addr,ptep)
 
 /*
  * Macro to mark a page protection value as "uncacheable".  On processors which do not support
@@ -363,9 +362,9 @@ extern void noexec_setup(const char *str
 
 #if defined(CONFIG_HIGHPTE)
 #define pte_offset_map(dir, address) \
-       ((pte_t *)kmap_atomic(pmd_page(*(dir)),KM_PTE0) + pte_index(address))
+       ((pte_t *)kmap_atomic_pte(pmd_page(*(dir)),KM_PTE0) + pte_index(address))
 #define pte_offset_map_nested(dir, address) \
-       ((pte_t *)kmap_atomic(pmd_page(*(dir)),KM_PTE1) + pte_index(address))
+       ((pte_t *)kmap_atomic_pte(pmd_page(*(dir)),KM_PTE1) + pte_index(address))
 #define pte_unmap(pte) kunmap_atomic(pte, KM_PTE0)
 #define pte_unmap_nested(pte) kunmap_atomic(pte, KM_PTE1)
 #else
@@ -403,10 +402,10 @@ extern void noexec_setup(const char *str
 #endif /* !CONFIG_DISCONTIGMEM */
 
 #define io_remap_page_range(vma, vaddr, paddr, size, prot)             \
-               remap_pfn_range(vma, vaddr, (paddr) >> PAGE_SHIFT, size, prot)
+               mach_io_remap_page_range(vma, vaddr, paddr, size, prot)
 
 #define io_remap_pfn_range(vma, vaddr, pfn, size, prot)                \
-               remap_pfn_range(vma, vaddr, pfn, size, prot)
+               mach_io_remap_pfn_range(vma, vaddr, pfn, size, prot)
 
 #define MK_IOSPACE_PFN(space, pfn)     (pfn)
 #define GET_IOSPACE(pfn)               0
--- linux-2.6.12-xen0-arch.orig/include/asm-i386/pgtable-2level.h
+++ linux-2.6.12-xen0-arch/include/asm-i386/pgtable-2level.h
@@ -2,6 +2,7 @@
 #define _I386_PGTABLE_2LEVEL_H
 
 #include <asm-generic/pgtable-nopmd.h>
+#include <mach_pgtable-2level.h>
 
 #define pte_ERROR(e) \
        printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, (e).pte_low)
@@ -16,13 +17,13 @@
 #define set_pte(pteptr, pteval) (*(pteptr) = pteval)
 #define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
 #define set_pte_atomic(pteptr, pteval) set_pte(pteptr,pteval)
-#define set_pmd(pmdptr, pmdval) (*(pmdptr) = (pmdval))
+#define set_pmd(pmdptr, pmdval) mach_set_pmd(pmdptr, pmdval)
 
-#define ptep_get_and_clear(mm,addr,xp) __pte(xchg(&(xp)->pte_low, 0))
+#define ptep_get_and_clear(mm,addr,xp) mach_ptep_get_and_clear(mm,addr,xp)
 #define pte_same(a, b)         ((a).pte_low == (b).pte_low)
-#define pte_page(x)            pfn_to_page(pte_pfn(x))
+#define pte_page(x)            mach_pte_page(x)
 #define pte_none(x)            (!(x).pte_low)
-#define pte_pfn(x)             ((unsigned long)(((x).pte_low >> PAGE_SHIFT)))
+#define pte_pfn(x)             mach_pte_pfn(x)
 #define pfn_pte(pfn, prot)     __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
 #define pfn_pmd(pfn, prot)     __pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
 
--- linux-2.6.12-xen0-arch.orig/include/asm-i386/pgtable-3level-defs.h
+++ linux-2.6.12-xen0-arch/include/asm-i386/pgtable-3level-defs.h
@@ -1,7 +1,7 @@
 #ifndef _I386_PGTABLE_3LEVEL_DEFS_H
 #define _I386_PGTABLE_3LEVEL_DEFS_H
 
-#define HAVE_SHARED_KERNEL_PMD 1
+#include <mach_pgtable-3level-defs.h>
 
 /*
  * PGDIR_SHIFT determines what a top-level page table entry can map
--- /dev/null
+++ linux-2.6.12-xen0-arch/include/asm-i386/mach-default/mach_pgtable.h
@@ -0,0 +1,18 @@
+#ifndef __ASM_MACH_PGTABLE_H
+#define __ASM_MACH_PGTABLE_H
+
+#define mach_pmd_present(x)    (pmd_val(x) & _PAGE_PRESENT)
+#define mach_pmd_bad(x)                ((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
+
+static inline void mach_ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+{
+       clear_bit(_PAGE_BIT_RW, &ptep->pte_low);
+}
+
+#define mach_io_remap_page_range(vma, vaddr, paddr, size, prot)                \
+               remap_pfn_range(vma, vaddr, (paddr) >> PAGE_SHIFT, size, prot)
+
+#define mach_io_remap_pfn_range(vma, vaddr, pfn, size, prot)           \
+               remap_pfn_range(vma, vaddr, pfn, size, prot)
+
+#endif
--- /dev/null
+++ linux-2.6.12-xen0-arch/include/asm-i386/mach-default/mach_pgtable-2level.h
@@ -0,0 +1,10 @@
+#ifndef _ASM_MACH_PGTABLE_2LEVEL_H
+#define _ASM_MACH_PGTABLE_2LEVEL_H
+
+#define mach_set_pmd(pmdptr, pmdval) (*(pmdptr) = (pmdval))
+
+#define mach_ptep_get_and_clear(mm,addr,xp)    __pte(xchg(&(xp)->pte_low, 0))
+#define mach_pte_page(x)               pfn_to_page(pte_pfn(x))
+#define mach_pte_pfn(x)                ((unsigned long)(((x).pte_low >> PAGE_SHIFT)))
+
+#endif
--- /dev/null
+++ linux-2.6.12-xen0-arch/include/asm-i386/mach-default/mach_pgtable-3level-defs.h
@@ -0,0 +1,6 @@
+#ifndef _ASM_MACH_PGTABLE_3LEVEL_DEFS_H
+#define _ASM_MACH_PGTABLE_3LEVEL_DEFS_H
+
+#define HAVE_SHARED_KERNEL_PMD 1
+
+#endif
--- linux-2.6.12-xen0/include/asm-i386/mach-xen/mach_pgtable-2level-defs.h      1969-12-31 16:00:00.000000000 -0800
+++ linux-2.6.12-xen0-arch/include/asm-i386/mach-xen/mach_pgtable-2level-defs.h 2005-08-02 20:25:36.000000000 -0700
@@ -0,0 +1,6 @@
+#ifndef _ASM_MACH_PGTABLE_2LEVEL_DEFS_H
+#define _ASM_MACH_PGTABLE_2LEVEL_DEFS_H
+
+#define PTRS_PER_PGD_NO_HV     (HYPERVISOR_VIRT_START >> PGDIR_SHIFT)
+
+#endif
diff -Naur -x vmlinuz -X linux-2.6.12-xen0/Documentation/dontdiff linux-2.6.12-xen0/include/asm-i386/mach-xen/mach_pgtable-2level.h linux-2.6.12-xen0-arch/include/asm-i386/mach-xen/mach_pgtable-2level.h
--- linux-2.6.12-xen0/include/asm-i386/mach-xen/mach_pgtable-2level.h   1969-12-31 16:00:00.000000000 -0800
+++ linux-2.6.12-xen0-arch/include/asm-i386/mach-xen/mach_pgtable-2level.h      2005-08-02 21:01:23.000000000 -0700
@@ -0,0 +1,49 @@
+#ifndef _ASM_MACH_PGTABLE_2LEVEL_H
+#define _ASM_MACH_PGTABLE_2LEVEL_H
+
+#include <mach_pgtable-2level-defs.h>
+
+#ifndef CONFIG_XEN_SHADOW_MODE
+#define mach_set_pmd(pmdptr, pmdval) xen_l2_entry_update((pmdptr), (pmdval))
+#else
+#define mach_set_pmd(pmdptr, pmdval) (*(pmdptr) = (pmdval))
+#endif
+
+#define mach_ptep_get_and_clear(mm,addr,xp)    __pte_ma(xchg(&(xp)->pte_low, 0))
+
+/*
+ * We detect special mappings in one of two ways:
+ *  1. If the MFN is an I/O page then Xen will set the m2p entry
+ *     to be outside our maximum possible pseudophys range.
+ *  2. If the MFN belongs to a different domain then we will certainly
+ *     not have MFN in our p2m table. Conversely, if the page is ours,
+ *     then we'll have p2m(m2p(MFN))==MFN.
+ * If we detect a special mapping then it doesn't have a 'struct page'.
+ * We force !pfn_valid() by returning an out-of-range pointer.
+ *
+ * NB. These checks require that, for any MFN that is not in our reservation,
+ * there is no PFN such that p2m(PFN) == MFN. Otherwise we can get confused if
+ * we are foreign-mapping the MFN, and the other domain has m2p(MFN) == PFN.
+ * Yikes! Various places must poke in INVALID_P2M_ENTRY for safety.
+ * 
+ * NB2. When deliberately mapping foreign pages into the p2m table, you *must*
+ *      use FOREIGN_FRAME(). This will cause pte_pfn() to choke on it, as we
+ *      require. In all the cases we care about, the high bit gets shifted out
+ *      (e.g., phys_to_machine()) so behaviour there is correct.
+ */
+#define INVALID_P2M_ENTRY (~0U)
+#define FOREIGN_FRAME(_m) ((_m) | (1UL<<((sizeof(unsigned long)*8)-1)))
+#define pte_mfn(_pte) ((_pte).pte_low >> PAGE_SHIFT)
+#define mach_pte_pfn(_pte)                                                     \
+({                                                                     \
+       unsigned long mfn = pte_mfn(_pte);                              \
+       unsigned long pfn = mfn_to_pfn(mfn);                            \
+       if ((pfn >= max_mapnr) || (pfn_to_mfn(pfn) != mfn))             \
+               pfn = max_mapnr; /* special: force !pfn_valid() */      \
+       pfn;                                                            \
+})
+
+#define mach_pte_page(_pte) pfn_to_page(pte_pfn(_pte))
+#define pfn_pte_ma(pfn, prot)  __pte_ma(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
+
+#endif
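
(For reference: the p2m/m2p consistency test described in the comment above, redone as a tiny standalone userspace sketch. The translation tables, the MAX_MAPNR value and the sample frame numbers are invented purely for illustration; only the test itself mirrors mach_pte_pfn().)

#include <stdio.h>

#define MAX_MAPNR 4                     /* pretend this domain owns 4 pages */

/* pseudophys -> machine (p2m) and machine -> pseudophys (m2p) tables */
static unsigned long p2m[MAX_MAPNR] = { 7, 9, 12, 3 };
static unsigned long m2p[16] = { [3] = 3, [5] = 1,  /* 5: foreign mfn aliasing pfn 1 */
                                 [6] = 13,          /* 6: i/o page, m2p out of range */
                                 [7] = 0, [9] = 1, [12] = 2 };

static unsigned long mfn_to_pfn(unsigned long mfn) { return m2p[mfn]; }
static unsigned long pfn_to_mfn(unsigned long pfn) { return p2m[pfn]; }

/* same test as mach_pte_pfn(): pfn out of range, or p2m(m2p(mfn)) != mfn */
static unsigned long mfn_to_local_pfn(unsigned long mfn)
{
        unsigned long pfn = mfn_to_pfn(mfn);
        if ((pfn >= MAX_MAPNR) || (pfn_to_mfn(pfn) != mfn))
                pfn = MAX_MAPNR;        /* special: force !pfn_valid() */
        return pfn;
}

int main(void)
{
        printf("our mfn 9     -> pfn %lu\n", mfn_to_local_pfn(9));  /* 1 */
        printf("i/o mfn 6     -> pfn %lu\n", mfn_to_local_pfn(6));  /* 4 == MAX_MAPNR */
        printf("foreign mfn 5 -> pfn %lu\n", mfn_to_local_pfn(5));  /* 4 == MAX_MAPNR */
        return 0;
}
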
diff -Naur -x vmlinuz -X linux-2.6.12-xen0/Documentation/dontdiff linux-2.6.12-xen0/include/asm-i386/mach-xen/mach_pgtable-3level-defs.h linux-2.6.12-xen0-arch/include/asm-i386/mach-xen/mach_pgtable-3level-defs.h
--- linux-2.6.12-xen0/include/asm-i386/mach-xen/mach_pgtable-3level-defs.h      1969-12-31 16:00:00.000000000 -0800
+++ linux-2.6.12-xen0-arch/include/asm-i386/mach-xen/mach_pgtable-3level-defs.h 2005-08-02 20:24:39.000000000 -0700
@@ -0,0 +1,7 @@
+#ifndef _ASM_MACH_PGTABLE_3LEVEL_DEFS_H
+#define _ASM_MACH_PGTABLE_3LEVEL_DEFS_H
+
+#define HAVE_SHARED_KERNEL_PMD 0
+#define PTRS_PER_PGD_NO_HV 4
+
+#endif
diff -Naur -x vmlinuz -X linux-2.6.12-xen0/Documentation/dontdiff linux-2.6.12-xen0/include/asm-i386/mach-xen/mach_pgtable.h linux-2.6.12-xen0-arch/include/asm-i386/mach-xen/mach_pgtable.h
--- linux-2.6.12-xen0/include/asm-i386/mach-xen/mach_pgtable.h  1969-12-31 16:00:00.000000000 -0800
+++ linux-2.6.12-xen0-arch/include/asm-i386/mach-xen/mach_pgtable.h     2005-08-03 01:36:51.000000000 -0700
@@ -0,0 +1,98 @@
+#ifndef __ASM_MACH_PGTABLE_H
+#define __ASM_MACH_PGTABLE_H
+
+#include <xen_hypervisor.h>
+
+/* pmd_present doesn't just test the _PAGE_PRESENT bit since wr.p.t.
+   can temporarily clear it. */
+#define mach_pmd_present(x)    (pmd_val(x))
+#define mach_pmd_bad(x)                ((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER & ~_PAGE_PRESENT)) != (_KERNPG_TABLE & ~_PAGE_PRESENT))
+
+static inline void mach_ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+{
+       if (pte_write(*ptep))
+               clear_bit(_PAGE_BIT_RW, &ptep->pte_low);
+}
+
+#define mach_ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
+       do {                                                              \
+               if (__dirty) {                                            \
+                       if ( likely((__vma)->vm_mm == current->mm) ) {    \
+                           HYPERVISOR_update_va_mapping((__address), (__entry), UVMF_INVLPG|UVMF_MULTI|(unsigned long)((__vma)->vm_mm->cpu_vm_mask.bits)); \
+                       } else {                                          \
+                            xen_l1_entry_update((__ptep), (__entry)); \
+                           flush_tlb_page((__vma), (__address));         \
+                       }                                                 \
+               }                                                         \
+       } while (0)
+
+#define __HAVE_ARCH_PTEP_ESTABLISH
+#define ptep_establish(__vma, __address, __ptep, __entry)              \
+do {                                                                   \
+       ptep_set_access_flags(__vma, __address, __ptep, __entry, 1);    \
+} while (0)
+
+#define __HAVE_ARCH_PTEP_ESTABLISH_NEW
+#define ptep_establish_new(__vma, __address, __ptep, __entry)          \
+do {                                                                   \
+       if (likely((__vma)->vm_mm == current->mm)) {                    \
+               HYPERVISOR_update_va_mapping((__address),               \
+                                            __entry, 0);               \
+       } else {                                                        \
+               xen_l1_entry_update((__ptep), (__entry));       \
+       }                                                               \
+} while (0)
+
+#ifndef CONFIG_XEN_SHADOW_MODE
+void make_lowmem_page_readonly(void *va);
+void make_lowmem_page_writable(void *va);
+void make_page_readonly(void *va);
+void make_page_writable(void *va);
+void make_pages_readonly(void *va, unsigned int nr);
+void make_pages_writable(void *va, unsigned int nr);
+#else
+#define make_lowmem_page_readonly(_va) ((void)0)
+#define make_lowmem_page_writable(_va) ((void)0)
+#define make_page_readonly(_va)        ((void)0)
+#define make_page_writable(_va)        ((void)0)
+#define make_pages_readonly(_va, _nr)  ((void)0)
+#define make_pages_writable(_va, _nr)  ((void)0)
+#endif
+
+#define virt_to_ptep(__va)                                             \
+({                                                                     \
+       pgd_t *__pgd = pgd_offset_k((unsigned long)(__va));             \
+       pud_t *__pud = pud_offset(__pgd, (unsigned long)(__va));        \
+       pmd_t *__pmd = pmd_offset(__pud, (unsigned long)(__va));        \
+       pte_offset_kernel(__pmd, (unsigned long)(__va));                \
+})
+
+#define arbitrary_virt_to_machine(__va)                                        \
+({                                                                     \
+       pte_t *__pte = virt_to_ptep(__va);                              \
+       unsigned long __pa = (*(unsigned long *)__pte) & PAGE_MASK;     \
+       __pa | ((unsigned long)(__va) & (PAGE_SIZE-1));                 \
+})
+
+int direct_remap_area_pages(struct mm_struct *mm,
+                            unsigned long address, 
+                            unsigned long machine_addr,
+                            unsigned long size, 
+                            pgprot_t prot,
+                            domid_t  domid);
+int __direct_remap_area_pages(struct mm_struct *mm,
+                             unsigned long address, 
+                             unsigned long size, 
+                             mmu_update_t *v);
+
+#define mach_io_remap_page_range(vma,from,phys,size,prot) \
+direct_remap_area_pages(vma->vm_mm,from,phys,size,prot,DOMID_IO)
+
+#define mach_io_remap_pfn_range(vma,from,pfn,size,prot) \
+direct_remap_area_pages(vma->vm_mm,from,pfn<<PAGE_SHIFT,size,prot,DOMID_IO)
+
+extern void mm_pin(struct mm_struct *mm);
+extern void mm_unpin(struct mm_struct *mm);
+void mm_pin_all(void);
+
+#endif
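
(For reference: why the mach-xen pmd helpers above mask out _PAGE_PRESENT. With writable page tables Xen can transiently clear the present bit on a pmd that is otherwise a perfectly good kernel entry, so the native pmd_bad() test would misfire. A standalone sketch, using the usual i386 flag values; the sample pmd value is made up.)

#include <stdio.h>

/* the usual i386 pte/pmd flag values */
#define _PAGE_PRESENT  0x001
#define _PAGE_RW       0x002
#define _PAGE_USER     0x004
#define _PAGE_ACCESSED 0x020
#define _PAGE_DIRTY    0x040
#define _KERNPG_TABLE  (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
#define PAGE_MASK      (~0xfffUL)

/* native pmd_bad(), as removed from pgtable.h above */
static int native_pmd_bad(unsigned long pmd)
{
        return (pmd & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE;
}

/* mach-xen mach_pmd_bad(): the present bit is ignored on both sides */
static int xen_pmd_bad(unsigned long pmd)
{
        return (pmd & (~PAGE_MASK & ~_PAGE_USER & ~_PAGE_PRESENT)) !=
               (_KERNPG_TABLE & ~_PAGE_PRESENT);
}

int main(void)
{
        unsigned long pmd = 0x37a9c000UL | _KERNPG_TABLE;  /* made-up kernel pmd  */
        unsigned long unhooked = pmd & ~_PAGE_PRESENT;     /* present bit cleared */

        printf("good pmd:     native_bad=%d xen_bad=%d\n",
               native_pmd_bad(pmd), xen_pmd_bad(pmd));           /* 0 0 */
        printf("unhooked pmd: native_bad=%d xen_bad=%d\n",
               native_pmd_bad(unhooked), xen_pmd_bad(unhooked)); /* 1 0 */
        return 0;
}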

--


_______________________________________________
Xen-merge mailing list
Xen-merge@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-merge