On Fri, 2009-06-05 at 12:05 -0400, Ian Campbell wrote:
>
> I had some patches to unify the 32-bit and 64-bit versions of
> dump_pagetable() at one point, since the 64-bit version does the right
> thing. I'll see if I can find or reproduce them.
Couldn't find them but please try this:
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index abe8e4b..e455d56 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -285,46 +285,12 @@ check_v8086_mode(struct pt_regs *regs, unsigned long address,
tsk->thread.screen_bitmap |= 1 << bit;
}
-static void dump_pagetable(unsigned long address)
-{
- __typeof__(pte_val(__pte(0))) page;
-
- page = read_cr3();
- page = ((__typeof__(page) *) __va(page))[address >> PGDIR_SHIFT];
-
#ifdef CONFIG_X86_PAE
- printk("*pdpt = %016Lx ", page);
- if ((page >> PAGE_SHIFT) < max_low_pfn
- && page & _PAGE_PRESENT) {
- page &= PAGE_MASK;
- page = ((__typeof__(page) *) __va(page))[(address >> PMD_SHIFT)
- & (PTRS_PER_PMD - 1)];
- printk(KERN_CONT "*pde = %016Lx ", page);
- page &= ~_PAGE_NX;
- }
+#define FMTPTE "ll"
#else
- printk("*pde = %08lx ", page);
+#define FMTPTE "l"
#endif
- /*
- * We must not directly access the pte in the highpte
- * case if the page table is located in highmem.
- * And let's rather not kmap-atomic the pte, just in case
- * it's allocated already:
- */
- if ((page >> PAGE_SHIFT) < max_low_pfn
- && (page & _PAGE_PRESENT)
- && !(page & _PAGE_PSE)) {
-
- page &= PAGE_MASK;
- page = ((__typeof__(page) *) __va(page))[(address >> PAGE_SHIFT)
- & (PTRS_PER_PTE - 1)];
- printk("*pte = %0*Lx ", sizeof(page)*2, (u64)page);
- }
-
- printk("\n");
-}
-
#else /* CONFIG_X86_64: */
void vmalloc_sync_all(void)
@@ -440,6 +406,10 @@ check_v8086_mode(struct pt_regs *regs, unsigned long address,
{
}
+#define FMTPTE "ll"
+
+#endif /* CONFIG_X86_64 */
+
static int bad_address(void *p)
{
unsigned long dummy;
@@ -447,7 +417,7 @@ static int bad_address(void *p)
return probe_kernel_address((unsigned long *)p, dummy);
}
-static void dump_pagetable(unsigned long address)
+void dump_pagetable(unsigned long address)
{
pgd_t *pgd;
pud_t *pud;
@@ -462,7 +432,7 @@ static void dump_pagetable(unsigned long address)
if (bad_address(pgd))
goto bad;
- printk("PGD %lx ", pgd_val(*pgd));
+ printk("PGD %"FMTPTE"x ", pgd_val(*pgd));
if (!pgd_present(*pgd))
goto out;
@@ -471,7 +441,7 @@ static void dump_pagetable(unsigned long address)
if (bad_address(pud))
goto bad;
- printk("PUD %lx ", pud_val(*pud));
+ printk("PUD %"FMTPTE"x ", pud_val(*pud));
if (!pud_present(*pud) || pud_large(*pud))
goto out;
@@ -479,7 +449,7 @@ static void dump_pagetable(unsigned long address)
if (bad_address(pmd))
goto bad;
- printk("PMD %lx ", pmd_val(*pmd));
+ printk("PMD %"FMTPTE"x ", pmd_val(*pmd));
if (!pmd_present(*pmd) || pmd_large(*pmd))
goto out;
@@ -487,7 +457,7 @@ static void dump_pagetable(unsigned long address)
if (bad_address(pte))
goto bad;
- printk("PTE %lx", pte_val(*pte));
+ printk("PTE %"FMTPTE"x", pte_val(*pte));
out:
printk("\n");
return;
@@ -495,8 +465,6 @@ bad:
printk("BAD\n");
}
-#endif /* CONFIG_X86_64 */
-
/*
* Workaround for K8 erratum #93 & buggy BIOS.
*
@@ -603,6 +571,10 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
printk_address(regs->ip, 1);
dump_pagetable(address);
+ printk(KERN_CRIT "Fixmap KM_PTE0 @ %#lx\n", fix_to_virt(KM_PTE0));
+ dump_pagetable(fix_to_virt(KM_PTE0));
+ printk(KERN_CRIT "Fixmap KM_PTE1 @ %#lx\n", fix_to_virt(KM_PTE1));
+ dump_pagetable(fix_to_virt(KM_PTE1));
}
static noinline void
diff --git a/init/main.c b/init/main.c
index 33ce929..fee067e 100644
--- a/init/main.c
+++ b/init/main.c
@@ -807,6 +807,7 @@ static void run_init_process(char *init_filename)
static noinline int init_post(void)
__releases(kernel_lock)
{
+ extern void dump_pagetable(unsigned long address);
/* need to finish all async __init code before freeing the memory */
async_synchronize_full();
free_initmem();
@@ -815,6 +816,9 @@ static noinline int init_post(void)
system_state = SYSTEM_RUNNING;
numa_default_policy();
+ printk(KERN_CRIT "test dump_pagetable on %#lx\n", (unsigned
long)__builtin_return_address(0));
+ dump_pagetable((unsigned long)__builtin_return_address(0));
+
if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
printk(KERN_WARNING "Warning: unable to open an initial console.\n");
diff --git a/mm/rmap.c b/mm/rmap.c
index 1652166..ae5d5a0 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -267,6 +267,7 @@ unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
pte_t *page_check_address(struct page *page, struct mm_struct *mm,
unsigned long address, spinlock_t **ptlp, int sync)
{
+ struct page *pgd_page, *pte_page;
pgd_t *pgd;
pud_t *pud;
pmd_t *pmd;
@@ -285,6 +286,22 @@ pte_t *page_check_address(struct page *page, struct mm_struct *mm,
if (!pmd_present(*pmd))
return NULL;
+ pgd_page = virt_to_page(mm->pgd);
+ pte_page = pmd_page(*pmd);
+
+ if (PagePinned(pgd_page) != PagePinned(pte_page)) {
+ extern void dump_pagetable(unsigned long address);
+ printk(KERN_CRIT "L4 at %p is %s contains L2 at %p which points
at an L1 which is %s %s\n",
+ pgd, PagePinned(pgd_page) ? "pinned" : "unpinned",
+ pmd, PagePinned(pte_page) ? "pinned" : "unpinned",
+ PageHighMem(pte_page) ? "highmem" : "lowmem");
+ printk(KERN_CRIT "address %#lx\n", address);
+ dump_pagetable(address);
+ printk(KERN_CRIT "Fixmap KM_PTE0 @ %#lx\n",
fix_to_virt(KM_PTE0));
+ dump_pagetable(fix_to_virt(KM_PTE0));
+ printk(KERN_CRIT "Fixmap KM_PTE0 @ %#lx\n",
fix_to_virt(KM_PTE1));
+ dump_pagetable(fix_to_virt(KM_PTE1));
+ }
pte = pte_offset_map(pmd, address);
/* Make a quick check before getting the lock */
if (!sync && !pte_present(*pte)) {
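
For anyone skimming the FMTPTE change above: the idea is to pick the printk
length modifier once, so a single copy of dump_pagetable() prints entries of
the right width on PAE, non-PAE and 64-bit builds. Here is a minimal
standalone sketch of the same trick (userspace only; pteval_sketch_t and the
entry values are made up for illustration, not the kernel's definitions):

#include <stdio.h>

/* PAE (and 64-bit) page table entries are 64 bits wide; plain 32-bit
 * non-PAE entries are a native unsigned long.  Choosing the format
 * length modifier once lets a single call site cover both cases. */
#ifdef CONFIG_X86_PAE
typedef unsigned long long pteval_sketch_t;
#define FMTPTE "ll"
#else
typedef unsigned long pteval_sketch_t;
#define FMTPTE "l"
#endif

int main(void)
{
	pteval_sketch_t pgd = 0x37c067;	/* made-up entry values */
	pteval_sketch_t pte = 0x1d4025;

	/* Adjacent string literals concatenate, so the same format
	 * string prints the correct width in either configuration. */
	printf("PGD %" FMTPTE "x ", pgd);
	printf("PTE %" FMTPTE "x\n", pte);
	return 0;
}

One caveat worth mentioning: on 64-bit, pgd_val() and friends return unsigned
long, so printing them with %llx as the patch does is a width match rather
than an exact type match; if gcc's format checking complains it may be worth
casting the values to u64 at the call sites.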
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel