# HG changeset patch
# User kaf24@xxxxxxxxxxxxxxxxxxxx
# Node ID 090e44133d40247bc3ccbb565b644d02fdac6829
# Parent bdf1a8039d1361f47ada7d0cd08582c30469bda8
Use make_lowmem_page_readonly/writable() in preference to the
generic functions where appropriate. This prevents us using the
generic functions early during boot, when pte_pfn() does not work
(because max_mapnr is not initialised).
Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
diff -r bdf1a8039d13 -r 090e44133d40 linux-2.6-xen-sparse/arch/xen/i386/kernel/cpu/common.c
--- a/linux-2.6-xen-sparse/arch/xen/i386/kernel/cpu/common.c Mon Nov 14 14:21:16 2005
+++ b/linux-2.6-xen-sparse/arch/xen/i386/kernel/cpu/common.c Mon Nov 14 17:13:38 2005
@@ -572,7 +572,7 @@
va < gdt_descr->address + gdt_descr->size;
va += PAGE_SIZE, f++) {
frames[f] = virt_to_mfn(va);
- make_page_readonly((void *)va);
+ make_lowmem_page_readonly((void *)va);
}
if (HYPERVISOR_set_gdt(frames, gdt_descr->size / 8))
BUG();
diff -r bdf1a8039d13 -r 090e44133d40 linux-2.6-xen-sparse/arch/xen/i386/mm/init.c
--- a/linux-2.6-xen-sparse/arch/xen/i386/mm/init.c Mon Nov 14 14:21:16 2005
+++ b/linux-2.6-xen-sparse/arch/xen/i386/mm/init.c Mon Nov 14 17:13:38 2005
@@ -68,7 +68,7 @@
#ifdef CONFIG_X86_PAE
pmd_table = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE);
- make_page_readonly(pmd_table);
+ make_lowmem_page_readonly(pmd_table);
set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
pud = pud_offset(pgd, 0);
if (pmd_table != pmd_offset(pud, 0))
@@ -89,7 +89,7 @@
{
if (pmd_none(*pmd)) {
pte_t *page_table = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
- make_page_readonly(page_table);
+ make_lowmem_page_readonly(page_table);
set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
if (page_table != pte_offset_kernel(pmd, 0))
BUG();
diff -r bdf1a8039d13 -r 090e44133d40 linux-2.6-xen-sparse/arch/xen/i386/mm/pgtable.c
--- a/linux-2.6-xen-sparse/arch/xen/i386/mm/pgtable.c Mon Nov 14 14:21:16 2005
+++ b/linux-2.6-xen-sparse/arch/xen/i386/mm/pgtable.c Mon Nov 14 17:13:38 2005
@@ -199,7 +199,7 @@
{
pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
if (pte)
- make_page_readonly(pte);
+ make_lowmem_page_readonly(pte);
return pte;
}
@@ -336,7 +336,7 @@
spin_lock_irqsave(&pgd_lock, flags);
memcpy(pmd, copy_pmd, PAGE_SIZE);
spin_unlock_irqrestore(&pgd_lock, flags);
- make_page_readonly(pmd);
+ make_lowmem_page_readonly(pmd);
set_pgd(&pgd[USER_PTRS_PER_PGD], __pgd(1 + __pa(pmd)));
}
@@ -367,12 +367,12 @@
if (PTRS_PER_PMD > 1) {
for (i = 0; i < USER_PTRS_PER_PGD; ++i) {
pmd_t *pmd = (void *)__va(pgd_val(pgd[i])-1);
- make_page_writable(pmd);
+ make_lowmem_page_writable(pmd);
kmem_cache_free(pmd_cache, pmd);
}
if (!HAVE_SHARED_KERNEL_PMD) {
pmd_t *pmd = (void *)__va(pgd_val(pgd[USER_PTRS_PER_PGD])-1);
- make_page_writable(pmd);
+ make_lowmem_page_writable(pmd);
memset(pmd, 0, PTRS_PER_PMD*sizeof(pmd_t));
kmem_cache_free(pmd_cache, pmd);
}
@@ -382,6 +382,7 @@
}
#ifndef CONFIG_XEN_SHADOW_MODE
+asmlinkage int xprintk(const char *fmt, ...);
void make_lowmem_page_readonly(void *va)
{
pte_t *pte = virt_to_ptep(va);
@@ -399,8 +400,7 @@
pte_t *pte = virt_to_ptep(va);
set_pte(pte, pte_wrprotect(*pte));
if ((unsigned long)va >= (unsigned long)high_memory) {
- unsigned long pfn;
- pfn = pte_pfn(*pte);
+ unsigned long pfn = pte_pfn(*pte);
#ifdef CONFIG_HIGHMEM
if (pfn < highstart_pfn)
#endif
@@ -414,8 +414,7 @@
pte_t *pte = virt_to_ptep(va);
set_pte(pte, pte_mkwrite(*pte));
if ((unsigned long)va >= (unsigned long)high_memory) {
- unsigned long pfn;
- pfn = pte_pfn(*pte);
+ unsigned long pfn = pte_pfn(*pte);
#ifdef CONFIG_HIGHMEM
if (pfn < highstart_pfn)
#endif
diff -r bdf1a8039d13 -r 090e44133d40 xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c Mon Nov 14 14:21:16 2005
+++ b/xen/arch/x86/mm.c Mon Nov 14 17:13:38 2005
@@ -3125,7 +3125,10 @@
/* Check the new PTE. */
nl1e = l1e_from_intpte(val);
if ( unlikely(!get_page_from_l1e(nl1e, d)) )
+ {
+ MEM_LOG("ptwr_emulate: could not get_page_from_l1e()");
return X86EMUL_UNHANDLEABLE;
+ }
/* Checked successfully: do the update (write or cmpxchg). */
pl1e = map_domain_page(page_to_pfn(page));
@@ -3248,6 +3251,9 @@
goto emulate;
#endif
+ PTWR_PRINTK("ptwr_page_fault on l1 pt at va %lx, pfn %lx, eip %lx\n",
+ addr, pfn, (unsigned long)regs->eip);
+
/* Get the L2 index at which this L1 p.t. is always mapped. */
l2_idx = page->u.inuse.type_info & PGT_va_mask;
if ( unlikely(l2_idx >= PGT_va_unknown) )
@@ -3292,10 +3298,6 @@
goto emulate;
}
- PTWR_PRINTK("[%c] page_fault on l1 pt at va %lx, pt for %08lx, "
- "pfn %lx\n", PTWR_PRINT_WHICH,
- addr, l2_idx << L2_PAGETABLE_SHIFT, pfn);
-
/*
* We only allow one ACTIVE and one INACTIVE p.t. to be updated at at
* time. If there is already one, we must flush it out.
@@ -3313,6 +3315,10 @@
d->arch.ptwr[which].prev_nr_updates = 1;
goto emulate;
}
+
+ PTWR_PRINTK("[%c] batched ptwr_page_fault at va %lx, pt for %08lx, "
+ "pfn %lx\n", PTWR_PRINT_WHICH, addr,
+ l2_idx << L2_PAGETABLE_SHIFT, pfn);
d->arch.ptwr[which].l1va = addr | 1;
d->arch.ptwr[which].l2_idx = l2_idx;
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
|