# HG changeset patch
# User kaf24@xxxxxxxxxxxxxxxxxxxx
# Node ID 3f4d143579763d6f280146c8f0d49922c55ae82d
# Parent 47dca2f335de3493e9bcef04459a7d37ccc5ebf0
Page fault handler fixes.
diff -r 47dca2f335de -r 3f4d14357976 xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c Fri Sep 9 13:20:51 2005
+++ b/xen/arch/x86/mm.c Fri Sep 9 13:28:23 2005
@@ -3185,7 +3185,7 @@
struct pfn_info *page;
l1_pgentry_t pte;
l2_pgentry_t *pl2e, l2e;
- int which;
+ int which, flags;
unsigned long l2_idx;
if ( unlikely(shadow_mode_enabled(d)) )
@@ -3206,8 +3206,24 @@
pfn = l1e_get_pfn(pte);
page = &frame_table[pfn];
+#ifdef CONFIG_X86_64
+#define WRPT_PTE_FLAGS (_PAGE_RW | _PAGE_PRESENT | _PAGE_USER)
+#else
+#define WRPT_PTE_FLAGS (_PAGE_RW | _PAGE_PRESENT)
+#endif
+
+ /*
+ * Check the required flags for a valid wrpt mapping. If the page is
+ * already writable then we can return straight to the guest (SMP race).
+ * We decide whether or not to propagate the fault by testing for write
+ * permission in the page directories via the write-back to the linear mapping.
+ */
+ if ( (flags = l1e_get_flags(pte) & WRPT_PTE_FLAGS) == WRPT_PTE_FLAGS )
+ return !__put_user(
+ pte.l1, &linear_pg_table[l1_linear_offset(addr)].l1);
+
/* We are looking only for read-only mappings of p.t. pages. */
- if ( ((l1e_get_flags(pte) & (_PAGE_RW|_PAGE_PRESENT)) != _PAGE_PRESENT) ||
+ if ( ((flags | _PAGE_RW) != WRPT_PTE_FLAGS) ||
((page->u.inuse.type_info & PGT_type_mask) != PGT_l1_page_table) ||
((page->u.inuse.type_info & PGT_count_mask) == 0) ||
(page_get_owner(page) != d) )
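
As an aside on the new flag test in ptwr_do_page_fault(): the standalone sketch below is not part of the patch, and the _PAGE_* values are written out here only for illustration (the real definitions come from Xen's asm/page.h). It shows how the early flags == WRPT_PTE_FLAGS return and the later (flags | _PAGE_RW) != WRPT_PTE_FLAGS test split PTEs into three cases: already writable (SMP race, return to guest), present but read-only (candidate for writable-pagetable fixup), and everything else (propagate the fault to the guest).

/* Standalone sketch of the new flag arithmetic, compiled outside Xen.
 * The bit values mirror the x86 PTE layout but are illustrative only.
 * Build with: gcc -DCONFIG_X86_64 wrpt_flags.c   (or without the -D). */
#include <stdio.h>

#define _PAGE_PRESENT 0x001UL
#define _PAGE_RW      0x002UL
#define _PAGE_USER    0x004UL

#ifdef CONFIG_X86_64
#define WRPT_PTE_FLAGS (_PAGE_RW | _PAGE_PRESENT | _PAGE_USER)
#else
#define WRPT_PTE_FLAGS (_PAGE_RW | _PAGE_PRESENT)
#endif

static const char *classify(unsigned long pte_flags)
{
    unsigned long flags = pte_flags & WRPT_PTE_FLAGS;

    /* Already writable: another CPU fixed the mapping up first. */
    if ( flags == WRPT_PTE_FLAGS )
        return "return to guest (SMP race, already writable)";

    /* Present but read-only (and user-accessible on x86_64): candidate
     * for ptwr emulation, subject to the PGT_l1_page_table/owner checks. */
    if ( (flags | _PAGE_RW) == WRPT_PTE_FLAGS )
        return "read-only p.t. mapping: try writable-pagetable fixup";

    /* Not present (or missing _PAGE_USER on x86_64): hand the fault back. */
    return "propagate fault to guest";
}

int main(void)
{
    unsigned long samples[] = {
        _PAGE_PRESENT | _PAGE_RW | _PAGE_USER,   /* writable           */
        _PAGE_PRESENT | _PAGE_USER,              /* read-only, user    */
        _PAGE_PRESENT,                           /* read-only, kernel  */
        0,                                       /* not present        */
    };
    for ( unsigned i = 0; i < sizeof(samples)/sizeof(samples[0]); i++ )
        printf("%#05lx -> %s\n", samples[i], classify(samples[i]));
    return 0;
}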
diff -r 47dca2f335de -r 3f4d14357976 xen/arch/x86/traps.c
--- a/xen/arch/x86/traps.c Fri Sep 9 13:20:51 2005
+++ b/xen/arch/x86/traps.c Fri Sep 9 13:28:23 2005
@@ -470,20 +470,32 @@
return EXCRET_fault_fixed;
}
-asmlinkage int do_page_fault(struct cpu_user_regs *regs)
-{
- unsigned long addr, fixup;
- struct vcpu *v = current;
+#ifdef HYPERVISOR_VIRT_END
+#define IN_HYPERVISOR_RANGE(va) \
+ (((va) >= HYPERVISOR_VIRT_START) && ((va) < HYPERVISOR_VIRT_END))
+#else
+#define IN_HYPERVISOR_RANGE(va) \
+ (((va) >= HYPERVISOR_VIRT_START))
+#endif
+
+static int fixup_page_fault(unsigned long addr, struct cpu_user_regs *regs)
+{
+ struct vcpu *v = current;
struct domain *d = v->domain;
- __asm__ __volatile__ ("mov %%cr2,%0" : "=r" (addr) : );
-
- DEBUGGER_trap_entry(TRAP_page_fault, regs);
-
- perfc_incrc(page_faults);
-
- if ( likely(VM_ASSIST(d, VMASST_TYPE_writable_pagetables) &&
- !shadow_mode_enabled(d)) )
+ if ( unlikely(IN_HYPERVISOR_RANGE(addr)) )
+ {
+ if ( shadow_mode_external(d) && GUEST_CONTEXT(v, regs) )
+ return shadow_fault(addr, regs);
+ if ( (addr >= PERDOMAIN_VIRT_START) && (addr < PERDOMAIN_VIRT_END) )
+ return handle_perdomain_mapping_fault(
+ addr - PERDOMAIN_VIRT_START, regs);
+ }
+ else if ( unlikely(shadow_mode_enabled(d)) )
+ {
+ return shadow_fault(addr, regs);
+ }
+ else if ( likely(VM_ASSIST(d, VMASST_TYPE_writable_pagetables)) )
{
LOCK_BIGLOCK(d);
if ( unlikely(d->arch.ptwr[PTWR_PT_ACTIVE].l1va) &&
@@ -495,14 +507,9 @@
return EXCRET_fault_fixed;
}
- if ( ((addr < HYPERVISOR_VIRT_START)
-#if defined(__x86_64__)
- || (addr >= HYPERVISOR_VIRT_END)
-#endif
- )
- &&
- KERNEL_MODE(v, regs) &&
- ((regs->error_code & 3) == 3) && /* write-protection fault */
+ if ( KERNEL_MODE(v, regs) &&
+ /* Protection violation on write? No reserved-bit violation? */
+ ((regs->error_code & 0xb) == 0x3) &&
ptwr_do_page_fault(d, addr, regs) )
{
UNLOCK_BIGLOCK(d);
@@ -511,43 +518,51 @@
UNLOCK_BIGLOCK(d);
}
- if ( unlikely(shadow_mode_enabled(d)) &&
- ((addr < HYPERVISOR_VIRT_START) ||
-#if defined(__x86_64__)
- (addr >= HYPERVISOR_VIRT_END) ||
-#endif
- (shadow_mode_external(d) && GUEST_CONTEXT(v, regs))) &&
- shadow_fault(addr, regs) )
- return EXCRET_fault_fixed;
-
- if ( unlikely(addr >= PERDOMAIN_VIRT_START) &&
- unlikely(addr < PERDOMAIN_VIRT_END) &&
- handle_perdomain_mapping_fault(addr - PERDOMAIN_VIRT_START, regs) )
- return EXCRET_fault_fixed;
-
- if ( !GUEST_MODE(regs) )
- goto xen_fault;
+ return 0;
+}
+
+/*
+ * #PF error code:
+ * Bit 0: Protection violation (=1) ; Page not present (=0)
+ * Bit 1: Write access
+ * Bit 2: Supervisor mode
+ * Bit 3: Reserved bit violation
+ * Bit 4: Instruction fetch
+ */
+asmlinkage int do_page_fault(struct cpu_user_regs *regs)
+{
+ unsigned long addr, fixup;
+ int rc;
+
+ __asm__ __volatile__ ("mov %%cr2,%0" : "=r" (addr) : );
+
+ DEBUGGER_trap_entry(TRAP_page_fault, regs);
+
+ perfc_incrc(page_faults);
+
+ if ( unlikely((rc = fixup_page_fault(addr, regs)) != 0) )
+ return rc;
+
+ if ( unlikely(!GUEST_MODE(regs)) )
+ {
+ if ( likely((fixup = search_exception_table(regs->eip)) != 0) )
+ {
+ perfc_incrc(copy_user_faults);
+ regs->eip = fixup;
+ return 0;
+ }
+
+ DEBUGGER_trap_fatal(TRAP_page_fault, regs);
+
+ show_registers(regs);
+ show_page_walk(addr);
+ panic("CPU%d FATAL PAGE FAULT\n"
+ "[error_code=%04x]\n"
+ "Faulting linear address: %p\n",
+ smp_processor_id(), regs->error_code, addr);
+ }
propagate_page_fault(addr, regs->error_code);
- return 0;
-
- xen_fault:
-
- if ( likely((fixup = search_exception_table(regs->eip)) != 0) )
- {
- perfc_incrc(copy_user_faults);
- regs->eip = fixup;
- return 0;
- }
-
- DEBUGGER_trap_fatal(TRAP_page_fault, regs);
-
- show_registers(regs);
- show_page_walk(addr);
- panic("CPU%d FATAL PAGE FAULT\n"
- "[error_code=%04x]\n"
- "Faulting linear address: %p\n",
- smp_processor_id(), regs->error_code, addr);
return 0;
}
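
To make the tightened error-code test concrete: the sketch below is not part of the patch, and the PF_* names are illustrative, chosen here only to match the bit list in the comment above. It decodes the #PF error code and shows why (regs->error_code & 0xb) == 0x3 still means "write-protection fault" but now also rejects reserved-bit violations, which the old (error_code & 3) == 3 test would have passed on to ptwr_do_page_fault().

/* Standalone sketch decoding the #PF error-code bits documented in the
 * patch, and showing which faults the new test treats as ptwr candidates. */
#include <stdio.h>

#define PF_PROT  0x1   /* bit 0: protection violation (vs. not-present) */
#define PF_WRITE 0x2   /* bit 1: write access                           */
#define PF_USER  0x4   /* bit 2: fault taken in user (CPL 3) mode       */
#define PF_RSVD  0x8   /* bit 3: reserved bit set in a paging entry     */
#define PF_INSN  0x10  /* bit 4: instruction fetch                      */

static int is_ptwr_candidate(unsigned int error_code)
{
    /* Write to a present page, and no reserved-bit violation.
     * The old test, (error_code & 3) == 3, would also have accepted
     * reserved-bit faults, which ptwr can never fix up. */
    return (error_code & (PF_PROT | PF_WRITE | PF_RSVD))
           == (PF_PROT | PF_WRITE);
}

int main(void)
{
    unsigned int codes[] = { 0x2, 0x3, 0x7, 0xb, 0x11 };
    for ( unsigned i = 0; i < sizeof(codes)/sizeof(codes[0]); i++ )
        printf("error_code=%#04x -> %s\n", codes[i],
               is_ptwr_candidate(codes[i]) ? "ptwr candidate"
                                           : "not a ptwr candidate");
    return 0;
}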