x86-64: construct static, uniform parts of page tables at build time

... rather than at boot time, removing unnecessary redundancy between
EFI and legacy boot code.

Signed-off-by: Jan Beulich

--- a/xen/arch/x86/boot/head.S
+++ b/xen/arch/x86/boot/head.S
@@ -123,46 +123,19 @@ __start:
         /* Check for availability of long mode. */
         bt      $29,%edx
         jnc     bad_cpu

-        /* Initialise L2 identity-map and xen page table entries (16MB). */
-        mov     $sym_phys(l2_xenmap),%esi
+        /* Initialise L2 boot-map page table entries (16MB). */
         mov     $sym_phys(l2_bootmap),%edx
-        mov     $0x1e3,%eax          /* PRESENT+RW+A+D+2MB+GLOBAL */
+        mov     $PAGE_HYPERVISOR|_PAGE_PSE,%eax
         mov     $8,%ecx
-1:      mov     %eax,(%esi)
-        add     $8,%esi
-        mov     %eax,(%edx)
+1:      mov     %eax,(%edx)
         add     $8,%edx
         add     $(1<<L2_PAGETABLE_SHIFT),%eax

[NOTE(review): extraction gap — the remainder of this head.S hunk and any
intervening hunks (including the boot.c hunk header that introduces
__page_tables_start/__page_tables_end) were lost when this patch was
flattened into a single line; recover them from the original posting.]

+#define in_page_tables(v) ((intpte_t *)(v) >= __page_tables_start && \
+                           (intpte_t *)(v) < __page_tables_end)
+
 #define PE_BASE_RELOC_ABS      0
 #define PE_BASE_RELOC_HIGHLOW  3
 #define PE_BASE_RELOC_DIR64   10
@@ -604,11 +608,19 @@ static void __init relocate_image(unsign
             break;
         case PE_BASE_RELOC_HIGHLOW:
             if ( delta )
+            {
                 *(u32 *)addr += delta;
+                if ( in_page_tables(addr) )
+                    *(u32 *)addr += xen_phys_start;
+            }
             break;
         case PE_BASE_RELOC_DIR64:
             if ( delta )
+            {
                 *(u64 *)addr += delta;
+                if ( in_page_tables(addr) )
+                    *(intpte_t *)addr += xen_phys_start;
+            }
             break;
         default:
             blexit(L"Unsupported relocation type\r\n");
@@ -1113,43 +1125,21 @@ efi_start(EFI_HANDLE ImageHandle, EFI_SY
     *(u16 *)(*trampoline_ptr + (long)trampoline_ptr) =
         trampoline_phys >> 4;

-    /* Initialise L2 identity-map and xen page table entries (16MB). */
+    /* Initialise L2 identity-map and boot-map page table entries (16MB). */
     for ( i = 0; i < 8; ++i )
     {
         unsigned int slot = (xen_phys_start >> L2_PAGETABLE_SHIFT) + i;
         paddr_t addr = slot << L2_PAGETABLE_SHIFT;
         l2_identmap[slot] = l2e_from_paddr(addr, PAGE_HYPERVISOR|_PAGE_PSE);
-        l2_xenmap[i] = l2e_from_paddr(addr, PAGE_HYPERVISOR|_PAGE_PSE);
         slot &= L2_PAGETABLE_ENTRIES - 1;
         l2_bootmap[slot] = l2e_from_paddr(addr, __PAGE_HYPERVISOR|_PAGE_PSE);
     }
-    /* Initialise L2 fixmap page directory entry. */
-    l2_fixmap[l2_table_offset(FIXADDR_TOP - 1)] =
-        l2e_from_paddr((UINTN)l1_fixmap, __PAGE_HYPERVISOR);
-    /* Initialise L3 identity-map page directory entries. */
-    for ( i = 0; i < ARRAY_SIZE(l2_identmap) / L2_PAGETABLE_ENTRIES; ++i )
-        l3_identmap[i] = l3e_from_paddr((UINTN)(l2_identmap +
-                                                i * L2_PAGETABLE_ENTRIES),
-                                        __PAGE_HYPERVISOR);
-    /* Initialise L3 xen-map and fixmap page directory entries. */
-    l3_xenmap[l3_table_offset(XEN_VIRT_START)] =
-        l3e_from_paddr((UINTN)l2_xenmap, __PAGE_HYPERVISOR);
-    l3_xenmap[l3_table_offset(FIXADDR_TOP - 1)] =
-        l3e_from_paddr((UINTN)l2_fixmap, __PAGE_HYPERVISOR);
     /* Initialise L3 boot-map page directory entries. */
     l3_bootmap[l3_table_offset(xen_phys_start)] =
         l3e_from_paddr((UINTN)l2_bootmap, __PAGE_HYPERVISOR);
     l3_bootmap[l3_table_offset(xen_phys_start +
                                (8 << L2_PAGETABLE_SHIFT) - 1)] =
         l3e_from_paddr((UINTN)l2_bootmap, __PAGE_HYPERVISOR);
-    /* Hook identity-map, xen-map, and boot-map L3 tables into PML4. */
-    idle_pg_table[0] = l4e_from_paddr((UINTN)l3_bootmap, __PAGE_HYPERVISOR);
-    idle_pg_table[l4_table_offset(DIRECTMAP_VIRT_START)] =
-        l4e_from_paddr((UINTN)l3_identmap, __PAGE_HYPERVISOR);
-    idle_pg_table[l4_table_offset(XEN_VIRT_START)] =
-        l4e_from_paddr((UINTN)l3_xenmap, __PAGE_HYPERVISOR);
-    /* Hook 4kB mappings of first 2MB of memory into L2. */
-    l2_identmap[0] = l2e_from_paddr((UINTN)l1_identmap, __PAGE_HYPERVISOR);

     if ( gop )
     {
--- a/xen/arch/x86/x86_64/mm.c
+++ b/xen/arch/x86/x86_64/mm.c
@@ -49,24 +49,6 @@ unsigned int __read_mostly pfn_pdx_hole_
 unsigned int __read_mostly m2p_compat_vstart = __HYPERVISOR_COMPAT_VIRT_START;

-/* Top-level master (and idle-domain) page directory. */
-l4_pgentry_t __attribute__ ((__section__ (".bss.page_aligned")))
-    idle_pg_table[L4_PAGETABLE_ENTRIES];
-
-/* Enough page directories to map bottom 4GB of the memory map. */
-l3_pgentry_t __attribute__ ((__section__ (".bss.page_aligned")))
-    l3_identmap[L3_PAGETABLE_ENTRIES];
-
-/* Enough page directories to map the Xen text and static data. */
-l3_pgentry_t __attribute__ ((__section__ (".bss.page_aligned")))
-    l3_xenmap[L3_PAGETABLE_ENTRIES];
-l2_pgentry_t __attribute__ ((__section__ (".bss.page_aligned")))
-    l2_xenmap[L2_PAGETABLE_ENTRIES];
-
-/* Enough page directories to map the early fixmap space. */
-l2_pgentry_t __attribute__ ((__section__ (".bss.page_aligned")))
-    l2_fixmap[L2_PAGETABLE_ENTRIES];
-
 /* Enough page directories to map into the bottom 1GB. */
 l3_pgentry_t __attribute__ ((__section__ (".bss.page_aligned")))
     l3_bootmap[L3_PAGETABLE_ENTRIES];
--- a/xen/arch/x86/xen.lds.S
+++ b/xen/arch/x86/xen.lds.S
@@ -42,6 +42,10 @@ PHDRS
 }
 SECTIONS
 {
+#if defined(__x86_64__) && !defined(EFI)
+  . = __XEN_VIRT_START;
+  __image_base__ = .;
+#endif
   . = __XEN_VIRT_START + 0x100000;
   _start = .;
   .text : {
--- a/xen/include/asm-x86/page.h
+++ b/xen/include/asm-x86/page.h
@@ -304,11 +304,8 @@ extern l2_pgentry_t idle_pg_table_l2[
 extern l2_pgentry_t *compat_idle_pg_table_l2;
 extern unsigned int m2p_compat_vstart;
 extern l2_pgentry_t l2_xenmap[L2_PAGETABLE_ENTRIES],
-    l2_fixmap[L2_PAGETABLE_ENTRIES],
     l2_bootmap[L2_PAGETABLE_ENTRIES];
-extern l3_pgentry_t l3_xenmap[L3_PAGETABLE_ENTRIES],
-    l3_identmap[L3_PAGETABLE_ENTRIES],
-    l3_bootmap[L3_PAGETABLE_ENTRIES];
+extern l3_pgentry_t l3_bootmap[L3_PAGETABLE_ENTRIES];
 #endif
 extern l2_pgentry_t l2_identmap[4*L2_PAGETABLE_ENTRIES];
 extern l1_pgentry_t l1_identmap[L1_PAGETABLE_ENTRIES],