# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1201276940 0
# Node ID 76601c290fa9e1c4c1f307e58eea62aee5c26c30
# Parent 923f2f7365079861e0e2341de22839376183208e
x86: First 1MB of memory should be mapped with 4kB mappings to avoid
conflict with fixed-range MTRRs. While there, we now map the VGA hole
as uncacheable.
Signed-off-by: Keir Fraser <keir.fraser@xxxxxxxxxx>
---
xen/arch/x86/boot/head.S | 36 +++++++++++++++++++++++++++++++++++-
xen/arch/x86/mm.c | 21 ++++++++++++++++-----
xen/arch/x86/setup.c | 21 ++++++++++++++++++---
xen/arch/x86/x86_32/mm.c | 6 ++++++
4 files changed, 75 insertions(+), 9 deletions(-)
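
The reason the first megabyte must not sit under a single 2MB/4MB superpage is that the fixed-range MTRRs describe memory types below 1MB at 64kB, 16kB and 4kB granularity, so one large mapping cannot agree with all of them; firmware typically marks the 0xa0000-0xbffff VGA hole uncacheable there, hence the explicit UC mapping added below. As a non-Xen illustration (assumed names, MSR numbers per the Intel SDM), the helper below picks out which fixed-range MTRR MSR and byte field govern a given sub-1MB physical address:

/*
 * Illustrative helper, not part of the patch: map a physical address below
 * 1MB to the fixed-range MTRR MSR and 8-bit type field covering it.
 * MSR indices follow the Intel SDM: 0x250 (FIX64K_00000), 0x258/0x259
 * (FIX16K_80000/A0000), 0x268..0x26f (FIX4K_C0000..F8000).
 */
#include <stdint.h>

struct fixed_mtrr_slot {
    uint32_t msr;       /* MSR holding eight 8-bit memory-type fields */
    unsigned int field; /* which byte (0..7) covers the address */
};

static struct fixed_mtrr_slot fixed_mtrr_for(uint32_t paddr /* < 0x100000 */)
{
    struct fixed_mtrr_slot s;

    if ( paddr < 0x80000 )          /* 0-512kB: eight 64kB ranges */
    {
        s.msr = 0x250;
        s.field = paddr >> 16;
    }
    else if ( paddr < 0xc0000 )     /* 512-768kB: 16kB ranges */
    {
        s.msr = 0x258 + ((paddr - 0x80000) >> 17);
        s.field = (paddr >> 14) & 7;
    }
    else                            /* 768kB-1MB: 4kB ranges */
    {
        s.msr = 0x268 + ((paddr - 0xc0000) >> 15);
        s.field = (paddr >> 12) & 7;
    }
    return s;
}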
diff -r 923f2f736507 -r 76601c290fa9 xen/arch/x86/boot/head.S
--- a/xen/arch/x86/boot/head.S Fri Jan 25 13:42:36 2008 +0000
+++ b/xen/arch/x86/boot/head.S Fri Jan 25 16:02:20 2008 +0000
@@ -126,7 +126,7 @@ 1: mov %eax,(%edi)
/* Initialise L3 xen-map page directory entry. */
mov $(sym_phys(l2_xenmap)+7),%eax
mov %eax,sym_phys(l3_xenmap) + (50*8)
- /* Hook indentity-map and xen-map L3 tables into PML4. */
+ /* Hook identity-map and xen-map L3 tables into PML4. */
mov $(sym_phys(l3_identmap)+7),%eax
mov %eax,sym_phys(idle_pg_table) + ( 0*8) /* PML4[ 0]: 1:1 map */
mov %eax,sym_phys(idle_pg_table) + (262*8) /* PML4[262]: 1:1 map */
@@ -162,6 +162,40 @@ 1: stosl /* low mappings cover up
jne 1b
#endif
+ /* Initialize 4kB mappings of first 2MB or 4MB of memory. */
+ mov $sym_phys(l1_identmap),%edi
+ mov $0x263,%eax /* PRESENT+RW+A+D+SMALL_PAGES */
+#if defined(__x86_64__)
+ or $0x100,%eax /* GLOBAL */
+#endif
+ xor %ecx,%ecx
+1: stosl
+#if CONFIG_PAGING_LEVELS >= 3
+ add $4,%edi
+#endif
+ add $PAGE_SIZE,%eax
+ inc %ecx
+ /* VGA hole (0xa0000-0xc0000) should be mapped UC. */
+ cmp $0xa0,%ecx
+ jne 2f
+ or $0x10,%eax /* +PCD */
+2: cmp $0xc0,%ecx
+ jne 2f
+ and $~0x10,%eax /* -PCD */
+2: cmp $L1_PAGETABLE_ENTRIES,%ecx
+ jne 1b
+ sub $(PAGE_SIZE-0x63),%edi
+#if defined(__x86_64__)
+ mov %edi,sym_phys(l2_identmap)
+ mov %edi,sym_phys(l2_xenmap)
+#elif defined(CONFIG_X86_PAE)
+ mov %edi,sym_phys(idle_pg_table_l2)
+ mov %edi,sym_phys(idle_pg_table_l2) + (__PAGE_OFFSET>>18)
+#else
+ mov %edi,sym_phys(idle_pg_table)
+ mov %edi,sym_phys(idle_pg_table) + (__PAGE_OFFSET>>20)
+#endif
+
/* Copy bootstrap trampoline to low memory, below 1MB. */
mov $sym_phys(trampoline_start),%esi
mov $bootsym_phys(trampoline_start),%edi
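
For readers less fluent in the assembly above, the new loop can be rendered roughly in C as below (64-bit case: 512 eight-byte entries covering the first 2MB; with PAE the table has the same shape, and without PAE it is 1024 four-byte entries covering 4MB). The constants mirror the assembly's comments; the macro names are illustrative, not Xen's. Note also that the closing "sub $(PAGE_SIZE-0x63),%edi" turns the just-past-the-end pointer back into sym_phys(l1_identmap)+0x63, which is exactly the value then installed as the L2 (page-directory) entry pointing at the new L1 table.

/*
 * Illustrative C rendering of the head.S loop above (64-bit case). The
 * flag values follow the assembly's comments; names are not Xen's.
 */
#include <stdint.h>

#define NR_L1_ENTRIES 512
#define PTE_FLAGS     0x263  /* PRESENT+RW+A+D plus the asm's marker bit */
#define PTE_GLOBAL    0x100  /* added by the asm on x86-64 only */
#define PTE_PCD       0x010  /* cache-disable, used for the VGA hole */

static uint64_t l1_identmap_sketch[NR_L1_ENTRIES];

static void build_l1_identmap(void)
{
    unsigned int i;

    for ( i = 0; i < NR_L1_ENTRIES; i++ )
    {
        uint64_t flags = PTE_FLAGS | PTE_GLOBAL;

        /* Entries 0xa0-0xbf cover 0xa0000-0xbffff: map the VGA hole UC. */
        if ( i >= 0xa0 && i < 0xc0 )
            flags |= PTE_PCD;

        l1_identmap_sketch[i] = ((uint64_t)i << 12) | flags;
    }
}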
diff -r 923f2f736507 -r 76601c290fa9 xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c Fri Jan 25 13:42:36 2008 +0000
+++ b/xen/arch/x86/mm.c Fri Jan 25 16:02:20 2008 +0000
@@ -113,6 +113,15 @@
#include <xsm/xsm.h>
#include <xen/trace.h>
+/*
+ * Mapping of first 2 or 4 megabytes of memory. This is mapped with 4kB
+ * mappings to avoid type conflicts with fixed-range MTRRs covering the
+ * lowest megabyte of physical memory. In any case the VGA hole should be
+ * mapped with type UC.
+ */
+l1_pgentry_t __attribute__ ((__section__ (".bss.page_aligned")))
+ l1_identmap[L1_PAGETABLE_ENTRIES];
+
#define MEM_LOG(_f, _a...) gdprintk(XENLOG_WARNING , _f "\n" , ## _a)
/*
@@ -3912,16 +3921,18 @@ void __set_fixmap(
void memguard_init(void)
{
+ unsigned long start = max_t(unsigned long, xen_phys_start, 1UL << 20);
map_pages_to_xen(
- (unsigned long)__va(xen_phys_start),
- xen_phys_start >> PAGE_SHIFT,
- (xenheap_phys_end - xen_phys_start) >> PAGE_SHIFT,
+ (unsigned long)__va(start),
+ start >> PAGE_SHIFT,
+ (xenheap_phys_end - start) >> PAGE_SHIFT,
__PAGE_HYPERVISOR|MAP_SMALL_PAGES);
#ifdef __x86_64__
+ BUG_ON(start != xen_phys_start);
map_pages_to_xen(
XEN_VIRT_START,
- xen_phys_start >> PAGE_SHIFT,
- (__pa(&_end) + PAGE_SIZE - 1 - xen_phys_start) >> PAGE_SHIFT,
+ start >> PAGE_SHIFT,
+ (__pa(&_end) + PAGE_SIZE - 1 - start) >> PAGE_SHIFT,
__PAGE_HYPERVISOR|MAP_SMALL_PAGES);
#endif
}
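
The memguard_init() change clamps the start of the small-page remap to 1MB, so the l1_identmap mappings just built for the first megabyte (including the uncacheable VGA hole) are never replaced; on x86-64, where Xen relocates itself well above 1MB, xen_phys_start already exceeds the clamp, which the new BUG_ON asserts. A minimal sketch of the clamped range computation, using stand-in names rather than Xen's globals:

/* Sketch of the clamped memguard range computation; stand-in names, not
 * Xen code. */
#define PAGE_SHIFT 12

static void memguard_range(unsigned long xen_phys_start,
                           unsigned long xenheap_phys_end,
                           unsigned long *first_mfn, unsigned long *nr_mfns)
{
    /* Never remap below 1MB: that region is now owned by l1_identmap. */
    unsigned long start = (xen_phys_start > (1UL << 20)) ? xen_phys_start
                                                         : (1UL << 20);

    *first_mfn = start >> PAGE_SHIFT;
    *nr_mfns   = (xenheap_phys_end - start) >> PAGE_SHIFT;
}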
diff -r 923f2f736507 -r 76601c290fa9 xen/arch/x86/setup.c
--- a/xen/arch/x86/setup.c Fri Jan 25 13:42:36 2008 +0000
+++ b/xen/arch/x86/setup.c Fri Jan 25 16:02:20 2008 +0000
@@ -258,8 +258,10 @@ static void __init bootstrap_map(unsigne
static void __init bootstrap_map(unsigned long start, unsigned long end)
{
unsigned long mask = (1UL << L2_PAGETABLE_SHIFT) - 1;
- start = start & ~mask;
+ start = max_t(unsigned long, start & ~mask, 16UL << 20);
end = (end + mask) & ~mask;
+ if ( start >= end )
+ return;
if ( end > BOOTSTRAP_DIRECTMAP_END )
panic("Cannot access memory beyond end of "
"bootstrap direct-map area\n");
@@ -642,7 +644,7 @@ void __init __start_xen(unsigned long mb
l4_pgentry_t *pl4e;
l3_pgentry_t *pl3e;
l2_pgentry_t *pl2e;
- int i, j;
+ int i, j, k;
/* Select relocation address. */
e = (e - (opt_xenheap_megabytes << 20)) & ~mask;
@@ -678,12 +680,25 @@ void __init __start_xen(unsigned long mb
continue;
*pl3e = l3e_from_intpte(l3e_get_intpte(*pl3e) +
xen_phys_start);
+ pl2e = l3e_to_l2e(*pl3e);
+ for ( k = 0; k < L2_PAGETABLE_ENTRIES; k++, pl2e++ )
+ {
+ /* Not present, PSE, or already relocated? */
+ if ( !(l2e_get_flags(*pl2e) & _PAGE_PRESENT) ||
+ (l2e_get_flags(*pl2e) & _PAGE_PSE) ||
+ (l2e_get_pfn(*pl2e) > 0x1000) )
+ continue;
+ *pl2e = l2e_from_intpte(l2e_get_intpte(*pl2e) +
+ xen_phys_start);
+ }
}
}
/* The only data mappings to be relocated are in the Xen area. */
pl2e = __va(__pa(l2_xenmap));
- for ( i = 0; i < L2_PAGETABLE_ENTRIES; i++, pl2e++ )
+ *pl2e++ = l2e_from_pfn(xen_phys_start >> PAGE_SHIFT,
+ PAGE_HYPERVISOR | _PAGE_PSE);
+ for ( i = 1; i < L2_PAGETABLE_ENTRIES; i++, pl2e++ )
{
if ( !(l2e_get_flags(*pl2e) & _PAGE_PRESENT) )
continue;
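
Two things change in setup.c: bootstrap_map() now refuses to establish boot-time mappings below 16MB, and the relocation loop in __start_xen() additionally fixes up non-PSE L2 entries, which exist now that l1_identmap is hooked into the boot page tables. The skip test can be restated as the predicate below (written here for clarity and assuming Xen's page-table accessors, not taken from the patch): an entry needs the xen_phys_start offset applied only if it is present, does not map a superpage, and still points below 16MB (pfn 0x1000), i.e. at the original unrelocated image. After the loop, l2_xenmap[0], which head.S had pointed at l1_identmap, is rewritten as a 2MB superpage mapping the start of the relocated image.

/*
 * Restatement (not Xen source) of the skip condition in the relocation
 * loop above; assumes Xen's l2e_get_flags()/l2e_get_pfn() accessors.
 */
static int l2e_needs_relocation(l2_pgentry_t l2e)
{
    return (l2e_get_flags(l2e) & _PAGE_PRESENT) &&
           !(l2e_get_flags(l2e) & _PAGE_PSE) &&
           (l2e_get_pfn(l2e) <= 0x1000);
}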
diff -r 923f2f736507 -r 76601c290fa9 xen/arch/x86/x86_32/mm.c
--- a/xen/arch/x86/x86_32/mm.c Fri Jan 25 13:42:36 2008 +0000
+++ b/xen/arch/x86/x86_32/mm.c Fri Jan 25 16:02:20 2008 +0000
@@ -38,6 +38,8 @@ l2_pgentry_t __attribute__ ((__section__
idle_pg_table_l2[L2_PAGETABLE_ENTRIES];
#endif
+extern l1_pgentry_t l1_identmap[L1_PAGETABLE_ENTRIES];
+
unsigned int PAGE_HYPERVISOR = __PAGE_HYPERVISOR;
unsigned int PAGE_HYPERVISOR_NOCACHE = __PAGE_HYPERVISOR_NOCACHE;
@@ -90,6 +92,8 @@ void __init paging_init(void)
(_PAGE_PSE|_PAGE_PRESENT)) == (_PAGE_PSE|_PAGE_PRESENT) )
l2e_add_flags(idle_pg_table_l2[l2_linear_offset(v)],
_PAGE_GLOBAL);
+ for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
+ l1e_add_flags(l1_identmap[i], _PAGE_GLOBAL);
}
/*
@@ -150,6 +154,8 @@ void __init zap_low_mappings(l2_pgentry_
l2e_write(&dom0_l2[i], l2e_empty());
/* Now zap mappings in the idle pagetables. */
+ BUG_ON(l2e_get_pfn(idle_pg_table_l2[0]) != virt_to_mfn(l1_identmap));
+ l2e_write_atomic(&idle_pg_table_l2[0], l2e_empty());
destroy_xen_mappings(0, HYPERVISOR_VIRT_START);
flush_all(FLUSH_TLB_GLOBAL);
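
Finally, zap_low_mappings() clears idle_pg_table_l2[0], which the new BUG_ON checks still points at l1_identmap, and then flushes with FLUSH_TLB_GLOBAL; a plain CR3 reload would not suffice, because paging_init() has just marked the l1_identmap entries _PAGE_GLOBAL. Purely as an illustration of what a global flush has to do on hardware without INVPCID (this is not Xen's flush_all() implementation), one can toggle CR4.PGE:

/* Illustrative global TLB flush by toggling CR4.PGE (bit 7); clearing PGE
 * invalidates all TLB entries, including global ones. Not Xen code. */
static inline unsigned long read_cr4(void)
{
    unsigned long cr4;
    asm volatile ( "mov %%cr4, %0" : "=r" (cr4) );
    return cr4;
}

static inline void write_cr4(unsigned long cr4)
{
    asm volatile ( "mov %0, %%cr4" : : "r" (cr4) );
}

static void flush_tlb_global(void)
{
    unsigned long cr4 = read_cr4();
    write_cr4(cr4 & ~(1UL << 7)); /* clear CR4.PGE: drops global entries */
    write_cr4(cr4);               /* restore PGE */
}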