ChangeSet 1.1498, 2005/05/21 10:51:43+01:00, kaf24@xxxxxxxxxxxxxxxxxxxx
Fix mapping of MPT for x86/64 guests. Remove a few more uses of
l?e_create_phys().
Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
 domain.c    |    4 ++--
 mm.c        |   14 ++++++++------
 x86_32/mm.c |   20 +++++++++-----------
 x86_64/mm.c |   50 ++++++++++++++++++++++----------------------------
 4 files changed, 41 insertions(+), 47 deletions(-)
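The core of the cleanup is the switch from the physical-address entry constructors (l?e_create_phys) to the struct-page ones (l?e_create_page). The sketch below is a minimal, self-contained illustration of why the two spellings are interchangeable for pages in the hypervisor's direct mapping; every definition in it (the PAGE_SHIFT value, the direct-map base, the frame_table layout) is a mock stand-in chosen for the example, not the real Xen macro.

/*
 * Illustrative mock, not Xen code.  The point is the equivalence the patch
 * relies on: for a direct-mapped page, __pa(v) >> PAGE_SHIFT and
 * page_to_pfn(virt_to_page(v)) name the same machine frame, so
 * l?e_create_phys(__pa(v), flags) and l?e_create_page(virt_to_page(v), flags)
 * build the same page-table entry.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT     12
#define PAGE_SIZE      (1UL << PAGE_SHIFT)
#define DIRECTMAP_VIRT 0x40000000UL          /* hypothetical direct-map base */

struct pfn_info { int dummy; };
static struct pfn_info frame_table[16];      /* mock frame table, frames 0..15 */

static unsigned long __pa(const void *v)             /* mock virt -> phys */
{
    return (unsigned long)(uintptr_t)v - DIRECTMAP_VIRT;
}

static struct pfn_info *virt_to_page(const void *v)  /* mock virt -> page */
{
    return &frame_table[__pa(v) >> PAGE_SHIFT];
}

static unsigned long page_to_pfn(const struct pfn_info *pg)
{
    return (unsigned long)(pg - frame_table);
}

int main(void)
{
    const void *v = (const void *)(uintptr_t)(DIRECTMAP_VIRT + 3 * PAGE_SIZE);

    /* Both constructors ultimately encode this same frame number. */
    assert(__pa(v) >> PAGE_SHIFT == page_to_pfn(virt_to_page(v)));
    printf("frame %lu\n", page_to_pfn(virt_to_page(v)));
    return 0;
}

In the hunks below, each l?e_create_phys(__pa(x), flags) call is rewritten as l?e_create_page(virt_to_page(x), flags) with the flags unchanged.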
diff -Nru a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
--- a/xen/arch/x86/domain.c 2005-05-22 13:03:01 -04:00
+++ b/xen/arch/x86/domain.c 2005-05-22 13:03:01 -04:00
@@ -275,12 +275,12 @@
d->arch.mm_perdomain_l2 = (l2_pgentry_t *)alloc_xenheap_page();
memset(d->arch.mm_perdomain_l2, 0, PAGE_SIZE);
d->arch.mm_perdomain_l2[l2_table_offset(PERDOMAIN_VIRT_START)] =
- l2e_create_phys(__pa(d->arch.mm_perdomain_pt),
+ l2e_create_page(virt_to_page(d->arch.mm_perdomain_pt),
__PAGE_HYPERVISOR);
d->arch.mm_perdomain_l3 = (l3_pgentry_t *)alloc_xenheap_page();
memset(d->arch.mm_perdomain_l3, 0, PAGE_SIZE);
d->arch.mm_perdomain_l3[l3_table_offset(PERDOMAIN_VIRT_START)] =
- l3e_create_phys(__pa(d->arch.mm_perdomain_l2),
+ l3e_create_page(virt_to_page(d->arch.mm_perdomain_l2),
__PAGE_HYPERVISOR);
#endif
diff -Nru a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c 2005-05-22 13:03:01 -04:00
+++ b/xen/arch/x86/mm.c 2005-05-22 13:03:01 -04:00
@@ -700,8 +700,9 @@
pl2e[l2_table_offset(LINEAR_PT_VIRT_START)] =
l2e_create_pfn(pfn, __PAGE_HYPERVISOR);
pl2e[l2_table_offset(PERDOMAIN_VIRT_START)] =
- l2e_create_phys(__pa(page_get_owner(page)->arch.mm_perdomain_pt),
- __PAGE_HYPERVISOR);
+ l2e_create_page(
+ virt_to_page(page_get_owner(page)->arch.mm_perdomain_pt),
+ __PAGE_HYPERVISOR);
#endif
unmap_domain_mem(pl2e);
@@ -770,8 +771,9 @@
pl4e[l4_table_offset(LINEAR_PT_VIRT_START)] =
l4e_create_pfn(pfn, __PAGE_HYPERVISOR);
pl4e[l4_table_offset(PERDOMAIN_VIRT_START)] =
- l4e_create_phys(__pa(page_get_owner(page)->arch.mm_perdomain_l3),
- __PAGE_HYPERVISOR);
+ l4e_create_page(
+ virt_to_page(page_get_owner(page)->arch.mm_perdomain_l3),
+ __PAGE_HYPERVISOR);
return 1;
@@ -2880,7 +2882,7 @@
{
pl1e = page_to_virt(alloc_xen_pagetable());
clear_page(pl1e);
- *pl2e = l2e_create_phys(__pa(pl1e), __PAGE_HYPERVISOR);
+ *pl2e = l2e_create_page(virt_to_page(pl1e), __PAGE_HYPERVISOR);
}
else if ( l2e_get_flags(*pl2e) & _PAGE_PSE )
{
@@ -2889,7 +2891,7 @@
pl1e[i] = l1e_create_pfn(
l2e_get_pfn(*pl2e) + i,
l2e_get_flags(*pl2e) & ~_PAGE_PSE);
- *pl2e = l2e_create_phys(__pa(pl1e), __PAGE_HYPERVISOR);
+ *pl2e = l2e_create_page(virt_to_page(pl1e), __PAGE_HYPERVISOR);
local_flush_tlb_pge();
}
diff -Nru a/xen/arch/x86/x86_32/mm.c b/xen/arch/x86/x86_32/mm.c
--- a/xen/arch/x86/x86_32/mm.c 2005-05-22 13:03:01 -04:00
+++ b/xen/arch/x86/x86_32/mm.c 2005-05-22 13:03:01 -04:00
@@ -58,13 +58,13 @@
{
void *ioremap_pt;
unsigned long v;
- struct pfn_info *pg;
+ struct pfn_info *m2p_pg;
/* Allocate and map the machine-to-phys table. */
- if ( (pg = alloc_domheap_pages(NULL, 10)) == NULL )
+ if ( (m2p_pg = alloc_domheap_pages(NULL, 10)) == NULL )
panic("Not enough memory to bootstrap Xen.\n");
idle_pg_table[l2_table_offset(RDWR_MPT_VIRT_START)] =
- l2e_create_phys(page_to_phys(pg), __PAGE_HYPERVISOR | _PAGE_PSE);
+ l2e_create_page(m2p_pg, __PAGE_HYPERVISOR | _PAGE_PSE);
memset((void *)RDWR_MPT_VIRT_START, 0x55, 4UL << 20);
/* Xen 4MB mappings can all be GLOBAL. */
@@ -82,27 +82,25 @@
ioremap_pt = (void *)alloc_xenheap_page();
clear_page(ioremap_pt);
idle_pg_table[l2_table_offset(IOREMAP_VIRT_START)] =
- l2e_create_phys(__pa(ioremap_pt), __PAGE_HYPERVISOR);
+ l2e_create_page(virt_to_page(ioremap_pt), __PAGE_HYPERVISOR);
- /* Create read-only mapping of MPT for guest-OS use.
+ /*
+ * Create read-only mapping of MPT for guest-OS use.
* NB. Remove the global bit so that shadow_mode_translate()==true domains
* can reused this address space for their phys-to-machine mapping.
*/
idle_pg_table[l2_table_offset(RO_MPT_VIRT_START)] =
- l2e_create_pfn(
- l2e_get_pfn(idle_pg_table[l2_table_offset(RDWR_MPT_VIRT_START)]),
- l2e_get_flags(idle_pg_table[l2_table_offset(RDWR_MPT_VIRT_START)])
- & ~(_PAGE_RW | _PAGE_GLOBAL));
+ l2e_create_page(m2p_pg, (__PAGE_HYPERVISOR | _PAGE_PSE) & ~_PAGE_RW);
/* Set up mapping cache for domain pages. */
mapcache = (l1_pgentry_t *)alloc_xenheap_page();
clear_page(mapcache);
idle_pg_table[l2_table_offset(MAPCACHE_VIRT_START)] =
- l2e_create_phys(__pa(mapcache), __PAGE_HYPERVISOR);
+ l2e_create_page(virt_to_page(mapcache), __PAGE_HYPERVISOR);
/* Set up linear page table mapping. */
idle_pg_table[l2_table_offset(LINEAR_PT_VIRT_START)] =
- l2e_create_phys(__pa(idle_pg_table), __PAGE_HYPERVISOR);
+ l2e_create_page(virt_to_page(idle_pg_table), __PAGE_HYPERVISOR);
}
void __init zap_low_mappings(void)
diff -Nru a/xen/arch/x86/x86_64/mm.c b/xen/arch/x86/x86_64/mm.c
--- a/xen/arch/x86/x86_64/mm.c 2005-05-22 13:03:01 -04:00
+++ b/xen/arch/x86/x86_64/mm.c 2005-05-22 13:03:01 -04:00
@@ -75,48 +75,42 @@
void __init paging_init(void)
{
unsigned long i;
- l3_pgentry_t *l3rw, *l3ro;
+ l3_pgentry_t *l3_ro_mpt;
+ l2_pgentry_t *l2_ro_mpt;
struct pfn_info *pg;
+ /* Create user-accessible L2 directory to map the MPT for guests. */
+ l3_ro_mpt = (l3_pgentry_t *)alloc_xenheap_page();
+ clear_page(l3_ro_mpt);
+ idle_pg_table[l4_table_offset(RO_MPT_VIRT_START)] =
+ l4e_create_page(
+ virt_to_page(l3_ro_mpt), __PAGE_HYPERVISOR | _PAGE_USER);
+ l2_ro_mpt = (l2_pgentry_t *)alloc_xenheap_page();
+ clear_page(l2_ro_mpt);
+ l3_ro_mpt[l3_table_offset(RO_MPT_VIRT_START)] =
+ l3e_create_page(
+ virt_to_page(l2_ro_mpt), __PAGE_HYPERVISOR | _PAGE_USER);
+ l2_ro_mpt += l2_table_offset(RO_MPT_VIRT_START);
+
/*
* Allocate and map the machine-to-phys table.
- * This also ensures L3 is present for ioremap().
+ * This also ensures L3 is present for fixmaps.
*/
for ( i = 0; i < max_page; i += ((1UL << L2_PAGETABLE_SHIFT) / 8) )
{
- pg = alloc_domheap_pages(
- NULL, L2_PAGETABLE_SHIFT - L1_PAGETABLE_SHIFT);
+ pg = alloc_domheap_pages(NULL, PAGETABLE_ORDER);
if ( pg == NULL )
panic("Not enough memory for m2p table\n");
map_pages_to_xen(
RDWR_MPT_VIRT_START + i*8, page_to_pfn(pg),
- 1UL << (L2_PAGETABLE_SHIFT - L1_PAGETABLE_SHIFT),
- PAGE_HYPERVISOR | _PAGE_USER);
+ 1UL << PAGETABLE_ORDER,
+ PAGE_HYPERVISOR);
memset((void *)(RDWR_MPT_VIRT_START + i*8), 0x55,
1UL << L2_PAGETABLE_SHIFT);
+ *l2_ro_mpt++ = l2e_create_page(
+ pg, _PAGE_GLOBAL|_PAGE_PSE|_PAGE_USER|_PAGE_PRESENT);
+ BUG_ON(((unsigned long)l2_ro_mpt & ~PAGE_MASK) == 0);
}
-
- /*
- * Above we mapped the M2P table as user-accessible and read-writable.
- * Fix security by denying user access at the top level of the page table.
- */
- l4e_remove_flags(&idle_pg_table[l4_table_offset(RDWR_MPT_VIRT_START)],
- _PAGE_USER);
-
- /* Create read-only mapping of MPT for guest-OS use. */
- l3ro = (l3_pgentry_t *)alloc_xenheap_page();
- clear_page(l3ro);
- idle_pg_table[l4_table_offset(RO_MPT_VIRT_START)] =
- l4e_create_phys(__pa(l3ro),
- (__PAGE_HYPERVISOR | _PAGE_USER) & ~_PAGE_RW);
-
- /* Copy the L3 mappings from the RDWR_MPT area. */
- l3rw = l4e_to_l3e(
- idle_pg_table[l4_table_offset(RDWR_MPT_VIRT_START)]);
- l3rw += l3_table_offset(RDWR_MPT_VIRT_START);
- l3ro += l3_table_offset(RO_MPT_VIRT_START);
- memcpy(l3ro, l3rw,
- (RDWR_MPT_VIRT_END - RDWR_MPT_VIRT_START) >> L3_PAGETABLE_SHIFT);
/* Set up linear page table mapping. */
idle_pg_table[l4_table_offset(LINEAR_PT_VIRT_START)] =
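One note on the reworked paging_init() loop in the x86_64 hunk above: an M2P entry is 8 bytes, so a single 2MB superpage of the RDWR_MPT area holds 2MB / 8 = 256K entries, which is why i strides by (1UL << L2_PAGETABLE_SHIFT) / 8 machine frames while the mapping address advances by i*8 bytes. A standalone check of that arithmetic, with the conventional x86_64 constants assumed rather than taken from the tree:

/* Standalone arithmetic check, not Xen code.  Constants are the conventional
 * x86_64 values and are assumptions here: 4K base pages, 2MB L2 superpages,
 * 8-byte machine-to-phys entries. */
#include <assert.h>
#include <stdio.h>

#define L1_PAGETABLE_SHIFT 12
#define L2_PAGETABLE_SHIFT 21
#define PAGETABLE_ORDER    (L2_PAGETABLE_SHIFT - L1_PAGETABLE_SHIFT)

int main(void)
{
    unsigned long superpage_bytes = 1UL << L2_PAGETABLE_SHIFT;   /* 2 MB  */
    unsigned long frames_per_iter = superpage_bytes / 8;         /* 256 K */

    /* One loop iteration advances i by frames_per_iter machine frames, and
     * the mapping at RDWR_MPT_VIRT_START + i*8 advances by exactly one
     * superpage's worth of 8-byte M2P entries. */
    assert(frames_per_iter * 8 == superpage_bytes);
    assert(PAGETABLE_ORDER == 9);            /* 512 L1 pages per L2 slot */
    printf("machine frames covered per 2MB chunk: %lu\n", frames_per_iter);
    return 0;
}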