# HG changeset patch
# User kfraser@xxxxxxxxxxxxxxxxxxxxx
# Node ID 69388eba4c037a2c4fe2965e3d8b52b117c8028d
# Parent 02899109a3ace551b365fb9a1d7bf6d614e2a03a
[XEN] x86-64: Since all memory is visible to Xen on x86-64, there is
no need to use the special Xen heap for allocations specific to this
subarch.
Signed-off-by: Jan Beulich <jbeulich@xxxxxxxxxx>
---
xen/arch/x86/domain.c | 23 ++++++++++++-----------
xen/arch/x86/x86_64/mm.c | 36 ++++++++++++++++++++----------------
2 files changed, 32 insertions(+), 27 deletions(-)
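
For context, the core idiom change is the same in both files: instead of
taking a pre-mapped page from the Xen heap, take an anonymous page from
the domain heap and address it through the x86-64 direct mapping. A
minimal side-by-side sketch (using the same Xen primitives as the hunks
below; clear_page() is memset()-based and so evaluates to the address it
cleared):

    struct page_info *pg;
    void *p;

    /* Old: xenheap page, returned pre-mapped, or NULL on failure. */
    p = alloc_xenheap_page();
    if ( p == NULL )
        goto fail;
    memset(p, 0, PAGE_SIZE);

    /* New: anonymous domheap page (owner == NULL); page_to_virt() is
     * usable here because the direct map covers all memory on x86-64. */
    if ( (pg = alloc_domheap_page(NULL)) == NULL )
        goto fail;
    p = clear_page(page_to_virt(pg));
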
diff -r 02899109a3ac -r 69388eba4c03 xen/arch/x86/domain.c
--- a/xen/arch/x86/domain.c Mon Nov 13 13:40:21 2006 +0000
+++ b/xen/arch/x86/domain.c Mon Nov 13 13:50:14 2006 +0000
@@ -166,6 +166,9 @@ void vcpu_destroy(struct vcpu *v)
int arch_domain_create(struct domain *d)
{
+#ifdef __x86_64__
+ struct page_info *pg;
+#endif
l1_pgentry_t gdt_l1e;
int vcpuid, pdpt_order;
int i, rc = -ENOMEM;
@@ -194,19 +197,17 @@ int arch_domain_create(struct domain *d)
#else /* __x86_64__ */
- d->arch.mm_perdomain_l2 = alloc_xenheap_page();
- d->arch.mm_perdomain_l3 = alloc_xenheap_page();
- if ( (d->arch.mm_perdomain_l2 == NULL) ||
- (d->arch.mm_perdomain_l3 == NULL) )
+ if ( (pg = alloc_domheap_page(NULL)) == NULL )
goto fail;
-
- memset(d->arch.mm_perdomain_l2, 0, PAGE_SIZE);
+ d->arch.mm_perdomain_l2 = clear_page(page_to_virt(pg));
for ( i = 0; i < (1 << pdpt_order); i++ )
d->arch.mm_perdomain_l2[l2_table_offset(PERDOMAIN_VIRT_START)+i] =
l2e_from_page(virt_to_page(d->arch.mm_perdomain_pt)+i,
__PAGE_HYPERVISOR);
- memset(d->arch.mm_perdomain_l3, 0, PAGE_SIZE);
+ if ( (pg = alloc_domheap_page(NULL)) == NULL )
+ goto fail;
+ d->arch.mm_perdomain_l3 = clear_page(page_to_virt(pg));
d->arch.mm_perdomain_l3[l3_table_offset(PERDOMAIN_VIRT_START)] =
l3e_from_page(virt_to_page(d->arch.mm_perdomain_l2),
__PAGE_HYPERVISOR);
@@ -240,8 +241,8 @@ int arch_domain_create(struct domain *d)
fail:
free_xenheap_page(d->shared_info);
#ifdef __x86_64__
- free_xenheap_page(d->arch.mm_perdomain_l2);
- free_xenheap_page(d->arch.mm_perdomain_l3);
+ free_domheap_page(virt_to_page(d->arch.mm_perdomain_l2));
+ free_domheap_page(virt_to_page(d->arch.mm_perdomain_l3));
#endif
free_xenheap_pages(d->arch.mm_perdomain_pt, pdpt_order);
return rc;
@@ -265,8 +266,8 @@ void arch_domain_destroy(struct domain *
get_order_from_bytes(PDPT_L1_ENTRIES * sizeof(l1_pgentry_t)));
#ifdef __x86_64__
- free_xenheap_page(d->arch.mm_perdomain_l2);
- free_xenheap_page(d->arch.mm_perdomain_l3);
+ free_domheap_page(virt_to_page(d->arch.mm_perdomain_l2));
+ free_domheap_page(virt_to_page(d->arch.mm_perdomain_l3));
#endif
free_xenheap_page(d->shared_info);
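
On the release side the conversion runs the other way: the virtual
address obtained from page_to_virt() is mapped back to its struct
page_info with virt_to_page() before freeing. One point worth noting:
free_xenheap_page() ignores a NULL argument, whereas virt_to_page(NULL)
does not yield a valid page pointer, so a NULL-guarded sketch of the
same cleanup (not part of the patch) would be:

    /* Guarded variant: only translate and free pointers that were
     * actually allocated. */
    if ( d->arch.mm_perdomain_l2 != NULL )
        free_domheap_page(virt_to_page(d->arch.mm_perdomain_l2));
    if ( d->arch.mm_perdomain_l3 != NULL )
        free_domheap_page(virt_to_page(d->arch.mm_perdomain_l3));
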
diff -r 02899109a3ac -r 69388eba4c03 xen/arch/x86/x86_64/mm.c
--- a/xen/arch/x86/x86_64/mm.c Mon Nov 13 13:40:21 2006 +0000
+++ b/xen/arch/x86/x86_64/mm.c Mon Nov 13 13:50:14 2006 +0000
@@ -76,17 +76,17 @@ l2_pgentry_t *virt_to_xen_l2e(unsigned l
void __init paging_init(void)
{
- unsigned long i, mpt_size;
+ unsigned long i, mpt_size, va;
l3_pgentry_t *l3_ro_mpt;
l2_pgentry_t *l2_ro_mpt = NULL;
- struct page_info *pg;
+ struct page_info *l1_pg, *l2_pg;
/* Create user-accessible L2 directory to map the MPT for guests. */
- l3_ro_mpt = alloc_xenheap_page();
- clear_page(l3_ro_mpt);
+ if ( (l2_pg = alloc_domheap_page(NULL)) == NULL )
+ goto nomem;
+ l3_ro_mpt = clear_page(page_to_virt(l2_pg));
idle_pg_table[l4_table_offset(RO_MPT_VIRT_START)] =
- l4e_from_page(
- virt_to_page(l3_ro_mpt), __PAGE_HYPERVISOR | _PAGE_USER);
+ l4e_from_page(l2_pg, __PAGE_HYPERVISOR | _PAGE_USER);
/*
* Allocate and map the machine-to-phys table.
@@ -96,33 +96,37 @@ void __init paging_init(void)
mpt_size &= ~((1UL << L2_PAGETABLE_SHIFT) - 1UL);
for ( i = 0; i < (mpt_size >> L2_PAGETABLE_SHIFT); i++ )
{
- if ( (pg = alloc_domheap_pages(NULL, PAGETABLE_ORDER, 0)) == NULL )
- panic("Not enough memory for m2p table\n");
+ if ( (l1_pg = alloc_domheap_pages(NULL, PAGETABLE_ORDER, 0)) == NULL )
+ goto nomem;
map_pages_to_xen(
- RDWR_MPT_VIRT_START + (i << L2_PAGETABLE_SHIFT), page_to_mfn(pg),
+ RDWR_MPT_VIRT_START + (i << L2_PAGETABLE_SHIFT),
+ page_to_mfn(l1_pg),
1UL << PAGETABLE_ORDER,
PAGE_HYPERVISOR);
memset((void *)(RDWR_MPT_VIRT_START + (i << L2_PAGETABLE_SHIFT)), 0x55,
1UL << L2_PAGETABLE_SHIFT);
if ( !((unsigned long)l2_ro_mpt & ~PAGE_MASK) )
{
- unsigned long va = RO_MPT_VIRT_START + (i << L2_PAGETABLE_SHIFT);
-
- l2_ro_mpt = alloc_xenheap_page();
- clear_page(l2_ro_mpt);
+ if ( (l2_pg = alloc_domheap_page(NULL)) == NULL )
+ goto nomem;
+ va = RO_MPT_VIRT_START + (i << L2_PAGETABLE_SHIFT);
+ l2_ro_mpt = clear_page(page_to_virt(l2_pg));
l3_ro_mpt[l3_table_offset(va)] =
- l3e_from_page(
- virt_to_page(l2_ro_mpt), __PAGE_HYPERVISOR | _PAGE_USER);
+ l3e_from_page(l2_pg, __PAGE_HYPERVISOR | _PAGE_USER);
l2_ro_mpt += l2_table_offset(va);
}
/* NB. Cannot be GLOBAL as shadow_mode_translate reuses this area. */
*l2_ro_mpt++ = l2e_from_page(
- pg, /*_PAGE_GLOBAL|*/_PAGE_PSE|_PAGE_USER|_PAGE_PRESENT);
+ l1_pg, /*_PAGE_GLOBAL|*/_PAGE_PSE|_PAGE_USER|_PAGE_PRESENT);
}
/* Set up linear page table mapping. */
idle_pg_table[l4_table_offset(LINEAR_PT_VIRT_START)] =
l4e_from_paddr(__pa(idle_pg_table), __PAGE_HYPERVISOR);
+ return;
+
+ nomem:
+ panic("Not enough memory for m2p table\n");
}
void __init setup_idle_pagetable(void)
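
The paging_init() hunks also consolidate the out-of-memory handling:
every allocation failure now jumps to a single nomem label instead of
panicking at the call site. Reduced to its shape (a sketch, with the
mapping and loop details elided):

    void __init paging_init(void)
    {
        struct page_info *pg;

        if ( (pg = alloc_domheap_page(NULL)) == NULL )
            goto nomem;
        /* ... map and initialise; each later allocation likewise
         * fails over to nomem ... */
        return;

     nomem:
        panic("Not enough memory for m2p table\n");
    }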