# HG changeset patch
# User Ryan Harper <ryanh@xxxxxxxxxx>
# Date 1172103252 21600
# Node ID 33f05ec503bfabccd119f06b30037b618f8d05b9
# Parent 35fd77200dff7e73fe3959b5dbfa6088c691c502
[PATCH] xen: implement guest_physmap_{add/remove}_page for ppc

Signed-off-by: Ryan Harper <ryanh@xxxxxxxxxx>

 xen/arch/powerpc/domain_build.c     |    4 +
 xen/arch/powerpc/mm.c               |  101 +++++++++++++++++++++++------------
 xen/arch/powerpc/ofd_fixup_memory.c |    8 --
 3 files changed, 73 insertions(+), 40 deletions(-)
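
This replaces the per-extent list walk in pfn2mfn() and mfn_to_gmfn()
with a flat p2m array indexed by guest pfn: forward translation becomes
a bounds-checked array read, and the reverse direction scans the array
in extent-sized strides instead of walking a linked list. The cost is
one array slot per guest page up to d->max_pages.

As a rough illustration of the data structure (not Xen code: the
demo_* names below are invented for this sketch; only p2m, max_pages
and INVALID_MFN mirror fields actually touched by the diff):

  #include <stdio.h>
  #include <stdlib.h>

  #define INVALID_MFN (~0UL)

  struct demo_domain {
      unsigned long *p2m;      /* gpfn -> mfn, indexed by gpfn */
      unsigned long max_pages; /* number of p2m entries */
  };

  /* models guest_physmap_add_page(): bounds-check, warn on alias, store */
  static void demo_physmap_add(struct demo_domain *d,
                               unsigned long gpfn, unsigned long mfn)
  {
      if (gpfn >= d->max_pages)
          return;
      if (d->p2m[gpfn] != INVALID_MFN)
          printf("pfn 0x%lx aliased: old mfn 0x%lx, new mfn 0x%lx\n",
                 gpfn, d->p2m[gpfn], mfn);
      d->p2m[gpfn] = mfn;
  }

  /* models the new pfn2mfn() logical-page path: an O(1) lookup */
  static unsigned long demo_pfn2mfn(struct demo_domain *d, unsigned long pfn)
  {
      return (pfn < d->max_pages) ? d->p2m[pfn] : INVALID_MFN;
  }

  int main(void)
  {
      struct demo_domain d = { .max_pages = 16 };
      unsigned long i;

      d.p2m = malloc(d.max_pages * sizeof(*d.p2m));
      for (i = 0; i < d.max_pages; i++)
          d.p2m[i] = INVALID_MFN;      /* start fully unmapped */

      demo_physmap_add(&d, 4, 0x1000); /* map gpfn 4 -> mfn 0x1000 */
      printf("pfn 4 -> mfn 0x%lx\n", demo_pfn2mfn(&d, 4));

      d.p2m[4] = INVALID_MFN;          /* guest_physmap_remove_page() analogue */
      printf("pfn 4 -> mfn 0x%lx\n", demo_pfn2mfn(&d, 4));

      free(d.p2m);
      return 0;
  }
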
diff -r 35fd77200dff -r 33f05ec503bf xen/arch/powerpc/domain_build.c
--- a/xen/arch/powerpc/domain_build.c Wed Feb 21 18:14:12 2007 -0600
+++ b/xen/arch/powerpc/domain_build.c Wed Feb 21 18:14:12 2007 -0600
@@ -178,10 +178,12 @@ int construct_dom0(struct domain *d,
rma_sz = rma_size(d->arch.rma_order);
rma = page_to_maddr(d->arch.rma_page);
- /* make sure we are at least as big as the RMA */
+ /* if the requested dom0 RAM amount exceeds the RMA, allocate
+ the rest in cpu-defined, extent-sized chunks */
if (dom0_nrpages > rma_nrpages)
dom0_nrpages = allocate_extents(d, dom0_nrpages, rma_nrpages);
+ /* make sure we are at least as big as the RMA */
ASSERT(d->tot_pages == dom0_nrpages);
ASSERT(d->tot_pages >= rma_nrpages);
diff -r 35fd77200dff -r 33f05ec503bf xen/arch/powerpc/mm.c
--- a/xen/arch/powerpc/mm.c Wed Feb 21 18:14:12 2007 -0600
+++ b/xen/arch/powerpc/mm.c Wed Feb 21 18:14:12 2007 -0600
@@ -319,11 +319,15 @@ void free_extents(struct domain *d)
}
}
+/* allocate (nrpages - rma_nrpages) more memory for the domain, in extent-sized chunks */
uint allocate_extents(struct domain *d, uint nrpages, uint rma_nrpages)
{
uint ext_order;
uint ext_nrpages;
uint total_nrpages;
+ ulong mfn;
+ ulong gpfn = rma_nrpages; /* starting gpfn is just past the RMA */
+ int i;
struct page_info *pg;
ext_order = cpu_extent_order();
@@ -338,10 +342,13 @@ uint allocate_extents(struct domain *d,
if (pg == NULL)
return total_nrpages;
- if (add_extent(d, pg, ext_order) < 0) {
- free_domheap_pages(pg, ext_order);
- return total_nrpages;
- }
+ /* build p2m mapping for newly allocated extent */
+ mfn = page_to_mfn(pg);
+ for ( i = 0; i < (1 << ext_order); i++ )
+ guest_physmap_add_page(d, gpfn + i, mfn + i);
+
+ /* bump starting pfn by extent size pages */
+ gpfn += ext_nrpages;
total_nrpages += ext_nrpages;
}
@@ -353,6 +360,7 @@ int allocate_rma(struct domain *d, unsig
struct vcpu *v;
ulong rma_base;
ulong rma_sz;
+ ulong mfn = INVALID_MFN;
int i;
if (d->arch.rma_page)
@@ -374,10 +382,15 @@ int allocate_rma(struct domain *d, unsig
printk("allocated RMA for Dom[%d]: 0x%lx[0x%lx]\n",
d->domain_id, rma_base, rma_sz);
+ mfn = page_to_mfn(d->arch.rma_page);
+
for (i = 0; i < (1 << d->arch.rma_order); i++ ) {
/* Add in any extra CPUs that need flushing because of this page. */
d->arch.rma_page[i].count_info |= PGC_page_RMA;
clear_page((void *)page_to_maddr(&d->arch.rma_page[i]));
+
+ /* setup p2m mapping for RMA */
+ guest_physmap_add_page(d, i, mfn+i);
}
d->shared_info = (shared_info_t *)
@@ -403,7 +416,6 @@ ulong pfn2mfn(struct domain *d, ulong pf
{
ulong rma_base_mfn = page_to_mfn(d->arch.rma_page);
ulong rma_size_mfn = 1UL << d->arch.rma_order;
- struct page_extents *pe;
ulong mfn = INVALID_MFN;
int t = PFN_TYPE_NONE;
ulong foreign_map_pfn = 1UL << cpu_foreign_map_order();
@@ -424,18 +436,9 @@ ulong pfn2mfn(struct domain *d, ulong pf
t = PFN_TYPE_RMA;
mfn = pfn + rma_base_mfn;
} else {
- ulong cur_pfn = rma_size_mfn;
-
- list_for_each_entry (pe, &d->arch.extent_list, pe_list) {
- uint pe_pages = 1UL << pe->order;
- uint end_pfn = cur_pfn + pe_pages;
-
- if (pfn >= cur_pfn && pfn < end_pfn) {
- t = PFN_TYPE_LOGICAL;
- mfn = page_to_mfn(pe->pg) + (pfn - cur_pfn);
- break;
- }
- cur_pfn += pe_pages;
+ if ( pfn < d->max_pages ) {
+ t = PFN_TYPE_LOGICAL;
+ mfn = d->arch.p2m[pfn];
}
}
#ifdef DEBUG
@@ -483,12 +486,13 @@ ulong pfn2mfn(struct domain *d, ulong pf
return mfn;
}
+/* aka mfn_to_pfn: translate a machine frame back to a guest pfn */
unsigned long mfn_to_gmfn(struct domain *d, unsigned long mfn)
{
- struct page_extents *pe;
ulong cur_pfn;
ulong gnttab_mfn;
ulong rma_mfn;
+ uint ext_nrpages = (1 << cpu_extent_order());
/* grant? */
gnttab_mfn = gnttab_shared_mfn(d, d->grant_table, 0);
@@ -504,17 +508,15 @@ unsigned long mfn_to_gmfn(struct domain
mfn < (rma_mfn + (1 << d->arch.rma_order)))
return mfn - rma_mfn;
- /* Extent? */
- cur_pfn = 1UL << d->arch.rma_order;
- list_for_each_entry (pe, &d->arch.extent_list, pe_list) {
- uint pe_pages = 1UL << pe->order;
- uint b_mfn = page_to_mfn(pe->pg);
- uint e_mfn = b_mfn + pe_pages;
-
- if (mfn >= b_mfn && mfn < e_mfn) {
+ /* check extents (cpu-defined contiguous chunks after RMA) */
+ cur_pfn = 1UL << d->arch.rma_order; /* start looking after RMA */
+ for ( ; cur_pfn < d->max_pages; cur_pfn += ext_nrpages )
+ {
+ uint b_mfn = d->arch.p2m[cur_pfn];
+ uint e_mfn = b_mfn + ext_nrpages;
+
+ if (mfn >= b_mfn && mfn < e_mfn)
return cur_pfn + (mfn - b_mfn);
- }
- cur_pfn += pe_pages;
}
return INVALID_M2P_ENTRY;
}
@@ -522,13 +524,48 @@ void guest_physmap_add_page(
void guest_physmap_add_page(
struct domain *d, unsigned long gpfn, unsigned long mfn)
{
- printk("%s(%d, 0x%lx, 0x%lx)\n", __func__, d->domain_id, gpfn, mfn);
-}
+ if ( page_get_owner(mfn_to_page(mfn)) != d )
+ {
+ printk("Won't map foriegn mfn 0x%lx for DOM%d\n", mfn, d->domain_id);
+ return;
+ }
+
+ /* check that pfn is within guest table */
+ if ( gpfn >= d->max_pages ) {
+ printk("Won't map invalid pfn 0x%lx for DOM%d\n", gpfn, d->domain_id);
+ return;
+ }
+
+ /* warn if there is an existing mapping */
+ /* XXX: probably shouldn't let this happen, but
+ current interface doesn't throw errors. =( */
+ if ( d->arch.p2m[gpfn] != INVALID_MFN )
+ printk("Ack! PFN aliased. pfn%lx, old mfn=%lx, new mfn=%lx\n",
+ gpfn, d->arch.p2m[gpfn], mfn);
+
+ /* pfn and mfn ok, map it */
+ d->arch.p2m[gpfn] = mfn;
+}
+
void guest_physmap_remove_page(
struct domain *d, unsigned long gpfn, unsigned long mfn)
{
- panic("%s\n", __func__);
-}
+ if ( page_get_owner(mfn_to_page(mfn)) != d )
+ {
+ printk("Won't unmap foriegn mfn 0x%lx for DOM%d\n", mfn, d->domain_id);
+ return;
+ }
+
+ /* check that pfn is within guest table */
+ if ( gpfn >= d->max_pages ) {
+ printk("Won't unmap invalid pfn 0x%lx for DOM%d\n", gpfn,
d->domain_id);
+ return;
+ }
+
+ /* pfn and mfn ok, unmap it */
+ d->arch.p2m[gpfn] = INVALID_MFN;
+}
+
void shadow_drop_references(
struct domain *d, struct page_info *page)
{
diff -r 35fd77200dff -r 33f05ec503bf xen/arch/powerpc/ofd_fixup_memory.c
--- a/xen/arch/powerpc/ofd_fixup_memory.c Wed Feb 21 18:14:12 2007 -0600
+++ b/xen/arch/powerpc/ofd_fixup_memory.c Wed Feb 21 18:14:12 2007 -0600
@@ -87,17 +87,11 @@ static void ofd_memory_extent_nodes(void
ulong start;
ulong size;
ofdn_t n;
- struct page_extents *pe;
ulong cur_pfn = 1UL << d->arch.rma_order;
start = cur_pfn << PAGE_SHIFT;
- size = 0;
- list_for_each_entry (pe, &d->arch.extent_list, pe_list) {
- size += 1UL << (pe->order + PAGE_SHIFT);
- if (pe->order != cpu_extent_order())
- panic("we don't handle this yet\n");
- }
+ size = (d->tot_pages - cur_pfn) << PAGE_SHIFT;
n = ofd_memory_node_create(m, OFD_ROOT, "", memory, memory,
start, size);
BUG_ON(n <= 0);