# HG changeset patch
# User Hollis Blanchard <hollisb@xxxxxxxxxx>
# Node ID 5fe200a45698143f3614cad2b206d4f285a9d31c
# Parent e58d85332e0cc2a4696eb14a6adbc9086d427573
[XEN][POWERPC] split out an allocate_rma() function from arch_domain_create()
Signed-off-by: Hollis Blanchard <hollisb@xxxxxxxxxx>
---
xen/arch/powerpc/domain.c | 34 ++++++++++++++--------------------
xen/arch/powerpc/mm.c | 23 +++++++++++++++++++++++
xen/include/asm-powerpc/mm.h | 2 ++
3 files changed, 39 insertions(+), 20 deletions(-)
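For reference, a minimal sketch of how the new helper is called (mirroring the arch_domain_create() call site in the diff below; illustrative only, not part of the applied patch):

    /* allocate_rma() allocates 2^order_pages pages from the domain heap,
     * records them in d->arch.rma_page / d->arch.rma_order, BUG()s on
     * misalignment, and zeroes the region. */
    int rc = allocate_rma(d, cpu_default_rma_order_pages());
    if (rc)
        return rc;   /* -ENOMEM if the domheap allocation failed */
    rma_base = page_to_maddr(d->arch.rma_page);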
diff -r e58d85332e0c -r 5fe200a45698 xen/arch/powerpc/domain.c
--- a/xen/arch/powerpc/domain.c Fri Aug 25 14:48:07 2006 -0500
+++ b/xen/arch/powerpc/domain.c Fri Aug 25 15:09:36 2006 -0500
@@ -76,8 +76,9 @@ int arch_domain_create(struct domain *d)
{
unsigned long rma_base;
unsigned long rma_sz;
- uint htab_order;
- uint nr_pages;
+ uint rma_order_pages;
+ uint htab_order_pages;
+ int rc;
if (d->domain_id == IDLE_DOMAIN_ID) {
d->shared_info = (void *)alloc_xenheap_page();
@@ -86,23 +87,16 @@ int arch_domain_create(struct domain *d)
return 0;
}
- d->arch.rma_order = cpu_default_rma_order_pages();
- rma_sz = rma_size(d->arch.rma_order);
-
/* allocate the real mode area */
- nr_pages = 1UL << d->arch.rma_order;
- d->max_pages = nr_pages;
+ rma_order_pages = cpu_default_rma_order_pages();
+ d->max_pages = 1UL << rma_order_pages;
d->tot_pages = 0;
- d->arch.rma_page = alloc_domheap_pages(d, d->arch.rma_order, 0);
- if (NULL == d->arch.rma_page)
- return 1;
-
+
+ rc = allocate_rma(d, rma_order_pages);
+ if (rc)
+ return rc;
rma_base = page_to_maddr(d->arch.rma_page);
-
- BUG_ON(rma_base & (rma_sz - 1)); /* check alignment */
-
- printk("clearing RMO: 0x%lx[0x%lx]\n", rma_base, rma_sz);
- memset((void *)rma_base, 0, rma_sz);
+ rma_sz = rma_size(rma_order_pages);
d->shared_info = (shared_info_t *)
(rma_addr(&d->arch, RMA_SHARED_INFO) + rma_base);
@@ -113,12 +107,12 @@ int arch_domain_create(struct domain *d)
/* FIXME: we need to know the maximum addressable memory for this
* domain to calculate this correctly. It should probably be set
* by the management tools */
- htab_order = d->arch.rma_order - 6; /* (1/64) */
+ htab_order_pages = rma_order_pages - 6; /* (1/64) */
if (test_bit(_DOMF_privileged, &d->domain_flags)) {
/* bump the htab size of privileged domains */
- ++htab_order;
- }
- htab_alloc(d, htab_order);
+ ++htab_order_pages;
+ }
+ htab_alloc(d, htab_order_pages);
return 0;
}
diff -r e58d85332e0c -r 5fe200a45698 xen/arch/powerpc/mm.c
--- a/xen/arch/powerpc/mm.c Fri Aug 25 14:48:07 2006 -0500
+++ b/xen/arch/powerpc/mm.c Fri Aug 25 15:09:36 2006 -0500
@@ -239,6 +239,29 @@ static int mfn_in_hole(ulong mfn)
return 0;
}
+int allocate_rma(struct domain *d, unsigned int order_pages)
+{
+ ulong rma_base;
+ ulong rma_sz = rma_size(order_pages);
+
+ d->arch.rma_page = alloc_domheap_pages(d, order_pages, 0);
+ if (d->arch.rma_page == NULL) {
+ DPRINTK("Could not allocate order_pages=%d RMA for domain %u\n",
+ order_pages, d->domain_id);
+ return -ENOMEM;
+ }
+ d->arch.rma_order = order_pages;
+
+ rma_base = page_to_maddr(d->arch.rma_page);
+ BUG_ON(rma_base & (rma_sz - 1)); /* check alignment */
+
+ /* XXX */
+ printk("clearing RMA: 0x%lx[0x%lx]\n", rma_base, rma_sz);
+ memset((void *)rma_base, 0, rma_sz);
+
+ return 0;
+}
+
ulong pfn2mfn(struct domain *d, long pfn, int *type)
{
ulong rma_base_mfn = page_to_mfn(d->arch.rma_page);
diff -r e58d85332e0c -r 5fe200a45698 xen/include/asm-powerpc/mm.h
--- a/xen/include/asm-powerpc/mm.h Fri Aug 25 14:48:07 2006 -0500
+++ b/xen/include/asm-powerpc/mm.h Fri Aug 25 15:09:36 2006 -0500
@@ -258,6 +258,8 @@ static inline unsigned long gmfn_to_mfn(
#define mfn_to_gmfn(_d, mfn) (mfn)
+extern int allocate_rma(struct domain *d, unsigned int order_pages);
+
extern int steal_page(struct domain *d, struct page_info *page,
unsigned int memflags);