14 files changed, 237 insertions(+), 145 deletions(-)
xen/arch/powerpc/Makefile | 1
xen/arch/powerpc/domain.c | 3
xen/arch/powerpc/domain_build.c | 26 ++++
xen/arch/powerpc/domctl.c | 18 ++-
xen/arch/powerpc/iommu.c | 3
xen/arch/powerpc/memory.c | 1
xen/arch/powerpc/mm.c | 192 ++++++++++++++++++-----------------
xen/arch/powerpc/ofd_fixup_memory.c | 33 ++++--
xen/arch/powerpc/platform.c | 35 ++++++
xen/arch/powerpc/powerpc64/ppc970.c | 8 -
xen/common/memory.c | 3
xen/include/asm-powerpc/domain.h | 3
xen/include/asm-powerpc/mm.h | 29 +----
xen/include/asm-powerpc/platform.h | 27 ++++
# HG changeset patch
# User Ryan Harper <ryanh@xxxxxxxxxx>
# Date 1172776732 21600
# Node ID 76a5923eaed2cc0f3ea2f9b1ac26c82fd8e7bc2b
# Parent 539e61f7482e832ffc372f0fa8d745202f86baa4
[PATCH] xen: implement guest_physmap_{add/remove}_page for ppc
Introduce a machine to guest physical (m2p) array. This array is the
same size (# of elements) as the frame table, and maps mfns to a guest pfn. This
m2p array is used in the mfn_to_gmfn() function. With the existence of both an
m2p and p2m table, we implement the ppc version of
guest_physmap_{add/remove}_page().
Modify construct_dom0 to initialize the p2m array. In doing so, we need to
account for the platform iohole in dom0 by introducing
platform_iohole_{base/size}() functions.
If dom0_mem is larger than the start of the platform iohole, then we allocate
platform_iohole_size() space in the p2m array and shift any memory above the
platform_iohole_base() past the end of the iohole. This is reflected in the
p2m_size and when we allocate dom0's memory, account for this shifting (this is
done in allocate_extents()).
To ensure proper dom0 memory alignment w.r.t extents, introduce a macro,
EXTENT_UP(), which will round dom0's requested memory size up to the next cpu
extent order and inform users we've done so.
With the above in place, we eliminate the domain extent list, update dom0's
device tree memory node construction accordingly, simplify pfn2mfn() and
mfn_to_gmfn() by removing the extent list walking, killed extent list struct and
extent functions. As a nice side-effect, we've increased mapping performance.
Signed-off-by: Ryan Harper <ryanh@xxxxxxxxxx>
diff -r 539e61f7482e -r 76a5923eaed2 xen/arch/powerpc/Makefile
--- a/xen/arch/powerpc/Makefile Thu Mar 01 13:18:52 2007 -0600
+++ b/xen/arch/powerpc/Makefile Thu Mar 01 13:18:52 2007 -0600
@@ -33,6 +33,7 @@ obj-y += ofd_fixup.o
obj-y += ofd_fixup.o
obj-y += ofd_fixup_memory.o
obj-y += physdev.o
+obj-y += platform.o
obj-y += rtas.o
obj-y += setup.o
obj-y += shadow.o
diff -r 539e61f7482e -r 76a5923eaed2 xen/arch/powerpc/domain.c
--- a/xen/arch/powerpc/domain.c Thu Mar 01 13:18:52 2007 -0600
+++ b/xen/arch/powerpc/domain.c Thu Mar 01 13:18:52 2007 -0600
@@ -87,8 +87,6 @@ int arch_domain_create(struct domain *d)
d->arch.large_page_sizes = cpu_large_page_orders(
d->arch.large_page_order, ARRAY_SIZE(d->arch.large_page_order));
- INIT_LIST_HEAD(&d->arch.extent_list);
-
d->arch.foreign_mfn_count = 1024;
d->arch.foreign_mfns = xmalloc_array(uint, d->arch.foreign_mfn_count);
BUG_ON(d->arch.foreign_mfns == NULL);
@@ -310,7 +308,6 @@ void domain_relinquish_resources(struct
{
relinquish_memory(d, &d->xenpage_list);
relinquish_memory(d, &d->page_list);
- free_extents(d);
xfree(d->arch.foreign_mfns);
return;
}
diff -r 539e61f7482e -r 76a5923eaed2 xen/arch/powerpc/domain_build.c
--- a/xen/arch/powerpc/domain_build.c Thu Mar 01 13:18:52 2007 -0600
+++ b/xen/arch/powerpc/domain_build.c Thu Mar 01 13:18:52 2007 -0600
@@ -16,6 +16,7 @@
* Copyright IBM Corp. 2005, 2007
*
* Authors: Jimi Xenidis <jimix@xxxxxxxxxxxxxx>
+ * Ryan Harper <ryanh@xxxxxxxxxx>
*/
#include <xen/config.h>
@@ -27,7 +28,9 @@
#include <xen/shadow.h>
#include <xen/domain.h>
#include <xen/version.h>
+#include <xen/shadow.h>
#include <asm/processor.h>
+#include <asm/platform.h>
#include <asm/papr.h>
#include <public/arch-powerpc.h>
#include <public/libelf.h>
@@ -52,6 +55,9 @@ boolean_param("dom0_shadow", opt_dom0_sh
/* adapted from common/elf.c */
#define RM_MASK(a,l) ((a) & ((1UL << (l)) - 1))
+
+#define EXTENT_SIZE (1 << cpu_extent_order())
+#define EXTENT_UP(x) ((((x)+(EXTENT_SIZE-1))>>cpu_extent_order())*EXTENT_SIZE)
int construct_dom0(struct domain *d,
unsigned long image_start, unsigned long image_len,
@@ -71,6 +77,7 @@ int construct_dom0(struct domain *d,
ulong eomem;
int preempt = 0;
int vcpu;
+ ulong p2m_size;
/* Sanity! */
BUG_ON(d->domain_id != 0);
@@ -114,6 +121,25 @@ int construct_dom0(struct domain *d,
printk("Forcing DOM0 memory size to %u MiB\n",
((rma_nrpages << PAGE_SHIFT) >> 20));
}
+
+ /* ensure dom0 is cpu_extent_order aligned, round up if
+ not and let user know we did so */
+ if ( dom0_nrpages != EXTENT_UP(dom0_nrpages)) {
+ printk("Aligning DOM0 to cpu extent order restrictions\n");
+ dom0_nrpages = EXTENT_UP(dom0_nrpages);
+ printk("Forcing DOM0 memory size to %u MiB\n",
+ ((dom0_nrpages << PAGE_SHIFT) >> 20));
+ }
+
+ /* if we extend into IO range, add in IO range */
+ p2m_size = dom0_nrpages;
+ if (p2m_size > (platform_iohole_size() >> PAGE_SHIFT))
+ p2m_size += (platform_iohole_size() >> PAGE_SHIFT);
+
+
+ /* set DOM0 max mem, triggering p2m table creation */
+ if ((guest_physmap_max_mem_pages(d, p2m_size)) != 0)
+ panic("Failed to set DOM0 max mem pages value\n");
d->max_pages = dom0_nrpages;
if (0 > allocate_rma(d, cpu_default_rma_order_pages()))
diff -r 539e61f7482e -r 76a5923eaed2 xen/arch/powerpc/domctl.c
--- a/xen/arch/powerpc/domctl.c Thu Mar 01 13:18:52 2007 -0600
+++ b/xen/arch/powerpc/domctl.c Thu Mar 01 13:18:52 2007 -0600
@@ -13,9 +13,10 @@
* along with this program; if not, write to the Free Software
* Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
- * Copyright (C) IBM Corp. 2005
+ * Copyright IBM Corp. 2005, 2007
*
* Authors: Jimi Xenidis <jimix@xxxxxxxxxxxxxx>
+ * Ryan Harper <ryanh@xxxxxxxxxx>
*/
#include <xen/config.h>
@@ -50,7 +51,6 @@ long arch_do_domctl(struct xen_domctl *d
struct domain *d = get_domain_by_id(domctl->domain);
unsigned long max_pfns = domctl->u.getmemlist.max_pfns;
uint64_t mfn;
- struct list_head *list_ent;
ret = -EINVAL;
if ( d != NULL )
@@ -58,18 +58,22 @@ long arch_do_domctl(struct xen_domctl *d
ret = 0;
spin_lock(&d->page_alloc_lock);
- list_ent = d->page_list.next;
- for ( i = 0; (i < max_pfns) && (list_ent != &d->page_list); i++ )
+ for ( i = 0; i < max_pfns; i++ )
{
- mfn = page_to_mfn(list_entry(
- list_ent, struct page_info, list));
+
+ /* bail if index is beyond p2m size */
+ if (i >= d->arch.p2m_size)
+ break;
+
+ /* translate */
+ mfn = d->arch.p2m[i];
+
if ( copy_to_guest_offset(domctl->u.getmemlist.buffer,
i, &mfn, 1) )
{
ret = -EFAULT;
break;
}
- list_ent = mfn_to_page(mfn)->list.next;
}
spin_unlock(&d->page_alloc_lock);
diff -r 539e61f7482e -r 76a5923eaed2 xen/arch/powerpc/iommu.c
--- a/xen/arch/powerpc/iommu.c Thu Mar 01 13:18:52 2007 -0600
+++ b/xen/arch/powerpc/iommu.c Thu Mar 01 13:18:52 2007 -0600
@@ -13,7 +13,7 @@
* along with this program; if not, write to the Free Software
* Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
- * Copyright (C) IBM Corp. 2005
+ * Copyright IBM Corp. 2005, 2007
*
* Authors: Jimi Xenidis <jimix@xxxxxxxxxxxxxx>
*/
@@ -62,7 +62,6 @@ int iommu_put(u32 buid, ulong ioba, unio
mfn = pfn2mfn(d, gmfn, &mtype);
if (mfn != INVALID_MFN) {
switch (mtype) {
- case PFN_TYPE_RMA:
case PFN_TYPE_LOGICAL:
break;
case PFN_TYPE_FOREIGN:
diff -r 539e61f7482e -r 76a5923eaed2 xen/arch/powerpc/memory.c
--- a/xen/arch/powerpc/memory.c Thu Mar 01 13:18:52 2007 -0600
+++ b/xen/arch/powerpc/memory.c Thu Mar 01 13:18:52 2007 -0600
@@ -176,6 +176,7 @@ void memory_init(module_t *mod, int mcou
DBG("total_pages: 0x%016lx\n", total_pages);
init_frametable();
+ init_machine_to_phys_table();
numa_initmem_init(0, max_page);
diff -r 539e61f7482e -r 76a5923eaed2 xen/arch/powerpc/mm.c
--- a/xen/arch/powerpc/mm.c Thu Mar 01 13:18:52 2007 -0600
+++ b/xen/arch/powerpc/mm.c Thu Mar 01 13:18:52 2007 -0600
@@ -13,10 +13,11 @@
* along with this program; if not, write to the Free Software
* Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
- * Copyright (C) IBM Corp. 2005, 2006
+ * Copyright (C) IBM Corp. 2005, 2006, 2007
*
* Authors: Hollis Blanchard <hollisb@xxxxxxxxxx>
* Jimi Xenidis <jimix@xxxxxxxxxxxxxx>
+ * Ryan Harper <ryanh@xxxxxxxxxx>
*/
#include <xen/config.h>
@@ -28,6 +29,7 @@
#include <asm/init.h>
#include <asm/page.h>
#include <asm/string.h>
+#include <asm/platform.h>
#include <public/arch-powerpc.h>
#ifdef VERBOSE
@@ -43,6 +45,9 @@ unsigned long max_page;
unsigned long max_page;
unsigned long total_pages;
+/* machine to phys mapping to used by all domains */
+unsigned long *machine_phys_mapping;
+
void __init init_frametable(void)
{
unsigned long p;
@@ -58,6 +63,25 @@ void __init init_frametable(void)
frame_table = (struct page_info *)(p << PAGE_SHIFT);
for (i = 0; i < nr_pages; i += 1)
clear_page((void *)((p + i) << PAGE_SHIFT));
+}
+
+/* array of pfns, indexed by mfn */
+void __init init_machine_to_phys_table(void)
+{
+ unsigned long p;
+ unsigned long nr_pages;
+ int i;
+
+ nr_pages = PFN_UP(max_page * sizeof(unsigned long));
+
+ p = alloc_boot_pages(nr_pages, 1);
+ if (p == 0)
+ panic("Not enough memory for machine phys mapping table\n");
+
+ machine_phys_mapping = (unsigned long *)(p << PAGE_SHIFT);
+ for (i = 0; i < nr_pages; i += 1)
+ clear_page((void *)((p + i) << PAGE_SHIFT));
+
}
void share_xen_page_with_guest(
@@ -285,45 +309,17 @@ extern void copy_page(void *dp, void *sp
}
}
-/* XXX should probably replace with faster data structure */
-static uint add_extent(struct domain *d, struct page_info *pg, uint order)
-{
- struct page_extents *pe;
-
- pe = xmalloc(struct page_extents);
- if (pe == NULL)
- return -ENOMEM;
-
- pe->pg = pg;
- pe->order = order;
-
- list_add_tail(&pe->pe_list, &d->arch.extent_list);
-
- return 0;
-}
-
-void free_extents(struct domain *d)
-{
- /* we just need to free the memory behind list */
- struct list_head *list;
- struct list_head *ent;
- struct list_head *next;
-
- list = &d->arch.extent_list;
- ent = list->next;
-
- while (ent != list) {
- next = ent->next;
- xfree(ent);
- ent = next;
- }
-}
-
+/* allocate rma_nrpages - nrpages more memory for domain in proper size */
uint allocate_extents(struct domain *d, uint nrpages, uint rma_nrpages)
{
uint ext_order;
uint ext_nrpages;
uint total_nrpages;
+ ulong mfn;
+ ulong gpfn = rma_nrpages; /* starting pfn at end of RMA */
+ ulong iobase_page = platform_iohole_base() >> PAGE_SHIFT;
+ ulong iobase_sz = platform_iohole_size() >> PAGE_SHIFT;
+ int i;
struct page_info *pg;
ext_order = cpu_extent_order();
@@ -338,11 +334,19 @@ uint allocate_extents(struct domain *d,
if (pg == NULL)
return total_nrpages;
- if (add_extent(d, pg, ext_order) < 0) {
- free_domheap_pages(pg, ext_order);
- return total_nrpages;
- }
+ /* build p2m mapping for newly allocated extent */
+ mfn = page_to_mfn(pg);
+ for (i = 0; i < (1 << ext_order); i++)
+ guest_physmap_add_page(d, gpfn + i, mfn + i);
+
+ /* bump starting pfn by extent size pages */
+ gpfn += ext_nrpages;
+
total_nrpages += ext_nrpages;
+
+ /* if the current gpfn falls within the iohole, offset it */
+ if ((gpfn >= iobase_page) && (gpfn < (iobase_page + iobase_sz)))
+ gpfn += iobase_sz;
}
return total_nrpages;
@@ -353,6 +357,7 @@ int allocate_rma(struct domain *d, unsig
struct vcpu *v;
ulong rma_base;
ulong rma_sz;
+ ulong mfn = INVALID_MFN;
int i;
if (d->arch.rma_page)
@@ -374,10 +379,15 @@ int allocate_rma(struct domain *d, unsig
printk("allocated RMA for Dom[%d]: 0x%lx[0x%lx]\n",
d->domain_id, rma_base, rma_sz);
+ mfn = page_to_mfn(d->arch.rma_page);
+
for (i = 0; i < (1 << d->arch.rma_order); i++ ) {
/* Add in any extra CPUs that need flushing because of this page. */
d->arch.rma_page[i].count_info |= PGC_page_RMA;
clear_page((void *)page_to_maddr(&d->arch.rma_page[i]));
+
+ /* setup p2m mapping for RMA */
+ guest_physmap_add_page(d, i, mfn+i);
}
d->shared_info = (shared_info_t *)
@@ -401,9 +411,6 @@ void free_rma_check(struct page_info *pa
ulong pfn2mfn(struct domain *d, ulong pfn, int *type)
{
- ulong rma_base_mfn = page_to_mfn(d->arch.rma_page);
- ulong rma_size_mfn = 1UL << d->arch.rma_order;
- struct page_extents *pe;
ulong mfn = INVALID_MFN;
int t = PFN_TYPE_NONE;
ulong foreign_map_pfn = 1UL << cpu_foreign_map_order();
@@ -420,23 +427,9 @@ ulong pfn2mfn(struct domain *d, ulong pf
t = PFN_TYPE_IO;
mfn = pfn;
} else {
- if (pfn < rma_size_mfn) {
- t = PFN_TYPE_RMA;
- mfn = pfn + rma_base_mfn;
- } else {
- ulong cur_pfn = rma_size_mfn;
-
- list_for_each_entry (pe, &d->arch.extent_list, pe_list) {
- uint pe_pages = 1UL << pe->order;
- uint end_pfn = cur_pfn + pe_pages;
-
- if (pfn >= cur_pfn && pfn < end_pfn) {
- t = PFN_TYPE_LOGICAL;
- mfn = page_to_mfn(pe->pg) + (pfn - cur_pfn);
- break;
- }
- cur_pfn += pe_pages;
- }
+ if (pfn < d->arch.p2m_size) {
+ t = PFN_TYPE_LOGICAL;
+ mfn = d->arch.p2m[pfn];
}
#ifdef DEBUG
if (t != PFN_TYPE_NONE &&
@@ -483,12 +476,15 @@ ulong pfn2mfn(struct domain *d, ulong pf
return mfn;
}
+/* mfn_to_pfn */
unsigned long mfn_to_gmfn(struct domain *d, unsigned long mfn)
{
- struct page_extents *pe;
- ulong cur_pfn;
+ struct page_info *pg = mfn_to_page(mfn);
ulong gnttab_mfn;
- ulong rma_mfn;
+
+ /* is this our mfn? */
+ if (page_get_owner(pg) != d)
+ return INVALID_M2P_ENTRY;
/* grant? */
gnttab_mfn = gnttab_shared_mfn(d, d->grant_table, 0);
@@ -499,44 +495,58 @@ unsigned long mfn_to_gmfn(struct domain
if (d->is_privileged && cpu_io_mfn(mfn))
return mfn;
- rma_mfn = page_to_mfn(d->arch.rma_page);
- if (mfn >= rma_mfn &&
- mfn < (rma_mfn + (1 << d->arch.rma_order)))
- return mfn - rma_mfn;
-
- /* Extent? */
- cur_pfn = 1UL << d->arch.rma_order;
- list_for_each_entry (pe, &d->arch.extent_list, pe_list) {
- uint pe_pages = 1UL << pe->order;
- uint b_mfn = page_to_mfn(pe->pg);
- uint e_mfn = b_mfn + pe_pages;
-
- if (mfn >= b_mfn && mfn < e_mfn) {
- return cur_pfn + (mfn - b_mfn);
- }
- cur_pfn += pe_pages;
- }
- return INVALID_M2P_ENTRY;
+ /* check m2p table */
+ return get_gpfn_from_mfn(mfn);
}
void guest_physmap_add_page(
struct domain *d, unsigned long gpfn, unsigned long mfn)
{
- printk("%s(%d, 0x%lx, 0x%lx)\n", __func__, d->domain_id, gpfn, mfn);
-}
+ if (page_get_owner(mfn_to_page(mfn)) != d) {
+ printk("Won't map foriegn mfn 0x%lx for DOM%d\n", mfn, d->domain_id);
+ return;
+ }
+
+ /* check that pfn is within guest table */
+ if (gpfn >= d->arch.p2m_size) {
+ printk("Won't map invalid pfn 0x%lx for DOM%d\n", gpfn, d->domain_id);
+ return;
+ }
+
+ /* warn if there is an existing mapping */
+ /* XXX: probably shouldn't let this happen, but
+ current interface doesn't throw errors. =( */
+ if (d->arch.p2m[gpfn] != INVALID_MFN)
+ printk("Ack! PFN aliased. pfn%lx, old mfn=%lx, new mfn=%lx\n",
+ gpfn, d->arch.p2m[gpfn], mfn);
+
+ /* pfn and mfn ok, map p2m */
+ d->arch.p2m[gpfn] = mfn;
+ /* map m2p */
+ set_gpfn_from_mfn(mfn, gpfn);
+}
+
void guest_physmap_remove_page(
struct domain *d, unsigned long gpfn, unsigned long mfn)
{
- panic("%s\n", __func__);
-}
+ if (page_get_owner(mfn_to_page(mfn)) != d) {
+ printk("Won't unmap foriegn mfn 0x%lx for DOM%d\n", mfn, d->domain_id);
+ return;
+ }
+
+ /* check that pfn is within guest table */
+ if (gpfn >= d->arch.p2m_size) {
+        printk("Won't unmap invalid pfn 0x%lx for DOM%d\n", gpfn,
+               d->domain_id);
+ return;
+ }
+
+ /* pfn and mfn ok, unmap p2m */
+ d->arch.p2m[gpfn] = INVALID_MFN;
+ /* unmap m2p */
+ set_gpfn_from_mfn(mfn, INVALID_M2P_ENTRY);
+}
+
void shadow_drop_references(
struct domain *d, struct page_info *page)
{
}
-
-int arch_domain_add_extent(struct domain *d, struct page_info *page, int order)
-{
- if (add_extent(d, page, order) < 0)
- return -ENOMEM;
- return 0;
-}
diff -r 539e61f7482e -r 76a5923eaed2 xen/arch/powerpc/ofd_fixup_memory.c
--- a/xen/arch/powerpc/ofd_fixup_memory.c Thu Mar 01 13:18:52 2007 -0600
+++ b/xen/arch/powerpc/ofd_fixup_memory.c Thu Mar 01 13:18:52 2007 -0600
@@ -13,14 +13,16 @@
* along with this program; if not, write to the Free Software
* Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
- * Copyright (C) IBM Corp. 2006
+ * Copyright (C) IBM Corp. 2006, 2007
*
* Authors: Jimi Xenidis <jimix@xxxxxxxxxxxxxx>
+ * Ryan Harper <ryanh@xxxxxxxxxx>
*/
#include <xen/config.h>
#include <xen/lib.h>
#include <xen/sched.h>
+#include <asm/platform.h>
#include <public/xen.h>
#include "of-devtree.h"
#include "oftree.h"
@@ -87,19 +89,30 @@ static void ofd_memory_extent_nodes(void
ulong start;
ulong size;
ofdn_t n;
- struct page_extents *pe;
ulong cur_pfn = 1UL << d->arch.rma_order;
- start = cur_pfn << PAGE_SHIFT;
- size = 0;
- list_for_each_entry (pe, &d->arch.extent_list, pe_list) {
+ /* if dom0 > 2G, shift ram past IO hole */
+ if ((d->tot_pages << PAGE_SHIFT) > platform_iohole_base()) {
+ /* memory@RMA up to IO hole */
+ start = cur_pfn << PAGE_SHIFT;
+ size = platform_iohole_base() - (cur_pfn << PAGE_SHIFT);
+ n = ofd_memory_node_create(m, OFD_ROOT, "", memory, memory,
+ start, size);
- size += 1UL << (pe->order + PAGE_SHIFT);
- if (pe->order != cpu_extent_order())
- panic("we don't handle this yet\n");
+ BUG_ON(n <= 0);
+
+ /* remaining memory shifted up to memory@IOHOLE_END */
+ start = platform_iohole_base()+platform_iohole_size();
+ size = (d->tot_pages << PAGE_SHIFT) - platform_iohole_base();
+ n = ofd_memory_node_create(m, OFD_ROOT, "", memory, memory,
+ start, size);
+ } else {
+ /* we fit beneath the IO hole as one chunk */
+ start = cur_pfn << PAGE_SHIFT;
+ size = (d->tot_pages - cur_pfn) << PAGE_SHIFT;
+ n = ofd_memory_node_create(m, OFD_ROOT, "", memory, memory,
+ start, size);
}
- n = ofd_memory_node_create(m, OFD_ROOT, "", memory, memory,
- start, size);
BUG_ON(n <= 0);
}
diff -r 539e61f7482e -r 76a5923eaed2 xen/arch/powerpc/powerpc64/ppc970.c
--- a/xen/arch/powerpc/powerpc64/ppc970.c Thu Mar 01 13:18:52 2007 -0600
+++ b/xen/arch/powerpc/powerpc64/ppc970.c Thu Mar 01 13:18:52 2007 -0600
@@ -13,7 +13,7 @@
* along with this program; if not, write to the Free Software
* Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
- * Copyright (C) IBM Corp. 2005, 2006
+ * Copyright IBM Corp. 2005, 2006, 2007
*
* Authors: Hollis Blanchard <hollisb@xxxxxxxxxx>
* Jimi Xenidis <jimix@xxxxxxxxxxxxxx>
@@ -27,6 +27,7 @@
#include <xen/lib.h>
#include <asm/time.h>
#include <asm/current.h>
+#include <asm/platform.h>
#include <asm/powerpc64/procarea.h>
#include <asm/powerpc64/processor.h>
#include <asm/powerpc64/ppc970-hid.h>
@@ -133,9 +134,8 @@ unsigned int cpu_extent_order(void)
* one platform now */
int cpu_io_mfn(ulong mfn)
{
- /* totally cheating */
- if (mfn >= (2UL << (30 - PAGE_SHIFT)) && /* 2GiB */
- mfn < (4UL << (30 - PAGE_SHIFT))) /* 4GiB */
+ if (mfn >= (platform_iohole_base() >> PAGE_SHIFT) &&
+ mfn < ((platform_iohole_base()+platform_iohole_size()) >> PAGE_SHIFT))
return 1;
return 0;
diff -r 539e61f7482e -r 76a5923eaed2 xen/common/memory.c
--- a/xen/common/memory.c Thu Mar 01 13:18:52 2007 -0600
+++ b/xen/common/memory.c Thu Mar 01 13:18:52 2007 -0600
@@ -76,9 +76,6 @@ static void increase_reservation(struct
goto out;
}
- /* XXX PPC-specific hack */
- BUG_ON(0 > arch_domain_add_extent(d, page, a->extent_order));
-
/* Inform the domain of the new page's machine address. */
if ( !guest_handle_is_null(a->extent_list) )
{
diff -r 539e61f7482e -r 76a5923eaed2 xen/include/asm-powerpc/domain.h
--- a/xen/include/asm-powerpc/domain.h Thu Mar 01 13:18:52 2007 -0600
+++ b/xen/include/asm-powerpc/domain.h Thu Mar 01 13:18:52 2007 -0600
@@ -37,9 +37,6 @@ struct arch_domain {
* processor is in real mode */
struct page_info *rma_page;
uint rma_order;
-
- /* list of extents beyond RMA */
- struct list_head extent_list;
uint foreign_mfn_count;
uint *foreign_mfns;
diff -r 539e61f7482e -r 76a5923eaed2 xen/include/asm-powerpc/mm.h
--- a/xen/include/asm-powerpc/mm.h Thu Mar 01 13:18:52 2007 -0600
+++ b/xen/include/asm-powerpc/mm.h Thu Mar 01 13:18:52 2007 -0600
@@ -79,15 +79,6 @@ struct page_info
} u;
-};
-
-struct page_extents {
- /* Each frame can be threaded onto a doubly-linked list. */
- struct list_head pe_list;
-
- /* page extent */
- struct page_info *pg;
- uint order;
};
/* The following page types are MUTUALLY EXCLUSIVE. */
@@ -145,6 +136,7 @@ extern unsigned long max_page;
extern unsigned long max_page;
extern unsigned long total_pages;
void init_frametable(void);
+void init_machine_to_phys_table(void);
void free_rma_check(struct page_info *page);
static inline void put_page(struct page_info *page)
@@ -226,14 +218,13 @@ typedef struct {
} vm_assist_info_t;
extern vm_assist_info_t vm_assist_info[];
-
-/* hope that accesses to this will fail spectacularly */
-#undef machine_to_phys_mapping
+extern unsigned long *machine_phys_mapping;
+#define machine_to_phys_mapping (machine_phys_mapping)
#define INVALID_M2P_ENTRY (~0UL)
-
-/* do nothing, its all calculated */
-#define set_gpfn_from_mfn(mfn, pfn) do { } while (0)
-#define get_gpfn_from_mfn(mfn) (mfn)
+#define VALID_M2P(_e) (!((_e) & (1UL<<(BITS_PER_LONG-1))))
+
+#define set_gpfn_from_mfn(mfn, pfn) (machine_to_phys_mapping[(mfn)] = (pfn))
+#define get_gpfn_from_mfn(mfn) (machine_to_phys_mapping[(mfn)])
extern unsigned long mfn_to_gmfn(struct domain *d, unsigned long mfn);
@@ -241,7 +232,6 @@ extern unsigned long paddr_to_maddr(unsi
#define INVALID_MFN (~0UL)
#define PFN_TYPE_NONE 0
-#define PFN_TYPE_RMA 1
#define PFN_TYPE_LOGICAL 2
#define PFN_TYPE_IO 3
#define PFN_TYPE_FOREIGN 4
@@ -256,7 +246,6 @@ static inline unsigned long gmfn_to_mfn(
mfn = pfn2mfn(d, gmfn, &mtype);
if (mfn != INVALID_MFN) {
switch (mtype) {
- case PFN_TYPE_RMA:
case PFN_TYPE_LOGICAL:
break;
default:
@@ -278,10 +267,6 @@ long arch_memory_op(int op, XEN_GUEST_HA
extern int allocate_rma(struct domain *d, unsigned int order_pages);
extern uint allocate_extents(struct domain *d, uint nrpages, uint rma_nrpages);
-extern void free_extents(struct domain *d);
-
-extern int arch_domain_add_extent(struct domain *d, struct page_info *page,
- int order);
extern int steal_page(struct domain *d, struct page_info *page,
unsigned int memflags);
diff -r 539e61f7482e -r 76a5923eaed2 xen/arch/powerpc/platform.c
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/powerpc/platform.c Thu Mar 01 13:18:52 2007 -0600
@@ -0,0 +1,35 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright IBM Corp. 2007
+ *
+ * Authors: Ryan Harper <ryanh@xxxxxxxxxx>
+ */
+
+#include <asm/platform.h>
+
+#define IO_RANGE_START (2UL << 30)
+#define IO_RANGE_END (4UL << 30)
+#define IO_SIZE (IO_RANGE_END - IO_RANGE_START)
+
+unsigned long platform_iohole_base(void)
+{
+ return IO_RANGE_START;
+}
+
+unsigned long platform_iohole_size(void)
+{
+ return IO_SIZE;
+}
diff -r 539e61f7482e -r 76a5923eaed2 xen/include/asm-powerpc/platform.h
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/asm-powerpc/platform.h Thu Mar 01 13:18:52 2007 -0600
@@ -0,0 +1,27 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright IBM Corp. 2007
+ *
+ * Authors: Ryan Harper <ryanh@xxxxxxxxxx>
+ */
+
+#ifndef _ASM_PLATFORM_H_
+#define _ASM_PLATFORM_H_
+
+extern unsigned long platform_iohole_base(void);
+extern unsigned long platform_iohole_size(void);
+
+#endif
_______________________________________________
Xen-ppc-devel mailing list
Xen-ppc-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-ppc-devel
|