[XenPPC] [pushed] [ppc] Move shared pages into RMA

changeset:   9715:1bdd8ec03da55dd5d7a802a147d9e77d06d3a244
tag:         tip
user:        jimix@xxxxxxxxxxxxxxxxxxxxx
date:        Thu Mar 30 18:40:26 2006 -0500
files:       xen/arch/ppc/domain.c xen/arch/ppc/domain_build.c xen/arch/ppc/mm.c xen/arch/ppc/ofd_fixup.c xen/arch/ppc/papr/xlate.c xen/arch/ppc/ppc64/ppc970.c xen/arch/ppc/usercopy.c xen/include/asm-ppc/domain.h xen/include/asm-ppc/mm.h
description:
[ppc] Move shared pages into RMA
  - All references to RMO should actually be RMA.
  - Allocate domain shared_info page from the RMA
  - Remove all magical PFN hacks for mapping shared pages.
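
(Editor's note, not part of the patch: a minimal standalone sketch of the RMA page layout this change introduces, for readers skimming the diff below. The RMA_* indices mirror the ones added to xen/include/asm-ppc/domain.h, the 64M base/size values mirror the numbers hardcoded in arch_domain_create(), PAGE_SHIFT = 12 is an assumption, and this rma_addr() is a simplified stand-in for the real helper, which takes a struct arch_domain pointer.)

#include <stdio.h>

#define PAGE_SHIFT      12                  /* assumption: 4k pages */
#define PAGE_SIZE       (1UL << PAGE_SHIFT)
#define RMA_SHARED_INFO 1                   /* last page of the RMA */
#define RMA_START_INFO  2                   /* page directly below it */

typedef unsigned long ulong;

/* Offset of a well-known page, counted down from the top of the RMA
 * (simplified form of the rma_addr() helper added by this patch). */
static ulong rma_addr(ulong rma_size, int type)
{
    return rma_size - ((ulong)type * PAGE_SIZE);
}

int main(void)
{
    /* dom0 values: (64<<20) * (domain_id + 1) with domain_id == 0,
     * per the "hackage" in arch_domain_create(). */
    ulong rma_base = 64UL << 20;
    ulong rma_size = 64UL << 20;

    /* shared_info lives in the top page of the RMA ... */
    ulong shared_info = rma_base + rma_addr(rma_size, RMA_SHARED_INFO);
    /* ... and start_info in the page below it. */
    ulong start_info  = rma_base + rma_addr(rma_size, RMA_START_INFO);

    printf("shared_info at machine 0x%lx (domain offset 0x%lx)\n",
           shared_info, shared_info - rma_base);
    printf("start_info  at machine 0x%lx (domain offset 0x%lx)\n",
           start_info,  start_info - rma_base);
    return 0;
}

With this layout the shared pages are ordinary RMA memory, which is why the dummy page, the shared_pfn2mfn array, and PFN_TYPE_SHARED can all be dropped below.
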


diff -r 2389fd7700c5c9d251d0ce8f666d01c48cb13f55 -r 1bdd8ec03da55dd5d7a802a147d9e77d06d3a244 xen/arch/ppc/domain.c
--- a/xen/arch/ppc/domain.c     Thu Mar 30 18:38:23 2006 -0500
+++ b/xen/arch/ppc/domain.c     Thu Mar 30 18:40:26 2006 -0500
@@ -68,28 +68,25 @@ unsigned long hypercall_create_continuat
 
 int arch_domain_create(struct domain *d)
 {
-    void *dummy;
-
-    /* dummy safety page for mapping pages that are yet to be shared */
-    dummy = alloc_xenheap_page();
-    clear_page(dummy);
-    d->arch.shared_dummy_pfn = (ulong)dummy >> PAGE_SHIFT;
-
-    /* Shared area pfn2mfn array, may end up publishing this */
-    d->arch.shared_pfn2mfn = (void *)alloc_xenheap_page();
-    clear_page(d->arch.shared_pfn2mfn);
-    d->arch.shared_nr_pfn = PAGE_SHIFT - 3;
-
-    d->shared_info = (void *)alloc_xenheap_page();
-    clear_page(d->shared_info);
+
+    if (d->domain_id == IDLE_DOMAIN_ID) {
+        d->shared_info = (void *)alloc_xenheap_page();
+        clear_page(d->shared_info);
+
+        return 0;
+    }
+
+    /* XXX the hackage... hardcode 64M domains */
+    d->arch.rma_base = (64<<20) * (d->domain_id + 1);
+    d->arch.rma_size = (64<<20);
+
+    printk("clearing RMO: 0x%lx[0x%lx]\n", d->arch.rma_base, d->arch.rma_size);
+    memset((void*)d->arch.rma_base, 0, d->arch.rma_size);
 
     htab_alloc(d, LOG_DEFAULT_HTAB_BYTES);
 
-    /* XXX the hackage... hardcode 64M domains */
-    d->arch.rmo_base = (64<<20) * (d->domain_id + 1);
-    d->arch.rmo_len = (64<<20);
-
-    d->arch.shared_base_pfn = (d->arch.rmo_len >> PAGE_SHIFT) - 1;
+    d->shared_info = (shared_info_t *)
+        (rma_addr(&d->arch, RMA_SHARED_INFO) + d->arch.rma_base);
 
     d->arch.large_page_sizes = 1;
     d->arch.large_page_shift[0] = 24; /* 16 M for 970s */
diff -r 2389fd7700c5c9d251d0ce8f666d01c48cb13f55 -r 1bdd8ec03da55dd5d7a802a147d9e77d06d3a244 xen/arch/ppc/domain_build.c
--- a/xen/arch/ppc/domain_build.c       Thu Mar 30 18:38:23 2006 -0500
+++ b/xen/arch/ppc/domain_build.c       Thu Mar 30 18:40:26 2006 -0500
@@ -23,6 +23,7 @@
 #include <xen/init.h>
 #include <xen/ctype.h>
 #include <xen/iocap.h>
+#include <xen/compile.h>
 #include <asm/processor.h>
 #include <asm/papr.h>
 
@@ -88,7 +89,7 @@ static inline int is_loadable_phdr(Elf_P
             ((phdr->p_flags & (PF_W|PF_X)) != 0));
 }
 
-static int rm_loadelfimage(struct domain_setup_info *dsi, ulong rmo)
+static int rm_loadelfimage(struct domain_setup_info *dsi, ulong rma)
 {
     char *elfbase = (char *)dsi->image_addr;
     Elf_Ehdr *ehdr = (Elf_Ehdr *)dsi->image_addr;
@@ -101,11 +102,11 @@ static int rm_loadelfimage(struct domain
         if (!is_loadable_phdr(phdr))
             continue;
         if (phdr->p_filesz != 0)
-            memcpy((char *)(rmo + RM_MASK(phdr->p_paddr, 42)),
+            memcpy((char *)(rma + RM_MASK(phdr->p_paddr, 42)),
                    elfbase + phdr->p_offset, 
                    phdr->p_filesz);
         if (phdr->p_memsz > phdr->p_filesz)
-            memset((char *)(rmo + RM_MASK(phdr->p_paddr, 42) + phdr->p_filesz),
+            memset((char *)(rma + RM_MASK(phdr->p_paddr, 42) + phdr->p_filesz),
                    0, phdr->p_memsz - phdr->p_filesz);
     }
 
@@ -126,11 +127,9 @@ int construct_dom0(struct domain *d,
     struct domain_setup_info dsi;
     ulong dst;
     u64 *ofh_tree;
-    ulong rmo_sz = d->arch.rmo_len;
-    ulong rmo = d->arch.rmo_base;
+    ulong rma_sz = d->arch.rma_size;
+    ulong rma = d->arch.rma_base;
     start_info_t *si;
-    ulong vstartinfo_start;
-    ulong vstartinfo_end;
     ulong eomem;
 
     /* Sanity! */
@@ -162,37 +161,26 @@ int construct_dom0(struct domain *d,
     /* By default DOM0 is allocated all available memory. */
     d->max_pages = ~0U;
 
-    ASSERT( image_len < rmo_sz );
-    printk("clearing RMO: 0x%lx[0x%lx]\n", rmo, rmo_sz);
-    memset((void*)rmo, 0, rmo_sz);
-
-    /* place the start info at the end */
-    eomem = rmo_sz;
-    vstartinfo_end = eomem;
-    eomem -= PAGE_SIZE;
-    vstartinfo_start = eomem;
-    printk("xen_start_info: 0x%lx\n", vstartinfo_start);
-    si = (start_info_t *)(vstartinfo_start + rmo);
-
+    ASSERT( image_len < rma_sz );
+
+    si = (start_info_t *)(rma_addr(&d->arch, RMA_START_INFO) + rma);
+    printk("xen_start_info: %p\n", si);
+
+    sprintf(si->magic, "xen-%i.%i-powerpc%d%s",
+            XEN_VERSION, XEN_SUBVERSION, BITS_PER_LONG, "HV");
     si->flags = SIF_PRIVILEGED | SIF_INITDOMAIN;
 
-    /* first page of shared region belongs to the Xen Shared Info page */
-    d->arch.shared_pfn2mfn[0] = ((ulong)d->shared_info) >> PAGE_SHIFT;
-
-    /* place the shared area one page below the top of RMO. */
-    d->arch.shared_base_pfn = (rmo_sz >> PAGE_SHIFT) - 1;
-    si->shared_info = d->arch.shared_base_pfn << PAGE_SHIFT;
+    si->shared_info = ((ulong)d->shared_info) - rma;
+    printk("shared_info: 0x%lx,%p\n", si->shared_info, d->shared_info);
+
+    eomem = si->shared_info;
 
     /* allow dom0 to access all of system RAM */
     d->arch.logical_base_pfn = 128 << (20 - PAGE_SHIFT); /* 128 MB */
     d->arch.logical_end_pfn = max_page;
 
-    /* dom0 is not given a store or console page */
-    si->store_mfn =  d->arch.shared_base_pfn + 1;
-    si->console_mfn = d->arch.shared_base_pfn + 2;
-
     /* number of pages accessible */
-    si->nr_pages =   (rmo_sz >> PAGE_SHIFT) + 3;
+    si->nr_pages = rma_sz >> PAGE_SHIFT;
 
     si->pt_base = 0;
     si->nr_pt_frames = 0;
@@ -208,30 +196,31 @@ int construct_dom0(struct domain *d,
     v->arch.ctxt.gprs[1] = dst - STACK_FRAME_OVERHEAD;
 
     /* copy relative to Xen */
-    dst += rmo;
+    dst += rma;
 
 
     extern int ofh_image_start[0];
     extern int ofh_image_size[0];
 
-    ASSERT((dst - rmo) + (ulong)ofh_image_size < eomem);
-    printk("loading OFH: 0x%lx, 0x%p\n", dst, ofh_image_size);
+    ASSERT((dst - rma) + (ulong)ofh_image_size < eomem);
+    printk("loading OFH: 0x%lx, RMA: 0x%lx\n", dst, dst - rma);
     memcpy((void *)dst, ofh_image_start, (ulong)ofh_image_size);
 
-    v->arch.ctxt.gprs[5] = (dst - rmo);
+    v->arch.ctxt.gprs[5] = (dst - rma);
     ofh_tree = (u64 *)(dst + 0x10);
     ASSERT(*ofh_tree == 0xdeadbeef00000000);
 
     /* accomodate for a modest bss section */
     dst = ALIGN_UP(dst + (ulong)ofh_image_size + PAGE_SIZE, PAGE_SIZE);
-    ASSERT((dst - rmo) + oftree_len < eomem);
-
-    *ofh_tree = dst - rmo;
-    printk("loading OFD: 0x%lx, 0x%lx\n", dst, oftree_len);
+    ASSERT((dst - rma) + oftree_len < eomem);
+
+    *ofh_tree = dst - rma;
+    printk("loading OFD: 0x%lx RMA: 0x%lx, 0x%lx\n", dst, dst - rma,
+           oftree_len);
     memcpy((void *)dst, (void *)oftree, oftree_len);
 
     dst = ALIGN_UP(dst + oftree_len, PAGE_SIZE);
-    printk("loading Dom0: 0x%lx, in RMO:0x%lx\n", dst, dst - rmo);
+    printk("loading Dom0: 0x%lx, in RMA:0x%lx\n", dst, dst - rma);
     rm_loadelfimage(&dsi, dst);
 
     ulong kbase = dst;
@@ -239,12 +228,12 @@ int construct_dom0(struct domain *d,
     /* move dst to end of bss */
     dst = ALIGN_UP(dsi.v_kernend + dst, PAGE_SIZE);
     if ( initrd_len > 0 ) {
-        ASSERT( (dst - rmo) + image_len < eomem );
+        ASSERT( (dst - rma) + image_len < eomem );
 
         printk("loading initrd: 0x%lx, 0x%lx\n", dst, initrd_len);
         memcpy((void *)dst, (void *)initrd_start, initrd_len);
 
-        si->mod_start = dst - rmo;
+        si->mod_start = dst - rma;
         si->mod_len = image_len;
 
         dst = ALIGN_UP(dst + initrd_len, PAGE_SIZE);
@@ -274,10 +263,10 @@ int construct_dom0(struct domain *d,
         && ((fdesc[1] >= dsi.v_kernstart)  /* toc can be greater than image */
             && (fdesc[1] < (dsi.v_kernend + (0x7fff * sizeof (ulong)))))) {
         /* it is almost certainly a function descriptor */
-        pc = RM_MASK(fdesc[0], 42) + kbase - rmo;
-        r2 = RM_MASK(fdesc[1], 42) + kbase - rmo;
+        pc = RM_MASK(fdesc[0], 42) + kbase - rma;
+        r2 = RM_MASK(fdesc[1], 42) + kbase - rma;
     } else {
-        pc = ((ulong)fdesc) - rmo;
+        pc = ((ulong)fdesc) - rma;
         r2 = 0;
     }
 
@@ -286,7 +275,7 @@ int construct_dom0(struct domain *d,
 
     printk("DOM: pc = 0x%lx, r2 = 0x%lx\n", pc, r2);
 
-    ofd_dom0_fixup(d, *ofh_tree + rmo, si, dst);
+    ofd_dom0_fixup(d, *ofh_tree + rma, si, dst - rma);
 
     set_bit(_VCPUF_initialised, &v->vcpu_flags);
 
diff -r 2389fd7700c5c9d251d0ce8f666d01c48cb13f55 -r 1bdd8ec03da55dd5d7a802a147d9e77d06d3a244 xen/arch/ppc/mm.c
--- a/xen/arch/ppc/mm.c Thu Mar 30 18:38:23 2006 -0500
+++ b/xen/arch/ppc/mm.c Thu Mar 30 18:40:26 2006 -0500
@@ -100,32 +100,23 @@ extern void copy_page(void *dp, void *sp
 
 ulong pfn2mfn(struct domain *d, long pfn, int *type)
 {
-    ulong rmo_base_mfn = d->arch.rmo_base >> PAGE_SHIFT;
-    ulong rmo_len_mfn = d->arch.rmo_len >> PAGE_SHIFT;
+    ulong rma_base_mfn = d->arch.rma_base >> PAGE_SHIFT;
+    ulong rma_size_mfn = d->arch.rma_size >> PAGE_SHIFT;
     ulong mfn;
     int t;
 
-    if (pfn >= d->arch.shared_base_pfn &&
-            pfn < (d->arch.shared_base_pfn + d->arch.shared_nr_pfn)) {
-        /* shared_info physical mapping (not machine address) is inside RMO, so
-         * need to test for it first. */
-        mfn = d->arch.shared_pfn2mfn[pfn - d->arch.shared_base_pfn];
-        if (mfn == 0) {
-            mfn = d->arch.shared_dummy_pfn;
-        }
-        t = PFN_TYPE_SHARED;
-    } else if (pfn < rmo_len_mfn) {
-        mfn = pfn + rmo_base_mfn;
-        t = PFN_TYPE_RMO;
+    if (pfn < rma_size_mfn) {
+        mfn = pfn + rma_base_mfn;
+        t = PFN_TYPE_RMA;
     } else if (pfn >= d->arch.logical_base_pfn &&
-            pfn < d->arch.logical_end_pfn) {
+               pfn < d->arch.logical_end_pfn) {
         if (test_bit(_DOMF_privileged, &d->domain_flags)) {
             /* This hack allows dom0 to map all memory, necessary to
              * initialize domU state. */
             mfn = pfn;
         } else {
             panic("we do not handle the logical area yet\n");
-           mfn = 0;
+            mfn = 0;
         }
 
         t = PFN_TYPE_LOGICAL;
diff -r 2389fd7700c5c9d251d0ce8f666d01c48cb13f55 -r 1bdd8ec03da55dd5d7a802a147d9e77d06d3a244 xen/arch/ppc/ofd_fixup.c
--- a/xen/arch/ppc/ofd_fixup.c  Thu Mar 30 18:38:23 2006 -0500
+++ b/xen/arch/ppc/ofd_fixup.c  Thu Mar 30 18:40:26 2006 -0500
@@ -361,8 +361,8 @@ static ofdn_t ofd_memory_props(void *m, 
     ofdn_t n = -1;
     ulong start = 0;
     static char name[] = "memory";
-    ulong mem_size = d->arch.rmo_len;
-    ulong chunk_size = d->arch.rmo_len;
+    ulong mem_size = d->arch.rma_size;
+    ulong chunk_size = d->arch.rma_size;
 
     /* Remove all old memory props */
     do {
@@ -382,7 +382,7 @@ static ofdn_t ofd_memory_props(void *m, 
                 start, size);
 
         if (start == 0) {
-            /* We are processing the first and RMO chunk */
+            /* We are processing the first and RMA chunk */
 
             /* free list of physical addresses available after OF and
              * client program have been accounted for */
@@ -426,7 +426,7 @@ static ofdn_t ofd_xen_props(void *m, str
     if (n > 0) {
         char xen[256];
         int xl;
-        u32 val;
+        u32 val[2];
 
         ofd_prop_add(m, n, "name", &path[1], sizeof (path) - 1);
 
@@ -435,22 +435,13 @@ static ofdn_t ofd_xen_props(void *m, str
         ASSERT(xl < sizeof (xen));
         ofd_prop_add(m, n, "version", xen, xl + 1);
 
-        val = (ulong)si - d->arch.rmo_base;
-        ofd_prop_add(m, n, "start-info", &val, sizeof (val));
-        val = PAGE_SIZE;
-        ofd_prop_add(m, n, "start-info-size", &val, sizeof (val));
-
-        /* the very last memory chunk is small and contains the PFNs
-         * that contain Xen uses for communication pages that coincide
-         * with the start_info page */
-        /* its just 3 pages right now:
-         *   1. the shared page
-         *   2. the XenStore page
-         *   3. the console page, tho dom0 does not have one
-         */
-        n = ofd_memory_chunk_create(m, n, "/xen", "shared", "xen-shared",
-                d->arch.shared_base_pfn << PAGE_SHIFT,
-                3 << PAGE_SHIFT);
+        val[0] = (ulong)si - d->arch.rma_base;
+        val[1] = PAGE_SIZE;
+        ofd_prop_add(m, n, "start-info", val, sizeof (val));
+
+        val[1] =  RMA_LAST_DOM0 * PAGE_SIZE;
+        val[0] =  d->arch.rma_size - val[1];
+        ofd_prop_add(m, n, "reserved", val, sizeof (val));
     }
     return n;
 }
diff -r 2389fd7700c5c9d251d0ce8f666d01c48cb13f55 -r 1bdd8ec03da55dd5d7a802a147d9e77d06d3a244 xen/arch/ppc/papr/xlate.c
--- a/xen/arch/ppc/papr/xlate.c Thu Mar 30 18:38:23 2006 -0500
+++ b/xen/arch/ppc/papr/xlate.c Thu Mar 30 18:40:26 2006 -0500
@@ -191,14 +191,6 @@ static void h_enter(struct cpu_user_regs
             return;
         }
     }
-    if (mtype == PFN_TYPE_SHARED) {
-        /* this areas must be mapped with 4k pages */
-        if (pte.bits.l) {
-            printk("%s: Large page in shared region: 0x%lx\n", __func__, lpn);
-            regs->gprs[3] =  H_Parameter;
-            return;
-        }
-    }
     /* fixup the RPN field of our local PTE copy */
     pte.bits.rpn = rpn | lp_bits;
 
diff -r 2389fd7700c5c9d251d0ce8f666d01c48cb13f55 -r 1bdd8ec03da55dd5d7a802a147d9e77d06d3a244 xen/arch/ppc/ppc64/ppc970.c
--- a/xen/arch/ppc/ppc64/ppc970.c       Thu Mar 30 18:38:23 2006 -0500
+++ b/xen/arch/ppc/ppc64/ppc970.c       Thu Mar 30 18:40:26 2006 -0500
@@ -94,20 +94,20 @@ void cpu_init_vcpu(struct vcpu *v)
 {
     struct domain *d = v->domain;
     union hid4 hid4;
-    ulong rmo_base = d->arch.rmo_base;
-    ulong rmo_size = d->arch.rmo_len;
+    ulong rma_base = d->arch.rma_base;
+    ulong rma_size = d->arch.rma_size;
 
     hid4.word = mfhid4();
 
     hid4.bits.lpes0 = 0; /* exceptions go to hypervisor vectors */
-    hid4.bits.lpes1 = 1; /* RMO applies */
+    hid4.bits.lpes1 = 1; /* RMA applies */
 
-    hid4.bits.rmor = rmo_base >> 26;
+    hid4.bits.rmor = rma_base >> 26;
 
     hid4.bits.lpid01 = d->domain_id & 3;
     hid4.bits.lpid25 = (d->domain_id >> 2) & 0xf;
 
-    switch (rmo_size) {
+    switch (rma_size) {
         case 256ULL << 30:  /* 256 GB */
             hid4.bits.rmlr0 = 0;
             hid4.bits.rmlr12 = 0;
diff -r 2389fd7700c5c9d251d0ce8f666d01c48cb13f55 -r 1bdd8ec03da55dd5d7a802a147d9e77d06d3a244 xen/arch/ppc/usercopy.c
--- a/xen/arch/ppc/usercopy.c   Thu Mar 30 18:38:23 2006 -0500
+++ b/xen/arch/ppc/usercopy.c   Thu Mar 30 18:40:26 2006 -0500
@@ -42,7 +42,7 @@ unsigned long paddr_to_maddr(unsigned lo
 
     pa = pfn2mfn(d, pfn, &mtype);
     switch (mtype) {
-        case PFN_TYPE_RMO:
+        case PFN_TYPE_RMA:
         case PFN_TYPE_LOGICAL:
             break;
         default:
diff -r 2389fd7700c5c9d251d0ce8f666d01c48cb13f55 -r 1bdd8ec03da55dd5d7a802a147d9e77d06d3a244 xen/include/asm-ppc/domain.h
--- a/xen/include/asm-ppc/domain.h      Thu Mar 30 18:38:23 2006 -0500
+++ b/xen/include/asm-ppc/domain.h      Thu Mar 30 18:40:26 2006 -0500
@@ -32,28 +32,12 @@ struct arch_domain {
     struct domain_htab htab;
     /* The RMO area is fixed to the domain and is accessible while the
      * processor is in real mode */
-    ulong rmo_base;
-    ulong rmo_len;
+    ulong rma_base;
+    ulong rma_size;
 
     /* This is regular memory, only available thru translataion */
     ulong logical_base_pfn;
     ulong logical_end_pfn;
-
-    /* This is an area dedicated for cross-domain and hypervisor
-     * shared pages, only 4k PTE are allowed in this region.  Since
-     * the domain is free to use large pages elsewhere is must be in
-     * its own segment on machines that do not allow mixing of page
-     * sizes in the segment */
-    ulong shared_base_pfn;
-    ulong shared_nr_pfn;
-    ulong *shared_pfn2mfn;
-
-    /* since most kernels map all of accessible memory in the kernel's
-     * address space, pages in the shared region that have no real
-     * page assigned get mapped to thsi dummy page.  We should, every
-     * once in a while, check and make sure that this page stays clear
-     * becuase bad thing can happen if the domain actually uses it. */
-    ulong shared_dummy_pfn;
 
     /* I/O-port access bitmap mask. */
     u8 *iobmp_mask;       /* Address of IO bitmap mask, or NULL.      */
@@ -109,4 +93,16 @@ extern void load_float(struct vcpu *);
 
 #define arch_event_deliverable (!!((guest_cpu_user_regs())->msr & MSR_EE))
 
+#define RMA_SHARED_INFO 1
+#define RMA_START_INFO 2
+#define RMA_LAST_DOM0 2
+/* these are not used for dom0 so they should be last */
+#define RMA_CONSOLE 3
+#define RMA_LAST_DOMU 3
+
+static inline ulong rma_addr(struct arch_domain *d, int type)
+{
+    return d->rma_size - (type * PAGE_SIZE);
+}
+
 #endif
diff -r 2389fd7700c5c9d251d0ce8f666d01c48cb13f55 -r 1bdd8ec03da55dd5d7a802a147d9e77d06d3a244 xen/include/asm-ppc/mm.h
--- a/xen/include/asm-ppc/mm.h  Thu Mar 30 18:38:23 2006 -0500
+++ b/xen/include/asm-ppc/mm.h  Thu Mar 30 18:40:26 2006 -0500
@@ -195,10 +195,9 @@ extern int update_grant_va_mapping(unsig
 
 extern void put_page_type(struct page_info *page);
 
-#define PFN_TYPE_RMO 0
+#define PFN_TYPE_RMA 0
 #define PFN_TYPE_LOGICAL 1
-#define PFN_TYPE_SHARED 2
-#define PFN_TYPE_IO 3
+#define PFN_TYPE_IO 2
 extern ulong pfn2mfn(struct domain *d, long mfn, int *type);
 
 /* Arch-specific portion of memory_op hypercall. */
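
(Editor's note, not part of the patch: a sketch of what the post-patch pfn2mfn() classification in xen/arch/ppc/mm.c reduces to. The dom_layout struct, the privileged flag, and the 4k page size are illustrative assumptions; the real function takes a struct domain pointer, and the hunk above is truncated, so anything beyond the logical range is left out here.)

#define PFN_TYPE_RMA     0
#define PFN_TYPE_LOGICAL 1

typedef unsigned long ulong;

struct dom_layout {
    ulong rma_base, rma_size;                 /* machine address and length, bytes */
    ulong logical_base_pfn, logical_end_pfn;  /* regular memory, via translation */
    int   privileged;                         /* dom0-style "map everything" case */
};

long pfn2mfn_sketch(const struct dom_layout *d, ulong pfn, int *type)
{
    ulong rma_base_mfn = d->rma_base >> 12;   /* assuming 4k pages */
    ulong rma_size_mfn = d->rma_size >> 12;

    if (pfn < rma_size_mfn) {
        *type = PFN_TYPE_RMA;                 /* RMA maps 1:1 from its machine base */
        return (long)(pfn + rma_base_mfn);
    }
    if (pfn >= d->logical_base_pfn && pfn < d->logical_end_pfn) {
        *type = PFN_TYPE_LOGICAL;
        /* dom0 hack kept by the patch: privileged domains map all memory 1:1;
         * the domU logical area is still unhandled. */
        return d->privileged ? (long)pfn : -1;
    }
    return -1;                                /* remaining cases are cut off by the hunk above */
}
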



_______________________________________________
Xen-ppc-devel mailing list
Xen-ppc-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-ppc-devel
