The following patch adds assembler versions of clear_page_cacheable() (by
Xenidis), copy_page(), and copy_page_cacheable(). The 'cacheable' versions use
'dcbz' to clear whole cache lines, so the target page is assumed to be
cacheable.
This code has been debugged with a small standalone application program on a
JS21. It has also been incorporated into a Xen tree and runs on a JS21,
although copy_page() is not currently being used there.
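
For context, the test looked roughly like this. This is a hedged sketch only,
not the actual program: the PAGE_SIZE and CACHE_LINE_SIZE values, the use of
posix_memalign(), and pasting the two 'cacheable' routines from the patch into
the user-space build are all assumptions.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE       4096    /* assumed; match the Xen build */
#define CACHE_LINE_SIZE 128     /* PPC970 (JS21) cache block size */

/* clear_page_cacheable() and copy_page_cacheable() from the patch below
 * are assumed to be pasted here for the user-space build. */

int main(void)
{
    void *src, *dst;
    unsigned char *d;
    int i;

    if (posix_memalign(&src, PAGE_SIZE, PAGE_SIZE) ||
        posix_memalign(&dst, PAGE_SIZE, PAGE_SIZE))
        return 1;

    memset(src, 0xa5, PAGE_SIZE);   /* recognizable source pattern */
    memset(dst, 0xff, PAGE_SIZE);   /* dirty the destination first */

    clear_page_cacheable(dst);
    d = dst;
    for (i = 0; i < PAGE_SIZE; i++) {
        if (d[i] != 0) {
            printf("clear failed at byte %d\n", i);
            return 1;
        }
    }

    copy_page_cacheable(dst, src);
    if (memcmp(dst, src, PAGE_SIZE) != 0) {
        printf("copy failed\n");
        return 1;
    }

    printf("ok\n");
    return 0;
}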
note: some documentation describes dcbz as operating on 32-byte cache lines and
dcbzl on 128-byte lines. However, the cross-build assembler does not recognize
dcbzl. The native assembler (used to build the test application) accepts either
dcbz or dcbzl, and in either case the full 128-byte cache line was cleared.
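
One way to check which block size a given dcbz encoding actually clears is a
small probe like the one below; it is only a sketch, under the assumption that
a 256-byte-aligned static buffer is generous enough for either a 32- or
128-byte block.

#include <stdio.h>
#include <string.h>

static unsigned char buf[512] __attribute__((aligned(256)));

int main(void)
{
    unsigned int n = 0;

    /* Fill with a non-zero pattern, zero one cache block with dcbz,
     * then count how many leading bytes came back as zero. */
    memset(buf, 0xff, sizeof(buf));
    __asm__ __volatile__("dcbz 0,%0" : : "r" (buf) : "memory");

    while (n < sizeof(buf) && buf[n] == 0)
        n++;

    printf("dcbz cleared %u bytes\n", n);
    return 0;
}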
note2: in page_alloc.c, one of the three clear_page() calls breaks the system
when changed to clear_page_cacheable(), presumably because some of the pages
scrubbed there are not mapped cacheable and dcbz faults on such storage; that
call is left as plain clear_page() in the patch.
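
If that call site ever needs the fast path, one option would be a guarded
wrapper; this is only a sketch, and page_is_cacheable() is a hypothetical
predicate that the tree does not provide.

/* Hedged sketch: use dcbz only when the mapping is known cacheable,
 * otherwise fall back to the existing clear_page().
 * page_is_cacheable() is hypothetical, not part of this patch. */
static inline void clear_page_maybe_cacheable(void *p)
{
    if (page_is_cacheable(p))
        clear_page_cacheable(p);
    else
        clear_page(p);
}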
diff -r 326e6736d92b xen/include/asm-powerpc/page.h
--- a/xen/include/asm-powerpc/page.h Mon Aug 21 10:04:37 2006 -0400
+++ b/xen/include/asm-powerpc/page.h Fri Aug 25 17:05:22 2006 -0400
@@ -70,8 +70,67 @@ typedef struct { unsigned long l1_lo; }
#define pfn_to_paddr(pfn) ((paddr_t)(pfn) << PAGE_SHIFT)
#define paddr_to_pfn(pa) ((unsigned long)((pa) >> PAGE_SHIFT))
+
extern void clear_page(void *p);
-extern void copy_page(void *dp, void *sp);
+
+static __inline__ void copy_page(void *dp, void *sp)
+{
+ unsigned long dwords, dword_size;
+
+ dword_size = 8;
+ dwords = (PAGE_SIZE / dword_size) - 1;
+
+ __asm__ __volatile__(
+ "mtctr %2 # copy_page\n\
+ ld %2,0(%1)\n\
+ std %2,0(%0)\n\
+1: ldu %2,8(%1)\n\
+ stdu %2,8(%0)\n\
+ bdnz 1b"
+ : /* no result */
+ : "r" (dp), "r" (sp), "r" (dwords)
+ : "%ctr", "memory");
+}
+
+/* assumes page, *addr, is cacheable */
+static __inline__ void clear_page_cacheable(void *addr)
+{
+ unsigned long lines, line_size;
+
+ line_size = CACHE_LINE_SIZE;
+ lines = PAGE_SIZE / CACHE_LINE_SIZE;
+
+ __asm__ __volatile__(
+ "mtctr %1 # clear_page\n\
+1: dcbz 0,%0\n\
+ add %0,%0,%3\n\
+ bdnz 1b"
+ : "=r" (addr)
+ : "r" (lines), "0" (addr), "r" (line_size)
+ : "%ctr", "memory");
+}
+
+/* assumes destination page, *dp, is cacheable */
+static __inline__ void copy_page_cacheable(void *dp, void *sp)
+{
+ unsigned long dwords, dword_size;
+
+ dword_size = 8;
+ dwords = (PAGE_SIZE / dword_size) - 1;
+
+ clear_page_cacheable(dp);
+
+ __asm__ __volatile__(
+ "mtctr %2 # copy_page\n\
+ ld %2,0(%1)\n\
+ std %2,0(%0)\n\
+1: ldu %2,8(%1)\n\
+ stdu %2,8(%0)\n\
+ bdnz 1b"
+ : /* no result */
+ : "r" (dp), "r" (sp), "r" (dwords)
+ : "%ctr", "memory");
+}
#define linear_pg_table linear_l1_table
diff -r 326e6736d92b xen/arch/powerpc/mm.c
--- a/xen/arch/powerpc/mm.c Mon Aug 21 10:04:37 2006 -0400
+++ b/xen/arch/powerpc/mm.c Fri Aug 25 17:08:02 2006 -0400
@@ -97,16 +97,6 @@ void clear_page(void *page)
}
}
-extern void copy_page(void *dp, void *sp)
-{
- if (on_mambo()) {
- extern void *mambo_memcpy(void *,const void *,__kernel_size_t);
- mambo_memcpy(dp, sp, PAGE_SIZE);
- } else {
- memcpy(dp, sp, PAGE_SIZE);
- }
-}
-
ulong pfn2mfn(struct domain *d, long pfn, int *type)
{
ulong rma_base_mfn = page_to_mfn(d->arch.rma_page);
diff -r 326e6736d92b xen/common/page_alloc.c
--- a/xen/common/page_alloc.c Mon Aug 21 10:04:37 2006 -0400
+++ b/xen/common/page_alloc.c Fri Aug 25 17:09:12 2006 -0400
@@ -412,13 +412,13 @@ void scrub_heap_pages(void)
{
p = page_to_virt(mfn_to_page(pfn));
memguard_unguard_range(p, PAGE_SIZE);
- clear_page(p);
+ clear_page_cacheable(p);
memguard_guard_range(p, PAGE_SIZE);
}
else
{
p = map_domain_page(pfn);
- clear_page(p);
+ clear_page_cacheable(p);
unmap_domain_page(p);
}
}
@@ -794,7 +794,7 @@ static void page_scrub_softirq(void)
pg = list_entry(ent, struct page_info, list);
ent = ent->prev;
p = map_domain_page(page_to_mfn(pg));
- clear_page(p);
+ clear_page(p); /* some pages not cacheable? */
unmap_domain_page(p);
free_heap_pages(pfn_dom_zone_type(page_to_mfn(pg)), pg, 0);
}
diff -r 326e6736d92b xen/arch/powerpc/domain.c
--- a/xen/arch/powerpc/domain.c Mon Aug 21 10:04:37 2006 -0400
+++ b/xen/arch/powerpc/domain.c Fri Aug 25 17:07:58 2006 -0400
@@ -79,7 +79,7 @@ int arch_domain_create(struct domain *d)
if (d->domain_id == IDLE_DOMAIN_ID) {
d->shared_info = (void *)alloc_xenheap_page();
- clear_page(d->shared_info);
+ clear_page_cacheable(d->shared_info);
return 0;
}