# HG changeset patch
# User awilliam@xxxxxxxxxxx
# Node ID 96bc87dd7ca9623ecbd4bd7edd99871dba09a2b6
# Parent 4e8a64d8bd0ed165b4b3d711784fc2b416687716
[IA64] get rid of sync_split_cache
Get rid of sync_split_caches().
Use flush_icache_range() and ia64_fc() instead.
Signed-off-by: Tristan Gingold <tristan.gingold@xxxxxxxx>
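
For context: on IA64, ia64_fc() flushes the cache line containing a single
address, while flush_icache_range() walks a whole address range and then
serializes the instruction stream so newly written code becomes visible to
instruction fetch. A minimal sketch of the idiom this patch adopts
(publish_code() is a hypothetical helper, not part of the patch):

/* Hypothetical helper, illustrative sketch only: after writing new
 * instructions into dst, flush the modified range so stale i-cache
 * contents cannot be executed. */
static void publish_code(void *dst, const void *src, unsigned long len)
{
	memcpy(dst, src, len);                         /* write the new text */
	flush_icache_range((unsigned long)dst,
	                   (unsigned long)dst + len);  /* push it to the i-cache */
}
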
diff -r 4e8a64d8bd0e -r 96bc87dd7ca9 xen/arch/ia64/xen/domain.c
--- a/xen/arch/ia64/xen/domain.c Fri Apr 14 14:13:13 2006 -0600
+++ b/xen/arch/ia64/xen/domain.c Fri Apr 14 14:20:04 2006 -0600
@@ -339,8 +339,9 @@ int arch_set_info_guest(struct vcpu *v,
d->arch.cmdline = c->cmdline;
d->shared_info->arch = c->shared;
- /* FIXME: it is required here ? */
- sync_split_caches();
+ /* Cache synchronization seems to be done by the linux kernel
+ during mmap/unmap operation. However be conservative. */
+ domain_cache_flush (d, 1);
}
new_thread(v, regs->cr_iip, 0, 0);
@@ -784,50 +785,68 @@ static void loaddomainelfimage(struct do
copy_memory(&ehdr, (void *) image_start, sizeof(Elf_Ehdr));
for ( h = 0; h < ehdr.e_phnum; h++ ) {
- copy_memory(&phdr,elfbase + ehdr.e_phoff + (h*ehdr.e_phentsize),
- sizeof(Elf_Phdr));
- //if ( !is_loadable_phdr(phdr) )
- if ((phdr.p_type != PT_LOAD)) {
- continue;
- }
- filesz = phdr.p_filesz; memsz = phdr.p_memsz;
- elfaddr = (unsigned long) elfbase + phdr.p_offset;
- dom_mpaddr = phdr.p_paddr;
+ copy_memory(&phdr,
+ elfbase + ehdr.e_phoff + (h*ehdr.e_phentsize),
+ sizeof(Elf_Phdr));
+ if ((phdr.p_type != PT_LOAD))
+ continue;
+
+ filesz = phdr.p_filesz;
+ memsz = phdr.p_memsz;
+ elfaddr = (unsigned long) elfbase + phdr.p_offset;
+ dom_mpaddr = phdr.p_paddr;
+
//printf("p_offset: %x, size=%x\n",elfaddr,filesz);
#ifdef CONFIG_DOMAIN0_CONTIGUOUS
- if (d == dom0) {
- if (dom_mpaddr+memsz>dom0_size || dom_mpaddr+filesz>dom0_size) {
- printf("Domain0 doesn't fit in allocated space!\n");
- while(1);
+ if (d == dom0) {
+ if (dom_mpaddr+memsz>dom0_size)
+ panic("Dom0 doesn't fit in memory space!\n");
+ dom_imva = __va_ul(dom_mpaddr + dom0_start);
+ copy_memory((void *)dom_imva, (void *)elfaddr, filesz);
+ if (memsz > filesz)
+ memset((void *)dom_imva+filesz, 0,
+ memsz-filesz);
+//FIXME: This test for code seems to find a lot more than objdump -x does
+ if (phdr.p_flags & PF_X) {
+ privify_memory(dom_imva,filesz);
+ flush_icache_range (dom_imva, dom_imva+filesz);
+ }
}
- dom_imva = (unsigned long) __va(dom_mpaddr + dom0_start);
- copy_memory((void *) dom_imva, (void *) elfaddr, filesz);
- if (memsz > filesz) memset((void *) dom_imva+filesz, 0, memsz-filesz);
+ else
+#endif
+ while (memsz > 0) {
+ p = assign_new_domain_page(d,dom_mpaddr);
+ BUG_ON (unlikely(p == NULL));
+ dom_imva = __va_ul(page_to_maddr(p));
+ if (filesz > 0) {
+ if (filesz >= PAGE_SIZE)
+ copy_memory((void *) dom_imva,
+ (void *) elfaddr,
+ PAGE_SIZE);
+ else {
+ // copy partial page
+ copy_memory((void *) dom_imva,
+ (void *) elfaddr, filesz);
+ // zero the rest of page
+ memset((void *) dom_imva+filesz, 0,
+ PAGE_SIZE-filesz);
+ }
//FIXME: This test for code seems to find a lot more than objdump -x does
- if (phdr.p_flags & PF_X) privify_memory(dom_imva,filesz);
- }
- else
-#endif
- while (memsz > 0) {
- p = assign_new_domain_page(d,dom_mpaddr);
- if (unlikely(!p)) BUG();
- dom_imva = (unsigned long) __va(page_to_maddr(p));
- if (filesz > 0) {
- if (filesz >= PAGE_SIZE)
- copy_memory((void *) dom_imva, (void *) elfaddr, PAGE_SIZE);
- else { // copy partial page, zero the rest of page
- copy_memory((void *) dom_imva, (void *) elfaddr, filesz);
- memset((void *) dom_imva+filesz, 0, PAGE_SIZE-filesz);
+ if (phdr.p_flags & PF_X) {
+ privify_memory(dom_imva,PAGE_SIZE);
+ flush_icache_range(dom_imva,
+ dom_imva+PAGE_SIZE);
+ }
}
-//FIXME: This test for code seems to find a lot more than objdump -x does
- if (phdr.p_flags & PF_X)
- privify_memory(dom_imva,PAGE_SIZE);
+ else if (memsz > 0) {
+ /* always zero out entire page */
+ memset((void *) dom_imva, 0, PAGE_SIZE);
+ }
+ memsz -= PAGE_SIZE;
+ filesz -= PAGE_SIZE;
+ elfaddr += PAGE_SIZE;
+ dom_mpaddr += PAGE_SIZE;
}
- else if (memsz > 0) // always zero out entire page
- memset((void *) dom_imva, 0, PAGE_SIZE);
- memsz -= PAGE_SIZE; filesz -= PAGE_SIZE;
- elfaddr += PAGE_SIZE; dom_mpaddr += PAGE_SIZE;
- }
}
}
@@ -1086,7 +1105,6 @@ int construct_dom0(struct domain *d,
new_thread(v, pkern_entry, 0, 0);
physdev_init_dom0(d);
- sync_split_caches();
// FIXME: Hack for keyboard input
//serial_input_init();
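
The implementation of domain_cache_flush() is not part of this changeset.
Conceptually, a "conservative" flush simply pushes every page the domain owns
through flush_icache_range(); a hypothetical sketch (assuming the domain's
pages are reachable through d->page_list, as elsewhere in Xen -- the real
routine may be smarter about which pages it touches):

/* Hypothetical sketch only; the real domain_cache_flush() may differ. */
static void flush_domain_pages(struct domain *d)
{
	struct page_info *page;

	list_for_each_entry (page, &d->page_list, list) {
		unsigned long imva = __va_ul(page_to_maddr(page));
		flush_icache_range(imva, imva + PAGE_SIZE);
	}
}
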
diff -r 4e8a64d8bd0e -r 96bc87dd7ca9 xen/arch/ia64/xen/privop.c
--- a/xen/arch/ia64/xen/privop.c Fri Apr 14 14:13:13 2006 -0600
+++ b/xen/arch/ia64/xen/privop.c Fri Apr 14 14:20:04 2006 -0600
@@ -60,7 +60,9 @@ void build_hypercall_bundle(UINT64 *imva
bundle.slot0 = slot0.inst; bundle.slot2 = slot2.inst;
bundle.slot1a = slot1.inst; bundle.slot1b = slot1.inst >> 18;
- *imva++ = bundle.i64[0]; *imva = bundle.i64[1];
+ imva[0] = bundle.i64[0]; imva[1] = bundle.i64[1];
+ ia64_fc (imva);
+ ia64_fc (imva + 1);
}
void build_pal_hypercall_bundles(UINT64 *imva, UINT64 brkimm, UINT64 hypnum)
@@ -83,6 +85,8 @@ void build_pal_hypercall_bundles(UINT64
bundle.slot0 = slot_a5.inst;
imva[0] = bundle.i64[0];
imva[1] = bundle.i64[1];
+ ia64_fc (imva);
+ ia64_fc (imva + 1);
/* Copy the second bundle and patch the hypercall vector. */
bundle.i64[0] = pal_call_stub[2];
@@ -93,6 +97,8 @@ void build_pal_hypercall_bundles(UINT64
bundle.slot0 = slot_m37.inst;
imva[2] = bundle.i64[0];
imva[3] = bundle.i64[1];
+ ia64_fc (imva + 2);
+ ia64_fc (imva + 3);
}
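
The pattern in privop.c is always the same: write a 16-byte bundle as two
64-bit words, then flush the cache line(s) holding them with ia64_fc(). A
sketch of that pattern packaged as a helper, with the serialization a caller
would typically add (ia64_sync_i()/ia64_srlz_i() are the standard IA64
intrinsics; their use here is an illustration, not part of the patch):

/* Illustrative sketch only, not part of the patch. */
static inline void flush_bundle(UINT64 *imva)
{
	ia64_fc(imva);        /* flush the line holding imva[0]   */
	ia64_fc(imva + 1);    /* and the line holding imva[1]     */
	ia64_sync_i();        /* wait for the flushes to complete */
	ia64_srlz_i();        /* serialize instruction fetch      */
}
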
diff -r 4e8a64d8bd0e -r 96bc87dd7ca9 xen/arch/ia64/xen/xenmisc.c
--- a/xen/arch/ia64/xen/xenmisc.c Fri Apr 14 14:13:13 2006 -0600
+++ b/xen/arch/ia64/xen/xenmisc.c Fri Apr 14 14:20:04 2006 -0600
@@ -361,26 +361,6 @@ void panic_domain(struct pt_regs *regs,
debugger_trap_immediate();
}
domain_crash_synchronous ();
-}
-
-/* FIXME: for the forseeable future, all cpu's that enable VTi have split
- * caches and all cpu's that have split caches enable VTi. This may
- * eventually be untrue though. */
-#define cpu_has_split_cache vmx_enabled
-extern unsigned int vmx_enabled;
-
-void sync_split_caches(void)
-{
- unsigned long ret, progress = 0;
-
- if (cpu_has_split_cache) {
- /* Sync d/i cache conservatively */
- ret = ia64_pal_cache_flush(4, 0, &progress, NULL);
- if ((ret!=PAL_STATUS_SUCCESS)&& (ret!=PAL_STATUS_UNIMPLEMENTED))
- printk("PAL CACHE FLUSH failed\n");
- else printk("Sync i/d cache for guest SUCC\n");
- }
- else printk("sync_split_caches ignored for CPU with no split cache\n");
}
///////////////////////////////
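
The removed sync_split_caches() made the whole cache hierarchy coherent with a
global PAL call (ia64_pal_cache_flush()) whenever the CPU had split caches; the
replacement only flushes the lines that were actually written. For comparison,
a rough C equivalent of what flush_icache_range() does on IA64 (the real
routine is written in assembly and uses the PAL-reported i-cache line size; the
32-byte stride below is an assumption):

/* Rough, illustrative equivalent of flush_icache_range() -- not the real code. */
void flush_icache_range(unsigned long start, unsigned long end)
{
	unsigned long addr;

	for (addr = start & ~31UL; addr < end; addr += 32)
		ia64_fc((void *)addr);   /* flush one (assumed 32-byte) line */
	ia64_sync_i();                   /* ensure the flushes completed     */
	ia64_srlz_i();                   /* serialize instruction fetch      */
}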