[IA64] map/not-map correct PAL CODE address.

So far, PAL code has always been pinned down by itr[IA64_TR_PALCODE].
However, the rule was changed so that the area is now pinned down by
set_one_rr_efi() right before each firmware call, so don't pin it down
in the other cases.

diff -r 18d01f661278 xen/arch/ia64/linux-xen/efi.c
--- a/xen/arch/ia64/linux-xen/efi.c	Fri Jul 11 16:29:35 2008 +0900
+++ b/xen/arch/ia64/linux-xen/efi.c	Fri Jul 11 16:33:13 2008 +0900
@@ -424,7 +424,11 @@
 			md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT),
 			vaddr & mask, (vaddr & mask) + IA64_GRANULE_SIZE);
 #endif
+#ifdef XEN
+		return __va_efi(md->phys_addr);
+#else
 		return __va(md->phys_addr);
+#endif
 	}
 	printk(KERN_WARNING "%s: no PAL-code memory-descriptor found\n",
 	       __FUNCTION__);
@@ -447,7 +451,11 @@
 efi_map_pal_code (void)
 {
 #ifdef XEN
-	u64 psr;
+	/*
+	 * don't map PAL code.
+	 * PAL code is mapped by set_one_rr_efi() right before
+	 * firmware call.
+	 */
 	(void)efi_get_pal_addr();
 #else
 	void *pal_vaddr = efi_get_pal_addr ();
@@ -455,7 +463,6 @@
 
 	if (!pal_vaddr)
 		return;
-#endif
 
 	/*
 	 * Cannot write to CRx with PSR.ic=1
@@ -466,6 +473,7 @@
 		 IA64_GRANULE_SHIFT);
 	ia64_set_psr(psr);		/* restore psr */
 	ia64_srlz_i();
+#endif
 }
 
 void __init
diff -r 18d01f661278 xen/arch/ia64/linux-xen/mca_asm.S
--- a/xen/arch/ia64/linux-xen/mca_asm.S	Fri Jul 11 16:29:35 2008 +0900
+++ b/xen/arch/ia64/linux-xen/mca_asm.S	Fri Jul 11 16:33:13 2008 +0900
@@ -466,6 +466,7 @@
 	;;
 	srlz.d
 	;;
+#ifndef XEN
 	// 3. Reload ITR for PAL code.
 	GET_THIS_PADDR(r2, ia64_mca_pal_pte)
 	;;
@@ -484,6 +485,8 @@
 	;;
 	srlz.i
 	;;
+#endif
+
 	// 4. Reload DTR for stack.
 #ifdef XEN
 	// Kernel registers are saved in a per_cpu cpu_kr_ia64_t
diff -r 18d01f661278 xen/arch/ia64/vmx/vmx_entry.S
--- a/xen/arch/ia64/vmx/vmx_entry.S	Fri Jul 11 16:29:35 2008 +0900
+++ b/xen/arch/ia64/vmx/vmx_entry.S	Fri Jul 11 16:33:13 2008 +0900
@@ -598,7 +598,7 @@
 /*
  * in0: new rr7
  * in1: virtual address of guest_vhpt
- * in2: virtual address of pal code segment
+ * in2: virtual address of guest shared_info
  * r8: will contain old rid value
 */
 
@@ -612,7 +612,7 @@
 GLOBAL_ENTRY(__vmx_switch_rr7)
	// not sure this unwind statement is correct...
	.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(1)
-	alloc loc1 = ar.pfs, 4, 8, 0, 0
+	alloc loc1 = ar.pfs, 4, 7, 0, 0
 1:{
	mov r28 = in0		// copy procedure index
	mov r8 = ip		// save ip to compute branch
@@ -624,12 +624,9 @@
	tpa loc2 = loc2		// get physical address of per cpu date
	tpa r3 = r8		// get physical address of ip
	dep loc5 = 0,in1,60,4	// get physical address of guest_vhpt
-	dep loc6 = 0,in2,60,4	// get physical address of pal code
-	dep loc7 = 0,in3,60,4	// get physical address of privregs
+	dep loc6 = 0,in2,60,4	// get physical address of privregs
	;;
	dep loc6 = 0,loc6,0,IA64_GRANULE_SHIFT
-				// mask granule shift
-	dep loc7 = 0,loc7,0,IA64_GRANULE_SHIFT
 				// mask granule shift
	mov loc4 = psr		// save psr
	;;
@@ -726,46 +723,31 @@
	;;
 .vhpt_overlaps:
-	// re-pin mappings for PAL code section
-	mov r24=IA64_TR_PALCODE
-	or loc6 = r25,loc6		// construct PA | page properties
-	mov r23 = IA64_GRANULE_SHIFT<<2
-	;;
-	ptr.i	in2,r23
-	;;
-	mov cr.itir=r23
-	mov cr.ifa=in2
-	;;
-	itr.i itr[r24]=loc6		// wire in new mapping...
-	;;
-
	// r16, r19, r20 are used by
	//  ia64_switch_mode_phys()/ia64_switch_mode_virt()
	// re-pin mappings for privregs
	//   r21 = (current physical addr) & (IA64_GRANULE_SIZE - 1)
	//   r17 = (guest_vhpt physical addr) & (IA64_GRANULE_SIZE - 1)
-	//   loc6 = (((pal phys addr) & (IA64_GRANULE_SIZE - 1) << 2)) | PAGE_KERNEL
-	//   loc7 = (privregs physical addr) & (IA64_GRANULE_SIZE - 1)
-	cmp.ne.unc p7,p0=r21,loc7	// check overlap with current stack
+	//   loc6 = (privregs physical addr) & (IA64_GRANULE_SIZE - 1)
+	cmp.ne.unc p7,p0=r21,loc6	// check overlap with current stack
	;;
-(p7)	cmp.ne.unc p8,p0=r17,loc7	// check overlap with guest_vhpt
+(p7)	cmp.ne.unc p8,p0=r17,loc6	// check overlap with guest_vhpt
	;;
-	// loc7 = (((privregs phys) & (IA64_GRANULE_SIZE - 1)) << 2) | PAGE_KERNEL
-	or loc7 = r25,loc7		// construct PA | page properties
+	// loc6 = (((privregs phys) & (IA64_GRANULE_SIZE - 1)) << 2) | PAGE_KERNEL
+	or loc6 = r25,loc6		// construct PA | page properties
	;;
-	cmp.ne p9,p0=loc6,loc7
	mov r22=IA64_TR_VPD
	mov r24=IA64_TR_MAPPED_REGS
	mov r23=IA64_GRANULE_SHIFT<<2
	;;
-(p9)	ptr.i	in3,r23
-(p8)	ptr.d	in3,r23
+	ptr.i	in2,r23
+(p8)	ptr.d	in2,r23
	mov cr.itir=r23
-	mov cr.ifa=in3
+	mov cr.ifa=in2
	;;
-(p9)	itr.i itr[r22]=loc7		// wire in new mapping...
+	itr.i itr[r22]=loc6		// wire in new mapping...
	;;
-(p8)	itr.d dtr[r24]=loc7		// wire in new mapping...
+(p8)	itr.d dtr[r24]=loc6		// wire in new mapping...
	;;
	// done, switch back to virtual and return
diff -r 18d01f661278 xen/arch/ia64/vmx/vmx_vcpu.c
--- a/xen/arch/ia64/vmx/vmx_vcpu.c	Fri Jul 11 16:29:35 2008 +0900
+++ b/xen/arch/ia64/vmx/vmx_vcpu.c	Fri Jul 11 16:33:13 2008 +0900
@@ -201,7 +201,7 @@
 {
     __get_cpu_var(inserted_vhpt) = (unsigned long)guest_vhpt;
     __get_cpu_var(inserted_mapped_regs) = (unsigned long)shared_arch_info;
-    __vmx_switch_rr7(rid, guest_vhpt, pal_vaddr, shared_arch_info);
+    __vmx_switch_rr7(rid, guest_vhpt, shared_arch_info);
 }
 
 IA64FAULT vmx_vcpu_set_rr(VCPU *vcpu, u64 reg, u64 val)
diff -r 18d01f661278 xen/arch/ia64/xen/xenasm.S
--- a/xen/arch/ia64/xen/xenasm.S	Fri Jul 11 16:29:35 2008 +0900
+++ b/xen/arch/ia64/xen/xenasm.S	Fri Jul 11 16:33:13 2008 +0900
@@ -34,12 +34,13 @@
 //		unsigned long va_vhpt)	/* in4 */
 //Local usage:
 //	loc0=rp, loc1=ar.pfs, loc2=percpu_paddr, loc3=psr, loc4=ar.rse
-//	loc5=pal_vaddr, loc6=xen_paddr, loc7=shared_archinfo_paddr,
+//	loc5=shared_archinfo_paddr, loc6=xen_paddr,
 //	r16, r19, r20 are used by ia64_switch_mode_{phys, virt}()
+//	loc5 is unused.
 GLOBAL_ENTRY(ia64_new_rr7)
	// FIXME? not sure this unwind statement is correct...
	.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(1)
-	alloc loc1 = ar.pfs, 5, 8, 0, 0
+	alloc loc1 = ar.pfs, 5, 7, 0, 0
	movl loc2=PERCPU_ADDR
 1: {
	mov loc3 = psr			// save psr
@@ -51,7 +52,7 @@
	tpa in1=in1			// grab shared_info BEFORE changing rr7
	adds r8 = 1f-1b,r8		// calculate return address for call
	;;
-	tpa loc7=in2			// grab arch_vcpu_info BEFORE chg rr7
+	tpa loc5=in2			// grab arch_vcpu_info BEFORE chg rr7
	movl r17=PSR_BITS_TO_SET
	mov loc4=ar.rsc			// save RSE configuration
	movl r16=PSR_BITS_TO_CLEAR
@@ -60,10 +61,7 @@
	mov ar.rsc=0			// put RSE in enforced lazy, LE mode
	or loc3=loc3,r17		// add in psr the bits to set
	;;
-	movl loc5=pal_vaddr		// get pal_vaddr
-	;;
-	ld8 loc5=[loc5]			// read pal_vaddr
-	;;
+
	andcm r16=loc3,r16		// removes bits to clear from psr
	dep loc6=0,r8,0,KERNEL_TR_PAGE_SHIFT	// Xen code paddr
	br.call.sptk.many rp=ia64_switch_mode_phys
@@ -163,24 +161,12 @@
	add r22=r22,in3
	;;
	ptr.d r22,r24
-	or r23=loc7,r25			// construct PA | page properties
+	or r23=loc5,r25			// construct PA | page properties
	mov cr.itir=r24
	mov cr.ifa=r22
	mov r21=IA64_TR_MAPPED_REGS
	;;
	itr.d dtr[r21]=r23		// wire in new mapping...
-
-	// Purge/insert PAL TR
-	mov r24=IA64_TR_PALCODE
-	mov r23=IA64_GRANULE_SHIFT<<2
-	dep r25=0,loc5,60,4		// convert pal vaddr to paddr
-	;;
-	ptr.i	loc5,r23
-	or r25=r25,r26			// construct PA | page properties
-	mov cr.itir=r23
-	mov cr.ifa=loc5
-	;;
-	itr.i itr[r24]=r25
 
	// done, switch back to virtual and return
	mov r16=loc3			// r16= original psr
@@ -352,6 +338,8 @@
	mov cr.ifa=loc5
	;;
	itr.i itr[r24]=r25
+	;;
+	srlz.i
 
	// done, switch back to virtual and return
	mov r16=loc3			// r16= original psr
diff -r 18d01f661278 xen/include/asm-ia64/vmx_vcpu.h
--- a/xen/include/asm-ia64/vmx_vcpu.h	Fri Jul 11 16:29:35 2008 +0900
+++ b/xen/include/asm-ia64/vmx_vcpu.h	Fri Jul 11 16:33:13 2008 +0900
@@ -103,7 +103,7 @@
 extern uint64_t guest_read_vivr(VCPU * vcpu);
 extern int vmx_vcpu_pend_interrupt(VCPU * vcpu, uint8_t vector);
 extern void vcpu_load_kernel_regs(VCPU * vcpu);
-extern void __vmx_switch_rr7(unsigned long, void *, void *, void *);
+extern void __vmx_switch_rr7(unsigned long, void *, void *);
 extern void vmx_switch_rr7(unsigned long, void *, void *, void *);
 extern void vmx_ia64_set_dcr(VCPU * v);
 extern void inject_guest_interruption(struct vcpu *vcpu, u64 vec);
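
Editor's note on the convention the description relies on: each firmware call site is expected to point rr7 at the EFI rid with set_one_rr_efi() for the duration of the PAL/SAL/EFI call and restore the previous rr7 afterwards, which is why the permanent itr[IA64_TR_PALCODE] pin is no longer needed. Below is a minimal C sketch of that bracketing pattern; the wrapper name, the rid parameter, and the exact ia64_get_rr()/set_one_rr_efi() signatures are illustrative assumptions, not code from this changeset.

/*
 * Illustrative sketch only -- not part of this changeset.
 * Assumes set_one_rr_efi(vaddr, rrval) reprograms the region register
 * covering 'vaddr' (region 7 here) and that ia64_get_rr(vaddr) returns
 * its current value; XEN_EFI_RR7_RID is a placeholder name.
 */
#define RR7_BASE        (7UL << 61)     /* first address in region 7 */

extern unsigned long ia64_get_rr(unsigned long vaddr);
extern void set_one_rr_efi(unsigned long vaddr, unsigned long rrval);

static unsigned long
firmware_call_with_pal_mapped(unsigned long (*fw_call)(void),
                              unsigned long efi_rr7 /* e.g. XEN_EFI_RR7_RID */)
{
        unsigned long old_rr7, ret;

        old_rr7 = ia64_get_rr(RR7_BASE);        /* remember the current rr7 */
        set_one_rr_efi(RR7_BASE, efi_rr7);      /* map the PAL area for the call */

        ret = (*fw_call)();                     /* PAL/SAL/EFI runtime service */

        set_one_rr_efi(RR7_BASE, old_rr7);      /* put the old mapping back */
        return ret;
}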