# HG changeset patch
# User Isaku Yamahata <yamahata@xxxxxxxxxxxxx>
# Date 1216696502 -32400
# Node ID 7da7b53b21397951103fe04bd27ce158b6919c8e
# Parent 54060aec0dc1cd06202293eceb1615dc7e041c3e
[IA64] kexec: Only map PAL when making EFI, PAL or SAL calls
Move PAL code from the Xen identity mapped region to the
EFI identity mapped region, which overlaps with guest virtual space.
Make sure that PAL memory is only pinned into the TLB when making
EFI, PAL or SAL calls.
This seems nice as it provides a symmetrical approach to
mapping and unmapping PAL code.

However, it would be just as safe, and arguably simpler, to map
the PAL code (once?) when the EFI RR is active - for instance very
early on in boot, or when calling XEN_EFI_RR_ENTER. In other words,
unpinning in XEN_EFI_RR_LEAVE shouldn't be necessary, as the EFI RR
should protect the memory from unwanted accesses by guests (or
the hypervisor, for that matter).
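
For illustration only, the intended call pattern after this change is
roughly the sketch below. The wrapper name xen_efi_call_example() is
hypothetical; XEN_EFI_RR_ENTER/XEN_EFI_RR_LEAVE, efi_map_pal_code() and
efi_unmap_pal_code() are the pieces added or reworked by this patch.

    /* Hypothetical caller, not part of this patch, showing the intended
     * lifetime of the pinned PAL mapping around an EFI, PAL or SAL call. */
    static void
    xen_efi_call_example(void)
    {
        unsigned long rr6, rr7;

        /* Save rr6/rr7, switch them to XEN_EFI_RR and pin the PAL granule
         * via efi_map_pal_code(); a nested call (e.g. from an MCA) simply
         * re-pins the mapping. */
        XEN_EFI_RR_ENTER(rr6, rr7);

        /* ... the actual EFI, PAL or SAL call goes here ... */

        /* Unpin the PAL granule and restore rr6/rr7, but only for the
         * outermost call: if the saved rr7 is already XEN_EFI_RR this is
         * a nested call and the mapping must stay in place. */
        XEN_EFI_RR_LEAVE(rr6, rr7);
    }
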
This patch is mostly the work of Yamahata-san.
Cc: Isaku Yamahata <yamahata@xxxxxxxxxxxxx>
Signed-off-by: Simon Horman <horms@xxxxxxxxxxxx>
---
xen/arch/ia64/linux-xen/efi.c | 47 +++++++++++++++++----
xen/arch/ia64/linux-xen/mca_asm.S | 3 +
xen/arch/ia64/linux-xen/smpboot.c | 2
xen/arch/ia64/vmx/vmx_entry.S | 54 ++++++++-----------------
xen/arch/ia64/vmx/vmx_phy_mode.c | 4 -
xen/arch/ia64/vmx/vmx_vcpu.c | 6 +-
xen/arch/ia64/xen/xenasm.S | 62 +++++++----------------------
xen/include/asm-ia64/linux-xen/linux/efi.h | 27 +++++++++---
xen/include/asm-ia64/vmx_vcpu.h | 4 -
9 files changed, 104 insertions(+), 105 deletions(-)
diff -r 54060aec0dc1 -r 7da7b53b2139 xen/arch/ia64/linux-xen/efi.c
--- a/xen/arch/ia64/linux-xen/efi.c Tue Jul 22 12:15:02 2008 +0900
+++ b/xen/arch/ia64/linux-xen/efi.c Tue Jul 22 12:15:02 2008 +0900
@@ -424,7 +424,7 @@ efi_get_pal_addr (void)
md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT),
vaddr & mask, (vaddr & mask) + IA64_GRANULE_SIZE);
#endif
- return __va(md->phys_addr);
+ return __va_efi(md->phys_addr);
}
printk(KERN_WARNING "%s: no PAL-code memory-descriptor found\n",
__FUNCTION__);
@@ -432,7 +432,7 @@ efi_get_pal_addr (void)
}
#ifdef XEN
-void *pal_vaddr = 0;
+static void *pal_vaddr = 0;
void *
efi_get_pal_addr(void)
@@ -443,24 +443,51 @@ efi_get_pal_addr(void)
}
#endif
+#ifdef XEN
+static void
+__efi_unmap_pal_code (void *pal_vaddr)
+{
+ ia64_ptr(0x1, GRANULEROUNDDOWN((unsigned long)pal_vaddr),
+ IA64_GRANULE_SHIFT);
+}
+
void
-efi_map_pal_code (void)
-{
-#ifdef XEN
- u64 psr;
- (void)efi_get_pal_addr();
-#else
+efi_unmap_pal_code (void)
+{
void *pal_vaddr = efi_get_pal_addr ();
u64 psr;
if (!pal_vaddr)
return;
-#endif
/*
* Cannot write to CRx with PSR.ic=1
*/
psr = ia64_clear_ic();
+ __efi_unmap_pal_code(pal_vaddr);
+ ia64_set_psr(psr); /* restore psr */
+ ia64_srlz_i();
+}
+#endif
+
+void
+efi_map_pal_code (void)
+{
+ void *pal_vaddr = efi_get_pal_addr ();
+ u64 psr;
+
+ if (!pal_vaddr)
+ return;
+
+ /*
+ * Cannot write to CRx with PSR.ic=1
+ */
+ psr = ia64_clear_ic();
+#ifdef XEN
+ /* pal_vaddr must be unpinned before pinning
+ * This is needed in the case of a nested EFI, PAL or SAL call */
+ __efi_unmap_pal_code(pal_vaddr);
+#endif
ia64_itr(0x1, IA64_TR_PALCODE, GRANULEROUNDDOWN((unsigned long) pal_vaddr),
pte_val(pfn_pte(__pa(pal_vaddr) >> PAGE_SHIFT, PAGE_KERNEL)),
IA64_GRANULE_SHIFT);
@@ -594,7 +621,9 @@ efi_init (void)
}
#endif
+#ifndef XEN
efi_map_pal_code();
+#endif
efi_enter_virtual_mode();
}
diff -r 54060aec0dc1 -r 7da7b53b2139 xen/arch/ia64/linux-xen/mca_asm.S
--- a/xen/arch/ia64/linux-xen/mca_asm.S Tue Jul 22 12:15:02 2008 +0900
+++ b/xen/arch/ia64/linux-xen/mca_asm.S Tue Jul 22 12:15:02 2008 +0900
@@ -473,6 +473,7 @@ ia64_reload_tr:
;;
srlz.d
;;
+#ifndef XEN
// 3. Reload ITR for PAL code.
GET_THIS_PADDR(r2, ia64_mca_pal_pte)
;;
@@ -491,6 +492,8 @@ ia64_reload_tr:
;;
srlz.i
;;
+#endif
+
// 4. Reload DTR for stack.
#ifdef XEN
// Kernel registers are saved in a per_cpu cpu_kr_ia64_t
diff -r 54060aec0dc1 -r 7da7b53b2139 xen/arch/ia64/linux-xen/smpboot.c
--- a/xen/arch/ia64/linux-xen/smpboot.c Tue Jul 22 12:15:02 2008 +0900
+++ b/xen/arch/ia64/linux-xen/smpboot.c Tue Jul 22 12:15:02 2008 +0900
@@ -438,7 +438,9 @@ start_secondary (void *unused)
/* Early console may use I/O ports */
ia64_set_kr(IA64_KR_IO_BASE, __pa(ia64_iobase));
Dprintk("start_secondary: starting CPU 0x%x\n",
hard_smp_processor_id());
+#ifndef XEN
efi_map_pal_code();
+#endif
cpu_init();
smp_callin();
diff -r 54060aec0dc1 -r 7da7b53b2139 xen/arch/ia64/vmx/vmx_entry.S
--- a/xen/arch/ia64/vmx/vmx_entry.S Tue Jul 22 12:15:02 2008 +0900
+++ b/xen/arch/ia64/vmx/vmx_entry.S Tue Jul 22 12:15:02 2008 +0900
@@ -598,7 +598,7 @@ END(ia64_leave_hypercall)
/*
* in0: new rr7
* in1: virtual address of guest_vhpt
- * in2: virtual address of pal code segment
+ * in2: virtual address of guest shared_info
* r8: will contain old rid value
*/
@@ -611,7 +611,7 @@ GLOBAL_ENTRY(__vmx_switch_rr7)
GLOBAL_ENTRY(__vmx_switch_rr7)
// not sure this unwind statement is correct...
.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(1)
- alloc loc1 = ar.pfs, 4, 8, 0, 0
+ alloc loc1 = ar.pfs, 4, 7, 0, 0
1:{
mov r28 = in0 // copy procedure index
mov r8 = ip // save ip to compute branch
@@ -623,12 +623,9 @@ 1:{
tpa loc2 = loc2 // get physical address of per cpu date
tpa r3 = r8 // get physical address of ip
dep loc5 = 0,in1,60,4 // get physical address of guest_vhpt
- dep loc6 = 0,in2,60,4 // get physical address of pal code
- dep loc7 = 0,in3,60,4 // get physical address of privregs
+ dep loc6 = 0,in2,60,4 // get physical address of privregs
;;
dep loc6 = 0,loc6,0,IA64_GRANULE_SHIFT
- // mask granule shift
- dep loc7 = 0,loc7,0,IA64_GRANULE_SHIFT
// mask granule shift
mov loc4 = psr // save psr
;;
@@ -725,46 +722,31 @@ 1:
;;
.vhpt_overlaps:
- // re-pin mappings for PAL code section
- mov r24=IA64_TR_PALCODE
- or loc6 = r25,loc6 // construct PA | page properties
- mov r23 = IA64_GRANULE_SHIFT<<2
- ;;
- ptr.i in2,r23
- ;;
- mov cr.itir=r23
- mov cr.ifa=in2
- ;;
- itr.i itr[r24]=loc6 // wire in new mapping...
- ;;
-
// r16, r19, r20 are used by
// ia64_switch_mode_phys()/ia64_switch_mode_virt()
// re-pin mappings for privregs
// r21 = (current physical addr) & (IA64_GRANULE_SIZE - 1)
// r17 = (guest_vhpt physical addr) & (IA64_GRANULE_SIZE - 1)
- // loc6 = (((pal phys addr) & (IA64_GRANULE_SIZE - 1) << 2)) | PAGE_KERNEL
- // loc7 = (privregs physical addr) & (IA64_GRANULE_SIZE - 1)
- cmp.ne.unc p7,p0=r21,loc7 // check overlap with current stack
- ;;
-(p7) cmp.ne.unc p8,p0=r17,loc7 // check overlap with guest_vhpt
- ;;
- // loc7 = (((privregs phys) & (IA64_GRANULE_SIZE - 1)) << 2) | PAGE_KERNEL
- or loc7 = r25,loc7 // construct PA | page properties
- ;;
- cmp.ne p9,p0=loc6,loc7
+ // loc6 = (privregs physical addr) & (IA64_GRANULE_SIZE - 1)
+ cmp.ne.unc p7,p0=r21,loc6 // check overlap with current stack
+ ;;
+(p7) cmp.ne.unc p8,p0=r17,loc6 // check overlap with guest_vhpt
+ ;;
+ // loc6 = (((privregs phys) & (IA64_GRANULE_SIZE - 1)) << 2) | PAGE_KERNEL
+ or loc6 = r25,loc6 // construct PA | page properties
+ ;;
mov r22=IA64_TR_VPD
mov r24=IA64_TR_MAPPED_REGS
mov r23=IA64_GRANULE_SHIFT<<2
;;
-(p9) ptr.i in3,r23
-(p8) ptr.d in3,r23
+ ptr.i in2,r23
+(p8) ptr.d in2,r23
mov cr.itir=r23
- mov cr.ifa=in3
- ;;
-(p9) itr.i itr[r22]=loc7 // wire in new mapping...
- ;;
-(p8) itr.d dtr[r24]=loc7 // wire in new mapping...
+ mov cr.ifa=in2
+ ;;
+ itr.i itr[r22]=loc6 // wire in new mapping...
+ ;;
+(p8) itr.d dtr[r24]=loc6 // wire in new mapping...
;;
// done, switch back to virtual and return
diff -r 54060aec0dc1 -r 7da7b53b2139 xen/arch/ia64/vmx/vmx_phy_mode.c
--- a/xen/arch/ia64/vmx/vmx_phy_mode.c Tue Jul 22 12:15:02 2008 +0900
+++ b/xen/arch/ia64/vmx/vmx_phy_mode.c Tue Jul 22 12:15:02 2008 +0900
@@ -132,8 +132,6 @@ vmx_init_all_rr(VCPU *vcpu)
VMX(vcpu, vrr[VRN6]) = 0x38;
VMX(vcpu, vrr[VRN7]) = 0x738;
}
-
-extern void * pal_vaddr;
void
vmx_load_all_rr(VCPU *vcpu)
@@ -173,7 +171,7 @@ vmx_load_all_rr(VCPU *vcpu)
ia64_dv_serialize_data();
vmx_switch_rr7(vrrtomrr(vcpu,VMX(vcpu, vrr[VRN7])),
(void *)vcpu->arch.vhpt.hash,
- pal_vaddr, vcpu->arch.privregs);
+ vcpu->arch.privregs);
ia64_set_pta(VMX(vcpu, mpta));
vmx_ia64_set_dcr(vcpu);
diff -r 54060aec0dc1 -r 7da7b53b2139 xen/arch/ia64/vmx/vmx_vcpu.c
--- a/xen/arch/ia64/vmx/vmx_vcpu.c Tue Jul 22 12:15:02 2008 +0900
+++ b/xen/arch/ia64/vmx/vmx_vcpu.c Tue Jul 22 12:15:02 2008 +0900
@@ -197,12 +197,12 @@ void vmx_vcpu_set_rr_fast(VCPU *vcpu, u6
}
void vmx_switch_rr7(unsigned long rid, void *guest_vhpt,
- void *pal_vaddr, void *shared_arch_info)
+ void *shared_arch_info)
{
__get_cpu_var(inserted_vhpt) = (unsigned long)guest_vhpt;
__get_cpu_var(inserted_vpd) = (unsigned long)shared_arch_info;
__get_cpu_var(inserted_mapped_regs) = (unsigned long)shared_arch_info;
- __vmx_switch_rr7(rid, guest_vhpt, pal_vaddr, shared_arch_info);
+ __vmx_switch_rr7(rid, guest_vhpt, shared_arch_info);
}
IA64FAULT vmx_vcpu_set_rr(VCPU *vcpu, u64 reg, u64 val)
@@ -219,7 +219,7 @@ IA64FAULT vmx_vcpu_set_rr(VCPU *vcpu, u6
case VRN7:
if (likely(vcpu == current))
vmx_switch_rr7(vrrtomrr(vcpu,val), (void *)vcpu->arch.vhpt.hash,
- pal_vaddr, vcpu->arch.privregs);
+ vcpu->arch.privregs);
break;
case VRN4:
rrval = vrrtomrr(vcpu,val);
diff -r 54060aec0dc1 -r 7da7b53b2139 xen/arch/ia64/xen/xenasm.S
--- a/xen/arch/ia64/xen/xenasm.S Tue Jul 22 12:15:02 2008 +0900
+++ b/xen/arch/ia64/xen/xenasm.S Tue Jul 22 12:15:02 2008 +0900
@@ -34,12 +34,13 @@
// unsigned long va_vhpt) /* in4 */
//Local usage:
// loc0=rp, loc1=ar.pfs, loc2=percpu_paddr, loc3=psr, loc4=ar.rse
-// loc5=pal_vaddr, loc6=xen_paddr, loc7=shared_archinfo_paddr,
+// loc5=shared_archinfo_paddr, loc6=xen_paddr,
// r16, r19, r20 are used by ia64_switch_mode_{phys, virt}()
+// loc5 is unused.
GLOBAL_ENTRY(ia64_new_rr7)
// FIXME? not sure this unwind statement is correct...
.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(1)
- alloc loc1 = ar.pfs, 5, 8, 0, 0
+ alloc loc1 = ar.pfs, 5, 7, 0, 0
movl loc2=PERCPU_ADDR
1: {
mov loc3 = psr // save psr
@@ -51,7 +52,7 @@ 1: {
tpa in1=in1 // grab shared_info BEFORE changing rr7
adds r8 = 1f-1b,r8 // calculate return address for call
;;
- tpa loc7=in2 // grab arch_vcpu_info BEFORE chg rr7
+ tpa loc5=in2 // grab arch_vcpu_info BEFORE chg rr7
movl r17=PSR_BITS_TO_SET
mov loc4=ar.rsc // save RSE configuration
movl r16=PSR_BITS_TO_CLEAR
@@ -60,10 +61,7 @@ 1: {
mov ar.rsc=0 // put RSE in enforced lazy, LE mode
or loc3=loc3,r17 // add in psr the bits to set
;;
- movl loc5=pal_vaddr // get pal_vaddr
- ;;
- ld8 loc5=[loc5] // read pal_vaddr
- ;;
+
andcm r16=loc3,r16 // removes bits to clear from psr
dep loc6=0,r8,0,KERNEL_TR_PAGE_SHIFT // Xen code paddr
br.call.sptk.many rp=ia64_switch_mode_phys
@@ -163,24 +161,12 @@ 1:
add r22=r22,in3
;;
ptr.d r22,r24
- or r23=loc7,r25 // construct PA | page properties
+ or r23=loc5,r25 // construct PA | page properties
mov cr.itir=r24
mov cr.ifa=r22
mov r21=IA64_TR_MAPPED_REGS
;;
itr.d dtr[r21]=r23 // wire in new mapping...
-
- // Purge/insert PAL TR
- mov r24=IA64_TR_PALCODE
- mov r23=IA64_GRANULE_SHIFT<<2
- dep r25=0,loc5,60,4 // convert pal vaddr to paddr
- ;;
- ptr.i loc5,r23
- or r25=r25,r26 // construct PA | page properties
- mov cr.itir=r23
- mov cr.ifa=loc5
- ;;
- itr.i itr[r24]=r25
// done, switch back to virtual and return
mov r16=loc3 // r16= original psr
@@ -216,7 +202,7 @@ GLOBAL_ENTRY(ia64_new_rr7_efi)
GLOBAL_ENTRY(ia64_new_rr7_efi)
// FIXME? not sure this unwind statement is correct...
.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(1)
- alloc loc1 = ar.pfs, 3, 8, 0, 0
+ alloc loc1 = ar.pfs, 3, 7, 0, 0
movl loc2=PERCPU_ADDR
1: {
mov loc3 = psr // save psr
@@ -235,17 +221,13 @@ 1: {
mov ar.rsc=0 // put RSE in enforced lazy, LE mode
or loc3=loc3,r17 // add in psr the bits to set
;;
- movl loc5=pal_vaddr // get pal_vaddr
- ;;
- ld8 loc5=[loc5] // read pal_vaddr
- ;;
- dep loc7 = 0,in2,60,4 // get physical address of VPD
- ;;
- dep loc7 = 0,loc7,0,IA64_GRANULE_SHIFT
+ dep loc6 = 0,in2,60,4 // get physical address of VPD
+ ;;
+ dep loc6 = 0,loc6,0,IA64_GRANULE_SHIFT
// mask granule shift
;;
andcm r16=loc3,r16 // removes bits to clear from psr
- dep loc6=0,r8,0,KERNEL_TR_PAGE_SHIFT // Xen code paddr
+ dep loc5=0,r8,0,KERNEL_TR_PAGE_SHIFT // Xen code paddr
br.call.sptk.many rp=ia64_switch_mode_phys
1:
movl r26=PAGE_KERNEL
@@ -271,7 +253,7 @@ 1:
mov r16=IA64_TR_KERNEL
mov cr.itir=r24
mov cr.ifa=r17
- or r18=loc6,r26
+ or r18=loc5,r26
;;
itr.i itr[r16]=r18
;;
@@ -324,7 +306,7 @@ ia64_new_rr7_efi_percpu_not_mapped:
// VPD
cmp.eq p7,p0=r0,in2
(p7) br.cond.sptk ia64_new_rr7_efi_vpd_not_mapped
- or loc7 = r26,loc7 // construct PA | page properties
+ or loc6 = r26,loc6 // construct PA | page properties
mov r22=IA64_TR_VPD
mov r24=IA64_TR_MAPPED_REGS
mov r23=IA64_GRANULE_SHIFT<<2
@@ -340,27 +322,15 @@ ia64_new_rr7_efi_percpu_not_mapped:
mov cr.itir=r23
mov cr.ifa=in2
;;
- itr.i itr[r22]=loc7
- ;;
- itr.d dtr[r24]=loc7
+ itr.i itr[r22]=loc6
+ ;;
+ itr.d dtr[r24]=loc6
;;
srlz.i
;;
srlz.d
;;
ia64_new_rr7_efi_vpd_not_mapped:
-
- // Purge/insert PAL TR
- mov r24=IA64_TR_PALCODE
- mov r23=IA64_GRANULE_SHIFT<<2
- dep r25=0,loc5,60,4 // convert pal vaddr to paddr
- ;;
- ptr.i loc5,r23
- or r25=r25,r26 // construct PA | page properties
- mov cr.itir=r23
- mov cr.ifa=loc5
- ;;
- itr.i itr[r24]=r25
// done, switch back to virtual and return
mov r16=loc3 // r16= original psr
diff -r 54060aec0dc1 -r 7da7b53b2139 xen/include/asm-ia64/linux-xen/linux/efi.h
--- a/xen/include/asm-ia64/linux-xen/linux/efi.h Tue Jul 22 12:15:02 2008 +0900
+++ b/xen/include/asm-ia64/linux-xen/linux/efi.h Tue Jul 22 12:15:02 2008 +0900
@@ -23,10 +23,6 @@
#include <asm/page.h>
#include <asm/system.h>
-
-#ifdef XEN
-extern void * pal_vaddr;
-#endif
#define EFI_SUCCESS 0
#define EFI_LOAD_ERROR ( 1 | (1UL << (BITS_PER_LONG-1)))
@@ -302,6 +298,9 @@ extern void efi_init (void);
extern void efi_init (void);
extern void *efi_get_pal_addr (void);
extern void efi_map_pal_code (void);
+#ifdef XEN
+extern void efi_unmap_pal_code (void);
+#endif
extern void efi_map_memmap(void);
extern void efi_memmap_walk (efi_freemem_callback_t callback, void *arg);
extern void efi_gettimeofday (struct timespec *ts);
@@ -466,16 +465,32 @@ struct efi_generic_dev_path {
* E: bits N-53: reserved (0)
*/
+/* rr7 (and rr6) may already be set to XEN_EFI_RR, which
+ * would indicate a nested EFI, SAL or PAL call, such
+ * as from an MCA. This may have occurred during a call
+ * to set_one_rr_efi(). To be safe, repin everything anyway.
+ */
+
#define XEN_EFI_RR_ENTER(rr6, rr7) do { \
rr6 = ia64_get_rr(6UL << 61); \
rr7 = ia64_get_rr(7UL << 61); \
set_one_rr_efi(6UL << 61, XEN_EFI_RR); \
set_one_rr_efi(7UL << 61, XEN_EFI_RR); \
+ efi_map_pal_code(); \
} while (0)
+/* There is no need to do anything if the saved rr7 (and rr6)
+ * is XEN_EFI_RR, as it would just switch them from XEN_EFI_RR to XEN_EFI_RR.
+ * Furthermore, if this is a nested call it is important not to unpin
+ * the PAL code (via efi_unmap_pal_code()) until the outermost call is finished.
+ */
+
#define XEN_EFI_RR_LEAVE(rr6, rr7) do { \
- set_one_rr_efi(6UL << 61, rr6); \
- set_one_rr_efi(7UL << 61, rr7); \
+ if (rr7 != XEN_EFI_RR) { \
+ efi_unmap_pal_code(); \
+ set_one_rr_efi(6UL << 61, rr6); \
+ set_one_rr_efi(7UL << 61, rr7); \
+ } \
} while (0)
#else
diff -r 54060aec0dc1 -r 7da7b53b2139 xen/include/asm-ia64/vmx_vcpu.h
--- a/xen/include/asm-ia64/vmx_vcpu.h Tue Jul 22 12:15:02 2008 +0900
+++ b/xen/include/asm-ia64/vmx_vcpu.h Tue Jul 22 12:15:02 2008 +0900
@@ -104,9 +104,9 @@ extern int vmx_vcpu_pend_interrupt(VCPU
extern int vmx_vcpu_pend_interrupt(VCPU * vcpu, uint8_t vector);
extern void vcpu_load_kernel_regs(VCPU * vcpu);
extern void __vmx_switch_rr7(unsigned long rid, void *guest_vhpt,
- void *pal_vaddr, void *shared_arch_info);
+ void *shared_arch_info);
extern void vmx_switch_rr7(unsigned long rid, void *guest_vhpt,
- void *pal_vaddr, void *shared_arch_info);
+ void *shared_arch_info);
extern void vmx_ia64_set_dcr(VCPU * v);
extern void inject_guest_interruption(struct vcpu *vcpu, u64 vec);
extern void vmx_asm_bsw0(void);