This patch reduces the amount of assembly code required to handle
EFI RID checks by having the three relevant page fault handlers
(itlb_miss, dtlb_miss and late_alt_dtlb_miss) share a common
branch_on_xen_memory routine.
Cc: Tristan Gingold <tgingold@xxxxxxx>
Cc: Isaku Yamahata <yamahata@xxxxxxxxxxxxx>
Signed-off-by: Simon Horman <horms@xxxxxxxxxxxx>
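
For reviewers, the test performed by the new common branch_on_xen_memory
routine is roughly the C sketch below. This is illustrative only and not
part of the patch: the helper name is_xen_memory_address is hypothetical,
and the constants simply mirror the checks the assembly does on the top
five bits of cr.ifa and on rr[6].

/* Sketch only: mirrors the predicate computed into p8 by
 * branch_on_xen_memory.  An address is treated as Xen memory if it is
 * in the Xen/VMM space (top five bits 0x1e, i.e. 0xf...) or in an EFI
 * region (top five bits 0x18 or 0x1c, i.e. 0xc... or 0xe...) while
 * rr[6] holds XEN_EFI_RID.  rr[6] == XEN_EFI_RID implies
 * rr[7] == XEN_EFI_RID, so only rr[6] is examined.
 */
static int is_xen_memory_address(unsigned long ifa, unsigned long rr6,
                                 unsigned long xen_efi_rid)
{
	unsigned long top = ifa >> 59;		/* as extr.u r22=r16,59,5 */

	if (top == 0x1e)			/* 0xf...: Xen/VMM space */
		return 1;
	if ((top == 0x18 || top == 0x1c) &&	/* 0xc... or 0xe... */
	    rr6 == xen_efi_rid)			/* EFI mappings present */
		return 1;
	return 0;				/* reflect or page_fault */
}

Each caller loads r26 with the target to take for a Xen address and r27
with the fall-back target before branching to the common routine.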
Index: xen-unstable.hg/xen/arch/ia64/xen/ivt.S
===================================================================
--- xen-unstable.hg.orig/xen/arch/ia64/xen/ivt.S	2007-10-29 09:40:23.000000000 +0900
+++ xen-unstable.hg/xen/arch/ia64/xen/ivt.S	2007-10-29 11:18:24.000000000 +0900
@@ -115,36 +115,17 @@ ENTRY(itlb_miss)
DBG_FAULT(1)
mov r16 = cr.ifa
mov r31 = pr
- ;;
- extr.u r17=r16,59,5
- ;;
/* If address belongs to VMM, go to alt tlb handler */
- cmp.eq p6,p0=0x1e,r17
-(p6) br.cond.spnt late_alt_itlb_miss
-
- // If it is an EFI address then must have XEN_EFI_RID set
- // And if that is true, go to alt tlb handler
- // (r17 == 0x18 && rr[6] == XEN_EFI_RID) ||
- // (r17 == 0x1c && rr[7] == XEN_EFI_RID)
- mov r23=6
- mov r24=7
+ movl r26=itlb_miss_identity_map
+ movl r27=fast_tlb_miss_reflect
;;
- mov r23=rr[r23]
- mov r24=rr[r24]
+ br.cond.spnt branch_on_xen_memory
;;
- mov r25=XEN_EFI_RID
- cmp.eq p8,p0=0x18,r17 // 0xc...
- cmp.eq p9,p0=0x1c,r17 // 0xe...
- ;;
- cmp.ne.and p8,p0=r25,r23 // rr[6] == XEN_EFI_RID
- cmp.ne.and p9,p0=r25,r24 // rr[7] == XEN_EFI_RID
- ;;
-(p8) br.cond.spnt fast_tlb_miss_reflect
-(p9) br.cond.spnt fast_tlb_miss_reflect
-
- // EFI PAGE size is IA64_GRANULE_SIZE
- // itir's key should be 0, as should the reserved space
- // thus we can just set itir = (IA64_GRANULE_SHIFT << 2)
+itlb_miss_identity_map:
+ /* EFI PAGE size is IA64_GRANULE_SIZE
+ * itir's key should be 0, as should the reserved space
+ * thus we can just set itir = (IA64_GRANULE_SHIFT << 2)
+ */
movl r20=IA64_GRANULE_SHIFT
;;
shl r20=r20,2
@@ -152,48 +133,6 @@ ENTRY(itlb_miss)
mov cr.itir=r20
;;
br.cond.sptk late_alt_itlb_miss
-
-#if 0
- mov r21=cr.ipsr
- ;;
- extr.u r23=r21,IA64_PSR_CPL0_BIT,2 // extract psr.cpl
- ;;
- cmp.ne p8,p0=r0,r23 // psr.cpl != 0
- ;;
-(p8) br.cond.spnt page_fault
- ;;
-
- // EFI PAGE size is IA64_GRANULE_SIZE
- // itir's key should be 0, as should the reserved space
- // thus we can just set itir = (IA64_GRANULE_SHIFT << 2)
- movl r20=IA64_GRANULE_SHIFT
- ;;
- shl r20=r20,2
- ;;
- mov cr.itir=r20
- ;;
-
- movl r17=PAGE_KERNEL
- movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
- ;;
- and r19=r19,r16 // clear ed, reserved bits, and PTE ctrl bits
- ;;
- or r19=r17,r19 // insert PTE control bits into r19
- ;;
-
- cmp.ne p8,p0=r0,r18 // Xen UC bit set
- ;;
- cmp.eq.or p8,p0=0x18,r22 // Region 6 is UC for EFI
- ;;
-(p8) dep r19=-1,r19,4,1 // set bit 4 (uncached) if access to UC area
- ;;
-
- itc.i r19 // insert the TLB entry
- mov pr=r31,-1
- rfi
- ;;
-#endif
-
END(itlb_miss)
.org ia64_ivt+0x0800
@@ -203,36 +142,10 @@ ENTRY(dtlb_miss)
DBG_FAULT(2)
mov r16=cr.ifa // get virtual address
mov r31=pr
+ movl r26=late_alt_dtlb_miss
+ movl r27=fast_tlb_miss_reflect
;;
- extr.u r17=r16,59,5
- ;;
- /* If address belongs to VMM, go to alt tlb handler */
- cmp.eq p6,p0=0x1e,r17
-(p6) br.cond.spnt late_alt_dtlb_miss
-
- // If it is an EFI address then must have XEN_EFI_RID set
- // And if that is true, go to alt tlb handler
- // (r17 == 0x18 && rr[6] == XEN_EFI_RID) ||
- // (r17 == 0x1c && rr[7] == XEN_EFI_RID)
- mov r23=6
- mov r24=7
- ;;
- mov r23=rr[r23]
- mov r24=rr[r24]
- ;;
- mov r25=XEN_EFI_RID
- cmp.eq p8,p0=0x18,r17 // 0xc...
- cmp.eq p9,p0=0x1c,r17 // 0xe...
- ;;
- cmp.eq.and p8,p0=r25,r23 // rr[6] == XEN_EFI_RID
- cmp.eq.and p9,p0=r25,r24 // rr[7] == XEN_EFI_RID
- ;;
-(p8) br.cond.spnt late_alt_dtlb_miss
-(p9) br.cond.spnt late_alt_dtlb_miss
- ;;
-
- br.cond.sptk fast_tlb_miss_reflect
- ;;
+ br.cond.spnt branch_on_xen_memory
END(dtlb_miss)
.org ia64_ivt+0x0c00
@@ -266,6 +179,35 @@ late_alt_itlb_miss:
rfi
END(alt_itlb_miss)
+// Branch to r26 if the faulting address is a Xen address, else to r27.
+// A Xen address satisfies:
+//   ((r22 == 0x18 || r22 == 0x1c) && rr[6] == XEN_EFI_RID) || r22 == 0x1e
+// Note that rr[6] == XEN_EFI_RID implies rr[7] == XEN_EFI_RID
+//
+// in:  r26 = branch target if a Xen address
+//      r27 = branch target otherwise
+// out: r22 = top 5 bits of the faulting address (cr.ifa >> 59)
+//      r23 = rr[6]
+//      r25 = XEN_EFI_RID
+branch_on_xen_memory:
+ extr.u r22=r16,59,5
+ mov r25=XEN_EFI_RID
+ ;;
+ cmp.eq p8,p0=0x18,r22 // address == 0xc...
+ mov r23=6
+ ;;
+ cmp.eq.or p8,p0=0x1c,r22 // address == 0xe...
+ mov r23=rr[r23]
+ ;;
+ cmp.eq.and p8,p0=r25,r23 // rr[6] == XEN_EFI_RID
+ mov b1=r26
+ ;;
+ cmp.eq.or p8,p0=0x1e,r22 // address == 0xf...
+ mov b2=r27
+ ;;
+(p8) br.cond.spnt b1
+ br.cond.spnt b2
+
.org ia64_ivt+0x1000
//////////////////////////////////////////////////////////////////////////
// 0x1000 Entry 4 (size 64 bundles) Alt DTLB (7,46)
@@ -299,29 +241,11 @@ late_alt_dtlb_miss:
cmp.eq p8,p0=((VIRT_FRAME_TABLE_ADDR>>56)&0xff)-0x100,r22
(p8) br.cond.sptk frametable_miss ;;
#endif
- // If it is not a Xen address, handle it via page_fault.
- // If it is not a Xen address, handle it via page_fault.
- // !( ((r22 == 0x18 || r22 == 0x1c) && rr[6] == XEN_EFI_RID) ||
- // r22 == 0x1e )
- // Note that rr[6] == XEN_EFI_RID implies rr[7] == XEN_EFI_RID
- extr.u r22=r16,59,5
+ movl r26=alt_dtlb_miss_identity_map
+ movl r27=page_fault
;;
dep r20=0,r20,IA64_ITIR_KEY,IA64_ITIR_KEY_LEN // clear the key
- mov r23=6
- ;;
- mov r23=rr[r23]
- ;;
- mov r25=XEN_EFI_RID
- cmp.eq p8,p0=0x18,r22 // 0xc...
- ;;
- cmp.eq.or p8,p0=0x1c,r22 // 0xe...
- ;;
- cmp.eq.and p8,p0=r25,r23 // rr[6] == XEN_EFI_RID
- ;;
- cmp.eq.or p8,p0=0x1e,r22 // 0xf...
- ;;
-(p8) br.cond.spnt alt_dtlb_miss_identity_map
- br.cond.spnt page_fault
+ br.cond.spnt branch_on_xen_memory
;;
alt_dtlb_miss_identity_map:
dep r21=-1,r21,IA64_PSR_ED_BIT,1
--
Horms
H: http://www.vergenet.net/~horms/
W: http://www.valinux.co.jp/en/