[Xen-changelog] [xen-unstable] [IA64] optimize entry and exit path

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] [IA64] optimize entry and exit path
From: Xen patchbot-unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Fri, 28 Jul 2006 16:21:12 +0000
Delivery-date: Fri, 28 Jul 2006 09:29:06 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User awilliam@xxxxxxxxxxx
# Node ID e61bb865ec7407cd4a7a9b95485637009202a5ab
# Parent  c4af6e854010f2bd053236036e15f4067b99a418
[IA64] optimize entry and exit path

The VMM saves/restores r4-r7 and ar.unat to handle virtualization
faults and mmio accesses, but other faults do not need these
registers saved/restored.  This patch saves/restores these
registers only when a dtlb miss or virtualization fault occurs.

Signed-off-by: Anthony Xu <anthony.xu@xxxxxxxxx>
---
 xen/arch/ia64/vmx/vmx_entry.S    |   31 ++++++++++++++++++++-----------
 xen/arch/ia64/vmx/vmx_ivt.S      |   34 ++++++++++++++++++++++++++++------
 xen/arch/ia64/vmx/vmx_minstate.h |   27 +++++++++++++++------------
 3 files changed, 63 insertions(+), 29 deletions(-)
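
As a quick summary of the dispatch policy this patch introduces, here is a
minimal C sketch (illustrative only, not Xen code; the enum and the helper
name are made up): only the fault types that may need r4-r7 and ar.unat for
emulation take the heavier save/restore path.

#include <stdbool.h>
#include <stdio.h>

enum vmx_fault { ITLB_MISS, DTLB_MISS, VIRT_FAULT, BREAK_FAULT };

/* Only dtlb misses and virtualization faults may need r4-r7 and
 * ar.unat (e.g. for mmio access emulation), so only they pay for
 * VMX_SAVE_EXTRA on entry and ia64_leave_hypervisor_prepare on exit. */
static bool needs_extra_regs(enum vmx_fault f)
{
    return f == DTLB_MISS || f == VIRT_FAULT;
}

int main(void)
{
    static const char *const name[] = {
        "itlb miss", "dtlb miss", "virtualization fault", "break fault"
    };
    for (int f = ITLB_MISS; f <= BREAK_FAULT; f++)
        printf("%-20s -> %s\n", name[f],
               needs_extra_regs(f) ? "ia64_leave_hypervisor_prepare"
                                   : "ia64_leave_hypervisor");
    return 0;
}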

diff -r c4af6e854010 -r e61bb865ec74 xen/arch/ia64/vmx/vmx_entry.S
--- a/xen/arch/ia64/vmx/vmx_entry.S     Fri Jul 14 11:06:38 2006 -0600
+++ b/xen/arch/ia64/vmx/vmx_entry.S     Fri Jul 14 11:18:36 2006 -0600
@@ -163,24 +163,39 @@ END(ia64_leave_nested)
 
 
 
-GLOBAL_ENTRY(ia64_leave_hypervisor)
+GLOBAL_ENTRY(ia64_leave_hypervisor_prepare)
     PT_REGS_UNWIND_INFO(0)
     /*
     * work.need_resched etc. mustn't get changed by this CPU before it returns to
     ;;
      * user- or fsys-mode, hence we disable interrupts early on:
      */
+    adds r2 = PT(R4)+16,r12
+    adds r3 = PT(R5)+16,r12
+    adds r8 = PT(EML_UNAT)+16,r12
+    ;;
+    ld8 r8 = [r8]
+    ;;
+    mov ar.unat=r8
+    ;;
+    ld8.fill r4=[r2],16    //load r4
+    ld8.fill r5=[r3],16    //load r5
+    ;;
+    ld8.fill r6=[r2]    //load r6
+    ld8.fill r7=[r3]    //load r7
+    ;;
+END(ia64_leave_hypervisor_prepare)
+//fall through
+GLOBAL_ENTRY(ia64_leave_hypervisor)
+    PT_REGS_UNWIND_INFO(0)
     rsm psr.i
     ;;
     alloc loc0=ar.pfs,0,1,1,0
+    ;;
     adds out0=16,r12
-    adds r7 = PT(EML_UNAT)+16,r12
-    ;;
-    ld8 r7 = [r7]
     br.call.sptk.many b0=leave_hypervisor_tail
     ;;
     mov ar.pfs=loc0
-    mov ar.unat=r7
     adds r20=PT(PR)+16,r12
     ;;
     lfetch [r20],PT(CR_IPSR)-PT(PR)
@@ -244,12 +259,6 @@ GLOBAL_ENTRY(ia64_leave_hypervisor)
     ;;
     ldf.fill f10=[r2],32
     ldf.fill f11=[r3],24
-    ;;
-    ld8.fill r4=[r2],16    //load r4
-    ld8.fill r5=[r3],16    //load r5
-    ;;
-    ld8.fill r6=[r2]    //load r6
-    ld8.fill r7=[r3]    //load r7
     ;;
     srlz.i          // ensure interruption collection is off
     ;;
diff -r c4af6e854010 -r e61bb865ec74 xen/arch/ia64/vmx/vmx_ivt.S
--- a/xen/arch/ia64/vmx/vmx_ivt.S       Fri Jul 14 11:06:38 2006 -0600
+++ b/xen/arch/ia64/vmx/vmx_ivt.S       Fri Jul 14 11:18:36 2006 -0600
@@ -201,7 +201,7 @@ vmx_itlb_loop:
     ;;
 vmx_itlb_out:
     mov r19 = 1
-    br.sptk vmx_dispatch_tlb_miss
+    br.sptk vmx_dispatch_itlb_miss
     VMX_FAULT(1);
 END(vmx_itlb_miss)
 
@@ -275,7 +275,7 @@ vmx_dtlb_loop:
     ;;
 vmx_dtlb_out:
     mov r19 = 2
-    br.sptk vmx_dispatch_tlb_miss
+    br.sptk vmx_dispatch_dtlb_miss
     VMX_FAULT(2);
 END(vmx_dtlb_miss)
 
@@ -1041,9 +1041,10 @@ ENTRY(vmx_dispatch_virtualization_fault)
     srlz.i                  // guarantee that interruption collection is on
     ;;
     (p15) ssm psr.i               // restore psr.i
-    movl r14=ia64_leave_hypervisor
+    movl r14=ia64_leave_hypervisor_prepare
     ;;
     VMX_SAVE_REST
+    VMX_SAVE_EXTRA
     mov rp=r14
     ;;
     adds out1=16,sp         //regs
@@ -1070,7 +1071,7 @@ ENTRY(vmx_dispatch_vexirq)
     br.call.sptk.many b6=vmx_vexirq
 END(vmx_dispatch_vexirq)
 
-ENTRY(vmx_dispatch_tlb_miss)
+ENTRY(vmx_dispatch_itlb_miss)
     VMX_SAVE_MIN_WITH_COVER_R19
     alloc r14=ar.pfs,0,0,3,0
     mov out0=cr.ifa
@@ -1089,8 +1090,29 @@ ENTRY(vmx_dispatch_tlb_miss)
     ;;
     adds out2=16,r12
     br.call.sptk.many b6=vmx_hpw_miss
-END(vmx_dispatch_tlb_miss)
-
+END(vmx_dispatch_itlb_miss)
+
+ENTRY(vmx_dispatch_dtlb_miss)
+    VMX_SAVE_MIN_WITH_COVER_R19
+    alloc r14=ar.pfs,0,0,3,0
+    mov out0=cr.ifa
+    mov out1=r15
+    adds r3=8,r2                // set up second base pointer
+    ;;
+    ssm psr.ic
+    ;;
+    srlz.i                  // guarantee that interruption collection is on
+    ;;
+    (p15) ssm psr.i               // restore psr.i
+    movl r14=ia64_leave_hypervisor_prepare
+    ;;
+    VMX_SAVE_REST
+    VMX_SAVE_EXTRA
+    mov rp=r14
+    ;;
+    adds out2=16,r12
+    br.call.sptk.many b6=vmx_hpw_miss
+END(vmx_dispatch_dtlb_miss)
 
 ENTRY(vmx_dispatch_break_fault)
     VMX_SAVE_MIN_WITH_COVER_R19
diff -r c4af6e854010 -r e61bb865ec74 xen/arch/ia64/vmx/vmx_minstate.h
--- a/xen/arch/ia64/vmx/vmx_minstate.h  Fri Jul 14 11:06:38 2006 -0600
+++ b/xen/arch/ia64/vmx/vmx_minstate.h  Fri Jul 14 11:18:36 2006 -0600
@@ -260,24 +260,27 @@
     stf.spill [r3]=f9,32;           \
     ;;                  \
     stf.spill [r2]=f10,32;         \
-    stf.spill [r3]=f11,24;         \
-    ;;                  \
+    stf.spill [r3]=f11;         \
+    adds r25=PT(B7)-PT(F11),r3;     \
+    ;;                  \
+    st8 [r24]=r18,16;       /* b6 */    \
+    st8 [r25]=r19,16;       /* b7 */    \
+    adds r3=PT(R5)-PT(F11),r3;     \
+    ;;                  \
+    st8 [r24]=r9;           /* ar.csd */    \
+    st8 [r25]=r10;          /* ar.ssd */    \
+    ;;
+
+#define VMX_SAVE_EXTRA               \
 .mem.offset 0,0; st8.spill [r2]=r4,16;     \
 .mem.offset 8,0; st8.spill [r3]=r5,16;     \
     ;;                  \
 .mem.offset 0,0; st8.spill [r2]=r6,16;      \
 .mem.offset 8,0; st8.spill [r3]=r7;      \
-    adds r25=PT(B7)-PT(R7),r3;     \
-    ;;                  \
-    st8 [r24]=r18,16;       /* b6 */    \
-    st8 [r25]=r19,16;       /* b7 */    \
-    ;;                  \
-    st8 [r24]=r9;           /* ar.csd */    \
-    mov r26=ar.unat;            \
-    ;;      \
-    st8 [r25]=r10;          /* ar.ssd */    \
+    ;;                 \
+    mov r26=ar.unat;    \
+    ;;                 \
     st8 [r2]=r26;       /* eml_unat */ \
-    ;;
 
 #define VMX_SAVE_MIN_WITH_COVER   VMX_DO_SAVE_MIN(cover, mov r30=cr.ifs,)
 #define VMX_SAVE_MIN_WITH_COVER_R19 VMX_DO_SAVE_MIN(cover, mov r30=cr.ifs, mov r15=r19)
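
For readers unfamiliar with the "//fall through" arrangement in vmx_entry.S
above, here is a rough C analogue (again illustrative only; the function
bodies are made up): ia64_leave_hypervisor_prepare restores the extra state
and then continues directly into the common exit path, so each dispatcher
chooses how much restore work happens simply by which label it loads into rp.

#include <stdio.h>

static void leave_hypervisor(void)          /* common exit path */
{
    printf("restore common state, return to guest\n");
}

static void leave_hypervisor_prepare(void)  /* extra-restore entry point */
{
    printf("restore ar.unat and r4-r7\n");  /* mov ar.unat=r8; ld8.fill r4-r7 */
    leave_hypervisor();                     /* the "//fall through" */
}

int main(void)
{
    leave_hypervisor_prepare();  /* path after dtlb miss / virtualization fault */
    leave_hypervisor();          /* path after all other faults */
    return 0;
}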

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
