[Xen-changelog] Add support for fast mov_to_kr privops

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] Add support for fast mov_to_kr privops
From: Xen patchbot -unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Wed, 12 Oct 2005 08:32:17 +0000
Delivery-date: Wed, 12 Oct 2005 08:36:38 +0000
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User djm@xxxxxxxxxxxxxxx
# Node ID 0ba10f7fef519e0a9065c570653f955b66d930ea
# Parent  cbe6b4c4480f26645f6daff619fa2652a7089bd1
Add support for fast mov_to_kr privops
Signed-off-by: Dan Magenheimer <dan.magenheimer@xxxxxx>
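
For readers skimming the patch: the guest-side effect is that writes to the
kernel registers ar.k0..ar.k7 are routed through the new xen_set_kr() stub
(and hence the new SET_KR hyperprivop, break 0x12) when running on Xen, and
through the native __ia64_setreg() otherwise.  A minimal C sketch of that
dispatch follows; the wrapper name paravirt_set_kr() is illustrative only,
while the other identifiers appear in the diffs below.

/* Sketch only -- not the literal privop.h macro.  running_on_xen,
 * xen_set_kr(), __ia64_setreg() and _IA64_REG_AR_KR0..KR7 are names
 * used in the patch; paravirt_set_kr() is a made-up wrapper. */
static inline void paravirt_set_kr(int regnum, unsigned long val)
{
	if (running_on_xen &&
	    regnum >= _IA64_REG_AR_KR0 && regnum <= _IA64_REG_AR_KR7)
		xen_set_kr(regnum - _IA64_REG_AR_KR0, val);  /* break 0x12 */
	else
		__ia64_setreg(regnum, val);                  /* native mov-to-ar */
}

With that in place, xenentry.S below can also replace its direct
IA64_KR(CURRENT) and IA64_KR(CURRENT_STACK) updates with XEN_HYPER_SET_KR
while the virtual psr.ic is held off.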

diff -r cbe6b4c4480f -r 0ba10f7fef51 linux-2.6-xen-sparse/arch/ia64/xen/hypercall.S
--- a/linux-2.6-xen-sparse/arch/ia64/xen/hypercall.S    Fri Oct  7 15:40:37 2005
+++ b/linux-2.6-xen-sparse/arch/ia64/xen/hypercall.S    Sat Oct  8 17:37:45 2005
@@ -202,6 +202,61 @@
        ;;
 END(xen_set_rr)
 
+GLOBAL_ENTRY(xen_set_kr)
+       movl r8=running_on_xen;;
+       ld4 r8=[r8];;
+       cmp.ne p7,p0=r8,r0;;
+(p7)   br.cond.spnt.few 1f;
+       ;;
+       cmp.eq p7,p0=r8,r0
+       adds r8=-1,r8;;
+(p7)   mov ar0=r9
+(p7)   br.ret.sptk.many rp;;
+       cmp.eq p7,p0=r8,r0
+       adds r8=-1,r8;;
+(p7)   mov ar1=r9
+(p7)   br.ret.sptk.many rp;;
+       cmp.eq p7,p0=r8,r0
+       adds r8=-1,r8;;
+(p7)   mov ar2=r9
+(p7)   br.ret.sptk.many rp;;
+       cmp.eq p7,p0=r8,r0
+       adds r8=-1,r8;;
+(p7)   mov ar3=r9
+(p7)   br.ret.sptk.many rp;;
+       cmp.eq p7,p0=r8,r0
+       adds r8=-1,r8;;
+(p7)   mov ar4=r9
+(p7)   br.ret.sptk.many rp;;
+       cmp.eq p7,p0=r8,r0
+       adds r8=-1,r8;;
+(p7)   mov ar5=r9
+(p7)   br.ret.sptk.many rp;;
+       cmp.eq p7,p0=r8,r0
+       adds r8=-1,r8;;
+(p7)   mov ar6=r9
+(p7)   br.ret.sptk.many rp;;
+       cmp.eq p7,p0=r8,r0
+       adds r8=-1,r8;;
+(p7)   mov ar7=r9
+(p7)   br.ret.sptk.many rp;;
+
+1:     movl r11=XSI_PSR_IC
+       mov r8=r32
+       mov r9=r33
+       ;;
+       ld8 r10=[r11]
+       ;;
+       st8 [r11]=r0
+       ;;
+       XEN_HYPER_SET_KR
+       ;;
+       st8 [r11]=r10
+       ;;
+       br.ret.sptk.many rp
+       ;;
+END(xen_set_kr)
+
 GLOBAL_ENTRY(xen_fc)
        movl r8=running_on_xen;;
        ld4 r8=[r8];;
diff -r cbe6b4c4480f -r 0ba10f7fef51 linux-2.6-xen-sparse/arch/ia64/xen/xenentry.S
--- a/linux-2.6-xen-sparse/arch/ia64/xen/xenentry.S     Fri Oct  7 15:40:37 2005
+++ b/linux-2.6-xen-sparse/arch/ia64/xen/xenentry.S     Sat Oct  8 17:37:45 2005
@@ -61,6 +61,12 @@
        shr.u r26=r20,IA64_GRANULE_SHIFT
        cmp.eq p7,p6=r25,in0
        ;;
+#ifdef CONFIG_XEN
+       movl r8=XSI_PSR_IC
+       ;;
+       st4 [r8]=r0     // force psr.ic off for hyperprivop(s)
+       ;;
+#endif
        /*
         * If we've already mapped this task's page, we can skip doing it again.
         */
@@ -69,18 +75,24 @@
        ;;
 .done:
 #ifdef CONFIG_XEN
+       // psr.ic already off
+       // update "current" application register
+       mov r8=IA64_KR_CURRENT
+       mov r9=in0;;
+       XEN_HYPER_SET_KR
+       ld8 sp=[r21]                    // load kernel stack pointer of new task
        movl r27=XSI_PSR_IC
        mov r8=1
        ;;
-(p6)   st4 [r27]=r8
+       st4 [r27]=r8                    // psr.ic back on
        ;;
 #else
 (p6)   ssm psr.ic                      // if we had to map, reenable the psr.ic bit FIRST!!!
        ;;
 (p6)   srlz.d
-#endif
        ld8 sp=[r21]                    // load kernel stack pointer of new task
        mov IA64_KR(CURRENT)=in0        // update "current" application register
+#endif
        mov r8=r13                      // return pointer to previously running task
        mov r13=in0                     // set "current" pointer
        ;;
@@ -93,9 +105,7 @@
 
 .map:
 #ifdef CONFIG_XEN
-       movl r27=XSI_PSR_IC
-       ;;
-       st4 [r27]=r0
+       // psr.ic already off
 #else
        rsm psr.ic                      // interrupts (psr.i) are already disabled here
 #endif
@@ -115,13 +125,17 @@
        st8 [r8]=in0                     // VA of next task...
        ;;
        mov r25=IA64_TR_CURRENT_STACK
+       // remember last page we mapped...
+       mov r8=IA64_KR_CURRENT_STACK
+       mov r9=r26;;
+       XEN_HYPER_SET_KR;;
 #else
        mov cr.itir=r25
        mov cr.ifa=in0                  // VA of next task...
        ;;
        mov r25=IA64_TR_CURRENT_STACK
-#endif
        mov IA64_KR(CURRENT_STACK)=r26  // remember last page we mapped...
+#endif
        ;;
        itr.d dtr[r25]=r23              // wire in new mapping...
        br.cond.sptk .done
diff -r cbe6b4c4480f -r 0ba10f7fef51 linux-2.6-xen-sparse/include/asm-ia64/xen/privop.h
--- a/linux-2.6-xen-sparse/include/asm-ia64/xen/privop.h        Fri Oct  7 15:40:37 2005
+++ b/linux-2.6-xen-sparse/include/asm-ia64/xen/privop.h        Sat Oct  8 17:37:45 2005
@@ -32,6 +32,7 @@
 #define        XEN_HYPER_ITR_D                 break 0xf
 #define        XEN_HYPER_GET_RR                break 0x10
 #define        XEN_HYPER_SET_RR                break 0x11
+#define        XEN_HYPER_SET_KR                break 0x12
 #endif
 
 #ifndef __ASSEMBLY__
@@ -93,9 +94,6 @@
        XEN_HYPER_SSM_I;                                                \
 })
 
-// for now, just use privop.  may use hyperprivop later
-/*#define xen_set_kr(regnum,val) (__ia64_setreg(regnum,val)) */
-
 /* turning off interrupts can be paravirtualized simply by writing
  * to a memory-mapped virtual psr.i bit (implemented as a 16-bit bool) */
 #define xen_rsm_i()    xen_set_virtual_psr_i(0)
@@ -157,6 +155,7 @@
 extern void xen_eoi(void);
 extern void xen_set_rr(unsigned long index, unsigned long val);
 extern unsigned long xen_get_rr(unsigned long index);
+extern void xen_set_kr(unsigned long index, unsigned long val);
 
 /* Note: It may look wrong to test for running_on_xen in each case.
  * However regnum is always a constant so, as written, the compiler
@@ -193,9 +192,8 @@
 ({                                                                     \
        switch(regnum) {                                                \
        case _IA64_REG_AR_KR0 ... _IA64_REG_AR_KR7:                     \
-/* for now, just use privop.  may use hyperprivop later */             \
-/*             (running_on_xen) ?                                      \
-                       xen_set_kr((regnum-_IA64_REG_AR_KR0), val) : */ \
+               (running_on_xen) ?                                      \
+                       xen_set_kr((regnum-_IA64_REG_AR_KR0), val) :    \
                        __ia64_setreg(regnum,val);                      \
                break;                                                  \
        case _IA64_REG_CR_ITM:                                          \
diff -r cbe6b4c4480f -r 0ba10f7fef51 xen/arch/ia64/asm-offsets.c
--- a/xen/arch/ia64/asm-offsets.c       Fri Oct  7 15:40:37 2005
+++ b/xen/arch/ia64/asm-offsets.c       Sat Oct  8 17:37:45 2005
@@ -69,6 +69,7 @@
        DEFINE(XSI_TPR_OFS, offsetof(mapped_regs_t, tpr));
        DEFINE(XSI_PTA_OFS, offsetof(mapped_regs_t, pta));
        DEFINE(XSI_ITV_OFS, offsetof(mapped_regs_t, itv));
+       DEFINE(XSI_KR0_OFS, offsetof(mapped_regs_t, krs[0]));
        //DEFINE(IA64_TASK_BLOCKED_OFFSET,offsetof (struct task_struct, blocked));
        //DEFINE(IA64_TASK_CLEAR_CHILD_TID_OFFSET,offsetof (struct task_struct, clear_child_tid));
        //DEFINE(IA64_TASK_GROUP_LEADER_OFFSET, offsetof (struct task_struct, group_leader));
diff -r cbe6b4c4480f -r 0ba10f7fef51 xen/arch/ia64/xen/hyperprivop.S
--- a/xen/arch/ia64/xen/hyperprivop.S   Fri Oct  7 15:40:37 2005
+++ b/xen/arch/ia64/xen/hyperprivop.S   Sat Oct  8 17:37:45 2005
@@ -44,6 +44,7 @@
 #define    XEN_HYPER_ITR_D         0xf
 #define    XEN_HYPER_GET_RR        0x10
 #define    XEN_HYPER_SET_RR        0x11
+#define    XEN_HYPER_SET_KR        0x12
 
 #ifdef CONFIG_SMP
 #warning "FIXME: ptc.ga instruction requires spinlock for SMP"
@@ -168,6 +169,10 @@
        // HYPERPRIVOP_THASH?
        cmp.eq p7,p6=XEN_HYPER_THASH,r17
 (p7)   br.sptk.many hyper_thash;;
+
+       // HYPERPRIVOP_SET_KR?
+       cmp.eq p7,p6=XEN_HYPER_SET_KR,r17
+(p7)   br.sptk.many hyper_set_kr;;
 
        // if not one of the above, give up for now and do it the slow way
        br.sptk.many dispatch_break_fault ;;
@@ -1459,6 +1464,62 @@
        ;;
 END(hyper_set_rr)
 
+ENTRY(hyper_set_kr)
+       extr.u r25=r8,3,61;;
+       cmp.ne p7,p0=r0,r25     // if kr# > 7, go slow way
+(p7)   br.spnt.many dispatch_break_fault ;;
+#ifdef FAST_HYPERPRIVOP_CNT
+       movl r20=fast_hyperpriv_cnt+(8*XEN_HYPER_SET_KR);;
+       ld8 r21=[r20];;
+       adds r21=1,r21;;
+       st8 [r20]=r21;;
+#endif
+       adds r21=XSI_KR0_OFS-XSI_PSR_IC_OFS,r18 ;;
+       shl r20=r8,3;;
+       add r22=r20,r21;;
+       st8 [r22]=r9;;
+       cmp.eq p7,p0=r8,r0
+       adds r8=-1,r8;;
+(p7)   mov ar0=r9;;
+       cmp.eq p7,p0=r8,r0
+       adds r8=-1,r8;;
+(p7)   mov ar1=r9;;
+       cmp.eq p7,p0=r8,r0
+       adds r8=-1,r8;;
+(p7)   mov ar2=r9;;
+       cmp.eq p7,p0=r8,r0
+       adds r8=-1,r8;;
+(p7)   mov ar3=r9;;
+       cmp.eq p7,p0=r8,r0
+       adds r8=-1,r8;;
+(p7)   mov ar4=r9;;
+       cmp.eq p7,p0=r8,r0
+       adds r8=-1,r8;;
+(p7)   mov ar5=r9;;
+       cmp.eq p7,p0=r8,r0
+       adds r8=-1,r8;;
+(p7)   mov ar6=r9;;
+       cmp.eq p7,p0=r8,r0
+       adds r8=-1,r8;;
+(p7)   mov ar7=r9;;
+       // done, mosey on back
+1:     mov r24=cr.ipsr
+       mov r25=cr.iip;;
+       extr.u r26=r24,41,2 ;;
+       cmp.eq p6,p7=2,r26 ;;
+(p6)   mov r26=0
+(p6)   adds r25=16,r25
+(p7)   adds r26=1,r26
+       ;;
+       dep r24=r26,r24,41,2
+       ;;
+       mov cr.ipsr=r24
+       mov cr.iip=r25
+       mov pr=r31,-1 ;;
+       rfi
+       ;;
+END(hyper_set_kr)
+
 // this routine was derived from optimized assembly output from
 // vcpu_thash so it is dense and difficult to read but it works
 // On entry:
diff -r cbe6b4c4480f -r 0ba10f7fef51 xen/arch/ia64/xen/privop.c
--- a/xen/arch/ia64/xen/privop.c        Fri Oct  7 15:40:37 2005
+++ b/xen/arch/ia64/xen/privop.c        Sat Oct  8 17:37:45 2005
@@ -757,12 +757,13 @@
 #define HYPERPRIVOP_ITR_D              0xf
 #define HYPERPRIVOP_GET_RR             0x10
 #define HYPERPRIVOP_SET_RR             0x11
-#define HYPERPRIVOP_MAX                        0x11
+#define HYPERPRIVOP_SET_KR             0x12
+#define HYPERPRIVOP_MAX                        0x12
 
 char *hyperpriv_str[HYPERPRIVOP_MAX+1] = {
        0, "rfi", "rsm.dt", "ssm.dt", "cover", "itc.d", "itc.i", "ssm.i",
        "=ivr", "=tpr", "tpr=", "eoi", "itm=", "thash", "ptc.ga", "itr.d",
-       "=rr", "rr=",
+       "=rr", "rr=", "kr=",
        0
 };
 
@@ -847,6 +848,9 @@
                return 1;
            case HYPERPRIVOP_SET_RR:
                (void)vcpu_set_rr(v,regs->r8,regs->r9);
+               return 1;
+           case HYPERPRIVOP_SET_KR:
+               (void)vcpu_set_ar(v,regs->r8,regs->r9);
                return 1;
        }
        return 0;
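
On the hypervisor side, break 0x12 is handled first by the assembly fast
path hyper_set_kr above; when the register number is out of range (or the
fast path is skipped), the break reaches the C emulation in privop.c, whose
new case boils down to the sketch below.  The enclosing function name and
signature are illustrative; the case body and the HYPERPRIVOP_SET_KR /
vcpu_set_ar() names come from the hunk above.

/* Illustrative slow-path dispatch: iim is the break immediate, the KR
 * index is in r8 and the new value in r9, mirroring the fast path. */
static int handle_hyperprivop_sketch(VCPU *v, unsigned long iim, REGS *regs)
{
	switch (iim) {
	/* ... other hyperprivops elided ... */
	case HYPERPRIVOP_SET_KR:		/* 0x12, printed as "kr=" */
		(void)vcpu_set_ar(v, regs->r8, regs->r9);
		return 1;			/* handled */
	}
	return 0;				/* not handled */
}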

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
