[Xen-changelog] [IA64] Merge vpsr.i with evtchn_upcall_mask to solve one tricky bug

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [IA64] Merge vpsr.i with evtchn_upcall_mask to solve one tricky bug
From: Xen patchbot -unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Thu, 27 Apr 2006 10:32:18 +0000
Delivery-date: Thu, 27 Apr 2006 03:35:46 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User awilliam@xxxxxxxxxxx
# Node ID 2b6e531dab38da94fd07996e705ece6a3f7fabe3
# Parent  986538da9be011ecceadbd400ef880e90254c798
[IA64] Merge vpsr.i with evtchn_upcall_mask to solve one tricky bug

Per agreement at the summit, xen/ia64 will move to the same event channel
model as xen/x86, under which events form the layer beneath pirq
(external interrupts), virq, and ipi, with the latter three bound to
event ports. In that model, no external interrupt is injected directly,
and evtchn_upcall_mask is the flag that controls whether events are
deliverable.
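
As a rough sketch of that model (assuming the standard vcpu_info layout
from the public Xen headers; the helper names here are illustrative and
not taken from this patch), event delivery is gated purely by the
per-vcpu mask byte:

#include <stdint.h>

/* Sketch of the per-vcpu event state; field names follow the public
 * Xen interface, other fields omitted. */
struct vcpu_info_sketch {
    uint8_t evtchn_upcall_pending;   /* an event is waiting for delivery */
    uint8_t evtchn_upcall_mask;      /* 1 => event delivery blocked */
};

/* Masking events plays the role vpsr.i used to play for interrupts. */
static inline void events_disable(struct vcpu_info_sketch *vi)
{
    vi->evtchn_upcall_mask = 1;      /* analogous to clearing psr.i */
}

static inline void events_enable(struct vcpu_info_sketch *vi)
{
    vi->evtchn_upcall_mask = 0;      /* analogous to setting psr.i */
}

static inline int event_deliverable(const struct vcpu_info_sketch *vi)
{
    return !vi->evtchn_upcall_mask && vi->evtchn_upcall_pending;
}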

So xenlinux needs to manipulate evtchn_upcall_mask everywhere it
previously manipulated vpsr.i. However, the two flags live in different
shared areas, so xenlinux cannot update both atomically, which causes
severe stability issues. One severe bug caused by this is that a
hypercall may be restarted indefinitely while events are pending.

Based on the description of the future model, events become a superset
of external interrupts, and thus evtchn_upcall_mask becomes a superset
of vpsr.i (interrupt_delivery_enabled). We can therefore merge the two
flags into one by removing the latter. This ensures correctness and,
most importantly, conforms to the common code, which always relies on
evtchn_upcall_mask.

Signed-off-by: Kevin Tian <kevin.tian@xxxxxxxxx>
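
For reference, the polarity of the merged flag is inverted: vpsr.i
enabled corresponds to evtchn_upcall_mask == 0. Below is a minimal C
sketch of that mapping, modelled on the xen_get/set_virtual_psr_i
macros changed in privop.h in this patch (interrupt_mask_addr and the
0/1 encoding come from the patch; the standalone function form is only
illustrative):

#include <stdint.h>

/* interrupt_mask_addr points at the current vcpu's evtchn_upcall_mask
 * byte in the shared info area (set up in new_thread() in this patch). */
static inline int virtual_psr_i(const uint8_t *interrupt_mask_addr)
{
    /* vpsr.i means "events deliverable", i.e. the mask is clear. */
    return !*interrupt_mask_addr;
}

static inline void set_virtual_psr_i(uint8_t *interrupt_mask_addr, int val)
{
    /* Enabling vpsr.i clears the mask; disabling it sets the mask. */
    *interrupt_mask_addr = val ? 0 : 1;
}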

diff -r 986538da9be0 -r 2b6e531dab38 linux-2.6-xen-sparse/arch/ia64/xen/drivers/xenia64_init.c
--- a/linux-2.6-xen-sparse/arch/ia64/xen/drivers/xenia64_init.c Wed Mar 29 12:41:33 2006 -0700
+++ b/linux-2.6-xen-sparse/arch/ia64/xen/drivers/xenia64_init.c Thu Mar 30 09:55:26 2006 -0700
@@ -5,6 +5,7 @@
 #include <asm/sal.h>
 #include <asm/hypervisor.h>
 /* #include <asm-xen/evtchn.h> */
+#include <xen/interface/arch-ia64.h>
 #include <linux/vmalloc.h>
 
 shared_info_t *HYPERVISOR_shared_info = (shared_info_t *)0xf100000000000000;
diff -r 986538da9be0 -r 2b6e531dab38 linux-2.6-xen-sparse/arch/ia64/xen/hypercall.S
--- a/linux-2.6-xen-sparse/arch/ia64/xen/hypercall.S    Wed Mar 29 12:41:33 2006 -0700
+++ b/linux-2.6-xen-sparse/arch/ia64/xen/hypercall.S    Thu Mar 30 09:55:26 2006 -0700
@@ -8,6 +8,27 @@
 #include <asm/processor.h>
 #include <asm/asmmacro.h>
 
+/* To clear vpsr.ic, vpsr.i needs to be cleared first */
+#define XEN_CLEAR_PSR_IC                               \
+       mov r14=1;                                      \
+       movl r15=XSI_PSR_I_ADDR;                        \
+       movl r2=XSI_PSR_IC;                             \
+       ;;                                              \
+       ld8 r15=[r15];                                  \
+       ld4 r3=[r2];                                    \
+       ;;                                              \
+       ld1 r16=[r15];                                  \
+       ;;                                              \
+       st1 [r15]=r14;                                  \
+       st4 [r2]=r0;                                    \
+       ;;
+
+/* First restore vpsr.ic, and then vpsr.i */
+#define XEN_RESTORE_PSR_IC                             \
+       st4 [r2]=r3;                                    \
+       st1 [r15]=r16;                                  \
+       ;;
+
 GLOBAL_ENTRY(xen_get_ivr)
        movl r8=running_on_xen;;
        ld4 r8=[r8];;
@@ -15,15 +36,12 @@ GLOBAL_ENTRY(xen_get_ivr)
 (p7)   mov r8=cr.ivr;;
 (p7)   br.ret.sptk.many rp
        ;;
-       movl r9=XSI_PSR_IC
-       ;;
-       ld8 r10=[r9]
-       ;;
-       st8 [r9]=r0
+       XEN_CLEAR_PSR_IC
        ;;
        XEN_HYPER_GET_IVR
        ;;
-       st8 [r9]=r10
+       XEN_RESTORE_PSR_IC
+       ;;
        br.ret.sptk.many rp
        ;;
 END(xen_get_ivr)
@@ -35,15 +53,12 @@ GLOBAL_ENTRY(xen_get_tpr)
 (p7)   mov r8=cr.tpr;;
 (p7)   br.ret.sptk.many rp
        ;;
-       movl r9=XSI_PSR_IC
-       ;;
-       ld8 r10=[r9]
-       ;;
-       st8 [r9]=r0
+       XEN_CLEAR_PSR_IC
        ;;
        XEN_HYPER_GET_TPR
        ;;
-       st8 [r9]=r10
+       XEN_RESTORE_PSR_IC
+       ;;
        br.ret.sptk.many rp
        ;;
 END(xen_get_tpr)
@@ -55,16 +70,14 @@ GLOBAL_ENTRY(xen_set_tpr)
 (p7)   mov cr.tpr=r32;;
 (p7)   br.ret.sptk.many rp
        ;;
-       movl r9=XSI_PSR_IC
-       mov r8=r32
-       ;;
-       ld8 r10=[r9]
-       ;;
-       st8 [r9]=r0
+       mov r8=r32
+       ;;
+       XEN_CLEAR_PSR_IC
        ;;
        XEN_HYPER_SET_TPR
        ;;
-       st8 [r9]=r10
+       XEN_RESTORE_PSR_IC
+       ;;
        br.ret.sptk.many rp
        ;;
 END(xen_set_tpr)
@@ -76,16 +89,14 @@ GLOBAL_ENTRY(xen_eoi)
 (p7)   mov cr.eoi=r0;;
 (p7)   br.ret.sptk.many rp
        ;;
-       movl r9=XSI_PSR_IC
-       mov r8=r32
-       ;;
-       ld8 r10=[r9]
-       ;;
-       st8 [r9]=r0
+       mov r8=r32
+       ;;
+       XEN_CLEAR_PSR_IC
        ;;
        XEN_HYPER_EOI
        ;;
-       st8 [r9]=r10
+       XEN_RESTORE_PSR_IC
+       ;;
        br.ret.sptk.many rp
        ;;
 END(xen_eoi)
@@ -97,16 +108,13 @@ GLOBAL_ENTRY(xen_thash)
 (p7)   thash r8=r32;;
 (p7)   br.ret.sptk.many rp
        ;;
-       movl r9=XSI_PSR_IC
-       mov r8=r32
-       ;;
-       ld8 r10=[r9]
-       ;;
-       st8 [r9]=r0
+       mov r8=r32
+       ;;
+       XEN_CLEAR_PSR_IC
        ;;
        XEN_HYPER_THASH
        ;;
-       st8 [r9]=r10
+       XEN_RESTORE_PSR_IC
        ;;
        br.ret.sptk.many rp
        ;;
@@ -119,16 +127,13 @@ GLOBAL_ENTRY(xen_set_itm)
 (p7)   mov cr.itm=r32;;
 (p7)   br.ret.sptk.many rp
        ;;
-       movl r9=XSI_PSR_IC
-       mov r8=r32
-       ;;
-       ld8 r10=[r9]
-       ;;
-       st8 [r9]=r0
+       mov r8=r32
+       ;;
+       XEN_CLEAR_PSR_IC
        ;;
        XEN_HYPER_SET_ITM
        ;;
-       st8 [r9]=r10
+       XEN_RESTORE_PSR_IC
        ;;
        br.ret.sptk.many rp
        ;;
@@ -141,17 +146,14 @@ GLOBAL_ENTRY(xen_ptcga)
 (p7)   ptc.ga r32,r33;;
 (p7)   br.ret.sptk.many rp
        ;;
-       movl r11=XSI_PSR_IC
        mov r8=r32
        mov r9=r33
        ;;
-       ld8 r10=[r11]
-       ;;
-       st8 [r11]=r0
+       XEN_CLEAR_PSR_IC
        ;;
        XEN_HYPER_PTC_GA
        ;;
-       st8 [r11]=r10
+       XEN_RESTORE_PSR_IC
        ;;
        br.ret.sptk.many rp
        ;;
@@ -164,16 +166,13 @@ GLOBAL_ENTRY(xen_get_rr)
 (p7)   mov r8=rr[r32];;
 (p7)   br.ret.sptk.many rp
        ;;
-       movl r9=XSI_PSR_IC
-       mov r8=r32
-       ;;
-       ld8 r10=[r9]
-       ;;
-       st8 [r9]=r0
+       mov r8=r32
+       ;;
+       XEN_CLEAR_PSR_IC
        ;;
        XEN_HYPER_GET_RR
        ;;
-       st8 [r9]=r10
+       XEN_RESTORE_PSR_IC
        ;;
        br.ret.sptk.many rp
        ;;
@@ -186,17 +185,14 @@ GLOBAL_ENTRY(xen_set_rr)
 (p7)   mov rr[r32]=r33;;
 (p7)   br.ret.sptk.many rp
        ;;
-       movl r11=XSI_PSR_IC
        mov r8=r32
        mov r9=r33
        ;;
-       ld8 r10=[r11]
-       ;;
-       st8 [r11]=r0
+       XEN_CLEAR_PSR_IC
        ;;
        XEN_HYPER_SET_RR
        ;;
-       st8 [r11]=r10
+       XEN_RESTORE_PSR_IC
        ;;
        br.ret.sptk.many rp
        ;;
@@ -241,17 +237,14 @@ GLOBAL_ENTRY(xen_set_kr)
 (p7)   mov ar7=r9
 (p7)   br.ret.sptk.many rp;;
 
-1:     movl r11=XSI_PSR_IC
-       mov r8=r32
+1:     mov r8=r32
        mov r9=r33
        ;;
-       ld8 r10=[r11]
-       ;;
-       st8 [r11]=r0
+       XEN_CLEAR_PSR_IC
        ;;
        XEN_HYPER_SET_KR
        ;;
-       st8 [r11]=r10
+       XEN_RESTORE_PSR_IC
        ;;
        br.ret.sptk.many rp
 END(xen_set_rr)
@@ -263,16 +256,13 @@ GLOBAL_ENTRY(xen_fc)
 (p7)   fc r32;;
 (p7)   br.ret.sptk.many rp
        ;;
-       movl r9=XSI_PSR_IC
-       mov r8=r32
-       ;;
-       ld8 r10=[r9]
-       ;;
-       st8 [r9]=r0
+       mov r8=r32
+       ;;
+       XEN_CLEAR_PSR_IC
        ;;
        XEN_HYPER_FC
        ;;
-       st8 [r9]=r10
+       XEN_RESTORE_PSR_IC
        ;;
        br.ret.sptk.many rp
 END(xen_fc)
@@ -284,16 +274,13 @@ GLOBAL_ENTRY(xen_get_cpuid)
 (p7)   mov r8=cpuid[r32];;
 (p7)   br.ret.sptk.many rp
        ;;
-       movl r9=XSI_PSR_IC
-       mov r8=r32
-       ;;
-       ld8 r10=[r9]
-       ;;
-       st8 [r9]=r0
+       mov r8=r32
+       ;;
+       XEN_CLEAR_PSR_IC
        ;;
        XEN_HYPER_GET_CPUID
        ;;
-       st8 [r9]=r10
+       XEN_RESTORE_PSR_IC
        ;;
        br.ret.sptk.many rp
 END(xen_get_cpuid)
@@ -305,16 +292,13 @@ GLOBAL_ENTRY(xen_get_pmd)
 (p7)   mov r8=pmd[r32];;
 (p7)   br.ret.sptk.many rp
        ;;
-       movl r9=XSI_PSR_IC
-       mov r8=r32
-       ;;
-       ld8 r10=[r9]
-       ;;
-       st8 [r9]=r0
+       mov r8=r32
+       ;;
+       XEN_CLEAR_PSR_IC
        ;;
        XEN_HYPER_GET_PMD
        ;;
-       st8 [r9]=r10
+       XEN_RESTORE_PSR_IC
        ;;
        br.ret.sptk.many rp
 END(xen_get_pmd)
@@ -327,16 +311,13 @@ GLOBAL_ENTRY(xen_get_eflag)
 (p7)   mov r8=ar24;;
 (p7)   br.ret.sptk.many rp
        ;;
-       movl r9=XSI_PSR_IC
-       mov r8=r32
-       ;;
-       ld8 r10=[r9]
-       ;;
-       st8 [r9]=r0
+       mov r8=r32
+       ;;
+       XEN_CLEAR_PSR_IC
        ;;
        XEN_HYPER_GET_EFLAG
        ;;
-       st8 [r9]=r10
+       XEN_RESTORE_PSR_IC
        ;;
        br.ret.sptk.many rp
 END(xen_get_eflag)
@@ -349,16 +330,13 @@ GLOBAL_ENTRY(xen_set_eflag)
 (p7)   mov ar24=r32
 (p7)   br.ret.sptk.many rp
        ;;
-       movl r9=XSI_PSR_IC
-       mov r8=r32
-       ;;
-       ld8 r10=[r9]
-       ;;
-       st8 [r9]=r0
+       mov r8=r32
+       ;;
+       XEN_CLEAR_PSR_IC
        ;;
        XEN_HYPER_SET_EFLAG
        ;;
-       st8 [r9]=r10
+       XEN_RESTORE_PSR_IC
        ;;
        br.ret.sptk.many rp
 END(xen_set_eflag)
diff -r 986538da9be0 -r 2b6e531dab38 linux-2.6-xen-sparse/arch/ia64/xen/xenentry.S
--- a/linux-2.6-xen-sparse/arch/ia64/xen/xenentry.S     Wed Mar 29 12:41:33 2006 -0700
+++ b/linux-2.6-xen-sparse/arch/ia64/xen/xenentry.S     Thu Mar 30 09:55:26 2006 -0700
@@ -312,9 +312,12 @@ ENTRY(ia64_leave_syscall)
        cmp.eq p6,p0=r21,r0             // p6 <- pUStk || (preempt_count == 0)
 #else /* !CONFIG_PREEMPT */
 #ifdef CONFIG_XEN
-       movl r2=XSI_PSR_I
-       ;;
-(pUStk)        st4 [r2]=r0
+       movl r2=XSI_PSR_I_ADDR
+       mov r18=1
+       ;;
+       ld8 r2=[r2]
+       ;;
+(pUStk)        st1 [r2]=r18
 #else
 (pUStk)        rsm psr.i
 #endif
@@ -345,9 +348,14 @@ ENTRY(ia64_leave_syscall)
        ;;
        invala                  // M0|1 invalidate ALAT
 #ifdef CONFIG_XEN
+       movl r28=XSI_PSR_I_ADDR
        movl r29=XSI_PSR_IC
        ;;
-       st8     [r29]=r0        // note: clears both vpsr.i and vpsr.ic!
+       ld8 r28=[r28]
+       mov r30=1
+       ;;
+       st1     [r28]=r30
+       st4     [r29]=r0        // note: clears both vpsr.i and vpsr.ic!
        ;;
 #else
        rsm psr.i | psr.ic      // M2 initiate turning off of interrupt and interruption collection
@@ -441,9 +449,12 @@ GLOBAL_ENTRY(ia64_leave_kernel)
        cmp.eq p6,p0=r21,r0             // p6 <- pUStk || (preempt_count == 0)
 #else
 #ifdef CONFIG_XEN
-(pUStk)        movl r17=XSI_PSR_I
-       ;;
-(pUStk)        st4 [r17]=r0
+(pUStk)        movl r17=XSI_PSR_I_ADDR
+(pUStk)        mov r31=1
+               ;;
+(pUStk)        ld8 r17=[r17]
+               ;;
+(pUStk)        st1 [r17]=r31
        ;;
 #else
 (pUStk)        rsm psr.i
@@ -496,9 +507,14 @@ GLOBAL_ENTRY(ia64_leave_kernel)
        mov ar.ssd=r31
        ;;
 #ifdef CONFIG_XEN
+       movl r23=XSI_PSR_I_ADDR
        movl r22=XSI_PSR_IC
        ;;
-       st8 [r22]=r0            // note: clears both vpsr.i and vpsr.ic!
+       ld8 r23=[r23]
+       mov r25=1
+       ;;
+       st1 [r23]=r25
+       st4 [r22]=r0            // note: clears both vpsr.i and vpsr.ic!
        ;;
 #else
        rsm psr.i | psr.ic      // initiate turning off of interrupt and interruption collection
@@ -803,9 +819,12 @@ skip_rbs_switch:
        br.call.spnt.many rp=schedule
 .ret9: cmp.eq p6,p0=r0,r0                              // p6 <- 1
 #ifdef CONFIG_XEN
-       movl r2=XSI_PSR_I
-       ;;
-       st4 [r2]=r0
+       movl r2=XSI_PSR_I_ADDR
+       mov r20=1
+       ;;
+       ld8 r2=[r2]
+       ;;
+       st1 [r2]=r20
 #else
        rsm psr.i               // disable interrupts
 #endif
diff -r 986538da9be0 -r 2b6e531dab38 linux-2.6-xen-sparse/arch/ia64/xen/xenivt.S
--- a/linux-2.6-xen-sparse/arch/ia64/xen/xenivt.S       Wed Mar 29 12:41:33 2006 -0700
+++ b/linux-2.6-xen-sparse/arch/ia64/xen/xenivt.S       Thu Mar 30 09:55:26 2006 -0700
@@ -683,9 +683,11 @@ ENTRY(dkey_miss)
        // Leaving this code inline above results in an IVT section overflow
        // There is no particular reason for this code to be here...
 xen_page_fault:
-(p15)  movl r3=XSI_PSR_I
-       ;;
-(p15)  st4 [r3]=r14,XSI_PEND-XSI_PSR_I         // if (p15) vpsr.i = 1
+(p15)  movl r3=XSI_PSR_I_ADDR
+       ;;
+(p15)  ld8 r3=[r3]
+       ;;
+(p15)  st1 [r3]=r0,XSI_PEND-XSI_PSR_I_ADDR     // if (p15) vpsr.i = 1
        mov r14=r0
        ;;
 (p15)  ld4 r14=[r3]                            // if (pending_interrupts)
@@ -1043,9 +1045,11 @@ ENTRY(break_fault)
        mov r16=1
        ;;
 #if 1
-       st4 [r3]=r16,XSI_PSR_I-XSI_PSR_IC       // vpsr.ic = 1
-       ;;
-(p15)  st4 [r3]=r16,XSI_PEND-XSI_PSR_I         // if (p15) vpsr.i = 1
+       st4 [r3]=r16,XSI_PSR_I_ADDR-XSI_PSR_IC  // vpsr.ic = 1
+       ;;
+(p15)  ld8 r3=[r3]
+       ;;
+(p15)  st1 [r3]=r0,XSI_PEND-XSI_PSR_I_ADDR     // if (p15) vpsr.i = 1
        mov r16=r0
        ;;
 (p15)  ld4 r16=[r3]                            // if (pending_interrupts)
@@ -1055,10 +1059,12 @@ ENTRY(break_fault)
 (p6)   ssm     psr.i                           //   do a real ssm psr.i
        ;;
 #else
-//     st4 [r3]=r16,XSI_PSR_I-XSI_PSR_IC       // vpsr.ic = 1
-       adds r3=XSI_PSR_I-XSI_PSR_IC,r3         // SKIP vpsr.ic = 1
-       ;;
-(p15)  st4 [r3]=r16,XSI_PEND-XSI_PSR_I         // if (p15) vpsr.i = 1
+//     st4 [r3]=r16,XSI_PSR_I_ADDR-XSI_PSR_IC  // vpsr.ic = 1
+       adds r3=XSI_PSR_I_ADDR-XSI_PSR_IC,r3    // SKIP vpsr.ic = 1
+       ;;
+(p15)  ld8 r3=[r3]
+       ;;
+(p15)  st1 [r3]=r0,XSI_PEND-XSI_PSR_I_ADDR     // if (p15) vpsr.i = 1
        mov r16=r0
        ;;
 (p15)  ld4 r16=[r3]                            // if (pending_interrupts)
diff -r 986538da9be0 -r 2b6e531dab38 linux-2.6-xen-sparse/arch/ia64/xen/xenpal.S
--- a/linux-2.6-xen-sparse/arch/ia64/xen/xenpal.S       Wed Mar 29 12:41:33 2006 -0700
+++ b/linux-2.6-xen-sparse/arch/ia64/xen/xenpal.S       Thu Mar 30 09:55:26 2006 -0700
@@ -43,11 +43,14 @@ 1:  {
        // from the idle loop so confuses privop counting
        movl r31=XSI_PSR_IC
        ;;
-(p6)   st8 [r31]=r0
+(p6)   st4 [r31]=r0
        ;;
-(p7)   adds r31=XSI_PSR_I-XSI_PSR_IC,r31
+(p7)   adds r31=XSI_PSR_I_ADDR-XSI_PSR_IC,r31
+(p7)   mov r22=1
        ;;
-(p7)   st4 [r31]=r0
+(p7)   ld8 r31=[r31]
+       ;;
+(p7)   st1 [r31]=r22
        ;;
        mov r31 = in3
        mov b7 = loc2
diff -r 986538da9be0 -r 2b6e531dab38 linux-2.6-xen-sparse/include/asm-ia64/xen/privop.h
--- a/linux-2.6-xen-sparse/include/asm-ia64/xen/privop.h        Wed Mar 29 12:41:33 2006 -0700
+++ b/linux-2.6-xen-sparse/include/asm-ia64/xen/privop.h        Thu Mar 30 09:55:26 2006 -0700
@@ -87,9 +87,14 @@ extern void xen_set_eflag(unsigned long)
  * Others, like "pend", are abstractions based on privileged registers.
  * "Pend" is guaranteed to be set if reading cr.ivr would return a
  * (non-spurious) interrupt. */
-#define xen_get_virtual_psr_i()                (*(int *)(XSI_PSR_I))
-#define xen_set_virtual_psr_i(_val)    ({ *(int *)(XSI_PSR_I) = _val ? 1:0; })
-#define xen_set_virtual_psr_ic(_val)   ({ *(int *)(XSI_PSR_IC) = _val ? 1:0; })
+#define XSI_PSR_I                      \
+       (*(uint64_t *)(XSI_PSR_I_ADDR))
+#define xen_get_virtual_psr_i()                \
+       (!(*(uint8_t *)(XSI_PSR_I)))
+#define xen_set_virtual_psr_i(_val)    \
+       ({ *(uint8_t *)(XSI_PSR_I) = (uint8_t)(_val) ? 0:1; })
+#define xen_set_virtual_psr_ic(_val)   \
+       ({ *(int *)(XSI_PSR_IC) = _val ? 1:0; })
 #define xen_get_virtual_pend()         (*(int *)(XSI_PEND))
 
 /* Hyperprivops are "break" instructions with a well-defined API.
diff -r 986538da9be0 -r 2b6e531dab38 xen/arch/ia64/asm-xsi-offsets.c
--- a/xen/arch/ia64/asm-xsi-offsets.c   Wed Mar 29 12:41:33 2006 -0700
+++ b/xen/arch/ia64/asm-xsi-offsets.c   Thu Mar 30 09:55:26 2006 -0700
@@ -50,8 +50,8 @@ void foo(void)
        /* First is shared info page, and then arch specific vcpu context */
        DEFINE(XSI_BASE, SHAREDINFO_ADDR);
 
-       DEFINE(XSI_PSR_I_OFS, (XSI_OFS + offsetof(mapped_regs_t, interrupt_delivery_enabled)));
-       DEFINE(XSI_PSR_I, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, interrupt_delivery_enabled)));
+       DEFINE(XSI_PSR_I_ADDR_OFS, (XSI_OFS + offsetof(mapped_regs_t, interrupt_mask_addr)));
+       DEFINE(XSI_PSR_I_ADDR, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, interrupt_mask_addr)));
        DEFINE(XSI_IPSR, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, ipsr)));
        DEFINE(XSI_IPSR_OFS, (XSI_OFS + offsetof(mapped_regs_t, ipsr)));
        DEFINE(XSI_IIP_OFS, (XSI_OFS + offsetof(mapped_regs_t, iip)));
@@ -104,5 +104,4 @@ void foo(void)
        DEFINE(XSI_PKR0, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, pkrs[0])));
        DEFINE(XSI_TMP0_OFS, (XSI_OFS + offsetof(mapped_regs_t, tmp[0])));
        DEFINE(XSI_TMP0, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, tmp[0])));
-       
 }
diff -r 986538da9be0 -r 2b6e531dab38 xen/arch/ia64/xen/domain.c
--- a/xen/arch/ia64/xen/domain.c        Wed Mar 29 12:41:33 2006 -0700
+++ b/xen/arch/ia64/xen/domain.c        Thu Mar 30 09:55:26 2006 -0700
@@ -485,6 +485,8 @@ void new_thread(struct vcpu *v,
                regs->ar_rsc |= (2 << 2); /* force PL2/3 */
                VCPU(v, banknum) = 1;
                VCPU(v, metaphysical_mode) = 1;
+               VCPU(v, interrupt_mask_addr) =
+                   (uint64_t)SHAREDINFO_ADDR + INT_ENABLE_OFFSET(v);
        }
 }
 
diff -r 986538da9be0 -r 2b6e531dab38 xen/arch/ia64/xen/hyperprivop.S
--- a/xen/arch/ia64/xen/hyperprivop.S   Wed Mar 29 12:41:33 2006 -0700
+++ b/xen/arch/ia64/xen/hyperprivop.S   Thu Mar 30 09:55:26 2006 -0700
@@ -87,7 +87,7 @@
 //     r16 == cr.isr
 //     r17 == cr.iim
 //     r18 == XSI_PSR_IC_OFS
-//     r19 == vpsr.ic (low 32 bits) | vpsr.i (high 32 bits)
+//     r19 == vpsr.ic
 //     r31 == pr
 GLOBAL_ENTRY(fast_hyperprivop)
 #ifndef FAST_HYPERPRIVOPS // see beginning of file
@@ -223,7 +223,7 @@ 1:  // when we get to here r20=~=interrup
 //     r16 == cr.isr
 //     r17 == cr.iim
 //     r18 == XSI_PSR_IC
-//     r19 == vpsr.ic (low 32 bits) | vpsr.i (high 32 bits)
+//     r19 == vpsr.ic 
 //     r31 == pr
 ENTRY(hyper_ssm_i)
 #ifndef FAST_SSM_I
@@ -278,11 +278,15 @@ ENTRY(hyper_ssm_i)
        movl r27=~(IA64_PSR_BE|IA64_PSR_BN);;
        or r30=r30,r28;;
        and r30=r30,r27;;
+       mov r20=1
+       adds r22=XSI_PSR_I_ADDR_OFS-XSI_PSR_IC_OFS,r18
        adds r21=XSI_IPSR_OFS-XSI_PSR_IC_OFS,r18 ;;
+       ld8 r22=[r22]
        st8 [r21]=r30 ;;
        // set shared_mem interrupt_delivery_enabled to 0
        // set shared_mem interrupt_collection_enabled to 0
-       st8 [r18]=r0;;
+       st1 [r22]=r20;;
+       st4 [r18]=r0;;
        // cover and set shared_mem precover_ifs to cr.ifs
        // set shared_mem ifs and incomplete_regframe to 0
        cover ;;
@@ -405,9 +409,10 @@ GLOBAL_ENTRY(fast_tick_reflect)
        cmp.eq p6,p0=r16,r0;;
 (p6)   br.cond.spnt.few fast_tick_reflect_done;;
        // if guest vpsr.i is off, we're done
-       adds r21=XSI_PSR_I_OFS-XSI_PSR_IC_OFS,r18 ;;
-       ld4 r21=[r21];;
-       cmp.eq p6,p0=r21,r0
+       adds r21=XSI_PSR_I_ADDR_OFS-XSI_PSR_IC_OFS,r18 ;;
+       ld8 r21=[r21];;
+       ld1 r21=[r21];;
+       cmp.eq p0,p6=r21,r0
 (p6)   br.cond.spnt.few fast_tick_reflect_done;;
 
        // OK, we have a clock tick to deliver to the active domain!
@@ -445,17 +450,22 @@ GLOBAL_ENTRY(fast_tick_reflect)
        dep r21=-1,r21,IA64_PSR_CPL1_BIT,1 ;;
        or r17=r17,r28;;
        and r17=r17,r27;;
-       ld4 r16=[r18],4;;
+       ld4 r16=[r18],XSI_PSR_I_ADDR_OFS-XSI_PSR_IC_OFS;;
        cmp.ne p6,p0=r16,r0;;
+       ld8 r16=[r18],XSI_PSR_IC_OFS-XSI_PSR_I_ADDR_OFS
 (p6)   dep r17=-1,r17,IA64_PSR_IC_BIT,1 ;;
-       ld4 r16=[r18],-4;;
-       cmp.ne p6,p0=r16,r0;;
+       ld1 r16=[r16];;
+       cmp.eq p6,p0=r16,r0;;
 (p6)   dep r17=-1,r17,IA64_PSR_I_BIT,1 ;;
+       mov r20=1
+       adds r22=XSI_PSR_I_ADDR_OFS-XSI_PSR_IC_OFS,r18
        adds r21=XSI_IPSR_OFS-XSI_PSR_IC_OFS,r18 ;;
+       ld8 r22=[r22]
        st8 [r21]=r17 ;;
        // set shared_mem interrupt_delivery_enabled to 0
        // set shared_mem interrupt_collection_enabled to 0
-       st8 [r18]=r0;;
+       st1 [r22]=r20;;
+       st4 [r18]=r0;;
        // cover and set shared_mem precover_ifs to cr.ifs
        // set shared_mem ifs and incomplete_regframe to 0
        cover ;;
@@ -530,7 +540,7 @@ END(fast_tick_reflect)
 //     r16 == cr.isr
 //     r17 == cr.iim
 //     r18 == XSI_PSR_IC
-//     r19 == vpsr.ic (low 32 bits) | vpsr.i (high 32 bits)
+//     r19 == vpsr.ic
 //     r31 == pr
 GLOBAL_ENTRY(fast_break_reflect)
 #ifndef FAST_BREAK // see beginning of file
@@ -594,12 +604,13 @@ ENTRY(fast_reflect)
 #endif
        // save iip in shared_info (DON'T POINT TO NEXT INSTRUCTION!)
        adds r21=XSI_IIP_OFS-XSI_PSR_IC_OFS,r18 ;;
-       st8 [r21]=r29;;
+       st8 [r21]=r29,XSI_ISR_OFS-XSI_IIP_OFS;;
        // set shared_mem isr
-       adds r21=XSI_ISR_OFS-XSI_PSR_IC_OFS,r18 ;; 
        st8 [r21]=r16 ;;
        // set cr.ipsr
+       adds r21=XSI_PSR_I_ADDR_OFS-XSI_PSR_IC_OFS,r18
        mov r29=r30 ;;
+       ld8 r21=[r21]
        movl r28=DELIVER_PSR_SET;;
        movl r27=~(DELIVER_PSR_CLR|IA64_PSR_CPL0);;
        or r29=r29,r28;;
@@ -616,19 +627,22 @@ ENTRY(fast_reflect)
        or r30=r30,r28;;
        and r30=r30,r27;;
        // also set shared_mem ipsr.i and ipsr.ic appropriately
-       ld8 r24=[r18];;
-       extr.u r22=r24,32,32
+       ld1 r22=[r21]
+       ld4 r24=[r18];;
        cmp4.eq p6,p7=r24,r0;;
 (p6)   dep r30=0,r30,IA64_PSR_IC_BIT,1
 (p7)   dep r30=-1,r30,IA64_PSR_IC_BIT,1 ;;
-       cmp4.eq p6,p7=r22,r0;;
+       mov r24=r21
+       cmp.ne p6,p7=r22,r0;;
 (p6)   dep r30=0,r30,IA64_PSR_I_BIT,1
 (p7)   dep r30=-1,r30,IA64_PSR_I_BIT,1 ;;
+       mov r22=1
        adds r21=XSI_IPSR_OFS-XSI_PSR_IC_OFS,r18 ;;
        st8 [r21]=r30 ;;
        // set shared_mem interrupt_delivery_enabled to 0
        // set shared_mem interrupt_collection_enabled to 0
-       st8 [r18]=r0;;
+       st1 [r24]=r22
+       st4 [r18]=r0;;
        // cover and set shared_mem precover_ifs to cr.ifs
        // set shared_mem ifs and incomplete_regframe to 0
        cover ;;
@@ -639,8 +653,6 @@ ENTRY(fast_reflect)
        st8 [r21]=r0 ;;
        adds r21=XSI_PRECOVER_IFS_OFS-XSI_PSR_IC_OFS,r18 ;;
        st8 [r21]=r24 ;;
-       // vpsr.i = vpsr.ic = 0 on delivery of interruption
-       st8 [r18]=r0;;
        // FIXME: need to save iipa and isr to be arch-compliant
        // set iip to go to domain IVA break instruction vector
        movl r22=THIS_CPU(cpu_kr)+IA64_KR_CURRENT_OFFSET;;
@@ -723,7 +735,7 @@ GLOBAL_ENTRY(fast_access_reflect)
        cmp.eq p7,p0=r21,r0
 (p7)   br.spnt.few dispatch_reflection ;;
        movl r18=XSI_PSR_IC;;
-       ld8 r21=[r18];;
+       ld4 r21=[r18];;
        cmp.eq p7,p0=r0,r21
 (p7)   br.spnt.few dispatch_reflection ;;
        // set shared_mem ifa, FIXME: should we validate it?
@@ -1062,17 +1074,20 @@ just_do_rfi:
        dep r20=0,r20,38,25;; // ensure ifs has no reserved bits set
        mov cr.ifs=r20 ;;
        // ipsr.cpl == (vcr.ipsr.cpl == 0) 2 : 3;
+       adds r20=XSI_PSR_I_ADDR_OFS-XSI_PSR_IC_OFS,r18
        dep r21=-1,r21,IA64_PSR_CPL1_BIT,1 ;;
        // vpsr.i = vcr.ipsr.i; vpsr.ic = vcr.ipsr.ic
-       mov r19=r0 ;;
+       ld8 r20=[r20]
+       mov r19=1 
        extr.u r23=r21,IA64_PSR_I_BIT,1 ;;
        cmp.ne p7,p6=r23,r0 ;;
        // not done yet
-(p7)   dep r19=-1,r19,32,1
+(p7)   st1 [r20]=r0
+(p6)   st1 [r20]=r19;;
        extr.u r23=r21,IA64_PSR_IC_BIT,1 ;;
        cmp.ne p7,p6=r23,r0 ;;
-(p7)   dep r19=-1,r19,0,1 ;;
-       st8 [r18]=r19 ;;
+(p7)   st4 [r18]=r19;;
+(p6)   st4 [r18]=r0;;
        // force on psr.ic, i, dt, rt, it, bn
        movl r20=(IA64_PSR_I|IA64_PSR_IC|IA64_PSR_DT|IA64_PSR_RT|IA64_PSR_IT|IA64_PSR_BN)
        ;;
@@ -1209,10 +1224,12 @@ GLOBAL_ENTRY(rfi_with_interrupt)
        extr.u r20=r21,41,2 ;;  // get v(!)psr.ri
        dep r16=r20,r16,41,2 ;; // deposit cr.isr.ei
        adds r22=XSI_ISR_OFS-XSI_PSR_IC_OFS,r18 ;; 
-       st8 [r22]=r16 ;;
+       st8 [r22]=r16,XSI_PSR_I_ADDR_OFS-XSI_ISR_OFS ;;
        // set cr.ipsr (make sure cpl==2!)
        mov r29=r17 ;;
        movl r28=DELIVER_PSR_SET;;
+       mov r20=1
+       ld8 r22=[r22]
        movl r27=~(DELIVER_PSR_CLR|IA64_PSR_CPL0);;
        or r29=r29,r28;;
        and r29=r29,r27;;
@@ -1220,7 +1237,8 @@ GLOBAL_ENTRY(rfi_with_interrupt)
        // v.ipsr and v.iip are already set (and v.iip validated) as rfi target
        // set shared_mem interrupt_delivery_enabled to 0
        // set shared_mem interrupt_collection_enabled to 0
-       st8 [r18]=r0;;
+       st1 [r22]=r20
+       st4 [r18]=r0;;
        // cover and set shared_mem precover_ifs to cr.ifs
        // set shared_mem ifs and incomplete_regframe to 0
 #if 0
diff -r 986538da9be0 -r 2b6e531dab38 xen/arch/ia64/xen/ivt.S
--- a/xen/arch/ia64/xen/ivt.S   Wed Mar 29 12:41:33 2006 -0700
+++ b/xen/arch/ia64/xen/ivt.S   Thu Mar 30 09:55:26 2006 -0700
@@ -930,7 +930,7 @@ ENTRY(break_fault)
 #endif
        movl r18=XSI_PSR_IC
        ;;
-       ld8 r19=[r18]
+       ld4 r19=[r18]
        ;;
        cmp.eq p7,p0=r0,r17                     // is this a psuedo-cover?
 (p7)   br.spnt.many dispatch_privop_fault
diff -r 986538da9be0 -r 2b6e531dab38 xen/arch/ia64/xen/process.c
--- a/xen/arch/ia64/xen/process.c       Wed Mar 29 12:41:33 2006 -0700
+++ b/xen/arch/ia64/xen/process.c       Thu Mar 30 09:55:26 2006 -0700
@@ -206,9 +206,9 @@ void reflect_interruption(unsigned long 
 #ifdef CONFIG_SMP
 #warning "SMP FIXME: sharedinfo doesn't handle smp yet, need page per vcpu"
 #endif
-       regs->r31 = (unsigned long) &(((mapped_regs_t *)SHARED_ARCHINFO_ADDR)->ipsr);
-
-       PSCB(v,interrupt_delivery_enabled) = 0;
+       regs->r31 = XSI_IPSR;
+
+       v->vcpu_info->evtchn_upcall_mask = 1;
        PSCB(v,interrupt_collection_enabled) = 0;
 
        inc_slow_reflect_count(vector);
diff -r 986538da9be0 -r 2b6e531dab38 xen/arch/ia64/xen/vcpu.c
--- a/xen/arch/ia64/xen/vcpu.c  Wed Mar 29 12:41:33 2006 -0700
+++ b/xen/arch/ia64/xen/vcpu.c  Thu Mar 30 09:55:26 2006 -0700
@@ -197,7 +197,8 @@ IA64FAULT vcpu_reset_psr_sm(VCPU *vcpu, 
        ipsr = (struct ia64_psr *)&regs->cr_ipsr;
        imm = *(struct ia64_psr *)&imm24;
        // interrupt flag
-       if (imm.i) PSCB(vcpu,interrupt_delivery_enabled) = 0;
+       if (imm.i)
+           vcpu->vcpu_info->evtchn_upcall_mask = 1;
        if (imm.ic)  PSCB(vcpu,interrupt_collection_enabled) = 0;
        // interrupt collection flag
        //if (imm.ic) PSCB(vcpu,interrupt_delivery_enabled) = 0;
@@ -232,7 +233,7 @@ IA64FAULT vcpu_set_psr_dt(VCPU *vcpu)
 
 IA64FAULT vcpu_set_psr_i(VCPU *vcpu)
 {
-       PSCB(vcpu,interrupt_delivery_enabled) = 1;
+       vcpu->vcpu_info->evtchn_upcall_mask = 0;
        PSCB(vcpu,interrupt_collection_enabled) = 1;
        return IA64_NO_FAULT;
 }
@@ -261,11 +262,11 @@ IA64FAULT vcpu_set_psr_sm(VCPU *vcpu, UI
        }
        if (imm.sp) { ipsr->sp = 1; psr.sp = 1; }
        if (imm.i) {
-               if (!PSCB(vcpu,interrupt_delivery_enabled)) {
+               if (vcpu->vcpu_info->evtchn_upcall_mask) {
 //printf("vcpu_set_psr_sm: psr.ic 0->1 ");
                        enabling_interrupts = 1;
                }
-               PSCB(vcpu,interrupt_delivery_enabled) = 1;
+               vcpu->vcpu_info->evtchn_upcall_mask = 0;
        }
        if (imm.ic)  PSCB(vcpu,interrupt_collection_enabled) = 1;
        // TODO: do this faster
@@ -312,9 +313,9 @@ IA64FAULT vcpu_set_psr_l(VCPU *vcpu, UIN
        if (newpsr.up) { ipsr->up = 1; psr.up = 1; }
        if (newpsr.sp) { ipsr->sp = 1; psr.sp = 1; }
        if (newpsr.i) {
-               if (!PSCB(vcpu,interrupt_delivery_enabled))
+               if (vcpu->vcpu_info->evtchn_upcall_mask)
                        enabling_interrupts = 1;
-               PSCB(vcpu,interrupt_delivery_enabled) = 1;
+               vcpu->vcpu_info->evtchn_upcall_mask = 0;
        }
        if (newpsr.ic)  PSCB(vcpu,interrupt_collection_enabled) = 1;
        if (newpsr.mfl) { ipsr->mfl = 1; psr.mfl = 1; }
@@ -340,7 +341,7 @@ IA64FAULT vcpu_get_psr(VCPU *vcpu, UINT6
 
        newpsr = *(struct ia64_psr *)&regs->cr_ipsr;
        if (newpsr.cpl == 2) newpsr.cpl = 0;
-       if (PSCB(vcpu,interrupt_delivery_enabled)) newpsr.i = 1;
+       if (!vcpu->vcpu_info->evtchn_upcall_mask) newpsr.i = 1;
        else newpsr.i = 0;
        if (PSCB(vcpu,interrupt_collection_enabled)) newpsr.ic = 1;
        else newpsr.ic = 0;
@@ -360,7 +361,7 @@ BOOLEAN vcpu_get_psr_ic(VCPU *vcpu)
 
 BOOLEAN vcpu_get_psr_i(VCPU *vcpu)
 {
-       return !!PSCB(vcpu,interrupt_delivery_enabled);
+       return !vcpu->vcpu_info->evtchn_upcall_mask;
 }
 
 UINT64 vcpu_get_ipsr_int_state(VCPU *vcpu,UINT64 prevpsr)
@@ -373,7 +374,7 @@ UINT64 vcpu_get_ipsr_int_state(VCPU *vcp
        psr.ia64_psr.be = 0; if (dcr & IA64_DCR_BE) psr.ia64_psr.be = 1;
        psr.ia64_psr.pp = 0; if (dcr & IA64_DCR_PP) psr.ia64_psr.pp = 1;
        psr.ia64_psr.ic = PSCB(vcpu,interrupt_collection_enabled);
-       psr.ia64_psr.i = PSCB(vcpu,interrupt_delivery_enabled);
+       psr.ia64_psr.i = !vcpu->vcpu_info->evtchn_upcall_mask;
        psr.ia64_psr.bn = PSCB(vcpu,banknum);
        psr.ia64_psr.dt = 1; psr.ia64_psr.it = 1; psr.ia64_psr.rt = 1;
        if (psr.ia64_psr.cpl == 2) psr.ia64_psr.cpl = 0; // !!!! fool domain
@@ -931,7 +932,7 @@ IA64FAULT vcpu_set_eoi(VCPU *vcpu, UINT6
        bits &= ~(1L << bitnum);
        *p = bits;
        /* clearing an eoi bit may unmask another pending interrupt... */
-       if (PSCB(vcpu,interrupt_delivery_enabled)) { // but only if enabled...
+       if (!vcpu->vcpu_info->evtchn_upcall_mask) { // but only if enabled...
                // worry about this later... Linux only calls eoi
                // with interrupts disabled
                printf("Trying to EOI interrupt with interrupts enabled\n");
@@ -1186,7 +1187,6 @@ IA64FAULT vcpu_rfi(VCPU *vcpu)
 
        psr.i64 = PSCB(vcpu,ipsr);
        if (psr.ia64_psr.cpl < 3) psr.ia64_psr.cpl = 2;
-       if (psr.ia64_psr.i) PSCB(vcpu,interrupt_delivery_enabled) = 1;
        int_enable = psr.ia64_psr.i;
        if (psr.ia64_psr.ic)  PSCB(vcpu,interrupt_collection_enabled) = 1;
        if (psr.ia64_psr.dt && psr.ia64_psr.rt && psr.ia64_psr.it) vcpu_set_metaphysical_mode(vcpu,FALSE);
@@ -1218,7 +1218,7 @@ IA64FAULT vcpu_rfi(VCPU *vcpu)
        }
        PSCB(vcpu,interrupt_collection_enabled) = 1;
        vcpu_bsw1(vcpu);
-       PSCB(vcpu,interrupt_delivery_enabled) = int_enable;
+       vcpu->vcpu_info->evtchn_upcall_mask = !int_enable;
        return (IA64_NO_FAULT);
 }
 
diff -r 986538da9be0 -r 2b6e531dab38 xen/arch/ia64/xen/xentime.c
--- a/xen/arch/ia64/xen/xentime.c       Wed Mar 29 12:41:33 2006 -0700
+++ b/xen/arch/ia64/xen/xentime.c       Thu Mar 30 09:55:26 2006 -0700
@@ -111,7 +111,7 @@ xen_timer_interrupt (int irq, void *dev_
        if (!(++count & ((HEARTBEAT_FREQ*1024)-1))) {
                printf("Heartbeat... iip=%p\n", /*",psr.i=%d,pend=%d\n", */
                        regs->cr_iip /*,
-                       VCPU(current,interrupt_delivery_enabled),
+                       !current->vcpu_info->evtchn_upcall_mask,
                        VCPU(current,pending_interruption) */);
                count = 0;
        }
diff -r 986538da9be0 -r 2b6e531dab38 xen/include/asm-ia64/domain.h
--- a/xen/include/asm-ia64/domain.h     Wed Mar 29 12:41:33 2006 -0700
+++ b/xen/include/asm-ia64/domain.h     Thu Mar 30 09:55:26 2006 -0700
@@ -39,6 +39,9 @@ struct arch_domain {
 #define xen_vastart arch.xen_vastart
 #define xen_vaend arch.xen_vaend
 #define shared_info_va arch.shared_info_va
+#define INT_ENABLE_OFFSET(v)             \
+    (sizeof(vcpu_info_t) * (v)->vcpu_id + \
+    offsetof(vcpu_info_t, evtchn_upcall_mask))
 
 struct arch_vcpu {
 #if 1
diff -r 986538da9be0 -r 2b6e531dab38 xen/include/public/arch-ia64.h
--- a/xen/include/public/arch-ia64.h    Wed Mar 29 12:41:33 2006 -0700
+++ b/xen/include/public/arch-ia64.h    Thu Mar 30 09:55:26 2006 -0700
@@ -268,7 +268,11 @@ typedef struct {
             unsigned long precover_ifs;
             unsigned long unat;  // not sure if this is needed until NaT arch is done
             int interrupt_collection_enabled; // virtual psr.ic
-            int interrupt_delivery_enabled; // virtual psr.i
+            /* virtual interrupt deliverable flag is evtchn_upcall_mask in
+             * shared info area now. interrupt_mask_addr is the address
+             * of evtchn_upcall_mask for current vcpu
+             */
+            unsigned long interrupt_mask_addr;
             int pending_interruption;
             int incomplete_regframe; // see SDM vol2 6.8
             unsigned long reserved5_1[4];

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
