WARNING - OLD ARCHIVES

This is an archived copy of the Xen.org mailing list, which we have preserved to ensure that existing links to archives are not broken. The live archive, which contains the latest emails, can be found at http://lists.xen.org/

xen-ia64-devel

[Xen-ia64-devel] [PATCH 1/12] vti fault handler clean up take 2: white space of vmx_minstate.h

To: xen-ia64-devel@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-ia64-devel] [PATCH 1/12] vti fault handler clean up take 2: white space of vmx_minstate.h
From: Isaku Yamahata <yamahata@xxxxxxxxxxxxx>
Date: Mon, 3 Dec 2007 15:02:18 +0900
Cc: Isaku Yamahata <yamahata@xxxxxxxxxxxxx>
Delivery-date: Sun, 02 Dec 2007 22:03:43 -0800
Envelope-to: www-data@xxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-ia64-devel-request@lists.xensource.com?subject=help>
List-id: Discussion of the ia64 port of Xen <xen-ia64-devel.lists.xensource.com>
List-post: <mailto:xen-ia64-devel@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-ia64-devel>, <mailto:xen-ia64-devel-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-ia64-devel>, <mailto:xen-ia64-devel-request@lists.xensource.com?subject=unsubscribe>
Sender: xen-ia64-devel-bounces@xxxxxxxxxxxxxxxxxxx
User-agent: Mutt/1.4.2.1i
# HG changeset patch
# User yamahata@xxxxxxxxxxxxx
# Date 1194514571 -32400
# Node ID ded58a0265bd0b854b73f7289b60e269c1234111
# Parent  32ec5dbe2978fdff4682912de0c78a14d479a8a3
white space.
PATCHNAME: white_space_vmx_minstate_h

Signed-off-by: Isaku Yamahata <yamahata@xxxxxxxxxxxxx>

diff -r 32ec5dbe2978 -r ded58a0265bd xen/arch/ia64/vmx/vmx_minstate.h
--- a/xen/arch/ia64/vmx/vmx_minstate.h  Fri Nov 30 08:54:33 2007 -0700
+++ b/xen/arch/ia64/vmx/vmx_minstate.h  Thu Nov 08 18:36:11 2007 +0900
@@ -1,4 +1,3 @@
-/* -*-  Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
 /*
  * vmx_minstate.h:
  * Copyright (c) 2005, Intel Corporation.
@@ -35,52 +34,47 @@
 #include <asm/cache.h>
 #include "entry.h"
 
-#define VMX_MINSTATE_START_SAVE_MIN         \
-    mov ar.rsc=0;       /* set enforced lazy mode, pl 0, little-endian, loadrs=0 */ \
-    ;;                                          \
-    mov.m r28=ar.rnat;                                  \
-    addl r22=IA64_RBS_OFFSET,r1;            /* compute base of RBS */       \
-    ;;                                          \
-    lfetch.fault.excl.nt1 [r22];                                \
-    addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r1;   /* compute base of memory stack */  \
-    mov r23=ar.bspstore;                /* save ar.bspstore */          \
-    ;;                                          \
-    mov ar.bspstore=r22;                /* switch to kernel RBS */      \
-    ;;                                          \
-    mov r18=ar.bsp;                                     \
-    mov ar.rsc=0x3;     /* set eager mode, pl 0, little-endian, loadrs=0 */     \
-
-
-
-#define VMX_MINSTATE_END_SAVE_MIN           \
-    bsw.1;          /* switch back to bank 1 (must be last in insn group) */    \
+#define VMX_MINSTATE_START_SAVE_MIN                                                             \
+    mov ar.rsc=0;       /* set enforced lazy mode, pl 0, little-endian, loadrs=0 */             \
+    ;;                                                                                          \
+    mov.m r28=ar.rnat;                                                                          \
+    addl r22=IA64_RBS_OFFSET,r1;                        /* compute base of RBS */               \
+    ;;                                                                                          \
+    lfetch.fault.excl.nt1 [r22];                                                                \
+    addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r1;       /* compute base of memory stack */      \
+    mov r23=ar.bspstore;                                /* save ar.bspstore */                  \
+    ;;                                                                                          \
+    mov ar.bspstore=r22;                                /* switch to kernel RBS */              \
+    ;;                                                                                          \
+    mov r18=ar.bsp;                                                                             \
+    mov ar.rsc=0x3;     /* set eager mode, pl 0, little-endian, loadrs=0 */
+
+#define VMX_MINSTATE_END_SAVE_MIN                                                               \
+    bsw.1;              /* switch back to bank 1 (must be last in insn group) */                \
     ;;
 
-
-#define PAL_VSA_SYNC_READ           \
-    /* begin to call pal vps sync_read */     \
-    add r25=IA64_VPD_BASE_OFFSET, r21;       \
-    movl r20=__vsa_base;     \
-    ;;          \
-    ld8 r25=[r25];      /* read vpd base */     \
-    ld8 r20=[r20];      /* read entry point */  \
-    ;;      \
-    add r20=PAL_VPS_SYNC_READ,r20;  \
-    ;;  \
-{ .mii;  \
-    nop 0x0;   \
-    mov r24=ip;        \
-    mov b0=r20;     \
-    ;;      \
-};           \
-{ .mmb;      \
-    add r24 = 0x20, r24;    \
-    nop 0x0;            \
-    br.cond.sptk b0;        /*  call the service */ \
-    ;;              \
-};           \
-
-
+#define PAL_VSA_SYNC_READ                               \
+    /* begin to call pal vps sync_read */               \
+    add r25=IA64_VPD_BASE_OFFSET, r21;                  \
+    movl r20=__vsa_base;                                \
+    ;;                                                  \
+    ld8 r25=[r25];              /* read vpd base */     \
+    ld8 r20=[r20];              /* read entry point */  \
+    ;;                                                  \
+    add r20=PAL_VPS_SYNC_READ,r20;                      \
+    ;;                                                  \
+{ .mii;                                                 \
+    nop 0x0;                                            \
+    mov r24=ip;                                         \
+    mov b0=r20;                                         \
+    ;;                                                  \
+};                                                      \
+{ .mmb;                                                 \
+    add r24 = 0x20, r24;                                \
+    nop 0x0;                                            \
+    br.cond.sptk b0;        /*  call the service */     \
+    ;;                                                  \
+};
 
 #define IA64_CURRENT_REG    IA64_KR(CURRENT)  /* r21 is reserved for current pointer */
 //#define VMX_MINSTATE_GET_CURRENT(reg)   mov reg=IA64_CURRENT_REG
@@ -112,101 +106,101 @@
  * we can pass interruption state as arguments to a handler.
  */
 
-#define VMX_DO_SAVE_MIN(COVER,SAVE_IFS,EXTRA)                           \
-    VMX_MINSTATE_GET_CURRENT(r16);  /* M (or M;;I) */                   \
-    mov r27=ar.rsc;         /* M */                         \
-    mov r20=r1;         /* A */                         \
-    mov r25=ar.unat;        /* M */                         \
-    mov r29=cr.ipsr;        /* M */                         \
-    mov r26=ar.pfs;         /* I */                     \
-    mov r18=cr.isr;         \
-    COVER;              /* B;; (or nothing) */                  \
-    ;;                                          \
-    tbit.z p6,p0=r29,IA64_PSR_VM_BIT;       \
-    ;;      \
-    tbit.nz.or p6,p0 = r18,IA64_ISR_NI_BIT; \
-    ;;        \
-(p6) br.spnt.few vmx_panic;        \
-    tbit.z p0,p15=r29,IA64_PSR_I_BIT;   \
-    mov r1=r16;                     \
-/*    mov r21=r16;     */              \
-    /* switch from user to kernel RBS: */                           \
-    ;;                                          \
-    invala;             /* M */                         \
-    SAVE_IFS;                                       \
-    ;;                                          \
-    VMX_MINSTATE_START_SAVE_MIN                                 \
-    adds r17=2*L1_CACHE_BYTES,r1;       /* really: biggest cache-line size */      \
-    adds r16=PT(CR_IPSR),r1;                                \
-    ;;                                          \
-    lfetch.fault.excl.nt1 [r17],L1_CACHE_BYTES;                     \
-    st8 [r16]=r29;      /* save cr.ipsr */                      \
-    ;;                                          \
-    lfetch.fault.excl.nt1 [r17];                                \
-    tbit.nz p15,p0=r29,IA64_PSR_I_BIT;                          \
-    mov r29=b0                                      \
-    ;;                                          \
-    adds r16=PT(R8),r1; /* initialize first base pointer */             \
-    adds r17=PT(R9),r1; /* initialize second base pointer */                \
-    ;;                                          \
-.mem.offset 0,0; st8.spill [r16]=r8,16;                             \
-.mem.offset 8,0; st8.spill [r17]=r9,16;                             \
-        ;;                                          \
-.mem.offset 0,0; st8.spill [r16]=r10,24;                            \
-.mem.offset 8,0; st8.spill [r17]=r11,24;                            \
-        ;;                                          \
-    mov r9=cr.iip;         /* M */                         \
-    mov r10=ar.fpsr;        /* M */                         \
-        ;;                      \
-    st8 [r16]=r9,16;    /* save cr.iip */                       \
-    st8 [r17]=r30,16;   /* save cr.ifs */                       \
-    sub r18=r18,r22;    /* r18=RSE.ndirty*8 */                      \
-    ;;          \
-    st8 [r16]=r25,16;   /* save ar.unat */                      \
-    st8 [r17]=r26,16;    /* save ar.pfs */                       \
-    shl r18=r18,16;     /* compute ar.rsc to be used for "loadrs" */            \
-    ;;                                          \
-    st8 [r16]=r27,16;   /* save ar.rsc */                       \
-    st8 [r17]=r28,16;   /* save ar.rnat */                      \
-    ;;          /* avoid RAW on r16 & r17 */                    \
-    st8 [r16]=r23,16;   /* save ar.bspstore */                      \
-    st8 [r17]=r31,16;   /* save predicates */                       \
-    ;;                                          \
-    st8 [r16]=r29,16;   /* save b0 */                           \
-    st8 [r17]=r18,16;   /* save ar.rsc value for "loadrs" */                \
-    cmp.eq pNonSys,pSys=r0,r0   /* initialize pSys=0, pNonSys=1 */          \
-    ;;                                          \
-.mem.offset 0,0; st8.spill [r16]=r20,16;    /* save original r1 */              \
-.mem.offset 8,0; st8.spill [r17]=r12,16;                            \
+#define VMX_DO_SAVE_MIN(COVER,SAVE_IFS,EXTRA)                                          \
+    VMX_MINSTATE_GET_CURRENT(r16);      /* M (or M;;I) */                              \
+    mov r27=ar.rsc;                     /* M */                                        \
+    mov r20=r1;                         /* A */                                        \
+    mov r25=ar.unat;                    /* M */                                        \
+    mov r29=cr.ipsr;                    /* M */                                        \
+    mov r26=ar.pfs;                     /* I */                                        \
+    mov r18=cr.isr;                                                                    \
+    COVER;                              /* B;; (or nothing) */                         \
+    ;;                                                                                 \
+    tbit.z p6,p0=r29,IA64_PSR_VM_BIT;                                                  \
+    ;;                                                                                 \
+    tbit.nz.or p6,p0 = r18,IA64_ISR_NI_BIT;                                            \
+    ;;                                                                                 \
+(p6)br.spnt.few vmx_panic;                                                             \
+    tbit.z p0,p15=r29,IA64_PSR_I_BIT;                                                  \
+    mov r1=r16;                                                                        \
+    /*    mov r21=r16;  */                                                             \
+    /* switch from user to kernel RBS: */                                              \
+    ;;                                                                                 \
+    invala;                             /* M */                                        \
+    SAVE_IFS;                                                                          \
+    ;;                                                                                 \
+    VMX_MINSTATE_START_SAVE_MIN                                                        \
+    adds r17=2*L1_CACHE_BYTES,r1;       /* really: biggest cache-line size */          \
+    adds r16=PT(CR_IPSR),r1;                                                           \
+    ;;                                                                                 \
+    lfetch.fault.excl.nt1 [r17],L1_CACHE_BYTES;                                        \
+    st8 [r16]=r29;      /* save cr.ipsr */                                             \
+    ;;                                                                                 \
+    lfetch.fault.excl.nt1 [r17];                                                       \
+    tbit.nz p15,p0=r29,IA64_PSR_I_BIT;                                                 \
+    mov r29=b0                                                                         \
+    ;;                                                                                 \
+    adds r16=PT(R8),r1; /* initialize first base pointer */                            \
+    adds r17=PT(R9),r1; /* initialize second base pointer */                           \
+    ;;                                                                                 \
+.mem.offset 0,0; st8.spill [r16]=r8,16;                                                \
+.mem.offset 8,0; st8.spill [r17]=r9,16;                                                \
+    ;;                                                                                 \
+.mem.offset 0,0; st8.spill [r16]=r10,24;                                               \
+.mem.offset 8,0; st8.spill [r17]=r11,24;                                               \
+    ;;                                                                                 \
+    mov r9=cr.iip;      /* M */                                                        \
+    mov r10=ar.fpsr;    /* M */                                                        \
+    ;;                                                                                 \
+    st8 [r16]=r9,16;    /* save cr.iip */                                              \
+    st8 [r17]=r30,16;   /* save cr.ifs */                                              \
+    sub r18=r18,r22;    /* r18=RSE.ndirty*8 */                                         \
+    ;;                                                                                 \
+    st8 [r16]=r25,16;   /* save ar.unat */                                             \
+    st8 [r17]=r26,16;    /* save ar.pfs */                                             \
+    shl r18=r18,16;     /* compute ar.rsc to be used for "loadrs" */                   \
+    ;;                                                                                 \
+    st8 [r16]=r27,16;   /* save ar.rsc */                                              \
+    st8 [r17]=r28,16;   /* save ar.rnat */                                             \
+    ;;                  /* avoid RAW on r16 & r17 */                                   \
+    st8 [r16]=r23,16;   /* save ar.bspstore */                                         \
+    st8 [r17]=r31,16;   /* save predicates */                                          \
+    ;;                                                                                 \
+    st8 [r16]=r29,16;   /* save b0 */                                                  \
+    st8 [r17]=r18,16;   /* save ar.rsc value for "loadrs" */                           \
+    cmp.eq pNonSys,pSys=r0,r0   /* initialize pSys=0, pNonSys=1 */                     \
+    ;;                                                                                 \
+.mem.offset 0,0; st8.spill [r16]=r20,16;        /* save original r1 */                 \
+.mem.offset 8,0; st8.spill [r17]=r12,16;                                               \
     adds r12=-16,r1;    /* switch to kernel memory stack (with 16 bytes of scratch) */  \
-    ;;                                          \
-.mem.offset 0,0; st8.spill [r16]=r13,16;                            \
-.mem.offset 8,0; st8.spill [r17]=r10,16;    /* save ar.fpsr */              \
-    mov r13=r21;   /* establish `current' */               \
-    ;;                                          \
-.mem.offset 0,0; st8.spill [r16]=r15,16;                            \
-.mem.offset 8,0; st8.spill [r17]=r14,16;                            \
-    ;;                                          \
-.mem.offset 0,0; st8.spill [r16]=r2,16;                             \
-.mem.offset 8,0; st8.spill [r17]=r3,16;                             \
-    adds r2=IA64_PT_REGS_R16_OFFSET,r1;                         \
-     ;;  \
-    adds r16=IA64_VCPU_IIPA_OFFSET,r13;                       \
-    adds r17=IA64_VCPU_ISR_OFFSET,r13;                       \
-    mov r26=cr.iipa;  \
-    mov r27=cr.isr;   \
-    ;;      \
-    st8 [r16]=r26;      \
-    st8 [r17]=r27;      \
-    ;;  \
-    EXTRA;                                          \
-    mov r8=ar.ccv;          \
-    mov r9=ar.csd;                                      \
-    mov r10=ar.ssd;                                     \
-    movl r11=FPSR_DEFAULT;   /* L-unit */                           \
-    movl r1=__gp;       /* establish kernel global pointer */               \
-    ;;                                          \
-    PAL_VSA_SYNC_READ           \
+    ;;                                                                                 \
+.mem.offset 0,0; st8.spill [r16]=r13,16;                                               \
+.mem.offset 8,0; st8.spill [r17]=r10,16;        /* save ar.fpsr */                     \
+    mov r13=r21;        /* establish `current' */                                      \
+    ;;                                                                                 \
+.mem.offset 0,0; st8.spill [r16]=r15,16;                                               \
+.mem.offset 8,0; st8.spill [r17]=r14,16;                                               \
+    ;;                                                                                 \
+.mem.offset 0,0; st8.spill [r16]=r2,16;                                                \
+.mem.offset 8,0; st8.spill [r17]=r3,16;                                                \
+    adds r2=IA64_PT_REGS_R16_OFFSET,r1;                                                \
+    ;;                                                                                 \
+    adds r16=IA64_VCPU_IIPA_OFFSET,r13;                                                \
+    adds r17=IA64_VCPU_ISR_OFFSET,r13;                                                 \
+    mov r26=cr.iipa;                                                                   \
+    mov r27=cr.isr;                                                                    \
+    ;;                                                                                 \
+    st8 [r16]=r26;                                                                     \
+    st8 [r17]=r27;                                                                     \
+    ;;                                                                                 \
+    EXTRA;                                                                             \
+    mov r8=ar.ccv;                                                                     \
+    mov r9=ar.csd;                                                                     \
+    mov r10=ar.ssd;                                                                    \
+    movl r11=FPSR_DEFAULT;      /* L-unit */                                           \
+    movl r1=__gp;               /* establish kernel global pointer */                  \
+    ;;                                                                                 \
+    PAL_VSA_SYNC_READ                                                                  \
     VMX_MINSTATE_END_SAVE_MIN
 
 /*
@@ -223,71 +217,80 @@
  *
  * Registers r14 and r15 are guaranteed not to be touched by SAVE_REST.
  */
-#define VMX_SAVE_REST               \
-.mem.offset 0,0; st8.spill [r2]=r16,16;     \
-.mem.offset 8,0; st8.spill [r3]=r17,16;     \
-    ;;                  \
-.mem.offset 0,0; st8.spill [r2]=r18,16;     \
-.mem.offset 8,0; st8.spill [r3]=r19,16;     \
-    ;;                  \
-.mem.offset 0,0; st8.spill [r2]=r20,16;     \
-.mem.offset 8,0; st8.spill [r3]=r21,16;     \
-    mov r18=b6;         \
-    ;;                  \
-.mem.offset 0,0; st8.spill [r2]=r22,16;     \
-.mem.offset 8,0; st8.spill [r3]=r23,16;     \
-    mov r19=b7;     \
-    ;;                  \
-.mem.offset 0,0; st8.spill [r2]=r24,16;     \
-.mem.offset 8,0; st8.spill [r3]=r25,16;     \
-    ;;                  \
-.mem.offset 0,0; st8.spill [r2]=r26,16;     \
-.mem.offset 8,0; st8.spill [r3]=r27,16;     \
-    ;;                  \
-.mem.offset 0,0; st8.spill [r2]=r28,16;     \
-.mem.offset 8,0; st8.spill [r3]=r29,16;     \
-    ;;                  \
-.mem.offset 0,0; st8.spill [r2]=r30,16;     \
-.mem.offset 8,0; st8.spill [r3]=r31,32;     \
-    ;;                  \
-    mov ar.fpsr=r11;     \
-    st8 [r2]=r8,8;       \
-    adds r24=PT(B6)-PT(F7),r3;      \
-    ;;                  \
-    stf.spill [r2]=f6,32;           \
-    stf.spill [r3]=f7,32;           \
-    ;;                  \
-    stf.spill [r2]=f8,32;           \
-    stf.spill [r3]=f9,32;           \
-    ;;                  \
-    stf.spill [r2]=f10,32;         \
-    stf.spill [r3]=f11;         \
-    adds r25=PT(B7)-PT(F11),r3;     \
-    ;;                  \
-    st8 [r24]=r18,16;       /* b6 */    \
-    st8 [r25]=r19,16;       /* b7 */    \
-    adds r3=PT(R5)-PT(F11),r3;     \
-    ;;                  \
-    st8 [r24]=r9;           /* ar.csd */    \
-    st8 [r25]=r10;          /* ar.ssd */    \
-    ;;         \
-    mov r18=ar.unat;    \
-    adds r19=PT(EML_UNAT)-PT(R4),r2;   \
-    ;;                 \
-    st8 [r19]=r18;       /* eml_unat */ \
-
-
-#define VMX_SAVE_EXTRA               \
-.mem.offset 0,0; st8.spill [r2]=r4,16;     \
-.mem.offset 8,0; st8.spill [r3]=r5,16;     \
-    ;;                  \
-.mem.offset 0,0; st8.spill [r2]=r6,16;      \
-.mem.offset 8,0; st8.spill [r3]=r7;      \
-    ;;                 \
-    mov r26=ar.unat;    \
-    ;;                 \
-    st8 [r2]=r26;       /* eml_unat */ \
-
-#define VMX_SAVE_MIN_WITH_COVER   VMX_DO_SAVE_MIN(cover, mov r30=cr.ifs,)
+#define VMX_SAVE_REST                   \
+.mem.offset 0,0; st8.spill [r2]=r16,16; \
+.mem.offset 8,0; st8.spill [r3]=r17,16; \
+    ;;                                  \
+.mem.offset 0,0; st8.spill [r2]=r18,16; \
+.mem.offset 8,0; st8.spill [r3]=r19,16; \
+    ;;                                  \
+.mem.offset 0,0; st8.spill [r2]=r20,16; \
+.mem.offset 8,0; st8.spill [r3]=r21,16; \
+    mov r18=b6;                         \
+    ;;                                  \
+.mem.offset 0,0; st8.spill [r2]=r22,16; \
+.mem.offset 8,0; st8.spill [r3]=r23,16; \
+    mov r19=b7;                         \
+    ;;                                  \
+.mem.offset 0,0; st8.spill [r2]=r24,16; \
+.mem.offset 8,0; st8.spill [r3]=r25,16; \
+    ;;                                  \
+.mem.offset 0,0; st8.spill [r2]=r26,16; \
+.mem.offset 8,0; st8.spill [r3]=r27,16; \
+    ;;                                  \
+.mem.offset 0,0; st8.spill [r2]=r28,16; \
+.mem.offset 8,0; st8.spill [r3]=r29,16; \
+    ;;                                  \
+.mem.offset 0,0; st8.spill [r2]=r30,16; \
+.mem.offset 8,0; st8.spill [r3]=r31,32; \
+    ;;                                  \
+    mov ar.fpsr=r11;                    \
+    st8 [r2]=r8,8;                      \
+    adds r24=PT(B6)-PT(F7),r3;          \
+    ;;                                  \
+    stf.spill [r2]=f6,32;               \
+    stf.spill [r3]=f7,32;               \
+    ;;                                  \
+    stf.spill [r2]=f8,32;               \
+    stf.spill [r3]=f9,32;               \
+    ;;                                  \
+    stf.spill [r2]=f10,32;              \
+    stf.spill [r3]=f11;                 \
+    adds r25=PT(B7)-PT(F11),r3;         \
+    ;;                                  \
+    st8 [r24]=r18,16;   /* b6 */        \
+    st8 [r25]=r19,16;   /* b7 */        \
+    adds r3=PT(R5)-PT(F11),r3;          \
+    ;;                                  \
+    st8 [r24]=r9;       /* ar.csd */    \
+    st8 [r25]=r10;      /* ar.ssd */    \
+    ;;                                  \
+    mov r18=ar.unat;                    \
+    adds r19=PT(EML_UNAT)-PT(R4),r2;    \
+    ;;                                  \
+    st8 [r19]=r18;      /* eml_unat */
+
+#define VMX_SAVE_EXTRA                  \
+.mem.offset 0,0; st8.spill [r2]=r4,16;  \
+.mem.offset 8,0; st8.spill [r3]=r5,16;  \
+    ;;                                  \
+.mem.offset 0,0; st8.spill [r2]=r6,16;  \
+.mem.offset 8,0; st8.spill [r3]=r7;     \
+    ;;                                  \
+    mov r26=ar.unat;                    \
+    ;;                                  \
+    st8 [r2]=r26;       /* eml_unat */
+
+#define VMX_SAVE_MIN_WITH_COVER     VMX_DO_SAVE_MIN(cover, mov r30=cr.ifs,)
+#define VMX_SAVE_MIN_WITH_COVER_R19 VMX_DO_SAVE_MIN(cover, mov r30=cr.ifs, mov r15=r19)
-#define VMX_SAVE_MIN      VMX_DO_SAVE_MIN(     , mov r30=r0, )
+#define VMX_SAVE_MIN                VMX_DO_SAVE_MIN(     , mov r30=r0, )
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
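
For reviewers: a quick way to double-check that the hunks above change only white space (apart from the editor settings comment, which moves from the old mode line at the top of the file to the Local variables block added at the end) is to re-diff the two revisions named in the changeset header while ignoring white space. A minimal sketch, assuming a Mercurial clone that contains both the parent and this changeset:

    # Ignore all white-space and blank-line differences between the parent
    # and this changeset; whatever remains is a real content change
    # (expected here: only the relocated editor settings comment).
    hg diff -w -B -r 32ec5dbe2978 -r ded58a0265bd xen/arch/ia64/vmx/vmx_minstate.h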

Attachment: 16502_ded58a0265bd_white_space_vmx_minstate_h.patch
Description: Text Data

_______________________________________________
Xen-ia64-devel mailing list
Xen-ia64-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-ia64-devel