
To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] x86: Clean up asm keyword usage (asm volatile rather than __asm__
From: Xen patchbot-unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Tue, 11 Sep 2007 15:30:53 -0700
Delivery-date: Tue, 11 Sep 2007 15:34:57 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User kfraser@xxxxxxxxxxxxxxxxxxxxx
# Date 1189509278 -3600
# Node ID 5ccf8bbf8628675433b09585b5bc464f5a0981d5
# Parent  a657ebf8e4184a1dd1ec242cf30c53f1a1793a59
x86: Clean up asm keyword usage (asm volatile rather than __asm__
__volatile__ in most places) and ensure we use the volatile keyword
wherever we have an asm statement that produces outputs but has
unspecified side effects or dependencies beyond the
explicitly-stated inputs.
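
For illustration (not part of the changeset): a minimal sketch of the
distinction, assuming a GCC-compatible compiler on x86; the helper names
are hypothetical, but both patterns appear in the patch below.

    /* Pure computation: the output depends only on the stated input, so
     * the optimizer may merge duplicate copies of this asm or delete it
     * when the result is unused.  Plain asm (no volatile) is correct,
     * as with the bswap and bsf uses in this patch. */
    static inline unsigned int swab32_example(unsigned int x)
    {
        unsigned int r;
        asm ( "bswap %0" : "=r" (r) : "0" (x) );
        return r;
    }

    /* Hidden dependency: %cr2 is written by the CPU on a page fault, an
     * input that never appears in the constraint list.  Without volatile
     * the compiler could hoist the read or reuse a stale value, so
     * volatile is required, as in the patched read_cr2() below. */
    static inline unsigned long read_cr2_example(void)
    {
        unsigned long cr2;
        asm volatile ( "mov %%cr2,%0" : "=r" (cr2) );
        return cr2;
    }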

Also added volatile in a few places where it's not strictly necessary,
but where it's unlikely to produce worse code and it makes our
intentions perfectly clear.
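
One such case (again a sketch, not taken verbatim from the tree): an asm
with no output operands, such as the debug-register writes in this patch,
is already treated as volatile by GCC, so the keyword is redundant there
but documents the side effect.

    /* No output operands: GCC already treats this asm as volatile, so
     * the keyword changes nothing -- it only makes the side effect
     * (writing %db7) explicit to the reader. */
    static inline void write_db7_example(unsigned long value)
    {
        asm volatile ( "mov %0,%%db7" : : "r" (value) );
    }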

The original problem this patch fixes was tracked down by Joseph
Cihula <joseph.cihula@xxxxxxxxx>.

Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
---
 xen/arch/x86/bitops.c           |   10 +-
 xen/arch/x86/cpu/common.c       |    8 +-
 xen/arch/x86/domain.c           |    8 +-
 xen/arch/x86/time.c             |    8 +-
 xen/arch/x86/traps.c            |   35 +++++-----
 xen/arch/x86/x86_64/traps.c     |    6 -
 xen/arch/x86/x86_emulate.c      |   22 +++---
 xen/include/asm-x86/bitops.h    |    4 -
 xen/include/asm-x86/byteorder.h |    2 
 xen/include/asm-x86/div64.h     |   41 ++++++------
 xen/include/asm-x86/processor.h |  136 +++++++++++++++++++++-------------------
 11 files changed, 149 insertions(+), 131 deletions(-)

diff -r a657ebf8e418 -r 5ccf8bbf8628 xen/arch/x86/bitops.c
--- a/xen/arch/x86/bitops.c     Tue Sep 11 11:21:44 2007 +0100
+++ b/xen/arch/x86/bitops.c     Tue Sep 11 12:14:38 2007 +0100
@@ -7,7 +7,7 @@ unsigned int __find_first_bit(
 {
     unsigned long d0, d1, res;
 
-    __asm__ __volatile__ (
+    asm volatile (
         "   xor %%eax,%%eax\n\t" /* also ensures ZF==1 if size==0 */
         "   repe; scas"__OS"\n\t"
         "   je 1f\n\t"
@@ -34,8 +34,8 @@ unsigned int __find_next_bit(
     if ( bit != 0 )
     {
         /* Look for a bit in the first word. */
-        __asm__ ( "bsf %1,%%"__OP"ax"
-                  : "=a" (set) : "r" (*p >> bit), "0" (BITS_PER_LONG) );
+        asm ( "bsf %1,%%"__OP"ax"
+              : "=a" (set) : "r" (*p >> bit), "0" (BITS_PER_LONG) );
         if ( set < (BITS_PER_LONG - bit) )
             return (offset + set);
         offset += BITS_PER_LONG - bit;
@@ -55,7 +55,7 @@ unsigned int __find_first_zero_bit(
 {
     unsigned long d0, d1, d2, res;
 
-    __asm__ (
+    asm volatile (
         "   xor %%edx,%%edx\n\t" /* also ensures ZF==1 if size==0 */
         "   repe; scas"__OS"\n\t"
         "   je 1f\n\t"
@@ -83,7 +83,7 @@ unsigned int __find_next_zero_bit(
     if ( bit != 0 )
     {
         /* Look for zero in the first word. */
-        __asm__ ( "bsf %1,%%"__OP"ax" : "=a" (set) : "r" (~(*p >> bit)) );
+        asm ( "bsf %1,%%"__OP"ax" : "=a" (set) : "r" (~(*p >> bit)) );
         if ( set < (BITS_PER_LONG - bit) )
             return (offset + set);
         offset += BITS_PER_LONG - bit;
diff -r a657ebf8e418 -r 5ccf8bbf8628 xen/arch/x86/cpu/common.c
--- a/xen/arch/x86/cpu/common.c Tue Sep 11 11:21:44 2007 +0100
+++ b/xen/arch/x86/cpu/common.c Tue Sep 11 12:14:38 2007 +0100
@@ -557,10 +557,10 @@ void __devinit cpu_init(void)
 
        *(unsigned short *)(&gdt_load[0]) = LAST_RESERVED_GDT_BYTE;
        *(unsigned long  *)(&gdt_load[2]) = GDT_VIRT_START(current);
-       __asm__ __volatile__ ( "lgdt %0" : "=m" (gdt_load) );
+       asm volatile ( "lgdt %0" : "=m" (gdt_load) );
 
        /* No nested task. */
-       __asm__("pushf ; andw $0xbfff,(%"__OP"sp) ; popf");
+       asm volatile ("pushf ; andw $0xbfff,(%"__OP"sp) ; popf" );
 
        /* Ensure FPU gets initialised for each domain. */
        stts();
@@ -579,10 +579,10 @@ void __devinit cpu_init(void)
 #endif
        set_tss_desc(cpu,t);
        load_TR(cpu);
-       __asm__ __volatile__ ( "lldt %%ax" : : "a" (0) );
+       asm volatile ( "lldt %%ax" : : "a" (0) );
 
        /* Clear all 6 debug registers: */
-#define CD(register) __asm__("mov %0,%%db" #register ::"r"(0UL) );
+#define CD(register) asm volatile ( "mov %0,%%db" #register : : "r"(0UL) );
        CD(0); CD(1); CD(2); CD(3); /* no db4 and db5 */; CD(6); CD(7);
 #undef CD
 
diff -r a657ebf8e418 -r 5ccf8bbf8628 xen/arch/x86/domain.c
--- a/xen/arch/x86/domain.c     Tue Sep 11 11:21:44 2007 +0100
+++ b/xen/arch/x86/domain.c     Tue Sep 11 12:14:38 2007 +0100
@@ -930,7 +930,7 @@ arch_do_vcpu_op(
 
 #define loadsegment(seg,value) ({               \
     int __r = 1;                                \
-    __asm__ __volatile__ (                      \
+    asm volatile (                              \
         "1: movl %k1,%%" #seg "\n2:\n"          \
         ".section .fixup,\"ax\"\n"              \
         "3: xorl %k0,%k0\n"                     \
@@ -1017,7 +1017,7 @@ static void load_segments(struct vcpu *n
 
         /* If in kernel mode then switch the GS bases around. */
         if ( (n->arch.flags & TF_kernel_mode) )
-            __asm__ __volatile__ ( "swapgs" );
+            asm volatile ( "swapgs" );
     }
 
     if ( unlikely(!all_segs_okay) )
@@ -1190,7 +1190,7 @@ static void paravirt_ctxt_switch_to(stru
 }
 
 #define loaddebug(_v,_reg) \
-    __asm__ __volatile__ ("mov %0,%%db" #_reg : : "r" ((_v)->debugreg[_reg]))
+    asm volatile ( "mov %0,%%db" #_reg : : "r" ((_v)->debugreg[_reg]) )
 
 static void __context_switch(void)
 {
@@ -1242,7 +1242,7 @@ static void __context_switch(void)
         char gdt_load[10];
         *(unsigned short *)(&gdt_load[0]) = LAST_RESERVED_GDT_BYTE;
         *(unsigned long  *)(&gdt_load[2]) = GDT_VIRT_START(n);
-        __asm__ __volatile__ ( "lgdt %0" : "=m" (gdt_load) );
+        asm volatile ( "lgdt %0" : "=m" (gdt_load) );
     }
 
     if ( p->domain != n->domain )
diff -r a657ebf8e418 -r 5ccf8bbf8628 xen/arch/x86/time.c
--- a/xen/arch/x86/time.c       Tue Sep 11 11:21:44 2007 +0100
+++ b/xen/arch/x86/time.c       Tue Sep 11 12:14:38 2007 +0100
@@ -89,7 +89,7 @@ static inline u32 div_frac(u32 dividend,
 {
     u32 quotient, remainder;
     ASSERT(dividend < divisor);
-    __asm__ ( 
+    asm ( 
         "divl %4"
         : "=a" (quotient), "=d" (remainder)
         : "0" (0), "1" (dividend), "r" (divisor) );
@@ -103,7 +103,7 @@ static inline u32 mul_frac(u32 multiplic
 static inline u32 mul_frac(u32 multiplicand, u32 multiplier)
 {
     u32 product_int, product_frac;
-    __asm__ (
+    asm (
         "mul %3"
         : "=a" (product_frac), "=d" (product_int)
         : "0" (multiplicand), "r" (multiplier) );
@@ -127,7 +127,7 @@ static inline u64 scale_delta(u64 delta,
         delta <<= scale->shift;
 
 #ifdef CONFIG_X86_32
-    __asm__ (
+    asm (
         "mul  %5       ; "
         "mov  %4,%%eax ; "
         "mov  %%edx,%4 ; "
@@ -138,7 +138,7 @@ static inline u64 scale_delta(u64 delta,
         : "=A" (product), "=r" (tmp1), "=r" (tmp2)
         : "a" ((u32)delta), "1" ((u32)(delta >> 32)), "2" (scale->mul_frac) );
 #else
-    __asm__ (
+    asm (
         "mul %%rdx ; shrd $32,%%rdx,%%rax"
         : "=a" (product) : "0" (delta), "d" ((u64)scale->mul_frac) );
 #endif
diff -r a657ebf8e418 -r 5ccf8bbf8628 xen/arch/x86/traps.c
--- a/xen/arch/x86/traps.c      Tue Sep 11 11:21:44 2007 +0100
+++ b/xen/arch/x86/traps.c      Tue Sep 11 12:14:38 2007 +0100
@@ -597,7 +597,7 @@ static int emulate_forced_invalid_op(str
         return 0;
     eip += sizeof(instr);
 
-    __asm__ ( 
+    asm ( 
         "cpuid"
         : "=a" (a), "=b" (b), "=c" (c), "=d" (d)
         : "0" (a), "1" (b), "2" (c), "3" (d) );
@@ -1090,20 +1090,25 @@ static int read_descriptor(unsigned int 
     *ar = desc.b & 0x00f0ff00;
     if ( !(desc.b & _SEGMENT_L) )
     {
-        *base = (desc.a >> 16) + ((desc.b & 0xff) << 16) + (desc.b & 0xff000000);
+        *base = ((desc.a >> 16) + ((desc.b & 0xff) << 16) +
+                 (desc.b & 0xff000000));
         *limit = (desc.a & 0xffff) | (desc.b & 0x000f0000);
         if ( desc.b & _SEGMENT_G )
             *limit = ((*limit + 1) << 12) - 1;
 #ifndef NDEBUG
-        if ( !vm86_mode(regs) && sel > 3 )
+        if ( !vm86_mode(regs) && (sel > 3) )
         {
             unsigned int a, l;
             unsigned char valid;
 
-            __asm__("larl %2, %0\n\tsetz %1" : "=r" (a), "=rm" (valid) : "rm" 
(sel));
-            BUG_ON(valid && (a & 0x00f0ff00) != *ar);
-            __asm__("lsll %2, %0\n\tsetz %1" : "=r" (l), "=rm" (valid) : "rm" 
(sel));
-            BUG_ON(valid && l != *limit);
+            asm volatile (
+                "larl %2,%0 ; setz %1"
+                : "=r" (a), "=rm" (valid) : "rm" (sel));
+            BUG_ON(valid && ((a & 0x00f0ff00) != *ar));
+            asm volatile (
+                "lsll %2,%0 ; setz %1"
+                : "=r" (l), "=rm" (valid) : "rm" (sel));
+            BUG_ON(valid && (l != *limit));
         }
 #endif
     }
@@ -2011,13 +2016,13 @@ asmlinkage int do_debug(struct cpu_user_
     unsigned long condition;
     struct vcpu *v = current;
 
-    __asm__ __volatile__("mov %%db6,%0" : "=r" (condition));
+    asm volatile ( "mov %%db6,%0" : "=r" (condition) );
 
     /* Mask out spurious debug traps due to lazy DR7 setting */
     if ( (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) &&
          (v->arch.guest_context.debugreg[7] == 0) )
     {
-        __asm__("mov %0,%%db7" : : "r" (0UL));
+        asm volatile ( "mov %0,%%db7" : : "r" (0UL) );
         goto out;
     }
 
@@ -2186,25 +2191,25 @@ long set_debugreg(struct vcpu *p, int re
         if ( !access_ok(value, sizeof(long)) )
             return -EPERM;
         if ( p == current ) 
-            __asm__ ( "mov %0, %%db0" : : "r" (value) );
+            asm volatile ( "mov %0, %%db0" : : "r" (value) );
         break;
     case 1: 
         if ( !access_ok(value, sizeof(long)) )
             return -EPERM;
         if ( p == current ) 
-            __asm__ ( "mov %0, %%db1" : : "r" (value) );
+            asm volatile ( "mov %0, %%db1" : : "r" (value) );
         break;
     case 2: 
         if ( !access_ok(value, sizeof(long)) )
             return -EPERM;
         if ( p == current ) 
-            __asm__ ( "mov %0, %%db2" : : "r" (value) );
+            asm volatile ( "mov %0, %%db2" : : "r" (value) );
         break;
     case 3:
         if ( !access_ok(value, sizeof(long)) )
             return -EPERM;
         if ( p == current ) 
-            __asm__ ( "mov %0, %%db3" : : "r" (value) );
+            asm volatile ( "mov %0, %%db3" : : "r" (value) );
         break;
     case 6:
         /*
@@ -2214,7 +2219,7 @@ long set_debugreg(struct vcpu *p, int re
         value &= 0xffffefff; /* reserved bits => 0 */
         value |= 0xffff0ff0; /* reserved bits => 1 */
         if ( p == current ) 
-            __asm__ ( "mov %0, %%db6" : : "r" (value) );
+            asm volatile ( "mov %0, %%db6" : : "r" (value) );
         break;
     case 7:
         /*
@@ -2235,7 +2240,7 @@ long set_debugreg(struct vcpu *p, int re
                 if ( ((value >> (i+16)) & 3) == 2 ) return -EPERM;
         }
         if ( p == current ) 
-            __asm__ ( "mov %0, %%db7" : : "r" (value) );
+            asm volatile ( "mov %0, %%db7" : : "r" (value) );
         break;
     default:
         return -EINVAL;
diff -r a657ebf8e418 -r 5ccf8bbf8628 xen/arch/x86/x86_64/traps.c
--- a/xen/arch/x86/x86_64/traps.c       Tue Sep 11 11:21:44 2007 +0100
+++ b/xen/arch/x86/x86_64/traps.c       Tue Sep 11 12:14:38 2007 +0100
@@ -149,7 +149,7 @@ asmlinkage void do_double_fault(struct c
 {
     unsigned int cpu, tr;
 
-    asm ( "str %0" : "=r" (tr) );
+    asm volatile ( "str %0" : "=r" (tr) );
     cpu = ((tr >> 3) - __FIRST_TSS_ENTRY) >> 2;
 
     watchdog_disable();
@@ -185,11 +185,11 @@ void toggle_guest_mode(struct vcpu *v)
     if ( is_pv_32bit_vcpu(v) )
         return;
     v->arch.flags ^= TF_kernel_mode;
-    __asm__ __volatile__ ( "swapgs" );
+    asm volatile ( "swapgs" );
     update_cr3(v);
 #ifdef USER_MAPPINGS_ARE_GLOBAL
     /* Don't flush user global mappings from the TLB. Don't tick TLB clock. */
-    __asm__ __volatile__ ( "mov %0, %%cr3" : : "r" (v->arch.cr3) : "memory" );
+    asm volatile ( "mov %0, %%cr3" : : "r" (v->arch.cr3) : "memory" );
 #else
     write_ptbase(v);
 #endif
diff -r a657ebf8e418 -r 5ccf8bbf8628 xen/arch/x86/x86_emulate.c
--- a/xen/arch/x86/x86_emulate.c        Tue Sep 11 11:21:44 2007 +0100
+++ b/xen/arch/x86/x86_emulate.c        Tue Sep 11 12:14:38 2007 +0100
@@ -329,7 +329,7 @@ do{ unsigned long _tmp;                 
     switch ( (_dst).bytes )                                                \
     {                                                                      \
     case 2:                                                                \
-        __asm__ __volatile__ (                                             \
+        asm volatile (                                                     \
             _PRE_EFLAGS("0","4","2")                                       \
             _op"w %"_wx"3,%1; "                                            \
             _POST_EFLAGS("0","4","2")                                      \
@@ -338,7 +338,7 @@ do{ unsigned long _tmp;                 
               "m" (_eflags), "m" ((_dst).val) );                           \
         break;                                                             \
     case 4:                                                                \
-        __asm__ __volatile__ (                                             \
+        asm volatile (                                                     \
             _PRE_EFLAGS("0","4","2")                                       \
             _op"l %"_lx"3,%1; "                                            \
             _POST_EFLAGS("0","4","2")                                      \
@@ -356,7 +356,7 @@ do{ unsigned long _tmp;                 
     switch ( (_dst).bytes )                                                \
     {                                                                      \
     case 1:                                                                \
-        __asm__ __volatile__ (                                             \
+        asm volatile (                                                     \
             _PRE_EFLAGS("0","4","2")                                       \
             _op"b %"_bx"3,%1; "                                            \
             _POST_EFLAGS("0","4","2")                                      \
@@ -388,7 +388,7 @@ do{ unsigned long _tmp;                 
     switch ( (_dst).bytes )                                                \
     {                                                                      \
     case 1:                                                                \
-        __asm__ __volatile__ (                                             \
+        asm volatile (                                                     \
             _PRE_EFLAGS("0","3","2")                                       \
             _op"b %1; "                                                    \
             _POST_EFLAGS("0","3","2")                                      \
@@ -396,7 +396,7 @@ do{ unsigned long _tmp;                 
             : "i" (EFLAGS_MASK), "m" (_eflags), "m" ((_dst).val) );        \
         break;                                                             \
     case 2:                                                                \
-        __asm__ __volatile__ (                                             \
+        asm volatile (                                                     \
             _PRE_EFLAGS("0","3","2")                                       \
             _op"w %1; "                                                    \
             _POST_EFLAGS("0","3","2")                                      \
@@ -404,7 +404,7 @@ do{ unsigned long _tmp;                 
             : "i" (EFLAGS_MASK), "m" (_eflags), "m" ((_dst).val) );        \
         break;                                                             \
     case 4:                                                                \
-        __asm__ __volatile__ (                                             \
+        asm volatile (                                                     \
             _PRE_EFLAGS("0","3","2")                                       \
             _op"l %1; "                                                    \
             _POST_EFLAGS("0","3","2")                                      \
@@ -420,7 +420,7 @@ do{ unsigned long _tmp;                 
 /* Emulate an instruction with quadword operands (x86/64 only). */
 #if defined(__x86_64__)
 #define __emulate_2op_8byte(_op, _src, _dst, _eflags, _qx, _qy)         \
-do{ __asm__ __volatile__ (                                              \
+do{ asm volatile (                                                      \
         _PRE_EFLAGS("0","4","2")                                        \
         _op"q %"_qx"3,%1; "                                             \
         _POST_EFLAGS("0","4","2")                                       \
@@ -429,7 +429,7 @@ do{ __asm__ __volatile__ (              
           "m" (_eflags), "m" ((_dst).val) );                            \
 } while (0)
 #define __emulate_1op_8byte(_op, _dst, _eflags)                         \
-do{ __asm__ __volatile__ (                                              \
+do{ asm volatile (                                                      \
         _PRE_EFLAGS("0","3","2")                                        \
         _op"q %1; "                                                     \
         _POST_EFLAGS("0","3","2")                                       \
@@ -480,7 +480,7 @@ do {                                    
 /* Given byte has even parity (even number of 1s)? */
 static int even_parity(uint8_t v)
 {
-    __asm__ ( "test %%al,%%al; setp %%al"
+    asm ( "test %%al,%%al; setp %%al"
               : "=a" (v) : "0" (v) );
     return v;
 }
@@ -2402,11 +2402,11 @@ x86_emulate(
             break;
         case 4:
 #ifdef __x86_64__
-            __asm__ ( "bswap %k0" : "=r" (dst.val) : "0" (*dst.reg) );
+            asm ( "bswap %k0" : "=r" (dst.val) : "0" (*dst.reg) );
             break;
         case 8:
 #endif
-            __asm__ ( "bswap %0" : "=r" (dst.val) : "0" (*dst.reg) );
+            asm ( "bswap %0" : "=r" (dst.val) : "0" (*dst.reg) );
             break;
         }
         break;
diff -r a657ebf8e418 -r 5ccf8bbf8628 xen/include/asm-x86/bitops.h
--- a/xen/include/asm-x86/bitops.h      Tue Sep 11 11:21:44 2007 +0100
+++ b/xen/include/asm-x86/bitops.h      Tue Sep 11 12:14:38 2007 +0100
@@ -164,7 +164,7 @@ static __inline__ int __test_and_set_bit
 {
        int oldbit;
 
-       __asm__(
+       __asm__ __volatile__(
                "btsl %2,%1\n\tsbbl %0,%0"
                :"=r" (oldbit),"=m" (ADDR)
                :"dIr" (nr), "m" (ADDR) : "memory");
@@ -203,7 +203,7 @@ static __inline__ int __test_and_clear_b
 {
        int oldbit;
 
-       __asm__(
+       __asm__ __volatile__(
                "btrl %2,%1\n\tsbbl %0,%0"
                :"=r" (oldbit),"=m" (ADDR)
                :"dIr" (nr), "m" (ADDR) : "memory");
diff -r a657ebf8e418 -r 5ccf8bbf8628 xen/include/asm-x86/byteorder.h
--- a/xen/include/asm-x86/byteorder.h   Tue Sep 11 11:21:44 2007 +0100
+++ b/xen/include/asm-x86/byteorder.h   Tue Sep 11 12:14:38 2007 +0100
@@ -20,7 +20,7 @@ static inline __attribute_const__ __u64 
     asm("bswapl %0 ; bswapl %1 ; xchgl %0,%1" 
         : "=r" (v.s.a), "=r" (v.s.b) 
         : "0" (v.s.a), "1" (v.s.b)); 
-    return v.u;        
+    return v.u;
 } 
 
 /* Do not define swab16.  Gcc is smart enough to recognize "C" version and
diff -r a657ebf8e418 -r 5ccf8bbf8628 xen/include/asm-x86/div64.h
--- a/xen/include/asm-x86/div64.h       Tue Sep 11 11:21:44 2007 +0100
+++ b/xen/include/asm-x86/div64.h       Tue Sep 11 12:14:38 2007 +0100
@@ -5,13 +5,13 @@
 
 #if BITS_PER_LONG == 64
 
-# define do_div(n,base) ({                                     \
-       uint32_t __base = (base);                               \
-       uint32_t __rem;                                         \
-       __rem = ((uint64_t)(n)) % __base;                       \
-       (n) = ((uint64_t)(n)) / __base;                         \
-       __rem;                                                  \
- })
+#define do_div(n,base) ({                       \
+    uint32_t __base = (base);                   \
+    uint32_t __rem;                             \
+    __rem = ((uint64_t)(n)) % __base;           \
+    (n) = ((uint64_t)(n)) / __base;             \
+    __rem;                                      \
+})
 
 #else
 
@@ -27,18 +27,21 @@
  * This ends up being the most efficient "calling
  * convention" on x86.
  */
-#define do_div(n,base) ({ \
-       unsigned long __upper, __low, __high, __mod, __base; \
-       __base = (base); \
-       asm("":"=a" (__low), "=d" (__high):"A" (n)); \
-       __upper = __high; \
-       if (__high) { \
-               __upper = __high % (__base); \
-               __high = __high / (__base); \
-       } \
-       asm("divl %2":"=a" (__low), "=d" (__mod):"rm" (__base), "0" (__low), 
"1" (__upper)); \
-       asm("":"=A" (n):"a" (__low),"d" (__high)); \
-       __mod; \
+#define do_div(n,base) ({                                       \
+    unsigned long __upper, __low, __high, __mod, __base;        \
+    __base = (base);                                            \
+    asm ( "" : "=a" (__low), "=d" (__high) : "A" (n) );         \
+    __upper = __high;                                           \
+    if ( __high )                                               \
+    {                                                           \
+        __upper = __high % (__base);                            \
+        __high = __high / (__base);                             \
+    }                                                           \
+    asm ( "divl %2"                                             \
+          : "=a" (__low), "=d" (__mod)                          \
+          : "rm" (__base), "0" (__low), "1" (__upper) );        \
+    asm ( "" : "=A" (n) : "a" (__low), "d" (__high) );          \
+    __mod;                                                      \
 })
 
 #endif
diff -r a657ebf8e418 -r 5ccf8bbf8628 xen/include/asm-x86/processor.h
--- a/xen/include/asm-x86/processor.h   Tue Sep 11 11:21:44 2007 +0100
+++ b/xen/include/asm-x86/processor.h   Tue Sep 11 12:14:38 2007 +0100
@@ -146,10 +146,17 @@ struct vcpu;
  * instruction pointer ("program counter").
  */
 #ifdef __x86_64__
-#define current_text_addr() ({ void *pc; asm volatile("leaq 1f(%%rip),%0\n1:":"=r"(pc)); pc; })
-#else
-#define current_text_addr() \
-  ({ void *pc; __asm__("movl $1f,%0\n1:":"=g" (pc)); pc; })
+#define current_text_addr() ({                      \
+    void *pc;                                       \
+    asm ( "leaq 1f(%%rip),%0\n1:" : "=r" (pc) );    \
+    pc;                                             \
+})
+#else
+#define current_text_addr() ({                  \
+    void *pc;                                   \
+    asm ( "movl $1f,%0\n1:" : "=g" (pc) );      \
+    pc;                                         \
+})
 #endif
 
 struct cpuinfo_x86 {
@@ -211,12 +218,12 @@ static always_inline void detect_ht(stru
  * resulting in stale register contents being returned.
  */
 #define cpuid(_op,_eax,_ebx,_ecx,_edx)          \
-    __asm__("cpuid"                             \
-            : "=a" (*(int *)(_eax)),            \
-              "=b" (*(int *)(_ebx)),            \
-              "=c" (*(int *)(_ecx)),            \
-              "=d" (*(int *)(_edx))             \
-            : "0" (_op), "2" (0))
+    asm ( "cpuid"                               \
+          : "=a" (*(int *)(_eax)),              \
+            "=b" (*(int *)(_ebx)),              \
+            "=c" (*(int *)(_ecx)),              \
+            "=d" (*(int *)(_edx))               \
+          : "0" (_op), "2" (0) )
 
 /* Some CPUID calls want 'count' to be placed in ecx */
 static inline void cpuid_count(
@@ -227,9 +234,9 @@ static inline void cpuid_count(
     unsigned int *ecx,
     unsigned int *edx)
 {
-    __asm__("cpuid"
-            : "=a" (*eax), "=b" (*ebx), "=c" (*ecx), "=d" (*edx)
-            : "0" (op), "c" (count));
+    asm ( "cpuid"
+          : "=a" (*eax), "=b" (*ebx), "=c" (*ecx), "=d" (*edx)
+          : "0" (op), "c" (count) );
 }
 
 /*
@@ -239,88 +246,87 @@ static always_inline unsigned int cpuid_
 {
     unsigned int eax;
 
-    __asm__("cpuid"
-            : "=a" (eax)
-            : "0" (op)
-            : "bx", "cx", "dx");
+    asm ( "cpuid"
+          : "=a" (eax)
+          : "0" (op)
+          : "bx", "cx", "dx" );
     return eax;
 }
+
 static always_inline unsigned int cpuid_ebx(unsigned int op)
 {
     unsigned int eax, ebx;
 
-    __asm__("cpuid"
-            : "=a" (eax), "=b" (ebx)
-            : "0" (op)
-            : "cx", "dx" );
+    asm ( "cpuid"
+          : "=a" (eax), "=b" (ebx)
+          : "0" (op)
+          : "cx", "dx" );
     return ebx;
 }
+
 static always_inline unsigned int cpuid_ecx(unsigned int op)
 {
     unsigned int eax, ecx;
 
-    __asm__("cpuid"
-            : "=a" (eax), "=c" (ecx)
-            : "0" (op)
-            : "bx", "dx" );
+    asm ( "cpuid"
+          : "=a" (eax), "=c" (ecx)
+          : "0" (op)
+          : "bx", "dx" );
     return ecx;
 }
+
 static always_inline unsigned int cpuid_edx(unsigned int op)
 {
     unsigned int eax, edx;
 
-    __asm__("cpuid"
-            : "=a" (eax), "=d" (edx)
-            : "0" (op)
-            : "bx", "cx");
+    asm ( "cpuid"
+          : "=a" (eax), "=d" (edx)
+          : "0" (op)
+          : "bx", "cx" );
     return edx;
 }
 
-
-
 static inline unsigned long read_cr0(void)
 {
-    unsigned long __cr0;
-    __asm__("mov %%cr0,%0\n\t" :"=r" (__cr0));
-    return __cr0;
+    unsigned long cr0;
+    asm volatile ( "mov %%cr0,%0\n\t" : "=r" (cr0) );
+    return cr0;
 } 
 
 static inline void write_cr0(unsigned long val)
 {
-       __asm__("mov %0,%%cr0": :"r" ((unsigned long)val));
+    asm volatile ( "mov %0,%%cr0" : : "r" ((unsigned long)val) );
 }
 
 static inline unsigned long read_cr2(void)
 {
-    unsigned long __cr2;
-    __asm__("mov %%cr2,%0\n\t" :"=r" (__cr2));
-    return __cr2;
+    unsigned long cr2;
+    asm volatile ( "mov %%cr2,%0\n\t" : "=r" (cr2) );
+    return cr2;
 }
 
 static inline unsigned long read_cr4(void)
 {
-    unsigned long __cr4;
-    __asm__("mov %%cr4,%0\n\t" :"=r" (__cr4));
-    return __cr4;
+    unsigned long cr4;
+    asm volatile ( "mov %%cr4,%0\n\t" : "=r" (cr4) );
+    return cr4;
 } 
     
 static inline void write_cr4(unsigned long val)
 {
-       __asm__("mov %0,%%cr4": :"r" ((unsigned long)val));
-}
-
+    asm volatile ( "mov %0,%%cr4" : : "r" ((unsigned long)val) );
+}
 
 /* Clear and set 'TS' bit respectively */
 static inline void clts(void) 
 {
-    __asm__ __volatile__ ("clts");
+    asm volatile ( "clts" );
 }
 
 static inline void stts(void) 
 {
     write_cr0(X86_CR0_TS|read_cr0());
 }
-
 
 /*
  * Save the cr4 feature set we're using (ie
@@ -363,32 +369,36 @@ static always_inline void set_in_cr4 (un
 #define getCx86(reg) ({ outb((reg), 0x22); inb(0x23); })
 
 #define setCx86(reg, data) do { \
-       outb((reg), 0x22); \
-       outb((data), 0x23); \
+    outb((reg), 0x22); \
+    outb((data), 0x23); \
 } while (0)
 
 /* Stop speculative execution */
 static inline void sync_core(void)
 {
     int tmp;
-    asm volatile("cpuid" : "=a" (tmp) : "0" (1) : "ebx","ecx","edx","memory");
+    asm volatile (
+        "cpuid"
+        : "=a" (tmp)
+        : "0" (1)
+        : "ebx","ecx","edx","memory" );
 }
 
 static always_inline void __monitor(const void *eax, unsigned long ecx,
-               unsigned long edx)
-{
-       /* "monitor %eax,%ecx,%edx;" */
-       asm volatile(
-               ".byte 0x0f,0x01,0xc8;"
-               : :"a" (eax), "c" (ecx), "d"(edx));
+                                    unsigned long edx)
+{
+    /* "monitor %eax,%ecx,%edx;" */
+    asm volatile (
+        ".byte 0x0f,0x01,0xc8;"
+        : : "a" (eax), "c" (ecx), "d"(edx) );
 }
 
 static always_inline void __mwait(unsigned long eax, unsigned long ecx)
 {
-       /* "mwait %eax,%ecx;" */
-       asm volatile(
-               ".byte 0x0f,0x01,0xc9;"
-               : :"a" (eax), "c" (ecx));
+    /* "mwait %eax,%ecx;" */
+    asm volatile (
+        ".byte 0x0f,0x01,0xc9;"
+        : : "a" (eax), "c" (ecx) );
 }
 
 #define IOBMP_BYTES             8192
@@ -509,7 +519,7 @@ struct extended_sigtable {
 /* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
 static always_inline void rep_nop(void)
 {
-    __asm__ __volatile__ ( "rep;nop" : : : "memory" );
+    asm volatile ( "rep;nop" : : : "memory" );
 }
 
 #define cpu_relax() rep_nop()
@@ -520,7 +530,7 @@ static always_inline void rep_nop(void)
 #define ARCH_HAS_PREFETCH
 extern always_inline void prefetch(const void *x)
 {
-    __asm__ __volatile__ ("prefetchnta (%0)" : : "r"(x));
+    asm volatile ( "prefetchnta (%0)" : : "r"(x) );
 }
 
 #elif CONFIG_X86_USE_3DNOW
@@ -531,12 +541,12 @@ extern always_inline void prefetch(const
 
 extern always_inline void prefetch(const void *x)
 {
-    __asm__ __volatile__ ("prefetch (%0)" : : "r"(x));
+    asm volatile ( "prefetch (%0)" : : "r"(x) );
 }
 
 extern always_inline void prefetchw(const void *x)
 {
-    __asm__ __volatile__ ("prefetchw (%0)" : : "r"(x));
+    asm volatile ( "prefetchw (%0)" : : "r"(x) );
 }
 #define spin_lock_prefetch(x)  prefetchw(x)
 

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog

<Prev in Thread] Current Thread [Next in Thread>
  • [Xen-changelog] [xen-unstable] x86: Clean up asm keyword usage (asm volatile rather than __asm__, Xen patchbot-unstable <=