
[Xen-devel] [PATCHv4 2/5] arm: provide add_sized()



add_sized(ptr, inc) adds inc to the value at ptr, using loads and stores
of the correct size for the type of *ptr.  The add is /not/ atomic.

This is needed for ticket locks to ensure the increment of the head ticket
does not affect the tail ticket.
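
For example, a ticket lock whose 16-bit head and tail share one 32-bit
word only needs a halfword-sized store when bumping head.  The sketch
below is purely illustrative (the structure and function names are made
up for this message and are not the Xen spinlock definitions):

    /* Hypothetical ticket lock: head and tail packed into one word. */
    typedef struct {
        uint16_t head;   /* ticket currently being served */
        uint16_t tail;   /* next ticket to be handed out */
    } example_ticket_lock_t;

    static void example_unlock(example_ticket_lock_t *lock)
    {
        /* sizeof(lock->head) == 2, so this expands to add_u16_sized():
         * a halfword load, add and store.  Only the head halfword is
         * written back, so a concurrent atomic update of tail cannot be
         * overwritten.  Memory barriers are omitted for brevity. */
        add_sized(&lock->head, 1);
    }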

Signed-off-by: David Vrabel <david.vrabel@xxxxxxxxxx>
Acked-by: Ian Campbell <ian.campbell@xxxxxxxxxx>
---
 xen/include/asm-arm/atomic.h |   27 +++++++++++++++++++++++++++
 1 file changed, 27 insertions(+)

diff --git a/xen/include/asm-arm/atomic.h b/xen/include/asm-arm/atomic.h
index 7d15fb0..d80ae5d 100644
--- a/xen/include/asm-arm/atomic.h
+++ b/xen/include/asm-arm/atomic.h
@@ -23,6 +23,17 @@ static inline void name(volatile type *addr, type val) \
                  : reg (val));                         \
 }
 
+#define build_add_sized(name, size, width, type, reg) \
+static inline void name(volatile type *addr, type val)                  \
+{                                                                       \
+    type t;                                                             \
+    asm volatile("ldr" size " %"width"1,%0\n"                           \
+                 "add %"width"1,%"width"1,%"width"2\n"                  \
+                 "str" size " %"width"1,%0"                             \
+                 : "+m" (*(volatile type *)addr), "=r" (t)              \
+                 : reg (val));                                          \
+}
+
 #if defined (CONFIG_ARM_32)
 #define BYTE ""
 #define WORD ""
@@ -46,6 +57,10 @@ build_atomic_read(read_u64_atomic, "x", uint64_t, "=r")
 build_atomic_write(write_u64_atomic, "x", uint64_t, "r")
 #endif
 
+build_add_sized(add_u8_sized, "b", BYTE, uint8_t, "ri")
+build_add_sized(add_u16_sized, "h", WORD, uint16_t, "ri")
+build_add_sized(add_u32_sized, "", WORD, uint32_t, "ri")
+
 void __bad_atomic_size(void);
 
 #define read_atomic(p) ({                                               \
@@ -70,6 +85,18 @@ void __bad_atomic_size(void);
     __x;                                                                \
 })
 
+#define add_sized(p, x) ({                                              \
+    typeof(*p) __x = (x);                                               \
+    switch ( sizeof(*p) )                                               \
+    {                                                                   \
+    case 1: add_u8_sized((uint8_t *)p, (uint8_t)__x); break;            \
+    case 2: add_u16_sized((uint16_t *)p, (uint16_t)__x); break;         \
+    case 4: add_u32_sized((uint32_t *)p, (uint32_t)__x); break;         \
+    default: __bad_atomic_size(); break;                                \
+    }                                                                   \
+    __x;                                                                \
+})
+
 /*
  * NB. I've pushed the volatile qualifier into the operations. This allows
  * fast accessors such as _atomic_read() and _atomic_set() which don't give
-- 
1.7.10.4

