/*
 *  arch/arm/include/asm/atomic.h
 *
 *  Copyright (C) 1996 Russell King.
 *  Copyright (C) 2002 Deep Blue Solutions Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef __ASM_ARM_ATOMIC_H
#define __ASM_ARM_ATOMIC_H

#include <linux/compiler.h>
#include <linux/prefetch.h>
#include <linux/types.h>
#include <linux/irqflags.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>

#define ATOMIC_INIT(i)	{ (i) }

#ifdef __KERNEL__
/*
 * On ARM, ordinary assignment (str instruction) doesn't clear the local
 * strex/ldrex monitor on some implementations. The reason we can use it for
 * atomic_set() is the clrex or dummy strex done on every exception return.
 */
#define atomic_read(v)	(*(volatile int *)&(v)->counter)
#define atomic_set(v,i)	(((v)->counter) = (i))
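
/*
 * Usage sketch (illustrative only; 'pending' is a hypothetical counter,
 * not part of this header). atomic_set() compiles to a plain str and
 * atomic_read() to a plain volatile load, per the note above:
 *
 *	static atomic_t pending = ATOMIC_INIT(0);
 *
 *	static void example_reset(void)
 *	{
 *		atomic_set(&pending, 0);
 *	}
 *
 *	static int example_peek(void)
 *	{
 *		return atomic_read(&pending);
 *	}
 */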
#if __LINUX_ARM_ARCH__ >= 6
/*
 * ARMv6 UP and SMP safe atomic ops.  We use load exclusive and
 * store exclusive to ensure that these are atomic.  We may loop
 * to ensure that the update happens.
 */
static inline void atomic_add(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	prefetchw(&v->counter);
	__asm__ __volatile__("@ atomic_add\n"
"1:	ldrex	%0, [%3]\n"
"	add	%0, %0, %4\n"
"	strex	%1, %0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "Ir" (i)
	: "cc");
}
static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	smp_mb();
	prefetchw(&v->counter);

	__asm__ __volatile__("@ atomic_add_return\n"
"1:	ldrex	%0, [%3]\n"
"	add	%0, %0, %4\n"
"	strex	%1, %0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "Ir" (i)
	: "cc");

	smp_mb();

	return result;
}
static inline void atomic_sub(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	prefetchw(&v->counter);
	__asm__ __volatile__("@ atomic_sub\n"
"1:	ldrex	%0, [%3]\n"
"	sub	%0, %0, %4\n"
"	strex	%1, %0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "Ir" (i)
	: "cc");
}
static inline int atomic_sub_return(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	smp_mb();
	prefetchw(&v->counter);

	__asm__ __volatile__("@ atomic_sub_return\n"
"1:	ldrex	%0, [%3]\n"
"	sub	%0, %0, %4\n"
"	strex	%1, %0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "Ir" (i)
	: "cc");

	smp_mb();

	return result;
}
static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
{
	int oldval;
	unsigned long res;

	smp_mb();
	prefetchw(&ptr->counter);

	do {
		__asm__ __volatile__("@ atomic_cmpxchg\n"
		"ldrex	%1, [%3]\n"
		"mov	%0, #0\n"
		"teq	%1, %4\n"
		"strexeq %0, %5, [%3]\n"
		    : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
		    : "r" (&ptr->counter), "Ir" (old), "r" (new)
		    : "cc");
	} while (res);

	smp_mb();

	return oldval;
}
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int oldval, newval;
	unsigned long tmp;

	smp_mb();
	prefetchw(&v->counter);

	__asm__ __volatile__ ("@ atomic_add_unless\n"
"1:	ldrex	%0, [%4]\n"
"	teq	%0, %5\n"
"	beq	2f\n"
"	add	%1, %0, %6\n"
"	strex	%2, %1, [%4]\n"
"	teq	%2, #0\n"
"	bne	1b\n"
"2:"
	: "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (u), "r" (a)
	: "cc");

	if (oldval != u)
		smp_mb();

	return oldval;
}
#else /* ARM_ARCH_6 */

#ifdef CONFIG_SMP
#error SMP not supported on pre-ARMv6 CPUs
#endif
static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned long flags;
	int val;

	raw_local_irq_save(flags);
	val = v->counter;
	v->counter = val += i;
	raw_local_irq_restore(flags);

	return val;
}
#define atomic_add(i, v)	(void) atomic_add_return(i, v)
static inline int atomic_sub_return(int i, atomic_t *v)
{
	unsigned long flags;
	int val;

	raw_local_irq_save(flags);
	val = v->counter;
	v->counter = val -= i;
	raw_local_irq_restore(flags);

	return val;
}
#define atomic_sub(i, v)	(void) atomic_sub_return(i, v)
static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int ret;
	unsigned long flags;

	raw_local_irq_save(flags);
	ret = v->counter;
	if (likely(ret == old))
		v->counter = new;
	raw_local_irq_restore(flags);

	return ret;
}
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;

	c = atomic_read(v);
	while (c != u && (old = atomic_cmpxchg((v), c, c + a)) != c)
		c = old;
	return c;
}
#endif /* __LINUX_ARM_ARCH__ */

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

#define atomic_inc(v)		atomic_add(1, v)
#define atomic_dec(v)		atomic_sub(1, v)

#define atomic_inc_and_test(v)	(atomic_add_return(1, v) == 0)
#define atomic_dec_and_test(v)	(atomic_sub_return(1, v) == 0)
#define atomic_inc_return(v)	(atomic_add_return(1, v))
#define atomic_dec_return(v)	(atomic_sub_return(1, v))
#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)

#define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0)
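
/*
 * The derived ops above cover the usual reference-count idiom
 * (illustrative sketch; 'obj' and 'release' are placeholders):
 *
 *	atomic_inc(&obj->refs);
 *
 *	if (atomic_dec_and_test(&obj->refs))
 *		release(obj);
 */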
#define smp_mb__before_atomic_dec()	smp_mb()
#define smp_mb__after_atomic_dec()	smp_mb()
#define smp_mb__before_atomic_inc()	smp_mb()
#define smp_mb__after_atomic_inc()	smp_mb()
#ifndef CONFIG_GENERIC_ATOMIC64
typedef struct {
	long long counter;
} atomic64_t;

#define ATOMIC64_INIT(i) { (i) }
#ifdef CONFIG_ARM_LPAE
static inline long long atomic64_read(const atomic64_t *v)
{
	long long result;

	__asm__ __volatile__("@ atomic64_read\n"
"	ldrd	%0, %H0, [%1]"
	: "=&r" (result)
	: "r" (&v->counter), "Qo" (v->counter)
	);

	return result;
}
static inline void atomic64_set(atomic64_t *v, long long i)
{
	__asm__ __volatile__("@ atomic64_set\n"
"	strd	%2, %H2, [%1]"
	: "=Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	);
}
#else
static inline long long atomic64_read(const atomic64_t *v)
{
	long long result;

	__asm__ __volatile__("@ atomic64_read\n"
"	ldrexd	%0, %H0, [%1]"
	: "=&r" (result)
	: "r" (&v->counter), "Qo" (v->counter)
	);

	return result;
}
static inline void atomic64_set(atomic64_t *v, long long i)
{
	long long tmp;

	prefetchw(&v->counter);
	__asm__ __volatile__("@ atomic64_set\n"
"1:	ldrexd	%0, %H0, [%2]\n"
"	strexd	%0, %3, %H3, [%2]\n"
"	teq	%0, #0\n"
"	bne	1b"
	: "=&r" (tmp), "=Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	: "cc");
}
#endif
static inline void atomic64_add(long long i, atomic64_t *v)
{
	long long result;
	unsigned long tmp;

	prefetchw(&v->counter);
	__asm__ __volatile__("@ atomic64_add\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	adds	%Q0, %Q0, %Q4\n"
"	adc	%R0, %R0, %R4\n"
"	strexd	%1, %0, %H0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	: "cc");
}
static inline long long atomic64_add_return(long long i, atomic64_t *v)
{
	long long result;
	unsigned long tmp;

	smp_mb();
	prefetchw(&v->counter);

	__asm__ __volatile__("@ atomic64_add_return\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	adds	%Q0, %Q0, %Q4\n"
"	adc	%R0, %R0, %R4\n"
"	strexd	%1, %0, %H0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	: "cc");

	smp_mb();

	return result;
}
static inline void atomic64_sub(long long i, atomic64_t *v)
{
	long long result;
	unsigned long tmp;

	prefetchw(&v->counter);
	__asm__ __volatile__("@ atomic64_sub\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	subs	%Q0, %Q0, %Q4\n"
"	sbc	%R0, %R0, %R4\n"
"	strexd	%1, %0, %H0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	: "cc");
}
static inline long long atomic64_sub_return(long long i, atomic64_t *v)
{
	long long result;
	unsigned long tmp;

	smp_mb();
	prefetchw(&v->counter);

	__asm__ __volatile__("@ atomic64_sub_return\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	subs	%Q0, %Q0, %Q4\n"
"	sbc	%R0, %R0, %R4\n"
"	strexd	%1, %0, %H0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	: "cc");

	smp_mb();

	return result;
}
static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old,
					long long new)
{
	long long oldval;
	unsigned long res;

	smp_mb();
	prefetchw(&ptr->counter);

	do {
		__asm__ __volatile__("@ atomic64_cmpxchg\n"
		"ldrexd		%1, %H1, [%3]\n"
		"mov		%0, #0\n"
		"teq		%1, %4\n"
		"teqeq		%H1, %H4\n"
		"strexdeq	%0, %5, %H5, [%3]"
		: "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
		: "r" (&ptr->counter), "r" (old), "r" (new)
		: "cc");
	} while (res);

	smp_mb();

	return oldval;
}
static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
{
	long long result;
	unsigned long tmp;

	smp_mb();
	prefetchw(&ptr->counter);

	__asm__ __volatile__("@ atomic64_xchg\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	strexd	%1, %4, %H4, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (ptr->counter)
	: "r" (&ptr->counter), "r" (new)
	: "cc");

	smp_mb();

	return result;
}
static inline long long atomic64_dec_if_positive(atomic64_t *v)
{
	long long result;
	unsigned long tmp;

	smp_mb();
	prefetchw(&v->counter);

	__asm__ __volatile__("@ atomic64_dec_if_positive\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	subs	%Q0, %Q0, #1\n"
"	sbc	%R0, %R0, #0\n"
"	teq	%R0, #0\n"
"	bmi	2f\n"
"	strexd	%1, %0, %H0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b\n"
"2:"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter)
	: "cc");

	smp_mb();

	return result;
}
static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
{
	long long val;
	unsigned long tmp;
	int ret = 1;

	smp_mb();
	prefetchw(&v->counter);

	__asm__ __volatile__("@ atomic64_add_unless\n"
"1:	ldrexd	%0, %H0, [%4]\n"
"	teq	%0, %5\n"
"	teqeq	%H0, %H5\n"
"	moveq	%1, #0\n"
"	beq	2f\n"
"	adds	%Q0, %Q0, %Q6\n"
"	adc	%R0, %R0, %R6\n"
"	strexd	%2, %0, %H0, [%4]\n"
"	teq	%2, #0\n"
"	bne	1b\n"
"2:"
	: "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (u), "r" (a)
	: "cc");

	if (ret)
		smp_mb();

	return ret;
}
#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)
#define atomic64_inc(v)			atomic64_add(1LL, (v))
#define atomic64_inc_return(v)		atomic64_add_return(1LL, (v))
#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
#define atomic64_sub_and_test(a, v)	(atomic64_sub_return((a), (v)) == 0)
#define atomic64_dec(v)			atomic64_sub(1LL, (v))
#define atomic64_dec_return(v)		atomic64_sub_return(1LL, (v))
#define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)
#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1LL, 0LL)
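
/*
 * As with the 32-bit ops, atomic64_inc_not_zero() returns non-zero only
 * when it managed to increment a non-zero counter, which suits lookup
 * paths that must not revive a dying object (names hypothetical):
 *
 *	if (!atomic64_inc_not_zero(&entry->refs))
 *		entry = NULL;
 */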
#endif /* !CONFIG_GENERIC_ATOMIC64 */
#endif /* __KERNEL__ */
#endif /* __ASM_ARM_ATOMIC_H */