locking,arch,powerpc: Fold atomic_ops
Author:     Peter Zijlstra <peterz@infradead.org>
AuthorDate: Wed, 26 Mar 2014 18:11:31 +0100
Commit:     Ingo Molnar <mingo@kernel.org>
CommitDate: Thu, 14 Aug 2014 12:48:11 +0200
Many of the atomic op implementations are the same except for one
instruction; fold the lot into a few CPP macros and reduce LoC.

Requires a separate asm_op argument because the PPC mnemonic does not
always match the op name :-)
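
For illustration only (not part of the patch): given the macros below,
ATOMIC_OP(sub, subf) expands to essentially the open-coded atomic_sub()
this commit removes, modulo whitespace and the later expansion of
PPC405_ERR77:

    static __inline__ void atomic_sub(int a, atomic_t *v)
    {
            int t;

            __asm__ __volatile__(
    "1:     lwarx   %0,0,%3         # atomic_sub\n"  /* "# atomic_" #op */
            "subf" " %0,%2,%0\n"            /* #asm_op, stringified */
            PPC405_ERR77(0,%3)
    "       stwcx.  %0,0,%3 \n"
    "       bne-    1b\n"
            : "=&r" (t), "+m" (v->counter)
            : "r" (a), "r" (&v->counter)
            : "cc");
    }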

Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Mahesh Salgaonkar <mahesh@linux.vnet.ibm.com>
Cc: Paul Gortmaker <paul.gortmaker@windriver.com>
Cc: linuxppc-dev@lists.ozlabs.org
Link: http://lkml.kernel.org/r/20140508135852.713980957@infradead.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
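
As a usage sketch (illustrative, not from the patch), the generated
helpers keep the existing atomic_t API and ordering semantics:

    atomic_t v = ATOMIC_INIT(1);
    int r;

    atomic_add(2, &v);              /* unordered, no barriers */
    atomic_sub(1, &v);              /* unordered, no barriers */
    r = atomic_sub_return(2, &v);   /* r == 0; fully ordered via
                                       PPC_ATOMIC_ENTRY/EXIT_BARRIER */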
diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
index 28992d01292633f2d473eeae47e202497fa691a6..512d2782b043ddc506c865028b1e7dc54a871b75 100644
--- a/arch/powerpc/include/asm/atomic.h
+++ b/arch/powerpc/include/asm/atomic.h
@@ -26,76 +26,53 @@ static __inline__ void atomic_set(atomic_t *v, int i)
        __asm__ __volatile__("stw%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
 }
 
-static __inline__ void atomic_add(int a, atomic_t *v)
-{
-       int t;
-
-       __asm__ __volatile__(
-"1:    lwarx   %0,0,%3         # atomic_add\n\
-       add     %0,%2,%0\n"
-       PPC405_ERR77(0,%3)
-"      stwcx.  %0,0,%3 \n\
-       bne-    1b"
-       : "=&r" (t), "+m" (v->counter)
-       : "r" (a), "r" (&v->counter)
-       : "cc");
+#define ATOMIC_OP(op, asm_op)                                          \
+static __inline__ void atomic_##op(int a, atomic_t *v)                 \
+{                                                                      \
+       int t;                                                          \
+                                                                       \
+       __asm__ __volatile__(                                           \
+"1:    lwarx   %0,0,%3         # atomic_" #op "\n"                     \
+       #asm_op " %0,%2,%0\n"                                           \
+       PPC405_ERR77(0,%3)                                              \
+"      stwcx.  %0,0,%3 \n"                                             \
+"      bne-    1b\n"                                                   \
+       : "=&r" (t), "+m" (v->counter)                                  \
+       : "r" (a), "r" (&v->counter)                                    \
+       : "cc");                                                        \
+}                                                                      \
+
+#define ATOMIC_OP_RETURN(op, asm_op)                                   \
+static __inline__ int atomic_##op##_return(int a, atomic_t *v)         \
+{                                                                      \
+       int t;                                                          \
+                                                                       \
+       __asm__ __volatile__(                                           \
+       PPC_ATOMIC_ENTRY_BARRIER                                        \
+"1:    lwarx   %0,0,%2         # atomic_" #op "_return\n"              \
+       #asm_op " %0,%1,%0\n"                                           \
+       PPC405_ERR77(0,%2)                                              \
+"      stwcx.  %0,0,%2 \n"                                             \
+"      bne-    1b\n"                                                   \
+       PPC_ATOMIC_EXIT_BARRIER                                         \
+       : "=&r" (t)                                                     \
+       : "r" (a), "r" (&v->counter)                                    \
+       : "cc", "memory");                                              \
+                                                                       \
+       return t;                                                       \
 }
 
-static __inline__ int atomic_add_return(int a, atomic_t *v)
-{
-       int t;
+#define ATOMIC_OPS(op, asm_op) ATOMIC_OP(op, asm_op) ATOMIC_OP_RETURN(op, asm_op)
 
-       __asm__ __volatile__(
-       PPC_ATOMIC_ENTRY_BARRIER
-"1:    lwarx   %0,0,%2         # atomic_add_return\n\
-       add     %0,%1,%0\n"
-       PPC405_ERR77(0,%2)
-"      stwcx.  %0,0,%2 \n\
-       bne-    1b"
-       PPC_ATOMIC_EXIT_BARRIER
-       : "=&r" (t)
-       : "r" (a), "r" (&v->counter)
-       : "cc", "memory");
+ATOMIC_OPS(add, add)
+ATOMIC_OPS(sub, subf)
 
-       return t;
-}
+#undef ATOMIC_OPS
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
 
 #define atomic_add_negative(a, v)      (atomic_add_return((a), (v)) < 0)
 
-static __inline__ void atomic_sub(int a, atomic_t *v)
-{
-       int t;
-
-       __asm__ __volatile__(
-"1:    lwarx   %0,0,%3         # atomic_sub\n\
-       subf    %0,%2,%0\n"
-       PPC405_ERR77(0,%3)
-"      stwcx.  %0,0,%3 \n\
-       bne-    1b"
-       : "=&r" (t), "+m" (v->counter)
-       : "r" (a), "r" (&v->counter)
-       : "cc");
-}
-
-static __inline__ int atomic_sub_return(int a, atomic_t *v)
-{
-       int t;
-
-       __asm__ __volatile__(
-       PPC_ATOMIC_ENTRY_BARRIER
-"1:    lwarx   %0,0,%2         # atomic_sub_return\n\
-       subf    %0,%1,%0\n"
-       PPC405_ERR77(0,%2)
-"      stwcx.  %0,0,%2 \n\
-       bne-    1b"
-       PPC_ATOMIC_EXIT_BARRIER
-       : "=&r" (t)
-       : "r" (a), "r" (&v->counter)
-       : "cc", "memory");
-
-       return t;
-}
-
 static __inline__ void atomic_inc(atomic_t *v)
 {
        int t;
@@ -289,71 +266,50 @@ static __inline__ void atomic64_set(atomic64_t *v, long i)
        __asm__ __volatile__("std%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
 }
 
-static __inline__ void atomic64_add(long a, atomic64_t *v)
-{
-       long t;
-
-       __asm__ __volatile__(
-"1:    ldarx   %0,0,%3         # atomic64_add\n\
-       add     %0,%2,%0\n\
-       stdcx.  %0,0,%3 \n\
-       bne-    1b"
-       : "=&r" (t), "+m" (v->counter)
-       : "r" (a), "r" (&v->counter)
-       : "cc");
+#define ATOMIC64_OP(op, asm_op)                                                \
+static __inline__ void atomic64_##op(long a, atomic64_t *v)            \
+{                                                                      \
+       long t;                                                         \
+                                                                       \
+       __asm__ __volatile__(                                           \
+"1:    ldarx   %0,0,%3         # atomic64_" #op "\n"                   \
+       #asm_op " %0,%2,%0\n"                                           \
+"      stdcx.  %0,0,%3 \n"                                             \
+"      bne-    1b\n"                                                   \
+       : "=&r" (t), "+m" (v->counter)                                  \
+       : "r" (a), "r" (&v->counter)                                    \
+       : "cc");                                                        \
 }
 
-static __inline__ long atomic64_add_return(long a, atomic64_t *v)
-{
-       long t;
-
-       __asm__ __volatile__(
-       PPC_ATOMIC_ENTRY_BARRIER
-"1:    ldarx   %0,0,%2         # atomic64_add_return\n\
-       add     %0,%1,%0\n\
-       stdcx.  %0,0,%2 \n\
-       bne-    1b"
-       PPC_ATOMIC_EXIT_BARRIER
-       : "=&r" (t)
-       : "r" (a), "r" (&v->counter)
-       : "cc", "memory");
-
-       return t;
+#define ATOMIC64_OP_RETURN(op, asm_op)                                 \
+static __inline__ long atomic64_##op##_return(long a, atomic64_t *v)   \
+{                                                                      \
+       long t;                                                         \
+                                                                       \
+       __asm__ __volatile__(                                           \
+       PPC_ATOMIC_ENTRY_BARRIER                                        \
+"1:    ldarx   %0,0,%2         # atomic64_" #op "_return\n"            \
+       #asm_op " %0,%1,%0\n"                                           \
+"      stdcx.  %0,0,%2 \n"                                             \
+"      bne-    1b\n"                                                   \
+       PPC_ATOMIC_EXIT_BARRIER                                         \
+       : "=&r" (t)                                                     \
+       : "r" (a), "r" (&v->counter)                                    \
+       : "cc", "memory");                                              \
+                                                                       \
+       return t;                                                       \
 }
 
-#define atomic64_add_negative(a, v)    (atomic64_add_return((a), (v)) < 0)
-
-static __inline__ void atomic64_sub(long a, atomic64_t *v)
-{
-       long t;
-
-       __asm__ __volatile__(
-"1:    ldarx   %0,0,%3         # atomic64_sub\n\
-       subf    %0,%2,%0\n\
-       stdcx.  %0,0,%3 \n\
-       bne-    1b"
-       : "=&r" (t), "+m" (v->counter)
-       : "r" (a), "r" (&v->counter)
-       : "cc");
-}
+#define ATOMIC64_OPS(op, asm_op) ATOMIC64_OP(op, asm_op) ATOMIC64_OP_RETURN(op, asm_op)
 
-static __inline__ long atomic64_sub_return(long a, atomic64_t *v)
-{
-       long t;
+ATOMIC64_OPS(add, add)
+ATOMIC64_OPS(sub, subf)
 
-       __asm__ __volatile__(
-       PPC_ATOMIC_ENTRY_BARRIER
-"1:    ldarx   %0,0,%2         # atomic64_sub_return\n\
-       subf    %0,%1,%0\n\
-       stdcx.  %0,0,%2 \n\
-       bne-    1b"
-       PPC_ATOMIC_EXIT_BARRIER
-       : "=&r" (t)
-       : "r" (a), "r" (&v->counter)
-       : "cc", "memory");
+#undef ATOMIC64_OPS
+#undef ATOMIC64_OP_RETURN
+#undef ATOMIC64_OP
 
-       return t;
-}
+#define atomic64_add_negative(a, v)    (atomic64_add_return((a), (v)) < 0)
 
 static __inline__ void atomic64_inc(atomic64_t *v)
 {