Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/dtor/input
[linux-drm-fsl-dcu.git] / include/asm-arm/spinlock.h
index 406ca97a8ab29f99d381565ab3c9b745c1f147a3..800ba5254dafef9bff57c98a19dcb599b0982cbc 100644
@@ -85,7 +85,6 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
  * Write locks are easy - we just set bit 31.  When unlocking, we can
  * just write zero since the lock is exclusively held.
  */
-#define rwlock_is_locked(x)    (*((volatile unsigned int *)(x)) != 0)
 
 static inline void __raw_write_lock(raw_rwlock_t *rw)
 {
@@ -199,9 +198,27 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw)
        : "cc");
 }
 
-#define __raw_read_trylock(lock) generic__raw_read_trylock(lock)
+static inline int __raw_read_trylock(raw_rwlock_t *rw)
+{
+       unsigned long tmp, tmp2 = 1;
+
+       __asm__ __volatile__(
+"1:    ldrex   %0, [%2]\n"
+"      adds    %0, %0, #1\n"
+"      strexpl %1, %0, [%2]\n"
+       : "=&r" (tmp), "+r" (tmp2)
+       : "r" (&rw->lock)
+       : "cc");
+
+       smp_mb();
+       return tmp2 == 0;
+}
 
 /* read_can_lock - would read_trylock() succeed? */
 #define __raw_read_can_lock(x)         ((x)->lock < 0x80000000)
 
+#define _raw_spin_relax(lock)  cpu_relax()
+#define _raw_read_relax(lock)  cpu_relax()
+#define _raw_write_relax(lock) cpu_relax()
+
 #endif /* __ASM_SPINLOCK_H */
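
For readers following the patch outside the kernel tree, here is a minimal standalone sketch of the rwlock word layout this header relies on: bit 31 marks the writer (set on write lock, cleared by writing zero on unlock, as the comment above says) and the low bits count readers, so a read trylock can only succeed while the word stays below 0x80000000, which is what both the strexpl in the new __raw_read_trylock() and the __raw_read_can_lock() test encode. This sketch is not the kernel implementation: it uses C11 atomics instead of ldrex/strex, and the demo_* names are invented for illustration only.

/*
 * Standalone sketch (not kernel code): C11 model of the rwlock word.
 * Bit 31 = writer, low bits = reader count.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

typedef struct { _Atomic uint32_t lock; } demo_rwlock_t;

#define DEMO_WRITER_BIT 0x80000000u

static bool demo_read_trylock(demo_rwlock_t *rw)
{
        uint32_t old = atomic_load_explicit(&rw->lock, memory_order_relaxed);

        for (;;) {
                if (old & DEMO_WRITER_BIT)      /* a writer holds bit 31 */
                        return false;
                /* add one reader; on CAS failure, old is reloaded and we retry */
                if (atomic_compare_exchange_weak_explicit(&rw->lock, &old, old + 1,
                                                          memory_order_acquire,
                                                          memory_order_relaxed))
                        return true;
        }
}

static void demo_read_unlock(demo_rwlock_t *rw)
{
        atomic_fetch_sub_explicit(&rw->lock, 1, memory_order_release);
}

static bool demo_write_trylock(demo_rwlock_t *rw)
{
        uint32_t expected = 0;

        /* write lock: the whole word goes from 0 to bit 31 set */
        return atomic_compare_exchange_strong_explicit(&rw->lock, &expected,
                                                       DEMO_WRITER_BIT,
                                                       memory_order_acquire,
                                                       memory_order_relaxed);
}

static void demo_write_unlock(demo_rwlock_t *rw)
{
        /* exclusively held, so a plain store of zero is enough */
        atomic_store_explicit(&rw->lock, 0, memory_order_release);
}

In this sketch the compare-and-swap loop plays the role of the ldrex/strex retry (the store only takes effect if the word has not changed since it was read), and the acquire/release ordering stands in for the explicit smp_mb() barriers used in the assembly version.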