Revert "ACPI / EC: Add query flushing support"
[linux-drm-fsl-dcu.git] kernel/locking/mcs_spinlock.c
#include <linux/percpu.h>
#include <linux/sched.h>
#include "mcs_spinlock.h"

#ifdef CONFIG_SMP

/*
 * An MCS-like lock especially tailored for optimistic spinning for sleeping
 * lock implementations (mutex, rwsem, etc).
 *
 * Using a single mcs node per CPU is safe because sleeping locks should not be
 * called from interrupt context and we have preemption disabled while
 * spinning.
 */
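/*
 * Illustrative sketch (not the verbatim caller code) of how a sleeping
 * lock's optimistic-spin slowpath is expected to use this API; the
 * owner_running() helper below is hypothetical:
 *
 *	if (osq_lock(&lock->osq)) {
 *		while (owner_running(lock))
 *			cpu_relax_lowlatency();
 *		osq_unlock(&lock->osq);
 *	}
 */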
static DEFINE_PER_CPU_SHARED_ALIGNED(struct optimistic_spin_node, osq_node);

/*
 * We use the value 0 to represent "no CPU", thus the encoded value
 * will be the CPU number incremented by 1.
 */
static inline int encode_cpu(int cpu_nr)
{
        return cpu_nr + 1;
}

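/* Map an encoded CPU number back to that CPU's per-cpu queue node. */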
static inline struct optimistic_spin_node *decode_cpu(int encoded_cpu_val)
{
        int cpu_nr = encoded_cpu_val - 1;

        return per_cpu_ptr(&osq_node, cpu_nr);
}

/*
 * Get a stable @node->next pointer, either for unlock() or unqueue() purposes.
 * Can return NULL in case we were the last queued and we updated @lock instead.
 */
static inline struct optimistic_spin_node *
osq_wait_next(struct optimistic_spin_queue *lock,
              struct optimistic_spin_node *node,
              struct optimistic_spin_node *prev)
{
        struct optimistic_spin_node *next = NULL;
        int curr = encode_cpu(smp_processor_id());
        int old;

        /*
         * If there is a prev node in the queue, then the 'old' value will be
         * the prev node's CPU #; otherwise it's set to OSQ_UNLOCKED_VAL, since
         * if we're currently last in the queue, the queue will become empty.
         */
        old = prev ? prev->cpu : OSQ_UNLOCKED_VAL;

        for (;;) {
                if (atomic_read(&lock->tail) == curr &&
                    atomic_cmpxchg(&lock->tail, curr, old) == curr) {
                        /*
                         * We were the last queued, we moved @lock back. @prev
                         * will now observe @lock and will complete its
                         * unlock()/unqueue().
                         */
                        break;
                }

                /*
                 * We must xchg() the @node->next value, because if we were to
                 * leave it in, a concurrent unlock()/unqueue() from
                 * @node->next might complete Step-A and think its @prev is
                 * still valid.
                 *
                 * If the concurrent unlock()/unqueue() wins the race, we'll
                 * wait for either @lock to point to us, through its Step-B, or
                 * wait for a new @node->next from its Step-C.
                 */
                if (node->next) {
                        next = xchg(&node->next, NULL);
                        if (next)
                                break;
                }

                cpu_relax_lowlatency();
        }

        return next;
}

bool osq_lock(struct optimistic_spin_queue *lock)
{
        struct optimistic_spin_node *node = this_cpu_ptr(&osq_node);
        struct optimistic_spin_node *prev, *next;
        int curr = encode_cpu(smp_processor_id());
        int old;

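        /*
         * Set up our per-cpu node before the xchg() below publishes it
         * as the new queue tail.
         */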
        node->locked = 0;
        node->next = NULL;
        node->cpu = curr;

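        /*
         * Publish ourselves as the new tail.  If the queue was empty
         * (old == OSQ_UNLOCKED_VAL) we got the lock outright and have
         * nobody to spin on.
         */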
        old = atomic_xchg(&lock->tail, curr);
        if (old == OSQ_UNLOCKED_VAL)
                return true;

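        /*
         * Otherwise, link our node behind the previous tail so that its
         * eventual unlock()/unqueue() can find us and hand the lock on.
         */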
        prev = decode_cpu(old);
        node->prev = prev;
        ACCESS_ONCE(prev->next) = node;

        /*
         * Normally @prev is untouchable after the above store; because at that
         * moment unlock can proceed and wipe the node element from the stack.
         *
         * However, since our nodes are static per-cpu storage, we're
         * guaranteed their existence -- this allows us to apply
         * cmpxchg in an attempt to undo our queueing.
         */

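        /*
         * Wait for the lock to be handed over by our predecessor setting
         * @node->locked; the acquire load orders our critical section
         * after that handoff.
         */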
        while (!smp_load_acquire(&node->locked)) {
                /*
                 * If we need to reschedule, bail... so we can block.
                 */
                if (need_resched())
                        goto unqueue;

                cpu_relax_lowlatency();
        }
        return true;

unqueue:
        /*
         * Step - A  -- stabilize @prev
         *
         * Undo our @prev->next assignment; this will make @prev's
         * unlock()/unqueue() wait for a next pointer since @lock points to us
         * (or later).
         */

        for (;;) {
                if (prev->next == node &&
                    cmpxchg(&prev->next, node, NULL) == node)
                        break;

                /*
                 * We can only fail the cmpxchg() racing against an unlock(),
                 * in which case we should observe @node->locked becoming
                 * true.
                 */
                if (smp_load_acquire(&node->locked))
                        return true;

                cpu_relax_lowlatency();

                /*
                 * Or we race against a concurrent unqueue()'s step-B, in which
                 * case its step-C will write us a new @node->prev pointer.
                 */
                prev = ACCESS_ONCE(node->prev);
        }

        /*
         * Step - B -- stabilize @next
         *
         * Similar to unlock(), wait for @node->next or move @lock from @node
         * back to @prev.
         */

        next = osq_wait_next(lock, node, prev);
        if (!next)
                return false;

        /*
         * Step - C -- unlink
         *
         * @prev is stable because it's still waiting for a new @prev->next
         * pointer, @next is stable because our @node->next pointer is NULL and
         * it will wait in Step-A.
         */

        ACCESS_ONCE(next->prev) = prev;
        ACCESS_ONCE(prev->next) = next;

        return false;
}

void osq_unlock(struct optimistic_spin_queue *lock)
{
        struct optimistic_spin_node *node, *next;
        int curr = encode_cpu(smp_processor_id());

        /*
         * Fast path for the uncontended case: if we are still the queue
         * tail, nobody queued behind us and we can simply release the lock.
         */
        if (likely(atomic_cmpxchg(&lock->tail, curr, OSQ_UNLOCKED_VAL) == curr))
                return;

        /*
         * Second most likely case: a successor is queued directly behind
         * us; detach it and pass the lock by setting its ->locked.
         */
        node = this_cpu_ptr(&osq_node);
        next = xchg(&node->next, NULL);
        if (next) {
                ACCESS_ONCE(next->locked) = 1;
                return;
        }

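        /*
         * Our @node->next was NULL, so any successor is busy unqueueing
         * itself; wait for a stable next pointer (and pass it the lock)
         * or, if we turn out to be the tail, reset @lock to unlocked.
         */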
        next = osq_wait_next(lock, node, NULL);
        if (next)
                ACCESS_ONCE(next->locked) = 1;
}

#endif
