// SPDX-License-Identifier: GPL-2.0
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/osq_lock.h>

/*
 * An MCS-like lock especially tailored for optimistic spinning for sleeping
 * lock implementations (mutex, rwsem, etc.).
 *
 * Using a single MCS node per CPU is safe because sleeping locks should not be
 * called from interrupt context and we have preemption disabled while
 * spinning.
 */
static DEFINE_PER_CPU_SHARED_ALIGNED(struct optimistic_spin_node, osq_node);

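/*
 * For reference, the types used below are declared in <linux/osq_lock.h>.
 * Paraphrased (not verbatim), they look roughly like:
 *
 *	struct optimistic_spin_node {
 *		struct optimistic_spin_node *next, *prev;
 *		int locked;	// 1 once the lock is handed to this node
 *		int cpu;	// encoded CPU # + 1 value
 *	};
 *
 *	struct optimistic_spin_queue {
 *		atomic_t tail;	// encoded CPU # of the tail node,
 *				// OSQ_UNLOCKED_VAL if the queue is empty
 *	};
 */
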
/*
 * We use the value 0 to represent "no CPU", thus the encoded value
 * will be the CPU number incremented by 1.
 */
static inline int encode_cpu(int cpu_nr)
{
        return cpu_nr + 1;
}

static inline int node_cpu(struct optimistic_spin_node *node)
{
        return node->cpu - 1;
}

static inline struct optimistic_spin_node *decode_cpu(int encoded_cpu_val)
{
        int cpu_nr = encoded_cpu_val - 1;

        return per_cpu_ptr(&osq_node, cpu_nr);
}
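
/*
 * For example (illustrative only): on CPU 3, encode_cpu(3) == 4 is the value
 * stored in @lock->tail, and decode_cpu(4) maps back to CPU 3's per-cpu
 * osq_node. Encoding CPU 0 as 1 keeps it distinct from OSQ_UNLOCKED_VAL (0),
 * which denotes an empty queue.
 */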

/*
 * Get a stable @node->next pointer, either for unlock() or unqueue() purposes.
 * Can return NULL in case we were the last queued and we updated @lock instead.
 */
static inline struct optimistic_spin_node *
osq_wait_next(struct optimistic_spin_queue *lock,
              struct optimistic_spin_node *node,
              struct optimistic_spin_node *prev)
{
        struct optimistic_spin_node *next = NULL;
        int curr = encode_cpu(smp_processor_id());
        int old;

        /*
         * If there is a prev node in the queue, then the 'old' value will be
         * the prev node's CPU #, else it's set to OSQ_UNLOCKED_VAL since, if
         * we're currently last in the queue, the queue will become empty.
         */
        old = prev ? prev->cpu : OSQ_UNLOCKED_VAL;

        for (;;) {
                if (atomic_read(&lock->tail) == curr &&
                    atomic_cmpxchg_acquire(&lock->tail, curr, old) == curr) {
                        /*
                         * We were the last queued, we moved @lock back. @prev
                         * will now observe @lock and will complete its
                         * unlock()/unqueue().
                         */
                        break;
                }

                /*
                 * We must xchg() the @node->next value, because if we were to
                 * leave it in, a concurrent unlock()/unqueue() from
                 * @node->next might complete Step-A and think its @prev is
                 * still valid.
                 *
                 * If the concurrent unlock()/unqueue() wins the race, we'll
                 * wait for either @lock to point to us, through its Step-B, or
                 * wait for a new @node->next from its Step-C.
                 */
                if (node->next) {
                        next = xchg(&node->next, NULL);
                        if (next)
                                break;
                }

                cpu_relax();
        }

        return next;
}
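
/*
 * Illustrative note: osq_wait_next() has two callers. osq_unlock() passes
 * prev == NULL, so a successful tail cmpxchg() resets the queue to
 * OSQ_UNLOCKED_VAL; the unqueue path (Step-B below) passes the stabilized
 * @prev, so the tail is handed back to that node instead.
 */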

bool osq_lock(struct optimistic_spin_queue *lock)
{
        struct optimistic_spin_node *node = this_cpu_ptr(&osq_node);
        struct optimistic_spin_node *prev, *next;
        int curr = encode_cpu(smp_processor_id());
        int old;

        node->locked = 0;
        node->next = NULL;
        node->cpu = curr;

        /*
         * We need both ACQUIRE (pairs with the corresponding RELEASE in the
         * uncontended unlock() fast path) and RELEASE (to publish the node
         * fields we just initialised) semantics when updating the lock tail.
         */
        old = atomic_xchg(&lock->tail, curr);
        if (old == OSQ_UNLOCKED_VAL)
                return true;

        prev = decode_cpu(old);
        node->prev = prev;

        /*
         * osq_lock()                   unqueue
         *
         * node->prev = prev            osq_wait_next()
         * WMB                          MB
         * prev->next = node            next->prev = prev // unqueue-C
         *
         * Here 'node->prev' and 'next->prev' are the same variable and we need
         * to ensure these stores happen in-order to avoid corrupting the list.
         */
        smp_wmb();

        WRITE_ONCE(prev->next, node);

        /*
         * Normally @prev is untouchable after the above store, because at that
         * moment unlock can proceed and wipe the node element from the stack.
         *
         * However, since our nodes are static per-cpu storage, we're
         * guaranteed their existence -- this allows us to apply
         * cmpxchg in an attempt to undo our queueing.
         */

        /*
         * Wait to acquire the lock or for cancellation. Note that need_resched()
         * will come with an IPI, which will wake smp_cond_load_relaxed() if it
         * is implemented with a monitor-wait. vcpu_is_preempted() relies on
         * polling, be careful.
         */
        if (smp_cond_load_relaxed(&node->locked, VAL || need_resched() ||
                                  vcpu_is_preempted(node_cpu(node->prev))))
                return true;

        /* unqueue */
        /*
         * Step - A -- stabilize @prev
         *
         * Undo our @prev->next assignment; this will make @prev's
         * unlock()/unqueue() wait for a next pointer since @lock points to us
         * (or later).
         */

        for (;;) {
                if (prev->next == node &&
                    cmpxchg(&prev->next, node, NULL) == node)
                        break;

                /*
                 * We can only fail the cmpxchg() racing against an unlock(),
                 * in which case we should observe @node->locked becoming
                 * true.
                 */
                if (smp_load_acquire(&node->locked))
                        return true;

                cpu_relax();

                /*
                 * Or we race against a concurrent unqueue()'s step-B, in which
                 * case its step-C will write us a new @node->prev pointer.
                 */
                prev = READ_ONCE(node->prev);
        }

        /*
         * Step - B -- stabilize @next
         *
         * Similar to unlock(), wait for @node->next or move @lock from @node
         * back to @prev.
         */

        next = osq_wait_next(lock, node, prev);
        if (!next)
                return false;

        /*
         * Step - C -- unlink
         *
         * @prev is stable because it's still waiting for a new @prev->next
         * pointer, @next is stable because our @node->next pointer is NULL and
         * it will wait in Step-A.
         */

        WRITE_ONCE(next->prev, prev);
        WRITE_ONCE(prev->next, next);

        return false;
}

void osq_unlock(struct optimistic_spin_queue *lock)
{
        struct optimistic_spin_node *node, *next;
        int curr = encode_cpu(smp_processor_id());

        /*
         * Fast path for the uncontended case.
         */
        if (likely(atomic_cmpxchg_release(&lock->tail, curr,
                                          OSQ_UNLOCKED_VAL) == curr))
                return;

        /*
         * Second most likely case.
         */
        node = this_cpu_ptr(&osq_node);
        next = xchg(&node->next, NULL);
        if (next) {
                WRITE_ONCE(next->locked, 1);
                return;
        }

        next = osq_wait_next(lock, node, NULL);
        if (next)
                WRITE_ONCE(next->locked, 1);
}
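
/*
 * Usage sketch (illustrative only, not part of this file): a sleeping lock's
 * optimistic-spin slowpath brackets its spin loop with osq_lock()/osq_unlock(),
 * loosely mirroring how mutex_optimistic_spin() uses the OSQ. 'struct foo_lock'
 * and its simple 0/1 state word below are made-up names for illustration, not
 * kernel APIs, and the block is never compiled.
 */
#if 0
struct foo_lock {
        atomic_t                        state;  /* 0 == unlocked, 1 == locked */
        struct optimistic_spin_queue    osq;    /* MCS-style spinner queue */
};

static bool foo_lock_optimistic_spin(struct foo_lock *lock)
{
        bool acquired = false;

        preempt_disable();                      /* OSQ assumes preemption is off */

        /* Join the spinner queue; on failure, stop spinning and go sleep. */
        if (!osq_lock(&lock->osq))
                goto out;

        /* Only the OSQ owner spins on the lock word itself. */
        while (!(acquired = atomic_cmpxchg_acquire(&lock->state, 0, 1) == 0)) {
                if (need_resched())
                        break;                  /* give up and fall back to sleeping */
                cpu_relax();
        }

        osq_unlock(&lock->osq);
out:
        preempt_enable();
        return acquired;
}
#endif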