Branch data Line data Source code
1 : : // SPDX-License-Identifier: GPL-2.0-only
2 : : /*
3 : : * Generic waiting primitives.
4 : : *
5 : : * (C) 2004 Nadia Yvette Chambers, Oracle
6 : : */
7 : : #include "sched.h"
8 : :
9 : 5131493 : void __init_waitqueue_head(struct wait_queue_head *wq_head, const char *name, struct lock_class_key *key)
10 : : {
11 : 5131493 : spin_lock_init(&wq_head->lock);
12 : : lockdep_set_class_and_name(&wq_head->lock, key, name);
13 : 5131493 : INIT_LIST_HEAD(&wq_head->head);
14 : 5131493 : }
15 : :
16 : : EXPORT_SYMBOL(__init_waitqueue_head);
17 : :
18 : 2679066 : void add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
19 : : {
20 : : unsigned long flags;
21 : :
22 : 2679066 : wq_entry->flags &= ~WQ_FLAG_EXCLUSIVE;
23 : 2679066 : spin_lock_irqsave(&wq_head->lock, flags);
24 : : __add_wait_queue(wq_head, wq_entry);
25 : : spin_unlock_irqrestore(&wq_head->lock, flags);
26 : 2679263 : }
27 : : EXPORT_SYMBOL(add_wait_queue);
28 : :
29 : 30 : void add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
30 : : {
31 : : unsigned long flags;
32 : :
33 : 30 : wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
34 : 30 : spin_lock_irqsave(&wq_head->lock, flags);
35 : : __add_wait_queue_entry_tail(wq_head, wq_entry);
36 : : spin_unlock_irqrestore(&wq_head->lock, flags);
37 : 30 : }
38 : : EXPORT_SYMBOL(add_wait_queue_exclusive);
39 : :
40 : 2595215 : void remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
41 : : {
42 : : unsigned long flags;
43 : :
44 : 2595215 : spin_lock_irqsave(&wq_head->lock, flags);
45 : : __remove_wait_queue(wq_head, wq_entry);
46 : : spin_unlock_irqrestore(&wq_head->lock, flags);
47 : 2595752 : }
48 : : EXPORT_SYMBOL(remove_wait_queue);
49 : :
50 : : /*
51 : : * Scan threshold to break wait queue walk.
52 : : * This allows a waker to take a break from holding the
53 : : * wait queue lock during the wait queue walk.
54 : : */
55 : : #define WAITQUEUE_WALK_BREAK_CNT 64
56 : :
57 : : /*
58 : : * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just
59 : : * wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve
60 : : * number) then we wake all the non-exclusive tasks and one exclusive task.
61 : : *
62 : : * There are circumstances in which we can try to wake a task which has already
63 : : * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
64 : : * zero in this (rare) case, and we handle it by continuing to scan the queue.
65 : : */
66 : 17784429 : static int __wake_up_common(struct wait_queue_head *wq_head, unsigned int mode,
67 : : int nr_exclusive, int wake_flags, void *key,
68 : : wait_queue_entry_t *bookmark)
69 : : {
70 : : wait_queue_entry_t *curr, *next;
71 : : int cnt = 0;
72 : :
73 : : lockdep_assert_held(&wq_head->lock);
74 : :
75 [ + + - + ]: 17784429 : if (bookmark && (bookmark->flags & WQ_FLAG_BOOKMARK)) {
76 : 0 : curr = list_next_entry(bookmark, entry);
77 : :
78 : : list_del(&bookmark->entry);
79 : 0 : bookmark->flags = 0;
80 : : } else
81 : 17784429 : curr = list_first_entry(&wq_head->head, wait_queue_entry_t, entry);
82 : :
83 [ + + ]: 17784429 : if (&curr->entry == &wq_head->head)
84 : : return nr_exclusive;
85 : :
86 [ + + ]: 9084442 : list_for_each_entry_safe_from(curr, next, &wq_head->head, entry) {
87 : 5112105 : unsigned flags = curr->flags;
88 : : int ret;
89 : :
90 [ - + ]: 5112105 : if (flags & WQ_FLAG_BOOKMARK)
91 : 0 : continue;
92 : :
93 : 5112105 : ret = curr->func(curr, mode, wake_flags, key);
94 [ + + ]: 5110337 : if (ret < 0)
95 : : break;
96 [ + + + + + + ]: 5111064 : if (ret && (flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
97 : : break;
98 : :
99 [ + + - + # # ]: 4033396 : if (bookmark && (++cnt > WAITQUEUE_WALK_BREAK_CNT) &&
100 : 0 : (&next->entry != &wq_head->head)) {
101 : 0 : bookmark->flags = WQ_FLAG_BOOKMARK;
102 : 0 : list_add_tail(&bookmark->entry, &next->entry);
103 : : break;
104 : : }
105 : : }
106 : :
107 : 5049278 : return nr_exclusive;
108 : : }
109 : :
110 : 16432263 : static void __wake_up_common_lock(struct wait_queue_head *wq_head, unsigned int mode,
111 : : int nr_exclusive, int wake_flags, void *key)
112 : : {
113 : : unsigned long flags;
114 : : wait_queue_entry_t bookmark;
115 : :
116 : 16432263 : bookmark.flags = 0;
117 : 16432263 : bookmark.private = NULL;
118 : 16432263 : bookmark.func = NULL;
119 : : INIT_LIST_HEAD(&bookmark.entry);
120 : :
121 : : do {
122 : 16432263 : spin_lock_irqsave(&wq_head->lock, flags);
123 : 16438056 : nr_exclusive = __wake_up_common(wq_head, mode, nr_exclusive,
124 : : wake_flags, key, &bookmark);
125 : : spin_unlock_irqrestore(&wq_head->lock, flags);
126 [ - + ]: 16439328 : } while (bookmark.flags & WQ_FLAG_BOOKMARK);
127 : 16439328 : }
128 : :
129 : : /**
130 : : * __wake_up - wake up threads blocked on a waitqueue.
131 : : * @wq_head: the waitqueue
132 : : * @mode: which threads
133 : : * @nr_exclusive: how many wake-one or wake-many threads to wake up
134 : : * @key: is directly passed to the wakeup function
135 : : *
136 : : * If this function wakes up a task, it executes a full memory barrier before
137 : : * accessing the task state.
138 : : */
139 : 13765804 : void __wake_up(struct wait_queue_head *wq_head, unsigned int mode,
140 : : int nr_exclusive, void *key)
141 : : {
142 : 13765804 : __wake_up_common_lock(wq_head, mode, nr_exclusive, 0, key);
143 : 13766879 : }
144 : : EXPORT_SYMBOL(__wake_up);
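
Callers normally reach __wake_up() through the wake_up()/wake_up_interruptible() macros from <linux/wait.h>. A minimal sketch of the producer side, assuming a hypothetical driver with its own waitqueue head and a data_ready flag (both names are illustrative, not part of this file):

#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(my_wq);	/* hypothetical waitqueue head */
static bool data_ready;			/* hypothetical wait condition */

static void my_producer(void)
{
	data_ready = true;	/* publish the condition before waking */
	wake_up(&my_wq);	/* expands to __wake_up(&my_wq, TASK_NORMAL, 1, NULL) */
}

The waiter relies on the full barrier implied by a successful wakeup, paired with the set_current_state() ordering on the sleeping side described further below.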
145 : :
146 : : /*
147 : : * Same as __wake_up but called with the spinlock in wait_queue_head_t held.
148 : : */
149 : 567425 : void __wake_up_locked(struct wait_queue_head *wq_head, unsigned int mode, int nr)
150 : : {
151 : 567425 : __wake_up_common(wq_head, mode, nr, 0, NULL, NULL);
152 : 567278 : }
153 : : EXPORT_SYMBOL_GPL(__wake_up_locked);
154 : :
155 : 158665 : void __wake_up_locked_key(struct wait_queue_head *wq_head, unsigned int mode, void *key)
156 : : {
157 : 158665 : __wake_up_common(wq_head, mode, 1, 0, key, NULL);
158 : 158613 : }
159 : : EXPORT_SYMBOL_GPL(__wake_up_locked_key);
160 : :
161 : 620011 : void __wake_up_locked_key_bookmark(struct wait_queue_head *wq_head,
162 : : unsigned int mode, void *key, wait_queue_entry_t *bookmark)
163 : : {
164 : 620011 : __wake_up_common(wq_head, mode, 1, 0, key, bookmark);
165 : 620012 : }
166 : : EXPORT_SYMBOL_GPL(__wake_up_locked_key_bookmark);
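
The _locked variants are for callers that already hold wq_head->lock, typically because the wakeup condition itself is protected by that lock (the completion code follows this pattern). A hedged sketch, reusing the hypothetical my_wq from the earlier example and a hypothetical done flag:

static bool done;	/* hypothetical condition guarded by my_wq.lock */

static void my_locked_waker(void)
{
	unsigned long flags;

	spin_lock_irqsave(&my_wq.lock, flags);
	done = true;				  /* update state under the waitqueue lock */
	__wake_up_locked(&my_wq, TASK_NORMAL, 1); /* must not take wq->lock again */
	spin_unlock_irqrestore(&my_wq.lock, flags);
}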
167 : :
168 : : /**
169 : : * __wake_up_sync_key - wake up threads blocked on a waitqueue.
170 : : * @wq_head: the waitqueue
171 : : * @mode: which threads
172 : : * @nr_exclusive: how many wake-one or wake-many threads to wake up
173 : : * @key: opaque value to be passed to wakeup targets
174 : : *
175 : : * The sync wakeup differs in that the waker knows that it will schedule
176 : : * away soon, so while the target thread will be woken up, it will not
177 : : * be migrated to another CPU - ie. the two threads are 'synchronized'
178 : : * with each other. This can prevent needless bouncing between CPUs.
179 : : *
180 : : * On UP it can prevent extra preemption.
181 : : *
182 : : * If this function wakes up a task, it executes a full memory barrier before
183 : : * accessing the task state.
184 : : */
185 : 2672222 : void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode,
186 : : int nr_exclusive, void *key)
187 : : {
188 : : int wake_flags = 1; /* XXX WF_SYNC */
189 : :
190 [ # # + + ]: 2672222 : if (unlikely(!wq_head))
191 : 2672582 : return;
192 : :
193 [ # # - + ]: 2672321 : if (unlikely(nr_exclusive != 1))
194 : : wake_flags = 0;
195 : :
196 : 2672321 : __wake_up_common_lock(wq_head, mode, nr_exclusive, wake_flags, key);
197 : : }
198 : : EXPORT_SYMBOL_GPL(__wake_up_sync_key);
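
In practice this is reached through wake_up_interruptible_sync() and friends: a waker that is about to block anyway (for example a writer handing data directly to a single reader) can hint the scheduler not to migrate the wakee. A rough sketch, assuming the same hypothetical my_wq/data_ready pair as above:

static void my_handoff(void)
{
	data_ready = true;
	/* This path blocks shortly itself, so keep the wakee on this CPU. */
	wake_up_interruptible_sync(&my_wq);
	/* ... soon followed by a schedule() on this path ... */
}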
199 : :
200 : : /*
201 : : * __wake_up_sync - see __wake_up_sync_key()
202 : : */
203 : 0 : void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode, int nr_exclusive)
204 : : {
205 : : __wake_up_sync_key(wq_head, mode, nr_exclusive, NULL);
206 : 0 : }
207 : : EXPORT_SYMBOL_GPL(__wake_up_sync); /* For internal use only */
208 : :
209 : : /*
210 : : * Note: we use "set_current_state()" _after_ the wait-queue add,
211 : : * because we need a memory barrier there on SMP, so that any
212 : : * wake-function that tests for the wait-queue being active
213 : : * will be guaranteed to see waitqueue addition _or_ subsequent
214 : : * tests in this thread will see the wakeup having taken place.
215 : : *
216 : : * The spin_unlock() itself is semi-permeable and only protects
217 : : * one way (it only protects stuff inside the critical region and
218 : : * stops them from bleeding out - it would still allow subsequent
219 : : * loads to move into the critical region).
220 : : */
221 : : void
222 : 357050 : prepare_to_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
223 : : {
224 : : unsigned long flags;
225 : :
226 : 357050 : wq_entry->flags &= ~WQ_FLAG_EXCLUSIVE;
227 : 357050 : spin_lock_irqsave(&wq_head->lock, flags);
228 [ + + ]: 714100 : if (list_empty(&wq_entry->entry))
229 : : __add_wait_queue(wq_head, wq_entry);
230 : 1071150 : set_current_state(state);
231 : : spin_unlock_irqrestore(&wq_head->lock, flags);
232 : 357050 : }
233 : : EXPORT_SYMBOL(prepare_to_wait);
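
prepare_to_wait() and finish_wait() support the classic open-coded wait loop: the condition is re-tested only after the task state has been set, so a wakeup racing with the test cannot be lost. A minimal sketch with the hypothetical my_wq/data_ready names used earlier:

static int my_wait(void)
{
	DEFINE_WAIT(wait);	/* uses autoremove_wake_function */
	int err = 0;

	for (;;) {
		prepare_to_wait(&my_wq, &wait, TASK_INTERRUPTIBLE);
		if (data_ready)		/* re-check after the state is set */
			break;
		if (signal_pending(current)) {
			err = -ERESTARTSYS;
			break;
		}
		schedule();
	}
	finish_wait(&my_wq, &wait);
	return err;
}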
234 : :
235 : : void
236 : 2965 : prepare_to_wait_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
237 : : {
238 : : unsigned long flags;
239 : :
240 : 2965 : wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
241 : 2965 : spin_lock_irqsave(&wq_head->lock, flags);
242 [ + - ]: 5930 : if (list_empty(&wq_entry->entry))
243 : : __add_wait_queue_entry_tail(wq_head, wq_entry);
244 : 8895 : set_current_state(state);
245 : : spin_unlock_irqrestore(&wq_head->lock, flags);
246 : 2965 : }
247 : : EXPORT_SYMBOL(prepare_to_wait_exclusive);
248 : :
249 : 346910 : void init_wait_entry(struct wait_queue_entry *wq_entry, int flags)
250 : : {
251 : 346910 : wq_entry->flags = flags;
252 : 346910 : wq_entry->private = current;
253 : 346910 : wq_entry->func = autoremove_wake_function;
254 : 346910 : INIT_LIST_HEAD(&wq_entry->entry);
255 : 346910 : }
256 : : EXPORT_SYMBOL(init_wait_entry);
257 : :
258 : 690217 : long prepare_to_wait_event(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
259 : : {
260 : : unsigned long flags;
261 : : long ret = 0;
262 : :
263 : 690217 : spin_lock_irqsave(&wq_head->lock, flags);
264 [ - + ]: 1380680 : if (signal_pending_state(state, current)) {
265 : : /*
266 : : * Exclusive waiter must not fail if it was selected by wakeup,
267 : : * it should "consume" the condition we were waiting for.
268 : : *
269 : : * The caller will recheck the condition and return success if
270 : : * we were already woken up, we can not miss the event because
271 : : * wakeup locks/unlocks the same wq_head->lock.
272 : : *
273 : : * But we need to ensure that set-condition + wakeup after that
274 : : * can't see us, it should wake up another exclusive waiter if
275 : : * we fail.
276 : : */
277 : 0 : list_del_init(&wq_entry->entry);
278 : : ret = -ERESTARTSYS;
279 : : } else {
280 [ + + ]: 1380680 : if (list_empty(&wq_entry->entry)) {
281 [ + + ]: 690347 : if (wq_entry->flags & WQ_FLAG_EXCLUSIVE)
282 : : __add_wait_queue_entry_tail(wq_head, wq_entry);
283 : : else
284 : : __add_wait_queue(wq_head, wq_entry);
285 : : }
286 : 2071020 : set_current_state(state);
287 : : }
288 : : spin_unlock_irqrestore(&wq_head->lock, flags);
289 : :
290 : 690349 : return ret;
291 : : }
292 : : EXPORT_SYMBOL(prepare_to_wait_event);
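
prepare_to_wait_event() is the helper behind the wait_event*() macro family, which most callers should prefer over the open-coded loop shown earlier. A hedged one-liner, again with the hypothetical names:

static int my_wait_event(void)
{
	/* Sleeps until data_ready becomes true; -ERESTARTSYS if a signal arrives first. */
	return wait_event_interruptible(my_wq, data_ready);
}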
293 : :
294 : : /*
295 : : * Note! These two wait functions are entered with the
296 : : * wait-queue lock held (and interrupts off in the _irq
297 : : * case), so there is no race with testing the wakeup
298 : : * condition in the caller before they add the wait
299 : : * entry to the wake queue.
300 : : */
301 : 0 : int do_wait_intr(wait_queue_head_t *wq, wait_queue_entry_t *wait)
302 : : {
303 [ # # ]: 0 : if (likely(list_empty(&wait->entry)))
304 : : __add_wait_queue_entry_tail(wq, wait);
305 : :
306 : 0 : set_current_state(TASK_INTERRUPTIBLE);
307 [ # # ]: 0 : if (signal_pending(current))
308 : : return -ERESTARTSYS;
309 : :
310 : : spin_unlock(&wq->lock);
311 : 0 : schedule();
312 : : spin_lock(&wq->lock);
313 : :
314 : 0 : return 0;
315 : : }
316 : : EXPORT_SYMBOL(do_wait_intr);
317 : :
318 : 0 : int do_wait_intr_irq(wait_queue_head_t *wq, wait_queue_entry_t *wait)
319 : : {
320 [ # # ]: 0 : if (likely(list_empty(&wait->entry)))
321 : : __add_wait_queue_entry_tail(wq, wait);
322 : :
323 : 0 : set_current_state(TASK_INTERRUPTIBLE);
324 [ # # ]: 0 : if (signal_pending(current))
325 : : return -ERESTARTSYS;
326 : :
327 : : spin_unlock_irq(&wq->lock);
328 : 0 : schedule();
329 : : spin_lock_irq(&wq->lock);
330 : :
331 : 0 : return 0;
332 : : }
333 : : EXPORT_SYMBOL(do_wait_intr_irq);
334 : :
335 : : /**
336 : : * finish_wait - clean up after waiting in a queue
337 : : * @wq_head: waitqueue waited on
338 : : * @wq_entry: wait descriptor
339 : : *
340 : : * Sets current thread back to running state and removes
341 : : * the wait descriptor from the given waitqueue if still
342 : : * queued.
343 : : */
344 : 1328449 : void finish_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
345 : : {
346 : : unsigned long flags;
347 : :
348 : 1328449 : __set_current_state(TASK_RUNNING);
349 : : /*
350 : : * We can check for list emptiness outside the lock
351 : : * IFF:
352 : : * - we use the "careful" check that verifies both
353 : : * the next and prev pointers, so that there cannot
354 : : * be any half-pending updates in progress on other
355 : : * CPU's that we haven't seen yet (and that might
356 : : * still change the stack area).
357 : : * and
358 : : * - all other users take the lock (ie we can only
359 : : * have _one_ other CPU that looks at or modifies
360 : : * the list).
361 : : */
362 [ + + ]: 2656898 : if (!list_empty_careful(&wq_entry->entry)) {
363 : 349211 : spin_lock_irqsave(&wq_head->lock, flags);
364 : : list_del_init(&wq_entry->entry);
365 : : spin_unlock_irqrestore(&wq_head->lock, flags);
366 : : }
367 : 1328450 : }
368 : : EXPORT_SYMBOL(finish_wait);
369 : :
370 : 1942944 : int autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key)
371 : : {
372 : 1942944 : int ret = default_wake_function(wq_entry, mode, sync, key);
373 : :
374 [ + + ]: 1942960 : if (ret)
375 : 1941167 : list_del_init(&wq_entry->entry);
376 : :
377 : 1942960 : return ret;
378 : : }
379 : : EXPORT_SYMBOL(autoremove_wake_function);
380 : :
381 : 207 : static inline bool is_kthread_should_stop(void)
382 : : {
383 [ - + # # ]: 207 : return (current->flags & PF_KTHREAD) && kthread_should_stop();
384 : : }
385 : :
386 : : /*
387 : : * DEFINE_WAIT_FUNC(wait, woken_wake_func);
388 : : *
389 : : * add_wait_queue(&wq_head, &wait);
390 : : * for (;;) {
391 : : * if (condition)
392 : : * break;
393 : : *
394 : : * // in wait_woken() // in woken_wake_function()
395 : : *
396 : : * p->state = mode; wq_entry->flags |= WQ_FLAG_WOKEN;
397 : : * smp_mb(); // A try_to_wake_up():
398 : : * if (!(wq_entry->flags & WQ_FLAG_WOKEN)) <full barrier>
399 : : * schedule() if (p->state & mode)
400 : : * p->state = TASK_RUNNING; p->state = TASK_RUNNING;
401 : : * wq_entry->flags &= ~WQ_FLAG_WOKEN; ~~~~~~~~~~~~~~~~~~
402 : : * smp_mb(); // B condition = true;
403 : : * } smp_mb(); // C
404 : : * remove_wait_queue(&wq_head, &wait); wq_entry->flags |= WQ_FLAG_WOKEN;
405 : : */
406 : 207 : long wait_woken(struct wait_queue_entry *wq_entry, unsigned mode, long timeout)
407 : : {
408 : : /*
409 : : * The below executes an smp_mb(), which matches with the full barrier
410 : : * executed by the try_to_wake_up() in woken_wake_function() such that
411 : : * either we see the store to wq_entry->flags in woken_wake_function()
412 : : * or woken_wake_function() sees our store to current->state.
413 : : */
414 : 621 : set_current_state(mode); /* A */
415 [ + - + - ]: 207 : if (!(wq_entry->flags & WQ_FLAG_WOKEN) && !is_kthread_should_stop())
416 : 207 : timeout = schedule_timeout(timeout);
417 : 0 : __set_current_state(TASK_RUNNING);
418 : :
419 : : /*
420 : : * The below executes an smp_mb(), which matches with the smp_mb() (C)
421 : : * in woken_wake_function() such that either we see the wait condition
422 : : * being true or the store to wq_entry->flags in woken_wake_function()
423 : : * follows ours in the coherence order.
424 : : */
425 : 0 : smp_store_mb(wq_entry->flags, wq_entry->flags & ~WQ_FLAG_WOKEN); /* B */
426 : :
427 : 0 : return timeout;
428 : : }
429 : : EXPORT_SYMBOL(wait_woken);
430 : :
431 : 2799 : int woken_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key)
432 : : {
433 : : /* Pairs with the smp_store_mb() in wait_woken(). */
434 : 2799 : smp_mb(); /* C */
435 : 2799 : wq_entry->flags |= WQ_FLAG_WOKEN;
436 : :
437 : 2799 : return default_wake_function(wq_entry, mode, sync, key);
438 : : }
439 : : EXPORT_SYMBOL(woken_wake_function);
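
Putting wait_woken() and woken_wake_function() together yields the pattern sketched in the comment above; networking code uses it because the WQ_FLAG_WOKEN handshake tolerates wakeups that race with the condition check. A hedged example with the same hypothetical names and an illustrative one-second budget:

static bool my_wait_woken(void)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	long timeout = msecs_to_jiffies(1000);	/* hypothetical budget */

	add_wait_queue(&my_wq, &wait);
	while (!data_ready && timeout)
		timeout = wait_woken(&wait, TASK_INTERRUPTIBLE, timeout);
	remove_wait_queue(&my_wq, &wait);

	return data_ready;
}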