Branch data Line data Source code
1 : : // SPDX-License-Identifier: GPL-2.0-only
2 : : /*
3 : : * Generic waiting primitives.
4 : : *
5 : : * (C) 2004 Nadia Yvette Chambers, Oracle
6 : : */
7 : : #include "sched.h"
8 : :
9 : 205851 : void __init_waitqueue_head(struct wait_queue_head *wq_head, const char *name, struct lock_class_key *key)
10 : : {
11 : 205851 : spin_lock_init(&wq_head->lock);
12 : 205851 : lockdep_set_class_and_name(&wq_head->lock, key, name);
13 : 205851 : INIT_LIST_HEAD(&wq_head->head);
14 : 205851 : }
15 : :
16 : : EXPORT_SYMBOL(__init_waitqueue_head);
17 : :
18 : 37274 : void add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
19 : : {
20 : 37274 : unsigned long flags;
21 : :
22 : 37274 : wq_entry->flags &= ~WQ_FLAG_EXCLUSIVE;
23 : 37274 : spin_lock_irqsave(&wq_head->lock, flags);
24 : 37274 : __add_wait_queue(wq_head, wq_entry);
25 : 37274 : spin_unlock_irqrestore(&wq_head->lock, flags);
26 : 37274 : }
27 : : EXPORT_SYMBOL(add_wait_queue);
28 : :
29 : 0 : void add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
30 : : {
31 : 0 : unsigned long flags;
32 : :
33 : 0 : wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
34 : 0 : spin_lock_irqsave(&wq_head->lock, flags);
35 : 0 : __add_wait_queue_entry_tail(wq_head, wq_entry);
36 : 0 : spin_unlock_irqrestore(&wq_head->lock, flags);
37 : 0 : }
38 : : EXPORT_SYMBOL(add_wait_queue_exclusive);
39 : :
40 : 34758 : void remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
41 : : {
42 : 34758 : unsigned long flags;
43 : :
44 : 34758 : spin_lock_irqsave(&wq_head->lock, flags);
45 : 34758 : __remove_wait_queue(wq_head, wq_entry);
46 : 34758 : spin_unlock_irqrestore(&wq_head->lock, flags);
47 : 34758 : }
48 : : EXPORT_SYMBOL(remove_wait_queue);
49 : :
50 : : /*
51 : : * Scan threshold to break wait queue walk.
52 : : * This allows a waker to take a break from holding the
53 : : * wait queue lock during the wait queue walk.
54 : : */
55 : : #define WAITQUEUE_WALK_BREAK_CNT 64
56 : :
57 : : /*
58 : : * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just
59 : : * wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve
60 : : * number) then we wake all the non-exclusive tasks and one exclusive task.
61 : : *
62 : : * There are circumstances in which we can try to wake a task which has already
63 : : * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
64 : : * zero in this (rare) case, and we handle it by continuing to scan the queue.
65 : : */
66 : 1114081 : static int __wake_up_common(struct wait_queue_head *wq_head, unsigned int mode,
67 : : int nr_exclusive, int wake_flags, void *key,
68 : : wait_queue_entry_t *bookmark)
69 : : {
70 : 1114081 : wait_queue_entry_t *curr, *next;
71 : 1114081 : int cnt = 0;
72 : :
73 : 1114081 : lockdep_assert_held(&wq_head->lock);
74 : :
75 [ + + - + ]: 1114081 : if (bookmark && (bookmark->flags & WQ_FLAG_BOOKMARK)) {
76 : 0 : curr = list_next_entry(bookmark, entry);
77 : :
78 : 0 : list_del(&bookmark->entry);
79 : 0 : bookmark->flags = 0;
80 : : } else
81 : 1114081 : curr = list_first_entry(&wq_head->head, wait_queue_entry_t, entry);
82 : :
83 [ + + ]: 1114081 : if (&curr->entry == &wq_head->head)
84 : : return nr_exclusive;
85 : :
86 [ + + ]: 257343 : list_for_each_entry_safe_from(curr, next, &wq_head->head, entry) {
87 : 148476 : unsigned flags = curr->flags;
88 : 148476 : int ret;
89 : :
90 [ - + ]: 148476 : if (flags & WQ_FLAG_BOOKMARK)
91 : 0 : continue;
92 : :
93 : 148476 : ret = curr->func(curr, mode, wake_flags, key);
94 [ + - ]: 148476 : if (ret < 0)
95 : : break;
96 [ + + + + : 148476 : if (ret && (flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
+ + ]
97 : : break;
98 : :
99 [ + + - + ]: 111392 : if (bookmark && (++cnt > WAITQUEUE_WALK_BREAK_CNT) &&
100 [ # # ]: 0 : (&next->entry != &wq_head->head)) {
101 : 0 : bookmark->flags = WQ_FLAG_BOOKMARK;
102 : 0 : list_add_tail(&bookmark->entry, &next->entry);
103 : : break;
104 : : }
105 : : }
106 : :
107 : : return nr_exclusive;
108 : : }
109 : :
110 : 1066882 : static void __wake_up_common_lock(struct wait_queue_head *wq_head, unsigned int mode,
111 : : int nr_exclusive, int wake_flags, void *key)
112 : : {
113 : 1066882 : unsigned long flags;
114 : 1066882 : wait_queue_entry_t bookmark;
115 : :
116 : 1066882 : bookmark.flags = 0;
117 : 1066882 : bookmark.private = NULL;
118 : 1066882 : bookmark.func = NULL;
119 : 1066882 : INIT_LIST_HEAD(&bookmark.entry);
120 : :
121 : 1066882 : do {
122 : 1066882 : spin_lock_irqsave(&wq_head->lock, flags);
123 : 1066882 : nr_exclusive = __wake_up_common(wq_head, mode, nr_exclusive,
124 : : wake_flags, key, &bookmark);
125 : 1066882 : spin_unlock_irqrestore(&wq_head->lock, flags);
126 [ - + ]: 1066882 : } while (bookmark.flags & WQ_FLAG_BOOKMARK);
127 : 1066882 : }
128 : :
129 : : /**
130 : : * __wake_up - wake up threads blocked on a waitqueue.
131 : : * @wq_head: the waitqueue
132 : : * @mode: which threads
133 : : * @nr_exclusive: how many wake-one or wake-many threads to wake up
134 : : * @key: is directly passed to the wakeup function
135 : : *
136 : : * If this function wakes up a task, it executes a full memory barrier before
137 : : * accessing the task state.
138 : : */
139 : 1001780 : void __wake_up(struct wait_queue_head *wq_head, unsigned int mode,
140 : : int nr_exclusive, void *key)
141 : : {
142 : 1001780 : __wake_up_common_lock(wq_head, mode, nr_exclusive, 0, key);
143 : 1001780 : }
144 : : EXPORT_SYMBOL(__wake_up);
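/*
 * Illustrative sketch only (not part of this file): the usual pairing in
 * which a plain wake_up() resolves to __wake_up(wq, TASK_NORMAL, 1, NULL).
 * The identifiers my_wq/my_done/my_consumer/my_producer are hypothetical.
 */
static DECLARE_WAIT_QUEUE_HEAD(my_wq);
static bool my_done;

static int my_consumer(void *unused)
{
	/* Sleeps until my_done is true; the condition is re-checked after each wakeup. */
	if (wait_event_interruptible(my_wq, my_done))
		return -ERESTARTSYS;
	return 0;
}

static void my_producer(void)
{
	my_done = true;		/* publish the condition first ...            */
	wake_up(&my_wq);	/* ... then wake the non-exclusive waiters     */
}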
145 : :
146 : : /*
147 : : * Same as __wake_up but called with the spinlock in wait_queue_head_t held.
148 : : */
149 : 27956 : void __wake_up_locked(struct wait_queue_head *wq_head, unsigned int mode, int nr)
150 : : {
151 : 27956 : __wake_up_common(wq_head, mode, nr, 0, NULL, NULL);
152 : 27956 : }
153 : : EXPORT_SYMBOL_GPL(__wake_up_locked);
154 : :
155 : 219 : void __wake_up_locked_key(struct wait_queue_head *wq_head, unsigned int mode, void *key)
156 : : {
157 : 219 : __wake_up_common(wq_head, mode, 1, 0, key, NULL);
158 : 219 : }
159 : : EXPORT_SYMBOL_GPL(__wake_up_locked_key);
160 : :
161 : 19024 : void __wake_up_locked_key_bookmark(struct wait_queue_head *wq_head,
162 : : unsigned int mode, void *key, wait_queue_entry_t *bookmark)
163 : : {
164 : 19024 : __wake_up_common(wq_head, mode, 1, 0, key, bookmark);
165 : 19024 : }
166 : : EXPORT_SYMBOL_GPL(__wake_up_locked_key_bookmark);
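/*
 * Illustrative sketch only (not part of this file): a caller can use the
 * bookmark variant to wake a very long queue in batches of roughly
 * WAITQUEUE_WALK_BREAK_CNT entries, dropping wq_head->lock between rounds
 * so the walk never holds the lock (and IRQs off) for too long.  This
 * mirrors __wake_up_common_lock() above; the function name is hypothetical.
 */
static void example_wake_all_bookmarked(struct wait_queue_head *wq_head, void *key)
{
	wait_queue_entry_t bookmark;
	unsigned long flags;

	bookmark.flags = 0;
	bookmark.private = NULL;
	bookmark.func = NULL;
	INIT_LIST_HEAD(&bookmark.entry);

	do {
		spin_lock_irqsave(&wq_head->lock, flags);
		__wake_up_locked_key_bookmark(wq_head, TASK_NORMAL, key, &bookmark);
		spin_unlock_irqrestore(&wq_head->lock, flags);
	} while (bookmark.flags & WQ_FLAG_BOOKMARK);
}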
167 : :
168 : : /**
169 : : * __wake_up_sync_key - wake up threads blocked on a waitqueue.
170 : : * @wq_head: the waitqueue
171 : : * @mode: which threads
172 : : * @key: opaque value to be passed to wakeup targets
173 : : *
 174                 :            :  * The sync wakeup differs in that the waker knows that it will schedule
175 : : * away soon, so while the target thread will be woken up, it will not
176 : : * be migrated to another CPU - ie. the two threads are 'synchronized'
177 : : * with each other. This can prevent needless bouncing between CPUs.
178 : : *
179 : : * On UP it can prevent extra preemption.
180 : : *
181 : : * If this function wakes up a task, it executes a full memory barrier before
182 : : * accessing the task state.
183 : : */
184 : 65102 : void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode,
185 : : void *key)
186 : : {
187 [ + - ]: 65102 : if (unlikely(!wq_head))
188 : : return;
189 : :
190 : 65102 : __wake_up_common_lock(wq_head, mode, 1, WF_SYNC, key);
191 : : }
192 : : EXPORT_SYMBOL_GPL(__wake_up_sync_key);
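/*
 * Illustrative sketch only (not part of this file): a waker that is itself
 * about to block can use a _sync wakeup so the scheduler neither preempts
 * it nor migrates the wakee to another CPU just to run it immediately.  In
 * kernels matching this file, wake_up_interruptible_sync() typically expands
 * to __wake_up_sync(wq, TASK_INTERRUPTIBLE).  The names below are hypothetical.
 */
static void example_handoff(struct wait_queue_head *wq, bool *ready)
{
	*ready = true;
	wake_up_interruptible_sync(wq);	/* hint: we are about to sleep ourselves */
	/* ... followed by this task blocking, e.g. in its own wait_event(). */
}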
193 : :
194 : : /**
195 : : * __wake_up_locked_sync_key - wake up a thread blocked on a locked waitqueue.
196 : : * @wq_head: the waitqueue
197 : : * @mode: which threads
198 : : * @key: opaque value to be passed to wakeup targets
199 : : *
200 : : * The sync wakeup differs in that the waker knows that it will schedule
201 : : * away soon, so while the target thread will be woken up, it will not
202 : : * be migrated to another CPU - ie. the two threads are 'synchronized'
203 : : * with each other. This can prevent needless bouncing between CPUs.
204 : : *
205 : : * On UP it can prevent extra preemption.
206 : : *
207 : : * If this function wakes up a task, it executes a full memory barrier before
208 : : * accessing the task state.
209 : : */
210 : 0 : void __wake_up_locked_sync_key(struct wait_queue_head *wq_head,
211 : : unsigned int mode, void *key)
212 : : {
213 : 0 : __wake_up_common(wq_head, mode, 1, WF_SYNC, key, NULL);
214 : 0 : }
215 : : EXPORT_SYMBOL_GPL(__wake_up_locked_sync_key);
216 : :
217 : : /*
218 : : * __wake_up_sync - see __wake_up_sync_key()
219 : : */
220 : 0 : void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode)
221 : : {
222 [ # # ]: 0 : __wake_up_sync_key(wq_head, mode, NULL);
223 : 0 : }
224 : : EXPORT_SYMBOL_GPL(__wake_up_sync); /* For internal use only */
225 : :
226 : : /*
227 : : * Note: we use "set_current_state()" _after_ the wait-queue add,
228 : : * because we need a memory barrier there on SMP, so that any
229 : : * wake-function that tests for the wait-queue being active
230 : : * will be guaranteed to see waitqueue addition _or_ subsequent
 231                 :            :  * will be guaranteed to see the waitqueue addition _or_ subsequent
232 : : *
233 : : * The spin_unlock() itself is semi-permeable and only protects
234 : : * one way (it only protects stuff inside the critical region and
235 : : * stops them from bleeding out - it would still allow subsequent
236 : : * loads to move into the critical region).
237 : : */
238 : : void
239 : 3887 : prepare_to_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
240 : : {
241 : 3887 : unsigned long flags;
242 : :
243 : 3887 : wq_entry->flags &= ~WQ_FLAG_EXCLUSIVE;
244 : 3887 : spin_lock_irqsave(&wq_head->lock, flags);
245 [ + - ]: 3887 : if (list_empty(&wq_entry->entry))
246 : 3887 : __add_wait_queue(wq_head, wq_entry);
247 : 3887 : set_current_state(state);
248 : 3887 : spin_unlock_irqrestore(&wq_head->lock, flags);
249 : 3887 : }
250 : : EXPORT_SYMBOL(prepare_to_wait);
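/*
 * Illustrative sketch only (not part of this file): the classic open-coded
 * wait loop built from prepare_to_wait()/finish_wait().  DEFINE_WAIT()
 * creates an on-stack entry whose wake function is autoremove_wake_function();
 * 'condition' stands in for whatever the caller is actually waiting on.
 */
static void example_wait_for(struct wait_queue_head *wq, bool (*condition)(void))
{
	DEFINE_WAIT(wait);

	for (;;) {
		/* Queue ourselves and set task state before testing the condition. */
		prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
		if (condition())
			break;
		schedule();
	}
	finish_wait(wq, &wait);
}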
251 : :
252 : : void
253 : 537 : prepare_to_wait_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
254 : : {
255 : 537 : unsigned long flags;
256 : :
257 : 537 : wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
258 : 537 : spin_lock_irqsave(&wq_head->lock, flags);
259 [ + - ]: 537 : if (list_empty(&wq_entry->entry))
260 : 537 : __add_wait_queue_entry_tail(wq_head, wq_entry);
261 : 537 : set_current_state(state);
262 : 537 : spin_unlock_irqrestore(&wq_head->lock, flags);
263 : 537 : }
264 : : EXPORT_SYMBOL(prepare_to_wait_exclusive);
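/*
 * Illustrative sketch only (not part of this file): the exclusive variant
 * queues the entry at the tail with WQ_FLAG_EXCLUSIVE set, so a plain
 * wake_up() (nr_exclusive == 1) wakes just one such waiter and avoids a
 * thundering herd when many tasks wait for a single resource.
 */
static void example_wait_exclusive(struct wait_queue_head *wq, bool (*condition)(void))
{
	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait_exclusive(wq, &wait, TASK_UNINTERRUPTIBLE);
		if (condition())
			break;
		schedule();
	}
	finish_wait(wq, &wait);
}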
265 : :
266 : 2354 : void init_wait_entry(struct wait_queue_entry *wq_entry, int flags)
267 : : {
268 : 2354 : wq_entry->flags = flags;
269 : 2354 : wq_entry->private = current;
270 : 2354 : wq_entry->func = autoremove_wake_function;
271 : 2354 : INIT_LIST_HEAD(&wq_entry->entry);
272 : 2354 : }
273 : : EXPORT_SYMBOL(init_wait_entry);
274 : :
275 : 4892 : long prepare_to_wait_event(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
276 : : {
277 : 4892 : unsigned long flags;
278 : 4892 : long ret = 0;
279 : :
280 : 4892 : spin_lock_irqsave(&wq_head->lock, flags);
281 [ + + ]: 4892 : if (signal_pending_state(state, current)) {
282 : : /*
283 : : * Exclusive waiter must not fail if it was selected by wakeup,
284 : : * it should "consume" the condition we were waiting for.
285 : : *
286 : : * The caller will recheck the condition and return success if
287 : : * we were already woken up, we can not miss the event because
288 : : * wakeup locks/unlocks the same wq_head->lock.
289 : : *
290 : : * But we need to ensure that set-condition + wakeup after that
291 : : * can't see us, it should wake up another exclusive waiter if
292 : : * we fail.
293 : : */
294 : 27 : list_del_init(&wq_entry->entry);
295 : 27 : ret = -ERESTARTSYS;
296 : : } else {
297 [ + + ]: 4865 : if (list_empty(&wq_entry->entry)) {
298 [ + + ]: 4781 : if (wq_entry->flags & WQ_FLAG_EXCLUSIVE)
299 : 619 : __add_wait_queue_entry_tail(wq_head, wq_entry);
300 : : else
301 : 4162 : __add_wait_queue(wq_head, wq_entry);
302 : : }
303 : 4865 : set_current_state(state);
304 : : }
305 : 4892 : spin_unlock_irqrestore(&wq_head->lock, flags);
306 : :
307 : 4892 : return ret;
308 : : }
309 : : EXPORT_SYMBOL(prepare_to_wait_event);
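/*
 * Illustrative sketch only (not part of this file): a simplified form of the
 * loop that the wait_event_interruptible()-style macros expand to around
 * prepare_to_wait_event().  The real ___wait_event() macro additionally
 * handles exclusive waits, timeouts and custom sleep commands.
 */
static long example_wait_event_interruptible(struct wait_queue_head *wq,
					     bool (*condition)(void))
{
	struct wait_queue_entry wq_entry;
	long ret = 0;

	init_wait_entry(&wq_entry, 0);
	for (;;) {
		long intr = prepare_to_wait_event(wq, &wq_entry, TASK_INTERRUPTIBLE);

		if (condition())
			break;
		if (intr) {		/* -ERESTARTSYS: a signal is pending */
			ret = intr;
			break;
		}
		schedule();
	}
	finish_wait(wq, &wq_entry);
	return ret;
}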
310 : :
311 : : /*
312 : : * Note! These two wait functions are entered with the
313 : : * wait-queue lock held (and interrupts off in the _irq
314 : : * case), so there is no race with testing the wakeup
315 : : * condition in the caller before they add the wait
316 : : * entry to the wake queue.
317 : : */
318 : 0 : int do_wait_intr(wait_queue_head_t *wq, wait_queue_entry_t *wait)
319 : : {
320 [ # # ]: 0 : if (likely(list_empty(&wait->entry)))
321 : 0 : __add_wait_queue_entry_tail(wq, wait);
322 : :
323 : 0 : set_current_state(TASK_INTERRUPTIBLE);
324 [ # # ]: 0 : if (signal_pending(current))
325 : : return -ERESTARTSYS;
326 : :
327 : 0 : spin_unlock(&wq->lock);
328 : 0 : schedule();
329 : 0 : spin_lock(&wq->lock);
330 : :
331 : 0 : return 0;
332 : : }
333 : : EXPORT_SYMBOL(do_wait_intr);
334 : :
335 : 0 : int do_wait_intr_irq(wait_queue_head_t *wq, wait_queue_entry_t *wait)
336 : : {
337 [ # # ]: 0 : if (likely(list_empty(&wait->entry)))
338 : 0 : __add_wait_queue_entry_tail(wq, wait);
339 : :
340 : 0 : set_current_state(TASK_INTERRUPTIBLE);
341 [ # # ]: 0 : if (signal_pending(current))
342 : : return -ERESTARTSYS;
343 : :
344 : 0 : spin_unlock_irq(&wq->lock);
345 : 0 : schedule();
346 : 0 : spin_lock_irq(&wq->lock);
347 : :
348 : 0 : return 0;
349 : : }
350 : : EXPORT_SYMBOL(do_wait_intr_irq);
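/*
 * Illustrative sketch only (not part of this file): do_wait_intr() is meant
 * for callers that already hold wq->lock (the wait_event_interruptible_locked()
 * family is built on it).  The condition is tested under the lock, and
 * do_wait_intr() drops and retakes the lock around schedule().  Real users
 * normally hold the lock around their own state as well; the function name
 * and locking placement here are hypothetical.
 */
static int example_wait_locked(wait_queue_head_t *wq, bool (*condition)(void))
{
	DEFINE_WAIT(wait);
	int err = 0;

	spin_lock(&wq->lock);
	do {
		err = do_wait_intr(wq, &wait);	/* sleeps with wq->lock dropped */
		if (err)
			break;
	} while (!condition());
	__remove_wait_queue(wq, &wait);
	__set_current_state(TASK_RUNNING);
	spin_unlock(&wq->lock);

	return err;
}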
351 : :
352 : : /**
353 : : * finish_wait - clean up after waiting in a queue
354 : : * @wq_head: waitqueue waited on
355 : : * @wq_entry: wait descriptor
356 : : *
357 : : * Sets current thread back to running state and removes
358 : : * the wait descriptor from the given waitqueue if still
359 : : * queued.
360 : : */
361 : 25973 : void finish_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
362 : : {
363 : 25973 : unsigned long flags;
364 : :
365 [ + + ]: 25973 : __set_current_state(TASK_RUNNING);
366 : : /*
367 : : * We can check for list emptiness outside the lock
368 : : * IFF:
369 : : * - we use the "careful" check that verifies both
370 : : * the next and prev pointers, so that there cannot
371 : : * be any half-pending updates in progress on other
 372                 :            :  *    CPUs that we haven't seen yet (and that might
 373                 :            :  *    still change the stack area).
374 : : * and
375 : : * - all other users take the lock (ie we can only
376 : : * have _one_ other CPU that looks at or modifies
377 : : * the list).
378 : : */
379 [ + + ]: 25973 : if (!list_empty_careful(&wq_entry->entry)) {
380 : 2267 : spin_lock_irqsave(&wq_head->lock, flags);
381 : 2267 : list_del_init(&wq_entry->entry);
382 : 2267 : spin_unlock_irqrestore(&wq_head->lock, flags);
383 : : }
384 : 25973 : }
385 : : EXPORT_SYMBOL(finish_wait);
386 : :
387 : 25746 : int autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key)
388 : : {
389 : 25746 : int ret = default_wake_function(wq_entry, mode, sync, key);
390 : :
391 [ + - ]: 25746 : if (ret)
392 : 25746 : list_del_init(&wq_entry->entry);
393 : :
394 : 25746 : return ret;
395 : : }
396 : : EXPORT_SYMBOL(autoremove_wake_function);
397 : :
398 : 0 : static inline bool is_kthread_should_stop(void)
399 : : {
400 [ # # # # ]: 0 : return (current->flags & PF_KTHREAD) && kthread_should_stop();
401 : : }
402 : :
403 : : /*
404 : : * DEFINE_WAIT_FUNC(wait, woken_wake_func);
405 : : *
406 : : * add_wait_queue(&wq_head, &wait);
407 : : * for (;;) {
408 : : * if (condition)
409 : : * break;
410 : : *
411 : : * // in wait_woken() // in woken_wake_function()
412 : : *
413 : : * p->state = mode; wq_entry->flags |= WQ_FLAG_WOKEN;
414 : : * smp_mb(); // A try_to_wake_up():
415 : : * if (!(wq_entry->flags & WQ_FLAG_WOKEN)) <full barrier>
416 : : * schedule() if (p->state & mode)
417 : : * p->state = TASK_RUNNING; p->state = TASK_RUNNING;
418 : : * wq_entry->flags &= ~WQ_FLAG_WOKEN; ~~~~~~~~~~~~~~~~~~
419 : : * smp_mb(); // B condition = true;
420 : : * } smp_mb(); // C
421 : : * remove_wait_queue(&wq_head, &wait); wq_entry->flags |= WQ_FLAG_WOKEN;
422 : : */
423 : 0 : long wait_woken(struct wait_queue_entry *wq_entry, unsigned mode, long timeout)
424 : : {
425 : : /*
426 : : * The below executes an smp_mb(), which matches with the full barrier
427 : : * executed by the try_to_wake_up() in woken_wake_function() such that
428 : : * either we see the store to wq_entry->flags in woken_wake_function()
429 : : * or woken_wake_function() sees our store to current->state.
430 : : */
431 : 0 : set_current_state(mode); /* A */
432 [ # # # # ]: 0 : if (!(wq_entry->flags & WQ_FLAG_WOKEN) && !is_kthread_should_stop())
433 : 0 : timeout = schedule_timeout(timeout);
434 : 0 : __set_current_state(TASK_RUNNING);
435 : :
436 : : /*
437 : : * The below executes an smp_mb(), which matches with the smp_mb() (C)
438 : : * in woken_wake_function() such that either we see the wait condition
439 : : * being true or the store to wq_entry->flags in woken_wake_function()
440 : : * follows ours in the coherence order.
441 : : */
442 : 0 : smp_store_mb(wq_entry->flags, wq_entry->flags & ~WQ_FLAG_WOKEN); /* B */
443 : :
444 : 0 : return timeout;
445 : : }
446 : : EXPORT_SYMBOL(wait_woken);
447 : :
448 : 18956 : int woken_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key)
449 : : {
450 : : /* Pairs with the smp_store_mb() in wait_woken(). */
451 : 18956 : smp_mb(); /* C */
452 : 18956 : wq_entry->flags |= WQ_FLAG_WOKEN;
453 : :
454 : 18956 : return default_wake_function(wq_entry, mode, sync, key);
455 : : }
456 : : EXPORT_SYMBOL(woken_wake_function);
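/*
 * Illustrative sketch only (not part of this file): the pattern described in
 * the comment above wait_woken(), as used for example by socket receive
 * paths.  Because the entry's wake function is woken_wake_function(), a
 * wakeup sets WQ_FLAG_WOKEN and wait_woken() cannot lose it even if it
 * arrives before schedule_timeout().  The function name is hypothetical.
 */
static long example_wait_woken(struct wait_queue_head *wq,
			       bool (*condition)(void), long timeout)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	add_wait_queue(wq, &wait);
	while (!condition() && timeout)
		timeout = wait_woken(&wait, TASK_INTERRUPTIBLE, timeout);
	remove_wait_queue(wq, &wait);

	return timeout;
}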