Branch data Line data Source code
1 : : /* SPDX-License-Identifier: GPL-2.0+ */
2 : : /*
3 : : * RCU expedited grace periods
4 : : *
5 : : * Copyright IBM Corporation, 2016
6 : : *
7 : : * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
8 : : */
9 : :
10 : : #include <linux/lockdep.h>
11 : :
12 : : static void rcu_exp_handler(void *unused);
13 : : static int rcu_print_task_exp_stall(struct rcu_node *rnp);
14 : :
15 : : /*
16 : : * Record the start of an expedited grace period.
17 : : */
18 : 0 : static void rcu_exp_gp_seq_start(void)
19 : : {
20 : 0 : rcu_seq_start(&rcu_state.expedited_sequence);
21 : : }
22 : :
23 : : /*
24 : : * Return the value that the expedited-grace-period counter will have
25 : : * at the end of the current grace period.
26 : : */
27 : 0 : static __maybe_unused unsigned long rcu_exp_gp_seq_endval(void)
28 : : {
29 : 0 : return rcu_seq_endval(&rcu_state.expedited_sequence);
30 : : }
31 : :
32 : : /*
33 : : * Record the end of an expedited grace period.
34 : : */
35 : 0 : static void rcu_exp_gp_seq_end(void)
36 : : {
37 : 0 : rcu_seq_end(&rcu_state.expedited_sequence);
38 : 0 : smp_mb(); /* Ensure that consecutive grace periods serialize. */
39 : 0 : }
40 : :
41 : : /*
42 : : * Take a snapshot of the expedited-grace-period counter, which is the
43 : : * earliest value that will indicate that a full grace period has
44 : : * elapsed since the current time.
45 : : */
46 : 0 : static unsigned long rcu_exp_gp_seq_snap(void)
47 : : {
48 : 0 : unsigned long s;
49 : :
50 : 0 : smp_mb(); /* Caller's modifications seen first by other CPUs. */
51 : 0 : s = rcu_seq_snap(&rcu_state.expedited_sequence);
52 : 0 : trace_rcu_exp_grace_period(rcu_state.name, s, TPS("snap"));
53 : 0 : return s;
54 : : }
55 : :
56 : : /*
57 : : * Given a counter snapshot from rcu_exp_gp_seq_snap(), return true
58 : : * if a full expedited grace period has elapsed since that snapshot
59 : : * was taken.
60 : : */
61 : 0 : static bool rcu_exp_gp_seq_done(unsigned long s)
62 : : {
63 : 0 : return rcu_seq_done(&rcu_state.expedited_sequence, s);
64 : : }
65 : :
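/*
 * Illustrative sketch (stand-alone user-space model, not part of this file):
 * the helpers above wrap the rcu_seq_*() arithmetic from kernel/rcu/rcu.h,
 * in which the low two bits say whether a grace period is in flight and the
 * upper bits count completed grace periods.  The constants below mirror
 * RCU_SEQ_CTR_SHIFT and RCU_SEQ_STATE_MASK; everything else is assumed
 * purely for illustration.  It shows why a snapshot taken while a grace
 * period is already in progress is satisfied only by the *next* full one.
 */
#include <assert.h>
#include <stdio.h>

#define SEQ_CTR_SHIFT  2
#define SEQ_STATE_MASK ((1UL << SEQ_CTR_SHIFT) - 1)

static unsigned long seq;			/* stand-in for expedited_sequence */

static void seq_start(void) { seq++; }		/* low bits become nonzero */
static void seq_end(void)   { seq = (seq | SEQ_STATE_MASK) + 1; }

static unsigned long seq_snap(void)
{
	return (seq + 2 * SEQ_STATE_MASK + 1) & ~SEQ_STATE_MASK;
}

static int seq_done(unsigned long s) { return seq >= s; }	/* wrap ignored here */

int main(void)
{
	unsigned long s1, s2;

	s1 = seq_snap();		/* idle: needs one full GP, s1 == 4 */
	seq_start();
	s2 = seq_snap();		/* mid-GP: needs the GP after this one, s2 == 8 */
	seq_end();
	assert(seq_done(s1) && !seq_done(s2));
	seq_start();
	seq_end();
	assert(seq_done(s2));
	printf("s1=%lu s2=%lu seq=%lu\n", s1, s2, seq);
	return 0;
}
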
66 : : /*
67 : : * Reset the ->expmaskinit values in the rcu_node tree to reflect any
68 : : * recent CPU-online activity. Note that these masks are not cleared
69 : : * when CPUs go offline, so they reflect the union of all CPUs that have
70 : : * ever been online. This means that this function normally takes its
71 : : * no-work-to-do fastpath.
72 : : */
73 : 0 : static void sync_exp_reset_tree_hotplug(void)
74 : : {
75 : 0 : bool done;
76 : 0 : unsigned long flags;
77 : 0 : unsigned long mask;
78 : 0 : unsigned long oldmask;
79 : 0 : int ncpus = smp_load_acquire(&rcu_state.ncpus); /* Order vs. locking. */
80 : 0 : struct rcu_node *rnp;
81 : 0 : struct rcu_node *rnp_up;
82 : :
83 : : /* If no new CPUs onlined since last time, nothing to do. */
84 [ # # ]: 0 : if (likely(ncpus == rcu_state.ncpus_snap))
85 : : return;
86 : 0 : rcu_state.ncpus_snap = ncpus;
87 : :
88 : : /*
89 : : * Each pass through the following loop propagates newly onlined
90 : : * CPUs for the current rcu_node structure up the rcu_node tree.
91 : : */
92 [ # # ]: 0 : rcu_for_each_leaf_node(rnp) {
93 : 0 : raw_spin_lock_irqsave_rcu_node(rnp, flags);
94 [ # # ]: 0 : if (rnp->expmaskinit == rnp->expmaskinitnext) {
95 : 0 : raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
96 : 0 : continue; /* No new CPUs, nothing to do. */
97 : : }
98 : :
99 : : /* Update this node's mask, track old value for propagation. */
100 : 0 : oldmask = rnp->expmaskinit;
101 : 0 : rnp->expmaskinit = rnp->expmaskinitnext;
102 : 0 : raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
103 : :
104 : : /* If it was already nonzero, nothing to propagate. */
105 [ # # ]: 0 : if (oldmask)
106 : 0 : continue;
107 : :
108 : : /* Propagate the new CPU up the tree. */
109 : 0 : mask = rnp->grpmask;
110 : 0 : rnp_up = rnp->parent;
111 : 0 : done = false;
112 [ # # ]: 0 : while (rnp_up) {
113 : 0 : raw_spin_lock_irqsave_rcu_node(rnp_up, flags);
114 [ # # ]: 0 : if (rnp_up->expmaskinit)
115 : 0 : done = true;
116 : 0 : rnp_up->expmaskinit |= mask;
117 : 0 : raw_spin_unlock_irqrestore_rcu_node(rnp_up, flags);
118 [ # # ]: 0 : if (done)
119 : : break;
120 : 0 : mask = rnp_up->grpmask;
121 : 0 : rnp_up = rnp_up->parent;
122 : : }
123 : : }
124 : : }
125 : :
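/*
 * Illustrative sketch (assumed two-level tree and field names, not the
 * kernel's rcu_node layout): propagating a newly onlined leaf's ->grpmask
 * up the tree can stop as soon as an ancestor's ->expmaskinit was already
 * nonzero, because that ancestor's own bit was propagated further up by an
 * earlier pass, which is the "done" early exit in the loop above.
 */
#include <stdio.h>

struct node {
	struct node *parent;
	unsigned long grpmask;		/* this node's bit in parent's mask */
	unsigned long expmaskinit;	/* which children have ever had CPUs */
};

static void propagate_online(struct node *leaf)
{
	unsigned long mask = leaf->grpmask;
	struct node *up;

	for (up = leaf->parent; up; up = up->parent) {
		int done = up->expmaskinit != 0;	/* ancestor already covered */

		up->expmaskinit |= mask;
		if (done)
			break;
		mask = up->grpmask;
	}
}

int main(void)
{
	struct node root  = { .grpmask = 0 };
	struct node leaf0 = { .parent = &root, .grpmask = 1UL << 0 };
	struct node leaf1 = { .parent = &root, .grpmask = 1UL << 1 };

	leaf0.expmaskinit = 1;		/* first CPU comes online under leaf0 */
	propagate_online(&leaf0);
	leaf1.expmaskinit = 1;		/* later, a CPU under leaf1 */
	propagate_online(&leaf1);
	printf("root.expmaskinit=%#lx\n", root.expmaskinit);	/* prints 0x3 */
	return 0;
}
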
126 : : /*
127 : : * Reset the ->expmask values in the rcu_node tree in preparation for
128 : : * a new expedited grace period.
129 : : */
130 : 0 : static void __maybe_unused sync_exp_reset_tree(void)
131 : : {
132 : 0 : unsigned long flags;
133 : 0 : struct rcu_node *rnp;
134 : :
135 : 0 : sync_exp_reset_tree_hotplug();
136 [ # # ]: 0 : rcu_for_each_node_breadth_first(rnp) {
137 : 0 : raw_spin_lock_irqsave_rcu_node(rnp, flags);
138 [ # # ]: 0 : WARN_ON_ONCE(rnp->expmask);
139 : 0 : WRITE_ONCE(rnp->expmask, rnp->expmaskinit);
140 : 0 : raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
141 : : }
142 : 0 : }
143 : :
144 : : /*
145 : : * Return true if there is no RCU expedited grace period in progress
146 : : * for the specified rcu_node structure, in other words, if all CPUs and
147 : : * tasks covered by the specified rcu_node structure have done their bit
148 : : * for the current expedited grace period.
149 : : */
150 : 0 : static bool sync_rcu_exp_done(struct rcu_node *rnp)
151 : : {
152 : 0 : raw_lockdep_assert_held_rcu_node(rnp);
153 [ # # # # ]: 0 : return rnp->exp_tasks == NULL &&
154 [ # # # # ]: 0 : READ_ONCE(rnp->expmask) == 0;
155 : : }
156 : :
157 : : /*
158 : : * Like sync_rcu_exp_done(), but where the caller does not hold the
159 : : * rcu_node's ->lock.
160 : : */
161 : 0 : static bool sync_rcu_exp_done_unlocked(struct rcu_node *rnp)
162 : : {
163 : 0 : unsigned long flags;
164 : 0 : bool ret;
165 : :
166 : 0 : raw_spin_lock_irqsave_rcu_node(rnp, flags);
167 [ # # ]: 0 : ret = sync_rcu_exp_done(rnp);
168 : 0 : raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
169 : :
170 : 0 : return ret;
171 : : }
172 : :
173 : :
174 : : /*
175 : : * Report the exit from RCU read-side critical section for the last task
176 : : * that queued itself during or before the current expedited preemptible-RCU
177 : : * grace period. This event is reported either to the rcu_node structure on
178 : : * which the task was queued or to one of that rcu_node structure's ancestors,
179 : : * recursively up the tree. (Calm down, calm down, we do the recursion
180 : : * iteratively!)
181 : : */
182 : 0 : static void __rcu_report_exp_rnp(struct rcu_node *rnp,
183 : : bool wake, unsigned long flags)
184 : : __releases(rnp->lock)
185 : : {
186 : 0 : unsigned long mask;
187 : :
188 : 0 : raw_lockdep_assert_held_rcu_node(rnp);
189 : 0 : for (;;) {
190 [ # # # # ]: 0 : if (!sync_rcu_exp_done(rnp)) {
191 [ # # ]: 0 : if (!rnp->expmask)
192 : 0 : rcu_initiate_boost(rnp, flags);
193 : : else
194 : 0 : raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
195 : : break;
196 : : }
197 [ # # ]: 0 : if (rnp->parent == NULL) {
198 : 0 : raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
199 [ # # ]: 0 : if (wake) {
200 : 0 : smp_mb(); /* EGP done before wake_up(). */
201 : 0 : swake_up_one(&rcu_state.expedited_wq);
202 : : }
203 : : break;
204 : : }
205 : 0 : mask = rnp->grpmask;
206 : 0 : raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled */
207 : 0 : rnp = rnp->parent;
208 : 0 : raw_spin_lock_rcu_node(rnp); /* irqs already disabled */
209 [ # # ]: 0 : WARN_ON_ONCE(!(rnp->expmask & mask));
210 : 0 : WRITE_ONCE(rnp->expmask, rnp->expmask & ~mask);
211 : : }
212 : 0 : }
213 : :
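/*
 * Illustrative sketch (assumed two-level tree, hypothetical names): a node
 * reports upward only once its own subtree is fully quiescent, and the
 * waiter is woken only when the clearing reaches an empty root, mirroring
 * the structure of __rcu_report_exp_rnp() above.
 */
#include <stdio.h>

struct xnode {
	struct xnode *parent;
	unsigned long grpmask;		/* this node's bit in parent's expmask */
	unsigned long expmask;		/* children/CPUs still being waited on */
};

/* Clear @mask in @rnp, walking up while each node becomes empty. */
static void report_exp(struct xnode *rnp, unsigned long mask)
{
	for (;;) {
		rnp->expmask &= ~mask;
		if (rnp->expmask)		/* subtree still busy, stop here */
			return;
		if (!rnp->parent) {
			printf("root empty: wake up waiter\n");
			return;
		}
		mask = rnp->grpmask;
		rnp = rnp->parent;
	}
}

int main(void)
{
	struct xnode root = { .expmask = 0x1 };			/* waiting on the leaf */
	struct xnode leaf = { .parent = &root, .grpmask = 0x1, .expmask = 0x3 };

	report_exp(&leaf, 0x1);		/* CPU 0 quiescent: leaf still busy */
	report_exp(&leaf, 0x2);		/* CPU 1 quiescent: leaf and root empty */
	return 0;
}
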
214 : : /*
215 : : * Report expedited quiescent state for specified node. This is a
216 : : * lock-acquisition wrapper function for __rcu_report_exp_rnp().
217 : : */
218 : : static void __maybe_unused rcu_report_exp_rnp(struct rcu_node *rnp, bool wake)
219 : : {
220 : : unsigned long flags;
221 : :
222 : : raw_spin_lock_irqsave_rcu_node(rnp, flags);
223 : : __rcu_report_exp_rnp(rnp, wake, flags);
224 : : }
225 : :
226 : : /*
227 : : * Report expedited quiescent state for multiple CPUs, all covered by the
228 : : * specified leaf rcu_node structure.
229 : : */
230 : 0 : static void rcu_report_exp_cpu_mult(struct rcu_node *rnp,
231 : : unsigned long mask, bool wake)
232 : : {
233 : 0 : int cpu;
234 : 0 : unsigned long flags;
235 : 0 : struct rcu_data *rdp;
236 : :
237 : 0 : raw_spin_lock_irqsave_rcu_node(rnp, flags);
238 [ # # ]: 0 : if (!(rnp->expmask & mask)) {
239 : 0 : raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
240 : 0 : return;
241 : : }
242 : 0 : WRITE_ONCE(rnp->expmask, rnp->expmask & ~mask);
243 [ # # ]: 0 : for_each_leaf_node_cpu_mask(rnp, cpu, mask) {
244 : 0 : rdp = per_cpu_ptr(&rcu_data, cpu);
245 : 0 : if (!IS_ENABLED(CONFIG_NO_HZ_FULL) || !rdp->rcu_forced_tick_exp)
246 : 0 : continue;
247 : : rdp->rcu_forced_tick_exp = false;
248 : : tick_dep_clear_cpu(cpu, TICK_DEP_BIT_RCU_EXP);
249 : : }
250 : 0 : __rcu_report_exp_rnp(rnp, wake, flags); /* Releases rnp->lock. */
251 : : }
252 : :
253 : : /*
254 : : * Report expedited quiescent state for specified rcu_data (CPU).
255 : : */
256 : 0 : static void rcu_report_exp_rdp(struct rcu_data *rdp)
257 : : {
258 : 0 : WRITE_ONCE(rdp->exp_deferred_qs, false);
259 : 0 : rcu_report_exp_cpu_mult(rdp->mynode, rdp->grpmask, true);
260 : 0 : }
261 : :
262 : : /* Common code for work-done checking. */
263 : 0 : static bool sync_exp_work_done(unsigned long s)
264 : : {
265 [ # # ]: 0 : if (rcu_exp_gp_seq_done(s)) {
266 : 0 : trace_rcu_exp_grace_period(rcu_state.name, s, TPS("done"));
267 : 0 : smp_mb(); /* Ensure test happens before caller kfree(). */
268 : 0 : return true;
269 : : }
270 : : return false;
271 : : }
272 : :
273 : : /*
274 : : * Funnel-lock acquisition for expedited grace periods. Returns true
275 : : * if some other task completed an expedited grace period that this task
276 : : * can piggy-back on, and with no mutex held. Otherwise, returns false
277 : : * with the mutex held, indicating that the caller must actually do the
278 : : * expedited grace period.
279 : : */
280 : 0 : static bool exp_funnel_lock(unsigned long s)
281 : : {
282 : 0 : struct rcu_data *rdp = per_cpu_ptr(&rcu_data, raw_smp_processor_id());
283 : 0 : struct rcu_node *rnp = rdp->mynode;
284 [ # # ]: 0 : struct rcu_node *rnp_root = rcu_get_root();
285 : :
286 : : /* Low-contention fastpath. */
287 [ # # # # ]: 0 : if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s) &&
288 [ # # ]: 0 : (rnp == rnp_root ||
289 [ # # # # ]: 0 : ULONG_CMP_LT(READ_ONCE(rnp_root->exp_seq_rq), s)) &&
290 : 0 : mutex_trylock(&rcu_state.exp_mutex))
291 : 0 : goto fastpath;
292 : :
293 : : /*
294 : : * Each pass through the following loop works its way up
295 : : * the rcu_node tree, returning if others have done the work, and
296 : : * otherwise falling through to acquire ->exp_mutex. The mapping
297 : : * from CPU to rcu_node structure can be inexact, as it is just
298 : : * promoting locality and is not strictly needed for correctness.
299 : : */
300 [ # # ]: 0 : for (; rnp != NULL; rnp = rnp->parent) {
301 [ # # ]: 0 : if (sync_exp_work_done(s))
302 : : return true;
303 : :
304 : : /* Work not done, either wait here or go up. */
305 : 0 : spin_lock(&rnp->exp_lock);
306 [ # # ]: 0 : if (ULONG_CMP_GE(rnp->exp_seq_rq, s)) {
307 : :
308 : : /* Someone else doing GP, so wait for them. */
309 : 0 : spin_unlock(&rnp->exp_lock);
310 : 0 : trace_rcu_exp_funnel_lock(rcu_state.name, rnp->level,
311 : : rnp->grplo, rnp->grphi,
312 : 0 : TPS("wait"));
313 [ # # # # ]: 0 : wait_event(rnp->exp_wq[rcu_seq_ctr(s) & 0x3],
314 : : sync_exp_work_done(s));
315 : 0 : return true;
316 : : }
317 : 0 : rnp->exp_seq_rq = s; /* Followers can wait on us. */
318 : 0 : spin_unlock(&rnp->exp_lock);
319 : 0 : trace_rcu_exp_funnel_lock(rcu_state.name, rnp->level,
320 : 0 : rnp->grplo, rnp->grphi, TPS("nxtlvl"));
321 : : }
322 : 0 : mutex_lock(&rcu_state.exp_mutex);
323 : 0 : fastpath:
324 [ # # ]: 0 : if (sync_exp_work_done(s)) {
325 : 0 : mutex_unlock(&rcu_state.exp_mutex);
326 : 0 : return true;
327 : : }
328 : 0 : rcu_exp_gp_seq_start();
329 : 0 : trace_rcu_exp_grace_period(rcu_state.name, s, TPS("start"));
330 : 0 : return false;
331 : : }
332 : :
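/*
 * Illustrative sketch (the macros mirror ULONG_CMP_LT()/ULONG_CMP_GE() from
 * the RCU headers; the test values are made up): the funnel lock compares
 * ->exp_seq_rq against the snapshot using wrap-safe arithmetic, so the
 * comparisons above stay correct even after the sequence counter wraps.
 */
#include <assert.h>
#include <limits.h>

#define ULONG_CMP_GE(a, b)	(ULONG_MAX / 2 >= (a) - (b))
#define ULONG_CMP_LT(a, b)	(ULONG_MAX / 2 < (a) - (b))

int main(void)
{
	unsigned long rq = ULONG_MAX - 3;	/* last request before wrap */
	unsigned long s = rq + 8;		/* snapshot taken after wrap */

	assert(ULONG_CMP_LT(rq, s));		/* rq is older, must go up or lock */
	assert(ULONG_CMP_GE(s, rq));		/* s satisfies the old request */
	assert(!ULONG_CMP_LT(s, rq));
	return 0;
}
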
333 : : /*
334 : : * Select the CPUs within the specified rcu_node that the upcoming
335 : : * expedited grace period needs to wait for.
336 : : */
337 : 0 : static void sync_rcu_exp_select_node_cpus(struct work_struct *wp)
338 : : {
339 : 0 : int cpu;
340 : 0 : unsigned long flags;
341 : 0 : unsigned long mask_ofl_test;
342 : 0 : unsigned long mask_ofl_ipi;
343 : 0 : int ret;
344 : 0 : struct rcu_exp_work *rewp =
345 : 0 : container_of(wp, struct rcu_exp_work, rew_work);
346 : 0 : struct rcu_node *rnp = container_of(rewp, struct rcu_node, rew);
347 : :
348 : 0 : raw_spin_lock_irqsave_rcu_node(rnp, flags);
349 : :
350 : : /* Each pass checks a CPU for identity, offline, and idle. */
351 : 0 : mask_ofl_test = 0;
352 [ # # ]: 0 : for_each_leaf_node_cpu_mask(rnp, cpu, rnp->expmask) {
353 : 0 : struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
354 : 0 : unsigned long mask = rdp->grpmask;
355 : 0 : int snap;
356 : :
357 [ # # ]: 0 : if (raw_smp_processor_id() == cpu ||
358 [ # # ]: 0 : !(rnp->qsmaskinitnext & mask)) {
359 : 0 : mask_ofl_test |= mask;
360 : : } else {
361 : 0 : snap = rcu_dynticks_snap(rdp);
362 [ # # ]: 0 : if (rcu_dynticks_in_eqs(snap))
363 : 0 : mask_ofl_test |= mask;
364 : : else
365 : 0 : rdp->exp_dynticks_snap = snap;
366 : : }
367 : : }
368 : 0 : mask_ofl_ipi = rnp->expmask & ~mask_ofl_test;
369 : :
370 : : /*
371 : : * Need to wait for any blocked tasks as well. Note that
372 : : * additional blocking tasks will also block the expedited GP
373 : : * until such time as the ->expmask bits are cleared.
374 : : */
375 : 0 : if (rcu_preempt_has_tasks(rnp))
376 : : rnp->exp_tasks = rnp->blkd_tasks.next;
377 : 0 : raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
378 : :
379 : : /* IPI the remaining CPUs for expedited quiescent state. */
380 [ # # ]: 0 : for_each_leaf_node_cpu_mask(rnp, cpu, mask_ofl_ipi) {
381 : 0 : struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
382 : 0 : unsigned long mask = rdp->grpmask;
383 : :
384 : 0 : retry_ipi:
385 [ # # ]: 0 : if (rcu_dynticks_in_eqs_since(rdp, rdp->exp_dynticks_snap)) {
386 : 0 : mask_ofl_test |= mask;
387 : 0 : continue;
388 : : }
389 [ # # ]: 0 : if (get_cpu() == cpu) {
390 : 0 : put_cpu();
391 : 0 : continue;
392 : : }
393 : 0 : ret = smp_call_function_single(cpu, rcu_exp_handler, NULL, 0);
394 : 0 : put_cpu();
395 : : /* The CPU will report the QS in response to the IPI. */
396 [ # # ]: 0 : if (!ret)
397 : 0 : continue;
398 : :
399 : : /* Failed, raced with CPU hotplug operation. */
400 : 0 : raw_spin_lock_irqsave_rcu_node(rnp, flags);
401 [ # # ]: 0 : if ((rnp->qsmaskinitnext & mask) &&
402 [ # # ]: 0 : (rnp->expmask & mask)) {
403 : : /* Online, so delay for a bit and try again. */
404 : 0 : raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
405 : 0 : trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("selectofl"));
406 : 0 : schedule_timeout_uninterruptible(1);
407 : 0 : goto retry_ipi;
408 : : }
409 : : /* CPU really is offline, so we must report its QS. */
410 [ # # ]: 0 : if (rnp->expmask & mask)
411 : 0 : mask_ofl_test |= mask;
412 : 0 : raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
413 : : }
414 : : /* Report quiescent states for those that went offline. */
415 [ # # ]: 0 : if (mask_ofl_test)
416 : 0 : rcu_report_exp_cpu_mult(rnp, mask_ofl_test, false);
417 : 0 : }
418 : :
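/*
 * Illustrative sketch (simplified stand-ins for rcu_dynticks_snap(),
 * rcu_dynticks_in_eqs() and rcu_dynticks_in_eqs_since(); the kernel's
 * actual counter encoding differs): a CPU whose snapshot shows it idle,
 * or whose counter has moved since the snapshot, has already passed
 * through a quiescent state and therefore needs no IPI from the loop above.
 */
#include <assert.h>

/* In this model, a set low bit means "not in an extended quiescent state". */
static int in_eqs(unsigned long snap)
{
	return !(snap & 0x1);
}

static int in_eqs_since(unsigned long now, unsigned long snap)
{
	return in_eqs(snap) || now != snap;
}

int main(void)
{
	assert(in_eqs(8));			/* idle at snapshot time: skip the IPI */
	assert(!in_eqs(9));			/* busy: remember snapshot, recheck later */
	assert(in_eqs_since(11, 9));		/* counter moved: went idle meanwhile */
	assert(!in_eqs_since(9, 9));		/* unchanged: still owes a quiescent state */
	return 0;
}
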
419 : : /*
420 : : * Select the nodes that the upcoming expedited grace period needs
421 : : * to wait for.
422 : : */
423 : 0 : static void sync_rcu_exp_select_cpus(void)
424 : : {
425 : 0 : int cpu;
426 : 0 : struct rcu_node *rnp;
427 : :
428 : 0 : trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("reset"));
429 : 0 : sync_exp_reset_tree();
430 : 0 : trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("select"));
431 : :
432 : : /* Schedule work for each leaf rcu_node structure. */
433 [ # # ]: 0 : rcu_for_each_leaf_node(rnp) {
434 : 0 : rnp->exp_need_flush = false;
435 [ # # ]: 0 : if (!READ_ONCE(rnp->expmask))
436 : 0 : continue; /* Avoid early boot non-existent wq. */
437 [ # # ]: 0 : if (!READ_ONCE(rcu_par_gp_wq) ||
438 [ # # ]: 0 : rcu_scheduler_active != RCU_SCHEDULER_RUNNING ||
439 [ # # ]: 0 : rcu_is_last_leaf_node(rnp)) {
440 : : /* No workqueues yet or last leaf, do direct call. */
441 : 0 : sync_rcu_exp_select_node_cpus(&rnp->rew.rew_work);
442 : 0 : continue;
443 : : }
444 : 0 : INIT_WORK(&rnp->rew.rew_work, sync_rcu_exp_select_node_cpus);
445 : 0 : cpu = find_next_bit(&rnp->ffmask, BITS_PER_LONG, -1);
446 : : /* If all offline, queue the work on an unbound CPU. */
447 [ # # ]: 0 : if (unlikely(cpu > rnp->grphi - rnp->grplo))
448 : : cpu = WORK_CPU_UNBOUND;
449 : : else
450 : 0 : cpu += rnp->grplo;
451 : 0 : queue_work_on(cpu, rcu_par_gp_wq, &rnp->rew.rew_work);
452 : 0 : rnp->exp_need_flush = true;
453 : : }
454 : :
455 : : /* Wait for workqueue jobs (if any) to complete. */
456 [ # # ]: 0 : rcu_for_each_leaf_node(rnp)
457 [ # # ]: 0 : if (rnp->exp_need_flush)
458 : 0 : flush_work(&rnp->rew.rew_work);
459 : 0 : }
460 : :
461 : : /*
462 : : * Wait for the expedited grace period to elapse, within time limit.
463 : : * If the time limit is exceeded without the grace period elapsing,
464 : : * return false, otherwise return true.
465 : : */
466 : 0 : static bool synchronize_rcu_expedited_wait_once(long tlimit)
467 : : {
468 : 0 : int t;
469 : 0 : struct rcu_node *rnp_root = rcu_get_root();
470 : :
471 [ # # # # # # # # ]: 0 : t = swait_event_timeout_exclusive(rcu_state.expedited_wq,
472 : : sync_rcu_exp_done_unlocked(rnp_root),
473 : : tlimit);
474 : : // Workqueues should not be signaled.
475 [ # # # # ]: 0 : if (t > 0 || sync_rcu_exp_done_unlocked(rnp_root))
476 : 0 : return true;
477 [ # # ]: 0 : WARN_ON(t < 0); /* workqueues should not be signaled. */
478 : : return false;
479 : : }
480 : :
481 : : /*
482 : : * Wait for the expedited grace period to elapse, issuing any needed
483 : : * RCU CPU stall warnings along the way.
484 : : */
485 : 0 : static void synchronize_rcu_expedited_wait(void)
486 : : {
487 : 0 : int cpu;
488 : 0 : unsigned long jiffies_stall;
489 : 0 : unsigned long jiffies_start;
490 : 0 : unsigned long mask;
491 : 0 : int ndetected;
492 : 0 : struct rcu_data *rdp;
493 : 0 : struct rcu_node *rnp;
494 : 0 : struct rcu_node *rnp_root = rcu_get_root();
495 : :
496 : 0 : trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("startwait"));
497 [ # # ]: 0 : jiffies_stall = rcu_jiffies_till_stall_check();
498 : 0 : jiffies_start = jiffies;
499 : 0 : if (IS_ENABLED(CONFIG_NO_HZ_FULL)) {
500 : : if (synchronize_rcu_expedited_wait_once(1))
501 : : return;
502 : : rcu_for_each_leaf_node(rnp) {
503 : : for_each_leaf_node_cpu_mask(rnp, cpu, rnp->expmask) {
504 : : rdp = per_cpu_ptr(&rcu_data, cpu);
505 : : if (rdp->rcu_forced_tick_exp)
506 : : continue;
507 : : rdp->rcu_forced_tick_exp = true;
508 : : tick_dep_set_cpu(cpu, TICK_DEP_BIT_RCU_EXP);
509 : : }
510 : : }
511 : : }
512 : :
513 : 0 : for (;;) {
514 [ # # ]: 0 : if (synchronize_rcu_expedited_wait_once(jiffies_stall))
515 : 0 : return;
516 [ # # ]: 0 : if (rcu_cpu_stall_suppress)
517 : 0 : continue;
518 [ # # ]: 0 : panic_on_rcu_stall();
519 : 0 : pr_err("INFO: %s detected expedited stalls on CPUs/tasks: {",
520 : : rcu_state.name);
521 : 0 : ndetected = 0;
522 [ # # ]: 0 : rcu_for_each_leaf_node(rnp) {
523 : 0 : ndetected += rcu_print_task_exp_stall(rnp);
524 [ # # ]: 0 : for_each_leaf_node_possible_cpu(rnp, cpu) {
525 : 0 : struct rcu_data *rdp;
526 : :
527 : 0 : mask = leaf_node_cpu_bit(rnp, cpu);
528 [ # # ]: 0 : if (!(READ_ONCE(rnp->expmask) & mask))
529 : 0 : continue;
530 : 0 : ndetected++;
531 : 0 : rdp = per_cpu_ptr(&rcu_data, cpu);
532 : 0 : pr_cont(" %d-%c%c%c", cpu,
533 : : "O."[!!cpu_online(cpu)],
534 : : "o."[!!(rdp->grpmask & rnp->expmaskinit)],
535 : : "N."[!!(rdp->grpmask & rnp->expmaskinitnext)]);
536 : : }
537 : : }
538 : 0 : pr_cont(" } %lu jiffies s: %lu root: %#lx/%c\n",
539 : : jiffies - jiffies_start, rcu_state.expedited_sequence,
540 : : READ_ONCE(rnp_root->expmask),
541 : : ".T"[!!rnp_root->exp_tasks]);
542 [ # # ]: 0 : if (ndetected) {
543 : 0 : pr_err("blocking rcu_node structures:");
544 [ # # ]: 0 : rcu_for_each_node_breadth_first(rnp) {
545 [ # # ]: 0 : if (rnp == rnp_root)
546 : 0 : continue; /* printed unconditionally */
547 [ # # ]: 0 : if (sync_rcu_exp_done_unlocked(rnp))
548 : 0 : continue;
549 : 0 : pr_cont(" l=%u:%d-%d:%#lx/%c",
550 : : rnp->level, rnp->grplo, rnp->grphi,
551 : : READ_ONCE(rnp->expmask),
552 : : ".T"[!!rnp->exp_tasks]);
553 : : }
554 : 0 : pr_cont("\n");
555 : : }
556 [ # # ]: 0 : rcu_for_each_leaf_node(rnp) {
557 [ # # ]: 0 : for_each_leaf_node_possible_cpu(rnp, cpu) {
558 : 0 : mask = leaf_node_cpu_bit(rnp, cpu);
559 [ # # ]: 0 : if (!(READ_ONCE(rnp->expmask) & mask))
560 : 0 : continue;
561 : 0 : dump_cpu_task(cpu);
562 : : }
563 : : }
564 [ # # ]: 0 : jiffies_stall = 3 * rcu_jiffies_till_stall_check() + 3;
565 : : }
566 : : }
567 : :
568 : : /*
569 : : * Wait for the current expedited grace period to complete, and then
570 : : * wake up everyone who piggybacked on the just-completed expedited
571 : : * grace period. Also update all the ->exp_seq_rq counters as needed
572 : : * in order to avoid counter-wrap problems.
573 : : */
574 : 0 : static void rcu_exp_wait_wake(unsigned long s)
575 : : {
576 : 0 : struct rcu_node *rnp;
577 : :
578 : 0 : synchronize_rcu_expedited_wait();
579 : :
580 : : // Switch over to wakeup mode, allowing the next GP to proceed.
581 : : // End the previous grace period only after acquiring the mutex
582 : : // to ensure that only one GP runs concurrently with wakeups.
583 : 0 : mutex_lock(&rcu_state.exp_wake_mutex);
584 : 0 : rcu_exp_gp_seq_end();
585 : 0 : trace_rcu_exp_grace_period(rcu_state.name, s, TPS("end"));
586 : :
587 [ # # ]: 0 : rcu_for_each_node_breadth_first(rnp) {
588 [ # # ]: 0 : if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s)) {
589 : 0 : spin_lock(&rnp->exp_lock);
590 : : /* Recheck, avoid hang in case someone just arrived. */
591 [ # # ]: 0 : if (ULONG_CMP_LT(rnp->exp_seq_rq, s))
592 : 0 : rnp->exp_seq_rq = s;
593 : 0 : spin_unlock(&rnp->exp_lock);
594 : : }
595 : 0 : smp_mb(); /* All above changes before wakeup. */
596 : 0 : wake_up_all(&rnp->exp_wq[rcu_seq_ctr(s) & 0x3]);
597 : : }
598 : 0 : trace_rcu_exp_grace_period(rcu_state.name, s, TPS("endwake"));
599 : 0 : mutex_unlock(&rcu_state.exp_wake_mutex);
600 : 0 : }
601 : :
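/*
 * Illustrative sketch (exp_wq_index() is a hypothetical helper mirroring
 * the "rcu_seq_ctr(s) & 0x3" expression used above and in exp_funnel_lock()):
 * waiters and the waker hash the grace-period sequence number onto one of
 * the four per-node exp_wq[] queues, which suffices because the ->exp_mutex
 * and ->exp_wake_mutex handoff keeps only a few grace periods' worth of
 * waiters outstanding at any time.
 */
#include <stdio.h>

#define RCU_SEQ_CTR_SHIFT 2	/* mirrors kernel/rcu/rcu.h */

static unsigned long exp_wq_index(unsigned long s)
{
	return (s >> RCU_SEQ_CTR_SHIFT) & 0x3;	/* rcu_seq_ctr(s) & 0x3 */
}

int main(void)
{
	unsigned long s;

	/* Each completed expedited grace period advances the counter by 4. */
	for (s = 4; s <= 20; s += 4)
		printf("seq %2lu -> exp_wq[%lu]\n", s, exp_wq_index(s));
	return 0;
}
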
602 : : /*
603 : : * Common code to drive an expedited grace period forward, used by
604 : : * workqueues and mid-boot-time tasks.
605 : : */
606 : 0 : static void rcu_exp_sel_wait_wake(unsigned long s)
607 : : {
608 : : /* Initialize the rcu_node tree in preparation for the wait. */
609 : 0 : sync_rcu_exp_select_cpus();
610 : :
611 : : /* Wait and clean up, including waking everyone. */
612 : 0 : rcu_exp_wait_wake(s);
613 : 0 : }
614 : :
615 : : /*
616 : : * Work-queue handler to drive an expedited grace period forward.
617 : : */
618 : 0 : static void wait_rcu_exp_gp(struct work_struct *wp)
619 : : {
620 : 0 : struct rcu_exp_work *rewp;
621 : :
622 : 0 : rewp = container_of(wp, struct rcu_exp_work, rew_work);
623 : 0 : rcu_exp_sel_wait_wake(rewp->rew_s);
624 : 0 : }
625 : :
626 : : #ifdef CONFIG_PREEMPT_RCU
627 : :
628 : : /*
629 : : * Remote handler for smp_call_function_single(). If there is an
630 : : * RCU read-side critical section in effect, request that the
631 : : * next rcu_read_unlock() record the quiescent state up the
632 : : * ->expmask fields in the rcu_node tree. Otherwise, immediately
633 : : * report the quiescent state.
634 : : */
635 : : static void rcu_exp_handler(void *unused)
636 : : {
637 : : unsigned long flags;
638 : : struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
639 : : struct rcu_node *rnp = rdp->mynode;
640 : : struct task_struct *t = current;
641 : :
642 : : /*
643 : : * First, the common case of not being in an RCU read-side
644 : : * critical section. If also enabled or idle, immediately
645 : : * report the quiescent state, otherwise defer.
646 : : */
647 : : if (!rcu_preempt_depth()) {
648 : : if (!(preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK)) ||
649 : : rcu_dynticks_curr_cpu_in_eqs()) {
650 : : rcu_report_exp_rdp(rdp);
651 : : } else {
652 : : rdp->exp_deferred_qs = true;
653 : : set_tsk_need_resched(t);
654 : : set_preempt_need_resched();
655 : : }
656 : : return;
657 : : }
658 : :
659 : : /*
660 : : * Second, the less-common case of being in an RCU read-side
661 : : * critical section. In this case we can count on a future
662 : : * rcu_read_unlock(). However, this rcu_read_unlock() might
663 : : * execute on some other CPU, but in that case there will be
664 : : * a future context switch. Either way, if the expedited
666 : : * grace period is still waiting on this CPU, set ->exp_deferred_qs
666 : : * so that the eventual quiescent state will be reported.
667 : : * Note that there is a large group of race conditions that
668 : : * can have caused this quiescent state to already have been
669 : : * reported, so we really do need to check ->expmask.
670 : : */
671 : : if (rcu_preempt_depth() > 0) {
672 : : raw_spin_lock_irqsave_rcu_node(rnp, flags);
673 : : if (rnp->expmask & rdp->grpmask) {
674 : : rdp->exp_deferred_qs = true;
675 : : t->rcu_read_unlock_special.b.exp_hint = true;
676 : : }
677 : : raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
678 : : return;
679 : : }
680 : :
681 : : /*
682 : : * The final and least likely case is where the interrupted
683 : : * code was just about to or just finished exiting the RCU-preempt
684 : : * read-side critical section, and no, we can't tell which.
686 : : * So either way, set ->exp_deferred_qs to flag later code that
686 : : * a quiescent state is required.
687 : : *
688 : : * If the CPU is fully enabled (or if some buggy RCU-preempt
689 : : * read-side critical section is being used from idle), just
690 : : * invoke rcu_preempt_deferred_qs() to immediately report the
691 : : * quiescent state. We cannot use rcu_read_unlock_special()
692 : : * because we are in an interrupt handler, which will cause that
693 : : * function to take an early exit without doing anything.
694 : : *
695 : : * Otherwise, force a context switch after the CPU enables everything.
696 : : */
697 : : rdp->exp_deferred_qs = true;
698 : : if (!(preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK)) ||
699 : : WARN_ON_ONCE(rcu_dynticks_curr_cpu_in_eqs())) {
700 : : rcu_preempt_deferred_qs(t);
701 : : } else {
702 : : set_tsk_need_resched(t);
703 : : set_preempt_need_resched();
704 : : }
705 : : }
706 : :
707 : : /* PREEMPTION=y, so no PREEMPTION=n expedited grace period to clean up after. */
708 : : static void sync_sched_exp_online_cleanup(int cpu)
709 : : {
710 : : }
711 : :
712 : : /*
713 : : * Scan the current list of tasks blocked within RCU read-side critical
714 : : * sections, printing out the tid of each that is blocking the current
715 : : * expedited grace period.
716 : : */
717 : : static int rcu_print_task_exp_stall(struct rcu_node *rnp)
718 : : {
719 : : struct task_struct *t;
720 : : int ndetected = 0;
721 : :
722 : : if (!rnp->exp_tasks)
723 : : return 0;
724 : : t = list_entry(rnp->exp_tasks->prev,
725 : : struct task_struct, rcu_node_entry);
726 : : list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
727 : : pr_cont(" P%d", t->pid);
728 : : ndetected++;
729 : : }
730 : : return ndetected;
731 : : }
732 : :
733 : : #else /* #ifdef CONFIG_PREEMPT_RCU */
734 : :
735 : : /* Request an expedited quiescent state. */
736 : 0 : static void rcu_exp_need_qs(void)
737 : : {
738 : 0 : __this_cpu_write(rcu_data.cpu_no_qs.b.exp, true);
739 : : /* Store .exp before .rcu_urgent_qs. */
740 : 0 : smp_store_release(this_cpu_ptr(&rcu_data.rcu_urgent_qs), true);
741 : 0 : set_tsk_need_resched(current);
742 : 0 : set_preempt_need_resched();
743 : 0 : }
744 : :
745 : : /* Invoked on each online non-idle CPU for expedited quiescent state. */
746 : 0 : static void rcu_exp_handler(void *unused)
747 : : {
748 : 0 : struct rcu_data *rdp;
749 : 0 : struct rcu_node *rnp;
750 : :
751 : 0 : rdp = this_cpu_ptr(&rcu_data);
752 : 0 : rnp = rdp->mynode;
753 [ # # # # ]: 0 : if (!(READ_ONCE(rnp->expmask) & rdp->grpmask) ||
754 [ # # ]: 0 : __this_cpu_read(rcu_data.cpu_no_qs.b.exp))
755 : : return;
756 [ # # # # ]: 0 : if (rcu_is_cpu_rrupt_from_idle()) {
757 : 0 : rcu_report_exp_rdp(this_cpu_ptr(&rcu_data));
758 : 0 : return;
759 : : }
760 : 0 : rcu_exp_need_qs();
761 : : }
762 : :
763 : : /* Send IPI for expedited cleanup if needed at end of CPU-hotplug operation. */
764 : 0 : static void sync_sched_exp_online_cleanup(int cpu)
765 : : {
766 : 0 : unsigned long flags;
767 : 0 : int my_cpu;
768 : 0 : struct rcu_data *rdp;
769 : 0 : int ret;
770 : 0 : struct rcu_node *rnp;
771 : :
772 : 0 : rdp = per_cpu_ptr(&rcu_data, cpu);
773 : 0 : rnp = rdp->mynode;
774 [ # # ]: 0 : my_cpu = get_cpu();
775 : : /* Quiescent state either not needed or already requested, leave. */
776 [ # # # # ]: 0 : if (!(READ_ONCE(rnp->expmask) & rdp->grpmask) ||
777 [ # # ]: 0 : __this_cpu_read(rcu_data.cpu_no_qs.b.exp)) {
778 : 0 : put_cpu();
779 : 0 : return;
780 : : }
781 : : /* Quiescent state needed on current CPU, so set it up locally. */
782 [ # # ]: 0 : if (my_cpu == cpu) {
783 : 0 : local_irq_save(flags);
784 : 0 : rcu_exp_need_qs();
785 : 0 : local_irq_restore(flags);
786 : 0 : put_cpu();
787 : 0 : return;
788 : : }
789 : : /* Quiescent state needed on some other CPU, send IPI. */
790 : 0 : ret = smp_call_function_single(cpu, rcu_exp_handler, NULL, 0);
791 : 0 : put_cpu();
792 [ # # ]: 0 : WARN_ON_ONCE(ret);
793 : : }
794 : :
795 : : /*
796 : : * Because preemptible RCU does not exist, we never have to check for
797 : : * tasks blocked within RCU read-side critical sections that are
798 : : * blocking the current expedited grace period.
799 : : */
800 : 0 : static int rcu_print_task_exp_stall(struct rcu_node *rnp)
801 : : {
802 : 0 : return 0;
803 : : }
804 : :
805 : : #endif /* #else #ifdef CONFIG_PREEMPT_RCU */
806 : :
807 : : /**
808 : : * synchronize_rcu_expedited - Brute-force RCU grace period
809 : : *
810 : : * Wait for an RCU grace period, but expedite it. The basic idea is to
811 : : * IPI all non-idle non-nohz online CPUs. The IPI handler checks whether
812 : : * the CPU is in an RCU critical section, and if so, it sets a flag that
813 : : * causes the outermost rcu_read_unlock() to report the quiescent state
814 : : * for RCU-preempt or asks the scheduler for help for RCU-sched. On the
815 : : * other hand, if the CPU is not in an RCU read-side critical section,
816 : : * the IPI handler reports the quiescent state immediately.
817 : : *
818 : : * Although this is a great improvement over previous expedited
819 : : * implementations, it is still unfriendly to real-time workloads, and is
820 : : * thus not recommended for any sort of common-case code. In fact, if
821 : : * you are using synchronize_rcu_expedited() in a loop, please restructure
822 : : * your code to batch your updates, and then use a single synchronize_rcu()
823 : : * instead.
824 : : *
825 : : * This has the same semantics as (but is more brutal than) synchronize_rcu().
826 : : */
827 : 140 : void synchronize_rcu_expedited(void)
828 : : {
829 : 140 : bool boottime = (rcu_scheduler_active == RCU_SCHEDULER_INIT);
830 : 140 : struct rcu_exp_work rew;
831 : 140 : struct rcu_node *rnp;
832 : 140 : unsigned long s;
833 : :
834 : 140 : RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
835 : : lock_is_held(&rcu_lock_map) ||
836 : : lock_is_held(&rcu_sched_lock_map),
837 : : "Illegal synchronize_rcu_expedited() in RCU read-side critical section");
838 : :
839 : : /* Is the state such that the call is a grace period? */
840 [ - + ]: 140 : if (rcu_blocking_is_gp())
841 : 140 : return;
842 : :
843 : : /* If expedited grace periods are prohibited, fall back to normal. */
844 [ # # ]: 0 : if (rcu_gp_is_normal()) {
845 : 0 : wait_rcu_gp(call_rcu);
846 : 0 : return;
847 : : }
848 : :
849 : : /* Take a snapshot of the sequence number. */
850 : 0 : s = rcu_exp_gp_seq_snap();
851 [ # # ]: 0 : if (exp_funnel_lock(s))
852 : : return; /* Someone else did our work for us. */
853 : :
854 : : /* Ensure that load happens before action based on it. */
855 [ # # ]: 0 : if (unlikely(boottime)) {
856 : : /* Direct call during scheduler init and early_initcalls(). */
857 : 0 : rcu_exp_sel_wait_wake(s);
858 : : } else {
859 : : /* Marshall arguments & schedule the expedited grace period. */
860 : 0 : rew.rew_s = s;
861 : 0 : INIT_WORK_ONSTACK(&rew.rew_work, wait_rcu_exp_gp);
862 : 0 : queue_work(rcu_gp_wq, &rew.rew_work);
863 : : }
864 : :
865 : : /* Wait for expedited grace period to complete. */
866 : 0 : rnp = rcu_get_root();
867 [ # # # # ]: 0 : wait_event(rnp->exp_wq[rcu_seq_ctr(s) & 0x3],
868 : : sync_exp_work_done(s));
869 : 0 : smp_mb(); /* Workqueue actions happen before return. */
870 : :
871 : : /* Let the next expedited grace period start. */
872 : 0 : mutex_unlock(&rcu_state.exp_mutex);
873 : :
874 : 0 : if (likely(!boottime))
875 : : destroy_work_on_stack(&rew.rew_work);
876 : : }
877 : : EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);