Branch data Line data Source code
1 : : // SPDX-License-Identifier: GPL-2.0+
2 : : /*
3 : : * Read-Copy Update mechanism for mutual exclusion
4 : : *
5 : : * Copyright IBM Corporation, 2008
6 : : *
7 : : * Authors: Dipankar Sarma <dipankar@in.ibm.com>
8 : : * Manfred Spraul <manfred@colorfullife.com>
9 : : * Paul E. McKenney <paulmck@linux.ibm.com> Hierarchical version
10 : : *
11 : : * Based on the original work by Paul McKenney <paulmck@linux.ibm.com>
12 : : * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
13 : : *
14 : : * For detailed explanation of Read-Copy Update mechanism see -
15 : : * Documentation/RCU
16 : : */
17 : :
18 : : #define pr_fmt(fmt) "rcu: " fmt
19 : :
20 : : #include <linux/types.h>
21 : : #include <linux/kernel.h>
22 : : #include <linux/init.h>
23 : : #include <linux/spinlock.h>
24 : : #include <linux/smp.h>
25 : : #include <linux/rcupdate_wait.h>
26 : : #include <linux/interrupt.h>
27 : : #include <linux/sched.h>
28 : : #include <linux/sched/debug.h>
29 : : #include <linux/nmi.h>
30 : : #include <linux/atomic.h>
31 : : #include <linux/bitops.h>
32 : : #include <linux/export.h>
33 : : #include <linux/completion.h>
34 : : #include <linux/moduleparam.h>
35 : : #include <linux/percpu.h>
36 : : #include <linux/notifier.h>
37 : : #include <linux/cpu.h>
38 : : #include <linux/mutex.h>
39 : : #include <linux/time.h>
40 : : #include <linux/kernel_stat.h>
41 : : #include <linux/wait.h>
42 : : #include <linux/kthread.h>
43 : : #include <uapi/linux/sched/types.h>
44 : : #include <linux/prefetch.h>
45 : : #include <linux/delay.h>
46 : : #include <linux/stop_machine.h>
47 : : #include <linux/random.h>
48 : : #include <linux/trace_events.h>
49 : : #include <linux/suspend.h>
50 : : #include <linux/ftrace.h>
51 : : #include <linux/tick.h>
52 : : #include <linux/sysrq.h>
53 : : #include <linux/kprobes.h>
54 : : #include <linux/gfp.h>
55 : : #include <linux/oom.h>
56 : : #include <linux/smpboot.h>
57 : : #include <linux/jiffies.h>
58 : : #include <linux/sched/isolation.h>
59 : : #include <linux/sched/clock.h>
60 : : #include "../time/tick-internal.h"
61 : :
62 : : #include "tree.h"
63 : : #include "rcu.h"
64 : :
65 : : #ifdef MODULE_PARAM_PREFIX
66 : : #undef MODULE_PARAM_PREFIX
67 : : #endif
68 : : #define MODULE_PARAM_PREFIX "rcutree."
69 : :
70 : : /* Data structures. */
71 : :
72 : : /*
73 : : * Steal a bit from the bottom of ->dynticks for idle entry/exit
74 : : * control. Initially this is for TLB flushing.
75 : : */
76 : : #define RCU_DYNTICK_CTRL_MASK 0x1
77 : : #define RCU_DYNTICK_CTRL_CTR (RCU_DYNTICK_CTRL_MASK + 1)
78 : : #ifndef rcu_eqs_special_exit
79 : : #define rcu_eqs_special_exit() do { } while (0)
80 : : #endif
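
A minimal sketch of how a ->dynticks value decomposes under the two macros above; the helpers are illustrative (they are not part of this file) and simply mirror the tests used later by rcu_dynticks_in_eqs() and rcu_dynticks_eqs_exit():

/* Illustrative helpers, not part of tree.c; they mirror checks used below. */
static inline bool snap_special_pending(int snap)
{
	return snap & RCU_DYNTICK_CTRL_MASK;	/* bit 0: special action (e.g. TLB flush) requested */
}

static inline bool snap_in_eqs(int snap)
{
	return !(snap & RCU_DYNTICK_CTRL_CTR);	/* counter bit clear: CPU is in an extended quiescent state */
}
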
81 : :
82 : : static DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_data, rcu_data) = {
83 : : .dynticks_nesting = 1,
84 : : .dynticks_nmi_nesting = DYNTICK_IRQ_NONIDLE,
85 : : .dynticks = ATOMIC_INIT(RCU_DYNTICK_CTRL_CTR),
86 : : };
87 : : struct rcu_state rcu_state = {
88 : : .level = { &rcu_state.node[0] },
89 : : .gp_state = RCU_GP_IDLE,
90 : : .gp_seq = (0UL - 300UL) << RCU_SEQ_CTR_SHIFT,
91 : : .barrier_mutex = __MUTEX_INITIALIZER(rcu_state.barrier_mutex),
92 : : .name = RCU_NAME,
93 : : .abbr = RCU_ABBR,
94 : : .exp_mutex = __MUTEX_INITIALIZER(rcu_state.exp_mutex),
95 : : .exp_wake_mutex = __MUTEX_INITIALIZER(rcu_state.exp_wake_mutex),
96 : : .ofl_lock = __RAW_SPIN_LOCK_UNLOCKED(rcu_state.ofl_lock),
97 : : };
98 : :
99 : : /* Dump rcu_node combining tree at boot to verify correct setup. */
100 : : static bool dump_tree;
101 : : module_param(dump_tree, bool, 0444);
102 : : /* By default, use RCU_SOFTIRQ instead of rcuc kthreads. */
103 : : static bool use_softirq = 1;
104 : : module_param(use_softirq, bool, 0444);
105 : : /* Control rcu_node-tree auto-balancing at boot time. */
106 : : static bool rcu_fanout_exact;
107 : : module_param(rcu_fanout_exact, bool, 0444);
108 : : /* Increase (but not decrease) the RCU_FANOUT_LEAF at boot time. */
109 : : static int rcu_fanout_leaf = RCU_FANOUT_LEAF;
110 : : module_param(rcu_fanout_leaf, int, 0444);
111 : : int rcu_num_lvls __read_mostly = RCU_NUM_LVLS;
112 : : /* Number of rcu_nodes at specified level. */
113 : : int num_rcu_lvl[] = NUM_RCU_LVL_INIT;
114 : : int rcu_num_nodes __read_mostly = NUM_RCU_NODES; /* Total # rcu_nodes in use. */
115 : :
116 : : /*
117 : : * The rcu_scheduler_active variable is initialized to the value
118 : : * RCU_SCHEDULER_INACTIVE and transitions to RCU_SCHEDULER_INIT just before the
119 : : * first task is spawned. So when this variable is RCU_SCHEDULER_INACTIVE,
120 : : * RCU can assume that there is but one task, allowing RCU to (for example)
121 : : * optimize synchronize_rcu() to a simple barrier(). When this variable
122 : : * is RCU_SCHEDULER_INIT, RCU must actually do all the hard work required
123 : : * to detect real grace periods. This variable is also used to suppress
124 : : * boot-time false positives from lockdep-RCU error checking. Finally, it
125 : : * transitions from RCU_SCHEDULER_INIT to RCU_SCHEDULER_RUNNING after RCU
126 : : * is fully initialized, including all of its kthreads having been spawned.
127 : : */
128 : : int rcu_scheduler_active __read_mostly;
129 : : EXPORT_SYMBOL_GPL(rcu_scheduler_active);
130 : :
131 : : /*
132 : : * The rcu_scheduler_fully_active variable transitions from zero to one
133 : : * during the early_initcall() processing, which is after the scheduler
134 : : * is capable of creating new tasks. So RCU processing (for example,
135 : : * creating tasks for RCU priority boosting) must be delayed until after
136 : : * rcu_scheduler_fully_active transitions from zero to one. We also
137 : : * currently delay invocation of any RCU callbacks until after this point.
138 : : *
139 : : * It might later prove better for people registering RCU callbacks during
140 : : * early boot to take responsibility for these callbacks, but one step at
141 : : * a time.
142 : : */
143 : : static int rcu_scheduler_fully_active __read_mostly;
144 : :
145 : : static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp,
146 : : unsigned long gps, unsigned long flags);
147 : : static void rcu_init_new_rnp(struct rcu_node *rnp_leaf);
148 : : static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf);
149 : : static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
150 : : static void invoke_rcu_core(void);
151 : : static void rcu_report_exp_rdp(struct rcu_data *rdp);
152 : : static void sync_sched_exp_online_cleanup(int cpu);
153 : :
154 : : /* rcuc/rcub kthread realtime priority */
155 : : static int kthread_prio = IS_ENABLED(CONFIG_RCU_BOOST) ? 1 : 0;
156 : : module_param(kthread_prio, int, 0444);
157 : :
158 : : /* Delay in jiffies for grace-period initialization delays, debug only. */
159 : :
160 : : static int gp_preinit_delay;
161 : : module_param(gp_preinit_delay, int, 0444);
162 : : static int gp_init_delay;
163 : : module_param(gp_init_delay, int, 0444);
164 : : static int gp_cleanup_delay;
165 : : module_param(gp_cleanup_delay, int, 0444);
166 : :
167 : : /* Retrieve RCU kthreads priority for rcutorture */
168 : 0 : int rcu_get_gp_kthreads_prio(void)
169 : : {
170 : 0 : return kthread_prio;
171 : : }
172 : : EXPORT_SYMBOL_GPL(rcu_get_gp_kthreads_prio);
173 : :
174 : : /*
175 : : * Number of grace periods between delays, normalized by the duration of
176 : : * the delay. The longer the delay, the more the grace periods between
177 : : * the delay. The longer the delay, the more grace periods occur between
178 : : * for non-zero delays, the overall slowdown of grace periods is constant
179 : : * regardless of the duration of the delay. This arrangement balances
180 : : * the need for long delays to increase some race probabilities with the
181 : : * need for fast grace periods to increase other race probabilities.
182 : : */
183 : : #define PER_RCU_NODE_PERIOD 3 /* Number of grace periods between delays. */
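
As a worked example of the normalization above (all numbers illustrative): with rcu_num_nodes = 1 and a debug delay of 5 jiffies, rcu_gp_slow() further down in this file sleeps for 5 jiffies once every 1 * PER_RCU_NODE_PERIOD * 5 = 15 grace periods, for an average slowdown of 1/3 jiffy per grace period; picking a delay of 20 jiffies instead gives 20/60, the same 1/3 jiffy average.
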
184 : :
185 : : /*
186 : : * Compute the mask of online CPUs for the specified rcu_node structure.
187 : : * This will not be stable unless the rcu_node structure's ->lock is
188 : : * held, but the bit corresponding to the current CPU will be stable
189 : : * in most contexts.
190 : : */
191 : 0 : unsigned long rcu_rnp_online_cpus(struct rcu_node *rnp)
192 : : {
193 : 0 : return READ_ONCE(rnp->qsmaskinitnext);
194 : : }
195 : :
196 : : /*
197 : : * Return true if an RCU grace period is in progress. The READ_ONCE()s
198 : : * permit this function to be invoked without holding the root rcu_node
199 : : * structure's ->lock, but of course results can be subject to change.
200 : : */
201 : : static int rcu_gp_in_progress(void)
202 : : {
203 : : return rcu_seq_state(rcu_seq_current(&rcu_state.gp_seq));
204 : : }
205 : :
206 : : /*
207 : : * Return the number of callbacks queued on the specified CPU.
208 : : * Handles both the nocbs and normal cases.
209 : : */
210 : : static long rcu_get_n_cbs_cpu(int cpu)
211 : : {
212 : 0 : struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
213 : :
214 [ # # # # # # ]: 0 : if (rcu_segcblist_is_enabled(&rdp->cblist))
215 : : return rcu_segcblist_n_cbs(&rdp->cblist);
216 : : return 0;
217 : : }
218 : :
219 : 1811804 : void rcu_softirq_qs(void)
220 : : {
221 : 1811804 : rcu_qs();
222 : : rcu_preempt_deferred_qs(current);
223 : 1811764 : }
224 : :
225 : : /*
226 : : * Record entry into an extended quiescent state. This is only to be
227 : : * called when not already in an extended quiescent state.
228 : : */
229 : 129850005 : static void rcu_dynticks_eqs_enter(void)
230 : : {
231 : 259700010 : struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
232 : : int seq;
233 : :
234 : : /*
235 : : * CPUs seeing atomic_add_return() must see prior RCU read-side
236 : : * critical sections, and we also must force ordering with the
237 : : * next idle sojourn.
238 : : */
239 : 129850005 : seq = atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks);
240 : : /* Better be in an extended quiescent state! */
241 : : WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
242 : : (seq & RCU_DYNTICK_CTRL_CTR));
243 : : /* Better not have special action (TLB flush) pending! */
244 : : WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
245 : : (seq & RCU_DYNTICK_CTRL_MASK));
246 : 129844488 : }
247 : :
248 : : /*
249 : : * Record exit from an extended quiescent state. This is only to be
250 : : * called from an extended quiescent state.
251 : : */
252 : 129638196 : static void rcu_dynticks_eqs_exit(void)
253 : : {
254 : 259276392 : struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
255 : : int seq;
256 : :
257 : : /*
258 : : * CPUs seeing atomic_add_return() must see prior idle sojourns,
259 : : * and we also must force ordering with the next RCU read-side
260 : : * critical section.
261 : : */
262 : 129638196 : seq = atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks);
263 : : WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
264 : : !(seq & RCU_DYNTICK_CTRL_CTR));
265 [ - + ]: 129836507 : if (seq & RCU_DYNTICK_CTRL_MASK) {
266 : : atomic_andnot(RCU_DYNTICK_CTRL_MASK, &rdp->dynticks);
267 : 0 : smp_mb__after_atomic(); /* _exit after clearing mask. */
268 : : /* Prefer duplicate flushes to losing a flush. */
269 : : rcu_eqs_special_exit();
270 : : }
271 : 129836507 : }
272 : :
273 : : /*
274 : : * Reset the current CPU's ->dynticks counter to indicate that the
275 : : * newly onlined CPU is no longer in an extended quiescent state.
276 : : * This will either leave the counter unchanged, or increment it
277 : : * to the next non-quiescent value.
278 : : *
279 : : * The non-atomic test/increment sequence works because the upper bits
280 : : * of the ->dynticks counter are manipulated only by the corresponding CPU,
281 : : * or when the corresponding CPU is offline.
282 : : */
283 : 828 : static void rcu_dynticks_eqs_online(void)
284 : : {
285 : 1656 : struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
286 : :
287 [ - + ]: 828 : if (atomic_read(&rdp->dynticks) & RCU_DYNTICK_CTRL_CTR)
288 : 828 : return;
289 : 0 : atomic_add(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks);
290 : : }
291 : :
292 : : /*
293 : : * Is the current CPU in an extended quiescent state?
294 : : *
295 : : * No ordering, as we are sampling CPU-local information.
296 : : */
297 : 35180281 : bool rcu_dynticks_curr_cpu_in_eqs(void)
298 : : {
299 : 126345130 : struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
300 : :
301 : 63172565 : return !(atomic_read(&rdp->dynticks) & RCU_DYNTICK_CTRL_CTR);
302 : : }
303 : :
304 : : /*
305 : : * Snapshot the ->dynticks counter with full ordering so as to allow
306 : : * stable comparison of this counter with past and future snapshots.
307 : : */
308 : 483307 : int rcu_dynticks_snap(struct rcu_data *rdp)
309 : : {
310 : 483307 : int snap = atomic_add_return(0, &rdp->dynticks);
311 : :
312 : 483307 : return snap & ~RCU_DYNTICK_CTRL_MASK;
313 : : }
314 : :
315 : : /*
316 : : * Return true if the snapshot returned from rcu_dynticks_snap()
317 : : * indicates that RCU is in an extended quiescent state.
318 : : */
319 : : static bool rcu_dynticks_in_eqs(int snap)
320 : : {
321 : 426404 : return !(snap & RCU_DYNTICK_CTRL_CTR);
322 : : }
323 : :
324 : : /*
325 : : * Return true if the CPU corresponding to the specified rcu_data
326 : : * structure has spent some time in an extended quiescent state since
327 : : * rcu_dynticks_snap() returned the specified snapshot.
328 : : */
329 : : static bool rcu_dynticks_in_eqs_since(struct rcu_data *rdp, int snap)
330 : : {
331 : 56901 : return snap != rcu_dynticks_snap(rdp);
332 : : }
333 : :
334 : : /*
335 : : * Set the special (bottom) bit of the specified CPU so that it
336 : : * will take special action (such as flushing its TLB) on the
337 : : * next exit from an extended quiescent state. Returns true if
338 : : * the bit was successfully set, or false if the CPU was not in
339 : : * an extended quiescent state.
340 : : */
341 : 0 : bool rcu_eqs_special_set(int cpu)
342 : : {
343 : : int old;
344 : : int new;
345 : 0 : struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
346 : :
347 : : do {
348 : 0 : old = atomic_read(&rdp->dynticks);
349 [ # # ]: 0 : if (old & RCU_DYNTICK_CTRL_CTR)
350 : : return false;
351 : 0 : new = old | RCU_DYNTICK_CTRL_MASK;
352 [ # # ]: 0 : } while (atomic_cmpxchg(&rdp->dynticks, old, new) != old);
353 : : return true;
354 : : }
355 : :
356 : : /*
357 : : * Let the RCU core know that this CPU has gone through the scheduler,
358 : : * which is a quiescent state. This is called when the need for a
359 : : * quiescent state is urgent, so we burn an atomic operation and full
360 : : * memory barriers to let the RCU core know about it, regardless of what
361 : : * this CPU might (or might not) do in the near future.
362 : : *
363 : : * We inform the RCU core by emulating a zero-duration dyntick-idle period.
364 : : *
365 : : * The caller must have disabled interrupts and must not be idle.
366 : : */
367 : 197 : static void __maybe_unused rcu_momentary_dyntick_idle(void)
368 : : {
369 : : int special;
370 : :
371 : 394 : raw_cpu_write(rcu_data.rcu_need_heavy_qs, false);
372 : 197 : special = atomic_add_return(2 * RCU_DYNTICK_CTRL_CTR,
373 : 394 : &this_cpu_ptr(&rcu_data)->dynticks);
374 : : /* It is illegal to call this from idle state. */
375 [ - + # # ]: 197 : WARN_ON_ONCE(!(special & RCU_DYNTICK_CTRL_CTR));
376 : : rcu_preempt_deferred_qs(current);
377 : 197 : }
378 : :
379 : : /**
380 : : * rcu_is_cpu_rrupt_from_idle - see if interrupted from idle
381 : : *
382 : : * If the current CPU is idle and running at a first-level (not nested)
383 : : * interrupt from idle, return true. The caller must have at least
384 : : * disabled preemption.
385 : : */
386 : : static int rcu_is_cpu_rrupt_from_idle(void)
387 : : {
388 : : /* Called only from within the scheduling-clock interrupt */
389 : : lockdep_assert_in_irq();
390 : :
391 : : /* Check for counter underflows */
392 : : RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nesting) < 0,
393 : : "RCU dynticks_nesting counter underflow!");
394 : : RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nmi_nesting) <= 0,
395 : : "RCU dynticks_nmi_nesting counter underflow/zero!");
396 : :
397 : : /* Are we at first interrupt nesting level? */
398 [ + + + + - + ]: 4111652 : if (__this_cpu_read(rcu_data.dynticks_nmi_nesting) != 1)
399 : : return false;
400 : :
401 : : /* Does CPU appear to be idle from an RCU standpoint? */
402 : 2379242 : return __this_cpu_read(rcu_data.dynticks_nesting) == 0;
403 : : }
404 : :
405 : : #define DEFAULT_RCU_BLIMIT 10 /* Maximum callbacks per rcu_do_batch ... */
406 : : #define DEFAULT_MAX_RCU_BLIMIT 10000 /* ... even during callback flood. */
407 : : static long blimit = DEFAULT_RCU_BLIMIT;
408 : : #define DEFAULT_RCU_QHIMARK 10000 /* If this many pending, ignore blimit. */
409 : : static long qhimark = DEFAULT_RCU_QHIMARK;
410 : : #define DEFAULT_RCU_QLOMARK 100 /* Once only this many pending, use blimit. */
411 : : static long qlowmark = DEFAULT_RCU_QLOMARK;
412 : :
413 : : module_param(blimit, long, 0444);
414 : : module_param(qhimark, long, 0444);
415 : : module_param(qlowmark, long, 0444);
416 : :
417 : : static ulong jiffies_till_first_fqs = ULONG_MAX;
418 : : static ulong jiffies_till_next_fqs = ULONG_MAX;
419 : : static bool rcu_kick_kthreads;
420 : : static int rcu_divisor = 7;
421 : : module_param(rcu_divisor, int, 0644);
422 : :
423 : : /* Force an exit from rcu_do_batch() after 3 milliseconds. */
424 : : static long rcu_resched_ns = 3 * NSEC_PER_MSEC;
425 : : module_param(rcu_resched_ns, long, 0644);
426 : :
427 : : /*
428 : : * How long the grace period must be before we start recruiting
429 : : * quiescent-state help from rcu_note_context_switch().
430 : : */
431 : : static ulong jiffies_till_sched_qs = ULONG_MAX;
432 : : module_param(jiffies_till_sched_qs, ulong, 0444);
433 : : static ulong jiffies_to_sched_qs; /* See adjust_jiffies_till_sched_qs(). */
434 : : module_param(jiffies_to_sched_qs, ulong, 0444); /* Display only! */
435 : :
436 : : /*
437 : : * Make sure that we give the grace-period kthread time to detect any
438 : : * idle CPUs before taking active measures to force quiescent states.
439 : : * However, don't go below 100 milliseconds, adjusted upwards for really
440 : : * large systems.
441 : : */
442 : 207 : static void adjust_jiffies_till_sched_qs(void)
443 : : {
444 : : unsigned long j;
445 : :
446 : : /* If jiffies_till_sched_qs was specified, respect the request. */
447 [ - + ]: 207 : if (jiffies_till_sched_qs != ULONG_MAX) {
448 : : WRITE_ONCE(jiffies_to_sched_qs, jiffies_till_sched_qs);
449 : 207 : return;
450 : : }
451 : : /* Otherwise, set to third fqs scan, but bound below on large system. */
452 : 207 : j = READ_ONCE(jiffies_till_first_fqs) +
453 : 207 : 2 * READ_ONCE(jiffies_till_next_fqs);
454 [ + - ]: 207 : if (j < HZ / 10 + nr_cpu_ids / RCU_JIFFIES_FQS_DIV)
455 : : j = HZ / 10 + nr_cpu_ids / RCU_JIFFIES_FQS_DIV;
456 : 207 : pr_info("RCU calculated value of scheduler-enlistment delay is %ld jiffies.\n", j);
457 : : WRITE_ONCE(jiffies_to_sched_qs, j);
458 : : }
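
A worked example of the fallback computation above, under assumed values: with jiffies_till_first_fqs = 1 and jiffies_till_next_fqs = 1, j starts as 1 + 2 * 1 = 3 jiffies; on a 4-CPU HZ=250 system the lower bound HZ / 10 + nr_cpu_ids / RCU_JIFFIES_FQS_DIV works out to 25 + 0 = 25 jiffies (assuming RCU_JIFFIES_FQS_DIV, defined in rcu.h, is large relative to 4 CPUs), so jiffies_to_sched_qs is clamped up to 25 jiffies, i.e. the 100 milliseconds mentioned in the comment.
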
459 : :
460 : 0 : static int param_set_first_fqs_jiffies(const char *val, const struct kernel_param *kp)
461 : : {
462 : : ulong j;
463 : : int ret = kstrtoul(val, 0, &j);
464 : :
465 [ # # ]: 0 : if (!ret) {
466 : 0 : WRITE_ONCE(*(ulong *)kp->arg, (j > HZ) ? HZ : j);
467 : 0 : adjust_jiffies_till_sched_qs();
468 : : }
469 : 0 : return ret;
470 : : }
471 : :
472 : 0 : static int param_set_next_fqs_jiffies(const char *val, const struct kernel_param *kp)
473 : : {
474 : : ulong j;
475 : : int ret = kstrtoul(val, 0, &j);
476 : :
477 [ # # ]: 0 : if (!ret) {
478 [ # # # # ]: 0 : WRITE_ONCE(*(ulong *)kp->arg, (j > HZ) ? HZ : (j ?: 1));
479 : 0 : adjust_jiffies_till_sched_qs();
480 : : }
481 : 0 : return ret;
482 : : }
483 : :
484 : : static struct kernel_param_ops first_fqs_jiffies_ops = {
485 : : .set = param_set_first_fqs_jiffies,
486 : : .get = param_get_ulong,
487 : : };
488 : :
489 : : static struct kernel_param_ops next_fqs_jiffies_ops = {
490 : : .set = param_set_next_fqs_jiffies,
491 : : .get = param_get_ulong,
492 : : };
493 : :
494 : : module_param_cb(jiffies_till_first_fqs, &first_fqs_jiffies_ops, &jiffies_till_first_fqs, 0644);
495 : : module_param_cb(jiffies_till_next_fqs, &next_fqs_jiffies_ops, &jiffies_till_next_fqs, 0644);
496 : : module_param(rcu_kick_kthreads, bool, 0644);
497 : :
498 : : static void force_qs_rnp(int (*f)(struct rcu_data *rdp));
499 : : static int rcu_pending(void);
500 : :
501 : : /*
502 : : * Return the number of RCU GPs completed thus far for debug & stats.
503 : : */
504 : 0 : unsigned long rcu_get_gp_seq(void)
505 : : {
506 : 0 : return READ_ONCE(rcu_state.gp_seq);
507 : : }
508 : : EXPORT_SYMBOL_GPL(rcu_get_gp_seq);
509 : :
510 : : /*
511 : : * Return the number of RCU expedited batches completed thus far for
512 : : * debug & stats. Odd numbers mean that a batch is in progress, even
513 : : * numbers mean idle. The value returned will thus be roughly double
514 : : * the cumulative batches since boot.
515 : : */
516 : 0 : unsigned long rcu_exp_batches_completed(void)
517 : : {
518 : 0 : return rcu_state.expedited_sequence;
519 : : }
520 : : EXPORT_SYMBOL_GPL(rcu_exp_batches_completed);
521 : :
522 : : /*
523 : : * Return the root node of the rcu_state structure.
524 : : */
525 : : static struct rcu_node *rcu_get_root(void)
526 : : {
527 : : return &rcu_state.node[0];
528 : : }
529 : :
530 : : /*
531 : : * Convert a ->gp_state value to a character string.
532 : : */
533 : : static const char *gp_state_getname(short gs)
534 : : {
535 [ # # # # ]: 0 : if (gs < 0 || gs >= ARRAY_SIZE(gp_state_names))
536 : : return "???";
537 : 0 : return gp_state_names[gs];
538 : : }
539 : :
540 : : /*
541 : : * Send along grace-period-related data for rcutorture diagnostics.
542 : : */
543 : 0 : void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
544 : : unsigned long *gp_seq)
545 : : {
546 [ # # ]: 0 : switch (test_type) {
547 : : case RCU_FLAVOR:
548 : 0 : *flags = READ_ONCE(rcu_state.gp_flags);
549 : 0 : *gp_seq = rcu_seq_current(&rcu_state.gp_seq);
550 : 0 : break;
551 : : default:
552 : : break;
553 : : }
554 : 0 : }
555 : : EXPORT_SYMBOL_GPL(rcutorture_get_gp_data);
556 : :
557 : : /*
558 : : * Enter an RCU extended quiescent state, which can be either the
559 : : * idle loop or adaptive-tickless usermode execution.
560 : : *
561 : : * We crowbar the ->dynticks_nmi_nesting field to zero to allow for
562 : : * the possibility of usermode upcalls having messed up our count
563 : : * of interrupt nesting level during the prior busy period.
564 : : */
565 : 118244465 : static void rcu_eqs_enter(bool user)
566 : : {
567 : 236488930 : struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
568 : :
569 [ - + # # ]: 118244465 : WARN_ON_ONCE(rdp->dynticks_nmi_nesting != DYNTICK_IRQ_NONIDLE);
570 : : WRITE_ONCE(rdp->dynticks_nmi_nesting, 0);
571 : : WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
572 : : rdp->dynticks_nesting == 0);
573 [ - + ]: 118271279 : if (rdp->dynticks_nesting != 1) {
574 : 0 : rdp->dynticks_nesting--;
575 : 118286459 : return;
576 : : }
577 : :
578 : : lockdep_assert_irqs_disabled();
579 : : trace_rcu_dyntick(TPS("Start"), rdp->dynticks_nesting, 0, atomic_read(&rdp->dynticks));
580 : : WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current));
581 : 236542558 : rdp = this_cpu_ptr(&rcu_data);
582 : : do_nocb_deferred_wakeup(rdp);
583 : : rcu_prepare_for_idle();
584 : : rcu_preempt_deferred_qs(current);
585 : : WRITE_ONCE(rdp->dynticks_nesting, 0); /* Avoid irq-access tearing. */
586 : 118271279 : rcu_dynticks_eqs_enter();
587 : : rcu_dynticks_task_enter();
588 : : }
589 : :
590 : : /**
591 : : * rcu_idle_enter - inform RCU that current CPU is entering idle
592 : : *
593 : : * Enter idle mode, in other words, -leave- the mode in which RCU
594 : : * read-side critical sections can occur. (Though RCU read-side
595 : : * critical sections can occur in irq handlers in idle, a possibility
596 : : * handled by irq_enter() and irq_exit().)
597 : : *
598 : : * If you add or remove a call to rcu_idle_enter(), be sure to test with
599 : : * CONFIG_RCU_EQS_DEBUG=y.
600 : : */
601 : 118245080 : void rcu_idle_enter(void)
602 : : {
603 : : lockdep_assert_irqs_disabled();
604 : 118245080 : rcu_eqs_enter(false);
605 : 118288797 : }
606 : :
607 : : #ifdef CONFIG_NO_HZ_FULL
608 : : /**
609 : : * rcu_user_enter - inform RCU that we are resuming userspace.
610 : : *
611 : : * Enter RCU idle mode right before resuming userspace. No use of RCU
612 : : * is permitted between this call and rcu_user_exit(). This way the
613 : : * CPU doesn't need to maintain the tick for RCU maintenance purposes
614 : : * when the CPU runs in userspace.
615 : : *
616 : : * If you add or remove a call to rcu_user_enter(), be sure to test with
617 : : * CONFIG_RCU_EQS_DEBUG=y.
618 : : */
619 : : void rcu_user_enter(void)
620 : : {
621 : : lockdep_assert_irqs_disabled();
622 : : rcu_eqs_enter(true);
623 : : }
624 : : #endif /* CONFIG_NO_HZ_FULL */
625 : :
626 : : /*
627 : : * If we are returning from the outermost NMI handler that interrupted an
628 : : * RCU-idle period, update rdp->dynticks and rdp->dynticks_nmi_nesting
629 : : * to let the RCU grace-period handling know that the CPU is back to
630 : : * being RCU-idle.
631 : : *
632 : : * If you add or remove a call to rcu_nmi_exit_common(), be sure to test
633 : : * with CONFIG_RCU_EQS_DEBUG=y.
634 : : */
635 : : static __always_inline void rcu_nmi_exit_common(bool irq)
636 : : {
637 : 17630683 : struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
638 : :
639 : : /*
640 : : * Check for ->dynticks_nmi_nesting underflow and bad ->dynticks.
641 : : * (We are exiting an NMI handler, so RCU better be paying attention
642 : : * to us!)
643 : : */
644 [ - + # # # # # # ]: 17671539 : WARN_ON_ONCE(rdp->dynticks_nmi_nesting <= 0);
645 [ - + # # # # # # ]: 17671539 : WARN_ON_ONCE(rcu_dynticks_curr_cpu_in_eqs());
646 : :
647 : : /*
648 : : * If the nesting level is not 1, the CPU wasn't RCU-idle, so
649 : : * leave it in non-RCU-idle state.
650 : : */
651 [ + + # # ]: 17647411 : if (rdp->dynticks_nmi_nesting != 1) {
652 : 6094739 : trace_rcu_dyntick(TPS("--="), rdp->dynticks_nmi_nesting, rdp->dynticks_nmi_nesting - 2,
653 : : atomic_read(&rdp->dynticks));
654 : : WRITE_ONCE(rdp->dynticks_nmi_nesting, /* No store tearing. */
655 : : rdp->dynticks_nmi_nesting - 2);
656 : : return;
657 : : }
658 : :
659 : : /* This NMI interrupted an RCU-idle CPU, restore RCU-idleness. */
660 : : trace_rcu_dyntick(TPS("Startirq"), rdp->dynticks_nmi_nesting, 0, atomic_read(&rdp->dynticks));
661 : : WRITE_ONCE(rdp->dynticks_nmi_nesting, 0); /* Avoid store tearing. */
662 : :
663 : : if (irq)
664 : 11552672 : rcu_prepare_for_idle();
665 : :
666 : 11563581 : rcu_dynticks_eqs_enter();
667 : :
668 : : if (irq)
669 : 11574533 : rcu_dynticks_task_enter();
670 : : }
671 : :
672 : : /**
673 : : * rcu_nmi_exit - inform RCU of exit from NMI context
674 : : *
675 : : * If you add or remove a call to rcu_nmi_exit(), be sure to test
676 : : * with CONFIG_RCU_EQS_DEBUG=y.
677 : : */
678 : 0 : void rcu_nmi_exit(void)
679 : : {
680 : : rcu_nmi_exit_common(false);
681 : 0 : }
682 : :
683 : : /**
684 : : * rcu_irq_exit - inform RCU that current CPU is exiting irq towards idle
685 : : *
686 : : * Exit from an interrupt handler, which might possibly result in entering
687 : : * idle mode, in other words, leaving the mode in which read-side critical
688 : : * sections can occur. The caller must have disabled interrupts.
689 : : *
690 : : * This code assumes that the idle loop never does anything that might
691 : : * result in unbalanced calls to irq_enter() and irq_exit(). If your
692 : : * architecture's idle loop violates this assumption, RCU will give you what
693 : : * you deserve, good and hard. But very infrequently and irreproducibly.
694 : : *
695 : : * Use things like work queues to work around this limitation.
696 : : *
697 : : * You have been warned.
698 : : *
699 : : * If you add or remove a call to rcu_irq_exit(), be sure to test with
700 : : * CONFIG_RCU_EQS_DEBUG=y.
701 : : */
702 : 17630683 : void rcu_irq_exit(void)
703 : : {
704 : : lockdep_assert_irqs_disabled();
705 : : rcu_nmi_exit_common(true);
706 : 17669830 : }
707 : :
708 : : /*
709 : : * Wrapper for rcu_irq_exit() where interrupts are enabled.
710 : : *
711 : : * If you add or remove a call to rcu_irq_exit_irqson(), be sure to test
712 : : * with CONFIG_RCU_EQS_DEBUG=y.
713 : : */
714 : 0 : void rcu_irq_exit_irqson(void)
715 : : {
716 : : unsigned long flags;
717 : :
718 : 0 : local_irq_save(flags);
719 : 0 : rcu_irq_exit();
720 [ # # ]: 0 : local_irq_restore(flags);
721 : 0 : }
722 : :
723 : : /*
724 : : * Exit an RCU extended quiescent state, which can be either the
725 : : * idle loop or adaptive-tickless usermode execution.
726 : : *
727 : : * We crowbar the ->dynticks_nmi_nesting field to DYNTICK_IRQ_NONIDLE to
728 : : * allow for the possibility of usermode upcalls messing up our count of
729 : : * interrupt nesting level during the busy period that is just now starting.
730 : : */
731 : 118279865 : static void rcu_eqs_exit(bool user)
732 : : {
733 : : struct rcu_data *rdp;
734 : : long oldval;
735 : :
736 : : lockdep_assert_irqs_disabled();
737 : 236559730 : rdp = this_cpu_ptr(&rcu_data);
738 : 118279865 : oldval = rdp->dynticks_nesting;
739 : : WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && oldval < 0);
740 [ - + ]: 118279865 : if (oldval) {
741 : 0 : rdp->dynticks_nesting++;
742 : 118276286 : return;
743 : : }
744 : : rcu_dynticks_task_exit();
745 : 118279865 : rcu_dynticks_eqs_exit();
746 : : rcu_cleanup_after_idle();
747 : : trace_rcu_dyntick(TPS("End"), rdp->dynticks_nesting, 1, atomic_read(&rdp->dynticks));
748 : : WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current));
749 : : WRITE_ONCE(rdp->dynticks_nesting, 1);
750 [ - + # # ]: 118284963 : WARN_ON_ONCE(rdp->dynticks_nmi_nesting);
751 : : WRITE_ONCE(rdp->dynticks_nmi_nesting, DYNTICK_IRQ_NONIDLE);
752 : : }
753 : :
754 : : /**
755 : : * rcu_idle_exit - inform RCU that current CPU is leaving idle
756 : : *
757 : : * Exit idle mode, in other words, -enter- the mode in which RCU
758 : : * read-side critical sections can occur.
759 : : *
760 : : * If you add or remove a call to rcu_idle_exit(), be sure to test with
761 : : * CONFIG_RCU_EQS_DEBUG=y.
762 : : */
763 : 118282888 : void rcu_idle_exit(void)
764 : : {
765 : : unsigned long flags;
766 : :
767 : 118281582 : local_irq_save(flags);
768 : 118291363 : rcu_eqs_exit(false);
769 [ - + ]: 118288507 : local_irq_restore(flags);
770 : 118291762 : }
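
The two per-CPU nesting counters manipulated above and in rcu_eqs_enter() track different things: ->dynticks_nesting counts process-level reasons to be non-idle, while ->dynticks_nmi_nesting tracks irq/NMI nesting and is crowbarred as described. An illustrative trace of both counters across one idle period:

/*
 * Illustrative counter values while a CPU goes idle and returns:
 *
 *                          ->dynticks_nesting    ->dynticks_nmi_nesting
 *  running in a task               1               DYNTICK_IRQ_NONIDLE
 *  after rcu_idle_enter()          0               0
 *  after rcu_idle_exit()           1               DYNTICK_IRQ_NONIDLE
 */
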
771 : :
772 : : #ifdef CONFIG_NO_HZ_FULL
773 : : /**
774 : : * rcu_user_exit - inform RCU that we are exiting userspace.
775 : : *
776 : : * Exit RCU idle mode while entering the kernel because it can
777 : : * run an RCU read-side critical section at any time.
778 : : *
779 : : * If you add or remove a call to rcu_user_exit(), be sure to test with
780 : : * CONFIG_RCU_EQS_DEBUG=y.
781 : : */
782 : : void rcu_user_exit(void)
783 : : {
784 : : rcu_eqs_exit(1);
785 : : }
786 : : #endif /* CONFIG_NO_HZ_FULL */
787 : :
788 : : /**
789 : : * rcu_nmi_enter_common - inform RCU of entry to NMI context
790 : : * @irq: Is this call from rcu_irq_enter?
791 : : *
792 : : * If the CPU was idle from RCU's viewpoint, update rdp->dynticks and
793 : : * rdp->dynticks_nmi_nesting to let the RCU grace-period handling know
794 : : * that the CPU is active. This implementation permits nested NMIs, as
795 : : * long as the nesting level does not overflow an int. (You will probably
796 : : * run out of stack space first.)
797 : : *
798 : : * If you add or remove a call to rcu_nmi_enter_common(), be sure to test
799 : : * with CONFIG_RCU_EQS_DEBUG=y.
800 : : */
801 : : static __always_inline void rcu_nmi_enter_common(bool irq)
802 : : {
803 : 17344993 : struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
804 : : long incby = 2;
805 : :
806 : : /* Complain about underflow. */
807 [ - + # # # # # # ]: 17414737 : WARN_ON_ONCE(rdp->dynticks_nmi_nesting < 0);
808 : :
809 : : /*
810 : : * If idle from RCU viewpoint, atomically increment ->dynticks
811 : : * to mark non-idle and increment ->dynticks_nmi_nesting by one.
812 : : * Otherwise, increment ->dynticks_nmi_nesting by two. This means
813 : : * if ->dynticks_nmi_nesting is equal to one, we are guaranteed
814 : : * to be in the outermost NMI handler that interrupted an RCU-idle
815 : : * period (observation due to Andy Lutomirski).
816 : : */
817 [ + + # # ]: 17414737 : if (rcu_dynticks_curr_cpu_in_eqs()) {
818 : :
819 : : if (irq)
820 : 11554820 : rcu_dynticks_task_exit();
821 : :
822 : 11557004 : rcu_dynticks_eqs_exit();
823 : :
824 : : if (irq)
825 : 11567113 : rcu_cleanup_after_idle();
826 : :
827 : : incby = 1;
828 : : }
829 : 17486815 : trace_rcu_dyntick(incby == 1 ? TPS("Endirq") : TPS("++="),
830 : : rdp->dynticks_nmi_nesting,
831 : 17486815 : rdp->dynticks_nmi_nesting + incby, atomic_read(&rdp->dynticks));
832 : : WRITE_ONCE(rdp->dynticks_nmi_nesting, /* Prevent store tearing. */
833 : : rdp->dynticks_nmi_nesting + incby);
834 : 17486815 : barrier();
835 : : }
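
The increment chosen above pairs with the decrement in rcu_nmi_exit_common(): entry adds 1 when the CPU was RCU-idle and 2 otherwise, and exit subtracts 2 unless the count is exactly 1. An illustrative ->dynticks_nmi_nesting trace for an interrupt taken from idle with one nested NMI:

/*
 *   rcu_irq_enter()  from idle:      0 -> 1   (incby == 1, exits the EQS)
 *     rcu_nmi_enter()  nested NMI:   1 -> 3   (incby == 2)
 *     rcu_nmi_exit()   nested NMI:   3 -> 1   (subtract 2)
 *   rcu_irq_exit()   outermost:      1 -> 0   (re-enters the EQS)
 */
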
836 : :
837 : : /**
838 : : * rcu_nmi_enter - inform RCU of entry to NMI context
839 : : */
840 : 0 : void rcu_nmi_enter(void)
841 : : {
842 : : rcu_nmi_enter_common(false);
843 : 0 : }
844 : : NOKPROBE_SYMBOL(rcu_nmi_enter);
845 : :
846 : : /**
847 : : * rcu_irq_enter - inform RCU that current CPU is entering irq away from idle
848 : : *
849 : : * Enter an interrupt handler, which might possibly result in exiting
850 : : * idle mode, in other words, entering the mode in which read-side critical
851 : : * sections can occur. The caller must have disabled interrupts.
852 : : *
853 : : * Note that the Linux kernel is fully capable of entering an interrupt
854 : : * handler that it never exits, for example when doing upcalls to user mode!
855 : : * This code assumes that the idle loop never does upcalls to user mode.
856 : : * If your architecture's idle loop does do upcalls to user mode (or does
857 : : * anything else that results in unbalanced calls to the irq_enter() and
858 : : * irq_exit() functions), RCU will give you what you deserve, good and hard.
859 : : * But very infrequently and irreproducibly.
860 : : *
861 : : * Use things like work queues to work around this limitation.
862 : : *
863 : : * You have been warned.
864 : : *
865 : : * If you add or remove a call to rcu_irq_enter(), be sure to test with
866 : : * CONFIG_RCU_EQS_DEBUG=y.
867 : : */
868 : 17344993 : void rcu_irq_enter(void)
869 : : {
870 : : lockdep_assert_irqs_disabled();
871 : : rcu_nmi_enter_common(true);
872 : 17484978 : }
873 : :
874 : : /*
875 : : * Wrapper for rcu_irq_enter() where interrupts are enabled.
876 : : *
877 : : * If you add or remove a call to rcu_irq_enter_irqson(), be sure to test
878 : : * with CONFIG_RCU_EQS_DEBUG=y.
879 : : */
880 : 0 : void rcu_irq_enter_irqson(void)
881 : : {
882 : : unsigned long flags;
883 : :
884 : 0 : local_irq_save(flags);
885 : 0 : rcu_irq_enter();
886 [ # # ]: 0 : local_irq_restore(flags);
887 : 0 : }
888 : :
889 : : /**
890 : : * rcu_is_watching - see if RCU thinks that the current CPU is not idle
891 : : *
892 : : * Return true if RCU is watching the running CPU, which means that this
893 : : * CPU can safely enter RCU read-side critical sections. In other words,
894 : : * if the current CPU is not in its idle loop or is in an interrupt or
895 : : * NMI handler, return true.
896 : : */
897 : 2070 : bool notrace rcu_is_watching(void)
898 : : {
899 : : bool ret;
900 : :
901 : 27992428 : preempt_disable_notrace();
902 : 2070 : ret = !rcu_dynticks_curr_cpu_in_eqs();
903 : 27992284 : preempt_enable_notrace();
904 : 2070 : return ret;
905 : : }
906 : : EXPORT_SYMBOL_GPL(rcu_is_watching);
907 : :
908 : : /*
909 : : * If a holdout task is actually running, request an urgent quiescent
910 : : * state from its CPU. This is unsynchronized, so migrations can cause
911 : : * the request to go to the wrong CPU. That is OK: all that will happen
912 : : * is that the CPU's next context switch will be a bit slower and next
913 : : * time around this task will generate another request.
914 : : */
915 : 0 : void rcu_request_urgent_qs_task(struct task_struct *t)
916 : : {
917 : : int cpu;
918 : :
919 : 0 : barrier();
920 : 0 : cpu = task_cpu(t);
921 [ # # ]: 0 : if (!task_curr(t))
922 : 0 : return; /* This task is not running on that CPU. */
923 : 0 : smp_store_release(per_cpu_ptr(&rcu_data.rcu_urgent_qs, cpu), true);
924 : : }
925 : :
926 : : #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU)
927 : :
928 : : /*
929 : : * Is the current CPU online as far as RCU is concerned?
930 : : *
931 : : * Disable preemption to avoid false positives that could otherwise
932 : : * happen due to the current CPU number being sampled, this task being
933 : : * preempted, its old CPU being taken offline, resuming on some other CPU,
934 : : * then determining that its old CPU is now offline.
935 : : *
936 : : * Disable checking if in an NMI handler because we cannot safely
937 : : * report errors from NMI handlers anyway. In addition, it is OK to use
938 : : * RCU on an offline processor during initial boot, hence the check for
939 : : * rcu_scheduler_fully_active.
940 : : */
941 : : bool rcu_lockdep_current_cpu_online(void)
942 : : {
943 : : struct rcu_data *rdp;
944 : : struct rcu_node *rnp;
945 : : bool ret = false;
946 : :
947 : : if (in_nmi() || !rcu_scheduler_fully_active)
948 : : return true;
949 : : preempt_disable();
950 : : rdp = this_cpu_ptr(&rcu_data);
951 : : rnp = rdp->mynode;
952 : : if (rdp->grpmask & rcu_rnp_online_cpus(rnp))
953 : : ret = true;
954 : : preempt_enable();
955 : : return ret;
956 : : }
957 : : EXPORT_SYMBOL_GPL(rcu_lockdep_current_cpu_online);
958 : :
959 : : #endif /* #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU) */
960 : :
961 : : /*
962 : : * We are reporting a quiescent state on behalf of some other CPU, so
963 : : * it is our responsibility to check for and handle potential overflow
964 : : * of the rcu_node ->gp_seq counter with respect to the rcu_data counters.
965 : : * After all, the CPU might be in deep idle state, and thus executing no
966 : : * code whatsoever.
967 : : */
968 : : static void rcu_gpnum_ovf(struct rcu_node *rnp, struct rcu_data *rdp)
969 : : {
970 : : raw_lockdep_assert_held_rcu_node(rnp);
971 [ - + - + - + - + ]: 1491665 : if (ULONG_CMP_LT(rcu_seq_current(&rdp->gp_seq) + ULONG_MAX / 4,
972 : : rnp->gp_seq))
973 : : WRITE_ONCE(rdp->gpwrap, true);
974 [ - + - + - + - + ]: 1491665 : if (ULONG_CMP_LT(rdp->rcu_iw_gp_seq + ULONG_MAX / 4, rnp->gp_seq))
975 : 0 : rdp->rcu_iw_gp_seq = rnp->gp_seq + ULONG_MAX / 4;
976 : : }
977 : :
978 : : /*
979 : : * Snapshot the specified CPU's dynticks counter so that we can later
980 : : * credit it with an implicit quiescent state. Return 1 if this CPU
981 : : * is in dynticks idle mode, which is an extended quiescent state.
982 : : */
983 : 416228 : static int dyntick_save_progress_counter(struct rcu_data *rdp)
984 : : {
985 : 416228 : rdp->dynticks_snap = rcu_dynticks_snap(rdp);
986 [ + + ]: 416228 : if (rcu_dynticks_in_eqs(rdp->dynticks_snap)) {
987 : : trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti"));
988 : 191231 : rcu_gpnum_ovf(rdp->mynode, rdp);
989 : : return 1;
990 : : }
991 : : return 0;
992 : : }
993 : :
994 : : /*
995 : : * Return true if the specified CPU has passed through a quiescent
996 : : * state by virtue of being in or having passed through a dynticks
997 : : * idle state since the last call to dyntick_save_progress_counter()
998 : : * for this same CPU, or by virtue of having been offline.
999 : : */
1000 : 54388 : static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
1001 : : {
1002 : : unsigned long jtsq;
1003 : : bool *rnhqp;
1004 : : bool *ruqp;
1005 : 54388 : struct rcu_node *rnp = rdp->mynode;
1006 : :
1007 : : /*
1008 : : * If the CPU passed through or entered a dynticks idle phase with
1009 : : * no active irq/NMI handlers, then we can safely pretend that the CPU
1010 : : * already acknowledged the request to pass through a quiescent
1011 : : * state. Either way, that CPU cannot possibly be in an RCU
1012 : : * read-side critical section that started before the beginning
1013 : : * of the current RCU grace period.
1014 : : */
1015 [ + + ]: 108776 : if (rcu_dynticks_in_eqs_since(rdp, rdp->dynticks_snap)) {
1016 : : trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti"));
1017 : : rcu_gpnum_ovf(rnp, rdp);
1018 : : return 1;
1019 : : }
1020 : :
1021 : : /* If waiting too long on an offline CPU, complain. */
1022 [ - + ]: 77034 : if (!(rdp->grpmask & rcu_rnp_online_cpus(rnp)) &&
1023 [ # # ]: 0 : time_after(jiffies, rcu_state.gp_start + HZ)) {
1024 : : bool onl;
1025 : : struct rcu_node *rnp1;
1026 : :
1027 : 0 : WARN_ON(1); /* Offline CPUs are supposed to report QS! */
1028 : 0 : pr_info("%s: grp: %d-%d level: %d ->gp_seq %ld ->completedqs %ld\n",
1029 : : __func__, rnp->grplo, rnp->grphi, rnp->level,
1030 : : (long)rnp->gp_seq, (long)rnp->completedqs);
1031 [ # # ]: 0 : for (rnp1 = rnp; rnp1; rnp1 = rnp1->parent)
1032 : 0 : pr_info("%s: %d:%d ->qsmask %#lx ->qsmaskinit %#lx ->qsmaskinitnext %#lx ->rcu_gp_init_mask %#lx\n",
1033 : : __func__, rnp1->grplo, rnp1->grphi, rnp1->qsmask, rnp1->qsmaskinit, rnp1->qsmaskinitnext, rnp1->rcu_gp_init_mask);
1034 : 0 : onl = !!(rdp->grpmask & rcu_rnp_online_cpus(rnp));
1035 : 0 : pr_info("%s %d: %c online: %ld(%d) offline: %ld(%d)\n",
1036 : : __func__, rdp->cpu, ".o"[onl],
1037 : : (long)rdp->rcu_onl_gp_seq, rdp->rcu_onl_gp_flags,
1038 : : (long)rdp->rcu_ofl_gp_seq, rdp->rcu_ofl_gp_flags);
1039 : 0 : return 1; /* Break things loose after complaining. */
1040 : : }
1041 : :
1042 : : /*
1043 : : * A CPU running for an extended time within the kernel can
1044 : : * delay RCU grace periods: (1) At age jiffies_to_sched_qs,
1045 : : * set .rcu_urgent_qs, (2) At age 2*jiffies_to_sched_qs, set
1046 : : * both .rcu_need_heavy_qs and .rcu_urgent_qs. Note that the
1047 : : * unsynchronized assignments to the per-CPU rcu_need_heavy_qs
1048 : : * variable are safe because the assignments are repeated if this
1049 : : * CPU failed to pass through a quiescent state. This code
1050 : : * also checks .jiffies_resched in case jiffies_to_sched_qs
1051 : : * is set way high.
1052 : : */
1053 : : jtsq = READ_ONCE(jiffies_to_sched_qs);
1054 : 38517 : ruqp = per_cpu_ptr(&rcu_data.rcu_urgent_qs, rdp->cpu);
1055 : 38517 : rnhqp = &per_cpu(rcu_data.rcu_need_heavy_qs, rdp->cpu);
1056 [ + + ]: 38517 : if (!READ_ONCE(*rnhqp) &&
1057 [ + + ]: 18213 : (time_after(jiffies, rcu_state.gp_start + jtsq * 2) ||
1058 [ - + ]: 18012 : time_after(jiffies, rcu_state.jiffies_resched))) {
1059 : : WRITE_ONCE(*rnhqp, true);
1060 : : /* Store rcu_need_heavy_qs before rcu_urgent_qs. */
1061 : 402 : smp_store_release(ruqp, true);
1062 [ + + ]: 38316 : } else if (time_after(jiffies, rcu_state.gp_start + jtsq)) {
1063 : : WRITE_ONCE(*ruqp, true);
1064 : : }
1065 : :
1066 : : /*
1067 : : * NO_HZ_FULL CPUs can run in-kernel without rcu_sched_clock_irq!
1068 : : * The above code handles this, but only for straight cond_resched().
1069 : : * And some in-kernel loops check need_resched() before calling
1070 : : * cond_resched(), which defeats the above code for CPUs that are
1071 : : * running in-kernel with scheduling-clock interrupts disabled.
1072 : : * So hit them over the head with the resched_cpu() hammer!
1073 : : */
1074 : : if (tick_nohz_full_cpu(rdp->cpu) &&
1075 : : time_after(jiffies,
1076 : : READ_ONCE(rdp->last_fqs_resched) + jtsq * 3)) {
1077 : : resched_cpu(rdp->cpu);
1078 : : WRITE_ONCE(rdp->last_fqs_resched, jiffies);
1079 : : }
1080 : :
1081 : : /*
1082 : : * If more than halfway to RCU CPU stall-warning time, invoke
1083 : : * resched_cpu() more frequently to try to loosen things up a bit.
1084 : : * Also check to see if the CPU is getting hammered with interrupts,
1085 : : * but only once per grace period, just to keep the IPIs down to
1086 : : * a dull roar.
1087 : : */
1088 [ - + ]: 38517 : if (time_after(jiffies, rcu_state.jiffies_resched)) {
1089 [ # # ]: 0 : if (time_after(jiffies,
1090 : : READ_ONCE(rdp->last_fqs_resched) + jtsq)) {
1091 : 0 : resched_cpu(rdp->cpu);
1092 : 0 : WRITE_ONCE(rdp->last_fqs_resched, jiffies);
1093 : : }
1094 [ # # ]: 0 : if (IS_ENABLED(CONFIG_IRQ_WORK) &&
1095 [ # # # # ]: 0 : !rdp->rcu_iw_pending && rdp->rcu_iw_gp_seq != rnp->gp_seq &&
1096 : 0 : (rnp->ffmask & rdp->grpmask)) {
1097 : : init_irq_work(&rdp->rcu_iw, rcu_iw_handler);
1098 : 0 : rdp->rcu_iw_pending = true;
1099 : 0 : rdp->rcu_iw_gp_seq = rnp->gp_seq;
1100 : 0 : irq_work_queue_on(&rdp->rcu_iw, rdp->cpu);
1101 : : }
1102 : : }
1103 : :
1104 : : return 0;
1105 : : }
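
The escalation applied above to a CPU that keeps running in the kernel can be summarized as a timeline, with grace-period age measured from rcu_state.gp_start and jtsq short for jiffies_to_sched_qs (this is a summary of the code above, not additional behavior):

/*
 *  Grace-period age                  Action taken on the holdout CPU
 *  --------------------------------  ------------------------------------------
 *  after jtsq                        set .rcu_urgent_qs
 *  after 2 * jtsq                    also set .rcu_need_heavy_qs
 *  after rcu_state.jiffies_resched   resched_cpu() at most once per jtsq, plus
 *                                    one rcu_iw irq_work per grace period
 */
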
1106 : :
1107 : : /* Trace-event wrapper function for trace_rcu_future_grace_period. */
1108 : : static void trace_rcu_this_gp(struct rcu_node *rnp, struct rcu_data *rdp,
1109 : : unsigned long gp_seq_req, const char *s)
1110 : : {
1111 : : trace_rcu_future_grace_period(rcu_state.name, rnp->gp_seq, gp_seq_req,
1112 : : rnp->level, rnp->grplo, rnp->grphi, s);
1113 : : }
1114 : :
1115 : : /*
1116 : : * rcu_start_this_gp - Request the start of a particular grace period
1117 : : * @rnp_start: The leaf node of the CPU from which to start.
1118 : : * @rdp: The rcu_data corresponding to the CPU from which to start.
1119 : : * @gp_seq_req: The gp_seq of the grace period to start.
1120 : : *
1121 : : * Start the specified grace period, as needed to handle newly arrived
1122 : : * callbacks. The required future grace periods are recorded in each
1123 : : * rcu_node structure's ->gp_seq_needed field. Returns true if there
1124 : : * is reason to awaken the grace-period kthread.
1125 : : *
1126 : : * The caller must hold the specified rcu_node structure's ->lock, which
1127 : : * is why the caller is responsible for waking the grace-period kthread.
1128 : : *
1129 : : * Returns true if the GP thread needs to be awakened else false.
1130 : : */
1131 : 1856544 : static bool rcu_start_this_gp(struct rcu_node *rnp_start, struct rcu_data *rdp,
1132 : : unsigned long gp_seq_req)
1133 : : {
1134 : : bool ret = false;
1135 : : struct rcu_node *rnp;
1136 : :
1137 : : /*
1138 : : * Use funnel locking to either acquire the root rcu_node
1139 : : * structure's lock or bail out if the need for this grace period
1140 : : * has already been recorded -- or if that grace period has in
1141 : : * fact already started. If there is already a grace period in
1142 : : * progress in a non-leaf node, no recording is needed because the
1143 : : * end of the grace period will scan the leaf rcu_node structures.
1144 : : * Note that rnp_start->lock must not be released.
1145 : : */
1146 : : raw_lockdep_assert_held_rcu_node(rnp_start);
1147 : : trace_rcu_this_gp(rnp_start, rdp, gp_seq_req, TPS("Startleaf"));
1148 : : for (rnp = rnp_start; 1; rnp = rnp->parent) {
1149 [ - + ]: 1856544 : if (rnp != rnp_start)
1150 : 0 : raw_spin_lock_rcu_node(rnp);
1151 [ + + + - ]: 2136955 : if (ULONG_CMP_GE(rnp->gp_seq_needed, gp_seq_req) ||
1152 [ - + ]: 280411 : rcu_seq_started(&rnp->gp_seq, gp_seq_req) ||
1153 [ # # ]: 0 : (rnp != rnp_start &&
1154 : : rcu_seq_state(rcu_seq_current(&rnp->gp_seq)))) {
1155 : : trace_rcu_this_gp(rnp, rdp, gp_seq_req,
1156 : : TPS("Prestarted"));
1157 : : goto unlock_out;
1158 : : }
1159 : 280411 : rnp->gp_seq_needed = gp_seq_req;
1160 [ + + ]: 280411 : if (rcu_seq_state(rcu_seq_current(&rnp->gp_seq))) {
1161 : : /*
1162 : : * We just marked the leaf or internal node, and a
1163 : : * grace period is in progress, which means that
1164 : : * rcu_gp_cleanup() will see the marking. Bail to
1165 : : * reduce contention.
1166 : : */
1167 : : trace_rcu_this_gp(rnp_start, rdp, gp_seq_req,
1168 : : TPS("Startedleaf"));
1169 : : goto unlock_out;
1170 : : }
1171 [ - + # # ]: 12150 : if (rnp != rnp_start && rnp->parent != NULL)
1172 : : raw_spin_unlock_rcu_node(rnp);
1173 [ - + ]: 12150 : if (!rnp->parent)
1174 : : break; /* At root, and perhaps also leaf. */
1175 : : }
1176 : :
1177 : : /* If GP already in progress, just leave, otherwise start one. */
1178 [ + + ]: 12150 : if (rcu_gp_in_progress()) {
1179 : : trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedleafroot"));
1180 : : goto unlock_out;
1181 : : }
1182 : : trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedroot"));
1183 : 10359 : WRITE_ONCE(rcu_state.gp_flags, rcu_state.gp_flags | RCU_GP_FLAG_INIT);
1184 : 10359 : rcu_state.gp_req_activity = jiffies;
1185 [ + + ]: 10359 : if (!rcu_state.gp_kthread) {
1186 : : trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("NoGPkthread"));
1187 : : goto unlock_out;
1188 : : }
1189 : : trace_rcu_grace_period(rcu_state.name, READ_ONCE(rcu_state.gp_seq), TPS("newreq"));
1190 : : ret = true; /* Caller must wake GP kthread. */
1191 : : unlock_out:
1192 : : /* Push furthest requested GP to leaf node and rcu_data structure. */
1193 [ - + ]: 1856544 : if (ULONG_CMP_LT(gp_seq_req, rnp->gp_seq_needed)) {
1194 : 0 : rnp_start->gp_seq_needed = rnp->gp_seq_needed;
1195 : 0 : rdp->gp_seq_needed = rnp->gp_seq_needed;
1196 : : }
1197 [ - + ]: 1856544 : if (rnp != rnp_start)
1198 : : raw_spin_unlock_rcu_node(rnp);
1199 : 1856544 : return ret;
1200 : : }
1201 : :
1202 : : /*
1203 : : * Clean up any old requests for the just-ended grace period. Also return
1204 : : * whether any additional grace periods have been requested.
1205 : : */
1206 : : static bool rcu_future_gp_cleanup(struct rcu_node *rnp)
1207 : : {
1208 : : bool needmore;
1209 : 280007 : struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
1210 : :
1211 : 280007 : needmore = ULONG_CMP_LT(rnp->gp_seq, rnp->gp_seq_needed);
1212 [ + + ]: 280007 : if (!needmore)
1213 : 10152 : rnp->gp_seq_needed = rnp->gp_seq; /* Avoid counter wrap. */
1214 : : trace_rcu_this_gp(rnp, rdp, rnp->gp_seq,
1215 : : needmore ? TPS("CleanupMore") : TPS("Cleanup"));
1216 : : return needmore;
1217 : : }
1218 : :
1219 : : /*
1220 : : * Awaken the grace-period kthread. Don't do a self-awaken (unless in
1221 : : * an interrupt or softirq handler), and don't bother awakening when there
1222 : : * is nothing for the grace-period kthread to do (as in several CPUs raced
1223 : : * to awaken, and we lost), and finally don't try to awaken a kthread that
1224 : : * has not yet been created. If all those checks are passed, track some
1225 : : * debug information and awaken.
1226 : : *
1227 : : * So why do the self-wakeup when in an interrupt or softirq handler
1228 : : * in the grace-period kthread's context? Because the kthread might have
1229 : : * been interrupted just as it was going to sleep, and just after the final
1230 : : * pre-sleep check of the awaken condition. In this case, a wakeup really
1231 : : * is required, and is therefore supplied.
1232 : : */
1233 : 290159 : static void rcu_gp_kthread_wake(void)
1234 : : {
1235 [ + + + - ]: 385284 : if ((current == rcu_state.gp_kthread &&
1236 [ + + + + ]: 385286 : !in_irq() && !in_serving_softirq()) ||
1237 [ + - ]: 390037 : !READ_ONCE(rcu_state.gp_flags) ||
1238 : : !rcu_state.gp_kthread)
1239 : 290159 : return;
1240 : 195001 : WRITE_ONCE(rcu_state.gp_wake_time, jiffies);
1241 : : WRITE_ONCE(rcu_state.gp_wake_seq, READ_ONCE(rcu_state.gp_seq));
1242 : 195001 : swake_up_one(&rcu_state.gp_wq);
1243 : : }
1244 : :
1245 : : /*
1246 : : * If there is room, assign a ->gp_seq number to any callbacks on this
1247 : : * CPU that have not already been assigned. Also accelerate any callbacks
1248 : : * that were previously assigned a ->gp_seq number that has since proven
1249 : : * to be too conservative, which can happen if callbacks get assigned a
1250 : : * ->gp_seq number while RCU is idle, but with reference to a non-root
1251 : : * rcu_node structure. This function is idempotent, so it does not hurt
1252 : : * to call it repeatedly. Returns a flag saying that we should awaken
1253 : : * the RCU grace-period kthread.
1254 : : *
1255 : : * The caller must hold rnp->lock with interrupts disabled.
1256 : : */
1257 : 2337683 : static bool rcu_accelerate_cbs(struct rcu_node *rnp, struct rcu_data *rdp)
1258 : : {
1259 : : unsigned long gp_seq_req;
1260 : : bool ret = false;
1261 : :
1262 : : rcu_lockdep_assert_cblist_protected(rdp);
1263 : : raw_lockdep_assert_held_rcu_node(rnp);
1264 : :
1265 : : /* If no pending (not yet ready to invoke) callbacks, nothing to do. */
1266 [ + + ]: 2337683 : if (!rcu_segcblist_pend_cbs(&rdp->cblist))
1267 : : return false;
1268 : :
1269 : : /*
1270 : : * Callbacks are often registered with incomplete grace-period
1271 : : * information. Something about the fact that getting exact
1272 : : * information requires acquiring a global lock... RCU therefore
1273 : : * makes a conservative estimate of the grace period number at which
1274 : : * a given callback will become ready to invoke. The following
1275 : : * code checks this estimate and improves it when possible, thus
1276 : : * accelerating callback invocation to an earlier grace-period
1277 : : * number.
1278 : : */
1279 : : gp_seq_req = rcu_seq_snap(&rcu_state.gp_seq);
1280 [ + + ]: 1856548 : if (rcu_segcblist_accelerate(&rdp->cblist, gp_seq_req))
1281 : 1856544 : ret = rcu_start_this_gp(rnp, rdp, gp_seq_req);
1282 : :
1283 : : /* Trace depending on how much we were able to accelerate. */
1284 : : if (rcu_segcblist_restempty(&rdp->cblist, RCU_WAIT_TAIL))
1285 : : trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("AccWaitCB"));
1286 : : else
1287 : : trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("AccReadyCB"));
1288 : 1856548 : return ret;
1289 : : }
1290 : :
1291 : : /*
1292 : : * Similar to rcu_accelerate_cbs(), but does not require that the leaf
1293 : : * rcu_node structure's ->lock be held. It consults the cached value
1294 : : * of ->gp_seq_needed in the rcu_data structure, and if that indicates
1295 : : * that a new grace-period request be made, invokes rcu_accelerate_cbs()
1296 : : * while holding the leaf rcu_node structure's ->lock.
1297 : : */
1298 : 7908 : static void rcu_accelerate_cbs_unlocked(struct rcu_node *rnp,
1299 : : struct rcu_data *rdp)
1300 : : {
1301 : : unsigned long c;
1302 : : bool needwake;
1303 : :
1304 : : rcu_lockdep_assert_cblist_protected(rdp);
1305 : : c = rcu_seq_snap(&rcu_state.gp_seq);
1306 [ + + + + ]: 7890 : if (!rdp->gpwrap && ULONG_CMP_GE(rdp->gp_seq_needed, c)) {
1307 : : /* Old request still live, so mark recent callbacks. */
1308 : 36 : (void)rcu_segcblist_accelerate(&rdp->cblist, c);
1309 : 7945 : return;
1310 : : }
1311 : 7854 : raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
1312 : 7873 : needwake = rcu_accelerate_cbs(rnp, rdp);
1313 : : raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
1314 [ + + ]: 7873 : if (needwake)
1315 : 6791 : rcu_gp_kthread_wake();
1316 : : }
1317 : :
1318 : : /*
1319 : : * Move any callbacks whose grace period has completed to the
1320 : : * RCU_DONE_TAIL sublist, then compact the remaining sublists and
1321 : : * assign ->gp_seq numbers to any callbacks in the RCU_NEXT_TAIL
1322 : : * sublist. This function is idempotent, so it does not hurt to
1323 : : * invoke it repeatedly. As long as it is not invoked -too- often...
1324 : : * Returns true if the RCU grace-period kthread needs to be awakened.
1325 : : *
1326 : : * The caller must hold rnp->lock with interrupts disabled.
1327 : : */
1328 : 988814 : static bool rcu_advance_cbs(struct rcu_node *rnp, struct rcu_data *rdp)
1329 : : {
1330 : : rcu_lockdep_assert_cblist_protected(rdp);
1331 : : raw_lockdep_assert_held_rcu_node(rnp);
1332 : :
1333 : : /* If no pending (not yet ready to invoke) callbacks, nothing to do. */
1334 [ + + ]: 988814 : if (!rcu_segcblist_pend_cbs(&rdp->cblist))
1335 : : return false;
1336 : :
1337 : : /*
1338 : : * Find all callbacks whose ->gp_seq numbers indicate that they
1339 : : * are ready to invoke, and put them into the RCU_DONE_TAIL sublist.
1340 : : */
1341 : 842566 : rcu_segcblist_advance(&rdp->cblist, rnp->gp_seq);
1342 : :
1343 : : /* Classify any remaining callbacks. */
1344 : 842566 : return rcu_accelerate_cbs(rnp, rdp);
1345 : : }
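
A schematic of what the acceleration and advancement above do to a CPU's segmented callback list (the four rcu_segcblist segments; the grace-period numbers are purely illustrative):

/*
 * Before, with callbacks assigned to various grace periods:
 *   [DONE] [WAIT: gp <= 100] [NEXT_READY: gp <= 104] [NEXT: unassigned]
 *
 * rcu_segcblist_advance(&rdp->cblist, rnp->gp_seq):
 *   every callback whose assigned gp_seq has completed moves to [DONE].
 *
 * rcu_segcblist_accelerate(&rdp->cblist, rcu_seq_snap(&rcu_state.gp_seq)):
 *   still-unassigned callbacks get a (conservative) future gp_seq number.
 */
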
1346 : :
1347 : : /*
1348 : : * Move and classify callbacks, but only if doing so won't require
1349 : : * that the RCU grace-period kthread be awakened.
1350 : : */
1351 : : static void __maybe_unused rcu_advance_cbs_nowake(struct rcu_node *rnp,
1352 : : struct rcu_data *rdp)
1353 : : {
1354 : : rcu_lockdep_assert_cblist_protected(rdp);
1355 : : if (!rcu_seq_state(rcu_seq_current(&rnp->gp_seq)) ||
1356 : : !raw_spin_trylock_rcu_node(rnp))
1357 : : return;
1358 : : WARN_ON_ONCE(rcu_advance_cbs(rnp, rdp));
1359 : : raw_spin_unlock_rcu_node(rnp);
1360 : : }
1361 : :
1362 : : /*
1363 : : * Update CPU-local rcu_data state to record the beginnings and ends of
1364 : : * grace periods. The caller must hold the ->lock of the leaf rcu_node
1365 : : * structure corresponding to the current CPU, and must have irqs disabled.
1366 : : * Returns true if the grace-period kthread needs to be awakened.
1367 : : */
1368 : 1283735 : static bool __note_gp_changes(struct rcu_node *rnp, struct rcu_data *rdp)
1369 : : {
1370 : : bool ret = false;
1371 : : bool need_gp;
1372 : : const bool offloaded = IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
1373 : : rcu_segcblist_is_offloaded(&rdp->cblist);
1374 : :
1375 : : raw_lockdep_assert_held_rcu_node(rnp);
1376 : :
1377 [ + - ]: 1283735 : if (rdp->gp_seq == rnp->gp_seq)
1378 : : return false; /* Nothing to do. */
1379 : :
1380 : : /* Handle the ends of any preceding grace periods first. */
1381 [ + + - + ]: 1578656 : if (rcu_seq_completed_gp(rdp->gp_seq, rnp->gp_seq) ||
1382 : 294921 : unlikely(READ_ONCE(rdp->gpwrap))) {
1383 : : if (!offloaded)
1384 : 988814 : ret = rcu_advance_cbs(rnp, rdp); /* Advance CBs. */
1385 : : trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuend"));
1386 : : } else {
1387 : : if (!offloaded)
1388 : 294921 : ret = rcu_accelerate_cbs(rnp, rdp); /* Recent CBs. */
1389 : : }
1390 : :
1391 : : /* Now handle the beginnings of any new-to-this-CPU grace periods. */
1392 [ + + - + ]: 2859506 : if (rcu_seq_new_gp(rdp->gp_seq, rnp->gp_seq) ||
1393 : 292036 : unlikely(READ_ONCE(rdp->gpwrap))) {
1394 : : /*
1395 : : * If the current grace period is waiting for this CPU,
1396 : : * set up to detect a quiescent state, otherwise don't
1397 : : * go looking for one.
1398 : : */
1399 : : trace_rcu_grace_period(rcu_state.name, rnp->gp_seq, TPS("cpustart"));
1400 : 991699 : need_gp = !!(rnp->qsmask & rdp->grpmask);
1401 : 991699 : rdp->cpu_no_qs.b.norm = need_gp;
1402 : 991699 : rdp->core_needs_qs = need_gp;
1403 : : zero_cpu_stall_ticks(rdp);
1404 : : }
1405 : 1283735 : rdp->gp_seq = rnp->gp_seq; /* Remember new grace-period state. */
1406 [ + + - + ]: 1283735 : if (ULONG_CMP_LT(rdp->gp_seq_needed, rnp->gp_seq_needed) || rdp->gpwrap)
1407 : 964119 : rdp->gp_seq_needed = rnp->gp_seq_needed;
1408 : : WRITE_ONCE(rdp->gpwrap, false);
1409 : : rcu_gpnum_ovf(rnp, rdp);
1410 : 1283735 : return ret;
1411 : : }
1412 : :
1413 : 3825264 : static void note_gp_changes(struct rcu_data *rdp)
1414 : : {
1415 : : unsigned long flags;
1416 : : bool needwake;
1417 : : struct rcu_node *rnp;
1418 : :
1419 : 3759854 : local_irq_save(flags);
1420 : 3851929 : rnp = rdp->mynode;
1421 [ + + + + ]: 10588600 : if ((rdp->gp_seq == rcu_seq_current(&rnp->gp_seq) &&
1422 [ + + ]: 3833064 : !unlikely(READ_ONCE(rdp->gpwrap))) || /* w/out lock. */
1423 : 1009721 : !raw_spin_trylock_rcu_node(rnp)) { /* irqs already off, so later. */
1424 [ - + ]: 3067016 : local_irq_restore(flags);
1425 : 3854950 : return;
1426 : : }
1427 : 723514 : needwake = __note_gp_changes(rnp, rdp);
1428 : 723514 : raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1429 [ + + ]: 723514 : if (needwake)
1430 : 3361 : rcu_gp_kthread_wake();
1431 : : }
1432 : :
1433 : 840435 : static void rcu_gp_slow(int delay)
1434 : : {
1435 [ - + # # ]: 840435 : if (delay > 0 &&
1436 : 0 : !(rcu_seq_ctr(rcu_state.gp_seq) %
1437 : 0 : (rcu_num_nodes * PER_RCU_NODE_PERIOD * delay)))
1438 : 0 : schedule_timeout_uninterruptible(delay);
1439 : 840435 : }
1440 : :
1441 : : /*
1442 : : * Initialize a new grace period. Return false if no grace period required.
1443 : : */
1444 : 280214 : static bool rcu_gp_init(void)
1445 : : {
1446 : : unsigned long flags;
1447 : : unsigned long oldmask;
1448 : : unsigned long mask;
1449 : : struct rcu_data *rdp;
1450 : : struct rcu_node *rnp = rcu_get_root();
1451 : :
1452 : 280214 : WRITE_ONCE(rcu_state.gp_activity, jiffies);
1453 : 280214 : raw_spin_lock_irq_rcu_node(rnp);
1454 [ - + ]: 280214 : if (!READ_ONCE(rcu_state.gp_flags)) {
1455 : : /* Spurious wakeup, tell caller to go back to sleep. */
1456 : 0 : raw_spin_unlock_irq_rcu_node(rnp);
1457 : 0 : return false;
1458 : : }
1459 : : WRITE_ONCE(rcu_state.gp_flags, 0); /* Clear all flags: New GP. */
1460 : :
1461 [ - + # # : 280214 : if (WARN_ON_ONCE(rcu_gp_in_progress())) {
- + ]
1462 : : /*
1463 : : * Grace period already in progress, don't start another.
1464 : : * Not supposed to be able to happen.
1465 : : */
1466 : 0 : raw_spin_unlock_irq_rcu_node(rnp);
1467 : 0 : return false;
1468 : : }
1469 : :
1470 : : /* Advance to a new grace period and initialize state. */
1471 : 280214 : record_gp_stall_check_time();
1472 : : /* Record GP times before starting GP, hence rcu_seq_start(). */
1473 : 280214 : rcu_seq_start(&rcu_state.gp_seq);
1474 : : trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("start"));
1475 : 280214 : raw_spin_unlock_irq_rcu_node(rnp);
1476 : :
1477 : : /*
1478 : : * Apply per-leaf buffered online and offline operations to the
1479 : : * rcu_node tree. Note that this new grace period need not wait
1480 : : * for subsequent online CPUs, and that quiescent-state forcing
1481 : : * will handle subsequent offline CPUs.
1482 : : */
1483 : 280214 : rcu_state.gp_state = RCU_GP_ONOFF;
1484 [ + + ]: 560428 : rcu_for_each_leaf_node(rnp) {
1485 : 280214 : raw_spin_lock(&rcu_state.ofl_lock);
1486 : 280214 : raw_spin_lock_irq_rcu_node(rnp);
1487 [ + + + - ]: 559807 : if (rnp->qsmaskinit == rnp->qsmaskinitnext &&
1488 : 279593 : !rnp->wait_blkd_tasks) {
1489 : : /* Nothing to do on this leaf rcu_node structure. */
1490 : 279593 : raw_spin_unlock_irq_rcu_node(rnp);
1491 : : raw_spin_unlock(&rcu_state.ofl_lock);
1492 : 279593 : continue;
1493 : : }
1494 : :
1495 : : /* Record old state, apply changes to ->qsmaskinit field. */
1496 : : oldmask = rnp->qsmaskinit;
1497 : 621 : rnp->qsmaskinit = rnp->qsmaskinitnext;
1498 : :
1499 : : /* If zero-ness of ->qsmaskinit changed, propagate up tree. */
1500 [ + + ]: 621 : if (!oldmask != !rnp->qsmaskinit) {
1501 [ + - ]: 207 : if (!oldmask) { /* First online CPU for rcu_node. */
1502 [ + - ]: 207 : if (!rnp->wait_blkd_tasks) /* Ever offline? */
1503 : 207 : rcu_init_new_rnp(rnp);
1504 : : } else if (rcu_preempt_has_tasks(rnp)) {
1505 : : rnp->wait_blkd_tasks = true; /* blocked tasks */
1506 : : } else { /* Last offline CPU and can propagate. */
1507 : : rcu_cleanup_dead_rnp(rnp);
1508 : : }
1509 : : }
1510 : :
1511 : : /*
1512 : : * If all waited-on tasks from prior grace period are
1513 : : * done, and if all this rcu_node structure's CPUs are
1514 : : * still offline, propagate up the rcu_node tree and
1515 : : * clear ->wait_blkd_tasks. Otherwise, if one of this
1516 : : * rcu_node structure's CPUs has since come back online,
1517 : : * simply clear ->wait_blkd_tasks.
1518 : : */
1519 [ - + ]: 621 : if (rnp->wait_blkd_tasks &&
1520 : : (!rcu_preempt_has_tasks(rnp) || rnp->qsmaskinit)) {
1521 : 0 : rnp->wait_blkd_tasks = false;
1522 : : if (!rnp->qsmaskinit)
1523 : : rcu_cleanup_dead_rnp(rnp);
1524 : : }
1525 : :
1526 : 621 : raw_spin_unlock_irq_rcu_node(rnp);
1527 : : raw_spin_unlock(&rcu_state.ofl_lock);
1528 : : }
1529 : 280214 : rcu_gp_slow(gp_preinit_delay); /* Races with CPU hotplug. */
1530 : :
1531 : : /*
1532 : : * Set the quiescent-state-needed bits in all the rcu_node
1533 : : * structures for all currently online CPUs in breadth-first
1534 : : * order, starting from the root rcu_node structure, relying on the
1535 : : * layout of the tree within the rcu_state.node[] array. Note that
1536 : : * other CPUs will access only the leaves of the hierarchy, thus
1537 : : * seeing that no grace period is in progress, at least until the
1538 : : * corresponding leaf node has been initialized.
1539 : : *
1540 : : * The grace period cannot complete until the initialization
1541 : : * process finishes, because this kthread handles both.
1542 : : */
1543 : 280214 : rcu_state.gp_state = RCU_GP_INIT;
1544 [ + + ]: 560428 : rcu_for_each_node_breadth_first(rnp) {
1545 : 280214 : rcu_gp_slow(gp_init_delay);
1546 : 280214 : raw_spin_lock_irqsave_rcu_node(rnp, flags);
1547 : 560428 : rdp = this_cpu_ptr(&rcu_data);
1548 : 280214 : rcu_preempt_check_blocked_tasks(rnp);
1549 : 280214 : rnp->qsmask = rnp->qsmaskinit;
1550 : 280214 : WRITE_ONCE(rnp->gp_seq, rcu_state.gp_seq);
1551 [ + - ]: 280214 : if (rnp == rdp->mynode)
1552 : 280214 : (void)__note_gp_changes(rnp, rdp);
1553 : : rcu_preempt_boost_start_gp(rnp);
1554 : : trace_rcu_grace_period_init(rcu_state.name, rnp->gp_seq,
1555 : : rnp->level, rnp->grplo,
1556 : : rnp->grphi, rnp->qsmask);
1557 : : /* Quiescent states for tasks on any now-offline CPUs. */
1558 : 280214 : mask = rnp->qsmask & ~rnp->qsmaskinitnext;
1559 : 280214 : rnp->rcu_gp_init_mask = mask;
1560 [ + - - + : 280214 : if ((mask || rnp->wait_blkd_tasks) && rcu_is_leaf_node(rnp))
# # ]
1561 : 0 : rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
1562 : : else
1563 : 280214 : raw_spin_unlock_irq_rcu_node(rnp);
1564 : 280214 : cond_resched_tasks_rcu_qs();
1565 : 280214 : WRITE_ONCE(rcu_state.gp_activity, jiffies);
1566 : : }
1567 : :
1568 : : return true;
1569 : : }
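
/*
 * Illustrative sketch (sketch_visit_breadth_first() is a hypothetical
 * helper, not a kernel macro): the breadth-first walk in rcu_gp_init() is
 * cheap because the rcu_node tree is laid out level by level in the
 * rcu_state.node[] array, so a plain array scan visits every parent before
 * any of its children.
 */
static void sketch_visit_breadth_first(struct rcu_node *nodes, int nr_nodes,
				       void (*visit)(struct rcu_node *rnp))
{
	int i;

	for (i = 0; i < nr_nodes; i++)	/* Array order is BFS order. */
		visit(&nodes[i]);
}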
1570 : :
1571 : : /*
1572 : : * Helper function for swait_event_idle_exclusive() wakeup at force-quiescent-state
1573 : : * time.
1574 : : */
1575 : : static bool rcu_gp_fqs_check_wake(int *gfp)
1576 : : {
1577 : : struct rcu_node *rnp = rcu_get_root();
1578 : :
1579 : : /* Someone like call_rcu() requested a force-quiescent-state scan. */
1580 : 1559639 : *gfp = READ_ONCE(rcu_state.gp_flags);
1581 [ + + + + ]: 1559639 : if (*gfp & RCU_GP_FLAG_FQS)
1582 : : return true;
1583 : :
1584 : : /* The current grace period has completed. */
1585 [ + + + + ]: 1375665 : if (!READ_ONCE(rnp->qsmask) && !rcu_preempt_blocked_readers_cgp(rnp))
1586 : : return true;
1587 : :
1588 : : return false;
1589 : : }
1590 : :
1591 : : /*
1592 : : * Do one round of quiescent-state forcing.
1593 : : */
1594 : 303760 : static void rcu_gp_fqs(bool first_time)
1595 : : {
1596 : : struct rcu_node *rnp = rcu_get_root();
1597 : :
1598 : 303760 : WRITE_ONCE(rcu_state.gp_activity, jiffies);
1599 : 303760 : rcu_state.n_force_qs++;
1600 [ + + ]: 303760 : if (first_time) {
1601 : : /* Collect dyntick-idle snapshots. */
1602 : 250518 : force_qs_rnp(dyntick_save_progress_counter);
1603 : : } else {
1604 : : /* Handle dyntick-idle and offline CPUs. */
1605 : 53242 : force_qs_rnp(rcu_implicit_dynticks_qs);
1606 : : }
1607 : : /* Clear flag to prevent immediate re-entry. */
1608 [ + + ]: 303760 : if (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) {
1609 : 95689 : raw_spin_lock_irq_rcu_node(rnp);
1610 : 95689 : WRITE_ONCE(rcu_state.gp_flags,
1611 : : READ_ONCE(rcu_state.gp_flags) & ~RCU_GP_FLAG_FQS);
1612 : 95689 : raw_spin_unlock_irq_rcu_node(rnp);
1613 : : }
1614 : 303760 : }
1615 : :
1616 : : /*
1617 : : * Loop doing repeated quiescent-state forcing until the grace period ends.
1618 : : */
1619 : 280213 : static void rcu_gp_fqs_loop(void)
1620 : : {
1621 : : bool first_gp_fqs;
1622 : : int gf;
1623 : : unsigned long j;
1624 : : int ret;
1625 : : struct rcu_node *rnp = rcu_get_root();
1626 : :
1627 : : first_gp_fqs = true;
1628 : : j = READ_ONCE(jiffies_till_first_fqs);
1629 : : ret = 0;
1630 : : for (;;) {
1631 [ + - ]: 583973 : if (!ret) {
1632 : 583973 : rcu_state.jiffies_force_qs = jiffies + j;
1633 [ + - ]: 583973 : WRITE_ONCE(rcu_state.jiffies_kick_kthreads,
1634 : : jiffies + (j ? 3 * j : 2));
1635 : : }
1636 : : trace_rcu_grace_period(rcu_state.name,
1637 : : READ_ONCE(rcu_state.gp_seq),
1638 : : TPS("fqswait"));
1639 : 583973 : rcu_state.gp_state = RCU_GP_WAIT_FQS;
1640 [ - + + + : 3119278 : ret = swait_event_idle_timeout_exclusive(
+ + + + ]
1641 : : rcu_state.gp_wq, rcu_gp_fqs_check_wake(&gf), j);
1642 : 583767 : rcu_state.gp_state = RCU_GP_DOING_FQS;
1643 : : /* Locking provides needed memory barriers. */
1644 : : /* If grace period done, leave loop. */
1645 [ + + ]: 583767 : if (!READ_ONCE(rnp->qsmask) &&
1646 : : !rcu_preempt_blocked_readers_cgp(rnp))
1647 : : break;
1648 : : /* If time for quiescent-state forcing, do it. */
1649 [ - + # # ]: 303760 : if (ULONG_CMP_GE(jiffies, rcu_state.jiffies_force_qs) ||
1650 : 0 : (gf & RCU_GP_FLAG_FQS)) {
1651 : : trace_rcu_grace_period(rcu_state.name,
1652 : : READ_ONCE(rcu_state.gp_seq),
1653 : : TPS("fqsstart"));
1654 : 303760 : rcu_gp_fqs(first_gp_fqs);
1655 : : first_gp_fqs = false;
1656 : : trace_rcu_grace_period(rcu_state.name,
1657 : : READ_ONCE(rcu_state.gp_seq),
1658 : : TPS("fqsend"));
1659 : 303760 : cond_resched_tasks_rcu_qs();
1660 : 303760 : WRITE_ONCE(rcu_state.gp_activity, jiffies);
1661 : : ret = 0; /* Force full wait till next FQS. */
1662 : 303760 : j = READ_ONCE(jiffies_till_next_fqs);
1663 : : } else {
1664 : : /* Deal with stray signal. */
1665 : 0 : cond_resched_tasks_rcu_qs();
1666 : 0 : WRITE_ONCE(rcu_state.gp_activity, jiffies);
1667 [ # # ]: 0 : WARN_ON(signal_pending(current));
1668 : : trace_rcu_grace_period(rcu_state.name,
1669 : : READ_ONCE(rcu_state.gp_seq),
1670 : : TPS("fqswaitsig"));
1671 : : ret = 1; /* Keep old FQS timing. */
1672 : 0 : j = jiffies;
1673 [ # # ]: 0 : if (time_after(jiffies, rcu_state.jiffies_force_qs))
1674 : : j = 1;
1675 : : else
1676 : 0 : j = rcu_state.jiffies_force_qs - j;
1677 : : }
1678 : : }
1679 : 280007 : }
1680 : :
1681 : : /*
1682 : : * Clean up after the old grace period.
1683 : : */
1684 : 280007 : static void rcu_gp_cleanup(void)
1685 : : {
1686 : : unsigned long gp_duration;
1687 : : bool needgp = false;
1688 : : unsigned long new_gp_seq;
1689 : : bool offloaded;
1690 : : struct rcu_data *rdp;
1691 : : struct rcu_node *rnp = rcu_get_root();
1692 : : struct swait_queue_head *sq;
1693 : :
1694 : 280007 : WRITE_ONCE(rcu_state.gp_activity, jiffies);
1695 : 280007 : raw_spin_lock_irq_rcu_node(rnp);
1696 : 280007 : rcu_state.gp_end = jiffies;
1697 : 280007 : gp_duration = rcu_state.gp_end - rcu_state.gp_start;
1698 [ + + ]: 280007 : if (gp_duration > rcu_state.gp_max)
1699 : 745 : rcu_state.gp_max = gp_duration;
1700 : :
1701 : : /*
1702 : : * We know the grace period is complete, but to everyone else
1703 : : * it appears to still be ongoing. But it is also the case
1704 : : * that to everyone else it looks like there is nothing that
1705 : : * they can do to advance the grace period. It is therefore
1706 : : * safe for us to drop the lock in order to mark the grace
1707 : : * period as completed in all of the rcu_node structures.
1708 : : */
1709 : 280007 : raw_spin_unlock_irq_rcu_node(rnp);
1710 : :
1711 : : /*
1712 : : * Propagate new ->gp_seq value to rcu_node structures so that
1713 : : * other CPUs don't have to wait until the start of the next grace
1714 : : * period to process their callbacks. This also avoids some nasty
1715 : : * RCU grace-period initialization races by forcing the end of
1716 : : * the current grace period to be completely recorded in all of
1717 : : * the rcu_node structures before the beginning of the next grace
1718 : : * period is recorded in any of the rcu_node structures.
1719 : : */
1720 : 280007 : new_gp_seq = rcu_state.gp_seq;
1721 : 280007 : rcu_seq_end(&new_gp_seq);
1722 [ + + ]: 560014 : rcu_for_each_node_breadth_first(rnp) {
1723 : 280007 : raw_spin_lock_irq_rcu_node(rnp);
1724 : : if (WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)))
1725 : : dump_blkd_tasks(rnp, 10);
1726 [ - + # # ]: 280007 : WARN_ON_ONCE(rnp->qsmask);
1727 : 280007 : WRITE_ONCE(rnp->gp_seq, new_gp_seq);
1728 : 560014 : rdp = this_cpu_ptr(&rcu_data);
1729 [ + - ]: 280007 : if (rnp == rdp->mynode)
1730 [ + - + - ]: 280007 : needgp = __note_gp_changes(rnp, rdp) || needgp;
1731 : : /* smp_mb() provided by prior unlock-lock pair. */
1732 [ + + + - ]: 280007 : needgp = rcu_future_gp_cleanup(rnp) || needgp;
1733 : : sq = rcu_nocb_gp_get(rnp);
1734 : 280007 : raw_spin_unlock_irq_rcu_node(rnp);
1735 : : rcu_nocb_gp_cleanup(sq);
1736 : 280007 : cond_resched_tasks_rcu_qs();
1737 : 280007 : WRITE_ONCE(rcu_state.gp_activity, jiffies);
1738 : 280007 : rcu_gp_slow(gp_cleanup_delay);
1739 : : }
1740 : : rnp = rcu_get_root();
1741 : 280007 : raw_spin_lock_irq_rcu_node(rnp); /* GP before ->gp_seq update. */
1742 : :
1743 : : /* Declare grace period done, trace first to use old GP number. */
1744 : : trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("end"));
1745 : 280007 : rcu_seq_end(&rcu_state.gp_seq);
1746 : 280007 : rcu_state.gp_state = RCU_GP_IDLE;
1747 : : /* Check for GP requests since above loop. */
1748 : 560014 : rdp = this_cpu_ptr(&rcu_data);
1749 [ + + - + ]: 280007 : if (!needgp && ULONG_CMP_LT(rnp->gp_seq, rnp->gp_seq_needed)) {
1750 : : trace_rcu_this_gp(rnp, rdp, rnp->gp_seq_needed,
1751 : : TPS("CleanupMore"));
1752 : : needgp = true;
1753 : : }
1754 : : /* Advance CBs to reduce false positives below. */
1755 : : offloaded = IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
1756 : : rcu_segcblist_is_offloaded(&rdp->cblist);
1757 [ + - + + ]: 280007 : if ((offloaded || !rcu_accelerate_cbs(rnp, rdp)) && needgp) {
1758 : : WRITE_ONCE(rcu_state.gp_flags, RCU_GP_FLAG_INIT);
1759 : 269855 : rcu_state.gp_req_activity = jiffies;
1760 : : trace_rcu_grace_period(rcu_state.name,
1761 : : READ_ONCE(rcu_state.gp_seq),
1762 : : TPS("newreq"));
1763 : : } else {
1764 : 10152 : WRITE_ONCE(rcu_state.gp_flags,
1765 : : rcu_state.gp_flags & RCU_GP_FLAG_INIT);
1766 : : }
1767 : 280007 : raw_spin_unlock_irq_rcu_node(rnp);
1768 : 280006 : }
1769 : :
1770 : : /*
1771 : : * Body of kthread that handles grace periods.
1772 : : */
1773 : 207 : static int __noreturn rcu_gp_kthread(void *unused)
1774 : : {
1775 : : rcu_bind_gp_kthread();
1776 : : for (;;) {
1777 : :
1778 : : /* Handle grace-period start. */
1779 : : for (;;) {
1780 : : trace_rcu_grace_period(rcu_state.name,
1781 : : READ_ONCE(rcu_state.gp_seq),
1782 : : TPS("reqwait"));
1783 : 280213 : rcu_state.gp_state = RCU_GP_WAIT_GPS;
1784 [ + + + + ]: 320821 : swait_event_idle_exclusive(rcu_state.gp_wq,
1785 : : READ_ONCE(rcu_state.gp_flags) &
1786 : : RCU_GP_FLAG_INIT);
1787 : 280213 : rcu_state.gp_state = RCU_GP_DONE_GPS;
1788 : : /* Locking provides needed memory barrier. */
1789 [ - + ]: 280213 : if (rcu_gp_init())
1790 : : break;
1791 : 0 : cond_resched_tasks_rcu_qs();
1792 : 0 : WRITE_ONCE(rcu_state.gp_activity, jiffies);
1793 [ # # ]: 0 : WARN_ON(signal_pending(current));
1794 : : trace_rcu_grace_period(rcu_state.name,
1795 : : READ_ONCE(rcu_state.gp_seq),
1796 : : TPS("reqwaitsig"));
1797 : : }
1798 : :
1799 : : /* Handle quiescent-state forcing. */
1800 : 280213 : rcu_gp_fqs_loop();
1801 : :
1802 : : /* Handle grace-period end. */
1803 : 280007 : rcu_state.gp_state = RCU_GP_CLEANUP;
1804 : 280007 : rcu_gp_cleanup();
1805 : 280006 : rcu_state.gp_state = RCU_GP_CLEANED;
1806 : 280006 : }
1807 : : }
1808 : :
1809 : : /*
1810 : : * Report a full set of quiescent states to the rcu_state data structure.
1811 : : * Invoke rcu_gp_kthread_wake() to awaken the grace-period kthread if
1812 : : * another grace period is required. Whether we wake the grace-period
1813 : : * kthread or it awakens itself for the next round of quiescent-state
1814 : : * forcing, that kthread will clean up after the just-completed grace
1815 : : * period. Note that the caller must hold rnp->lock, which is released
1816 : : * before return.
1817 : : */
1818 : 280007 : static void rcu_report_qs_rsp(unsigned long flags)
1819 : : __releases(rcu_get_root()->lock)
1820 : : {
1821 : : raw_lockdep_assert_held_rcu_node(rcu_get_root());
1822 [ - + # # ]: 280007 : WARN_ON_ONCE(!rcu_gp_in_progress());
1823 : 280007 : WRITE_ONCE(rcu_state.gp_flags,
1824 : : READ_ONCE(rcu_state.gp_flags) | RCU_GP_FLAG_FQS);
1825 : 280007 : raw_spin_unlock_irqrestore_rcu_node(rcu_get_root(), flags);
1826 : 280007 : rcu_gp_kthread_wake();
1827 : 280006 : }
1828 : :
1829 : : /*
1830 : : * Similar to rcu_report_qs_rdp(), for which it is a helper function.
1831 : : * Allows quiescent states for a group of CPUs to be reported at one go
1832 : : * to the specified rcu_node structure, though all the CPUs in the group
1833 : : * must be represented by the same rcu_node structure (which need not be a
1834 : : * leaf rcu_node structure, though it often will be). The gps parameter
1835 : : * is the grace-period snapshot, which means that the quiescent states
1836 : : * are valid only if rnp->gp_seq is equal to gps. That structure's lock
1837 : : * must be held upon entry, and it is released before return.
1838 : : *
1839 : : * As a special case, if mask is zero, the bit-already-cleared check is
1840 : : * disabled. This allows propagating quiescent state due to resumed tasks
1841 : : * during grace-period initialization.
1842 : : */
1843 : 1048643 : static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp,
1844 : : unsigned long gps, unsigned long flags)
1845 : : __releases(rnp->lock)
1846 : : {
1847 : : unsigned long oldmask = 0;
1848 : : struct rcu_node *rnp_c;
1849 : :
1850 : : raw_lockdep_assert_held_rcu_node(rnp);
1851 : :
1852 : : /* Walk up the rcu_node hierarchy. */
1853 : : for (;;) {
1854 [ - + # # : 1048643 : if ((!(rnp->qsmask & mask) && mask) || rnp->gp_seq != gps) {
- + ]
1855 : :
1856 : : /*
1857 : : * Our bit has already been cleared, or the
1858 : : * relevant grace period is already over, so done.
1859 : : */
1860 : 0 : raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1861 : 0 : return;
1862 : : }
1863 [ - + # # ]: 1048643 : WARN_ON_ONCE(oldmask); /* Any child must be all zeroed! */
1864 : : WARN_ON_ONCE(!rcu_is_leaf_node(rnp) &&
1865 : : rcu_preempt_blocked_readers_cgp(rnp));
1866 : 1048643 : rnp->qsmask &= ~mask;
1867 : : trace_rcu_quiescent_state_report(rcu_state.name, rnp->gp_seq,
1868 : : mask, rnp->qsmask, rnp->level,
1869 : : rnp->grplo, rnp->grphi,
1870 : : !!rnp->gp_tasks);
1871 [ + + ]: 1048643 : if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
1872 : :
1873 : : /* Other bits still set at this level, so done. */
1874 : 768636 : raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1875 : 768635 : return;
1876 : : }
1877 : 280007 : rnp->completedqs = rnp->gp_seq;
1878 : 280007 : mask = rnp->grpmask;
1879 [ - + ]: 280007 : if (rnp->parent == NULL) {
1880 : :
1881 : : /* No more levels. Exit loop holding root lock. */
1882 : :
1883 : : break;
1884 : : }
1885 : 0 : raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1886 : : rnp_c = rnp;
1887 : 0 : rnp = rnp->parent;
1888 : 0 : raw_spin_lock_irqsave_rcu_node(rnp, flags);
1889 : 0 : oldmask = rnp_c->qsmask;
1890 : 0 : }
1891 : :
1892 : : /*
1893 : : * Get here if we are the last CPU to pass through a quiescent
1894 : : * state for this grace period. Invoke rcu_report_qs_rsp()
1895 : : * to clean up and start the next grace period if one is needed.
1896 : : */
1897 : 280007 : rcu_report_qs_rsp(flags); /* releases rnp->lock. */
1898 : : }
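
/*
 * Illustrative sketch of the upward walk performed by rcu_report_qs_rnp()
 * (hypothetical helper; locking, tracing, and the blocked-readers checks
 * are omitted): clear this level's bit, stop while siblings are still
 * pending, and otherwise carry our group bit one level up toward the root.
 */
static void sketch_report_qs_up(struct rcu_node *rnp, unsigned long mask)
{
	for (; rnp; rnp = rnp->parent) {
		rnp->qsmask &= ~mask;	/* This CPU or child is done. */
		if (rnp->qsmask)
			return;		/* Siblings still owe quiescent states. */
		mask = rnp->grpmask;	/* Our bit within the parent's mask. */
	}
	/* Root level emptied: the grace period can now be ended. */
}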
1899 : :
1900 : : /*
1901 : : * Record a quiescent state for all tasks that were previously queued
1902 : : * on the specified rcu_node structure and that were blocking the current
1903 : : * RCU grace period. The caller must hold the corresponding rnp->lock with
1904 : : * irqs disabled, and this lock is released upon return, but irqs remain
1905 : : * disabled.
1906 : : */
1907 : : static void __maybe_unused
1908 : : rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
1909 : : __releases(rnp->lock)
1910 : : {
1911 : : unsigned long gps;
1912 : : unsigned long mask;
1913 : : struct rcu_node *rnp_p;
1914 : :
1915 : : raw_lockdep_assert_held_rcu_node(rnp);
1916 : : if (WARN_ON_ONCE(!IS_ENABLED(CONFIG_PREEMPTION)) ||
1917 : : WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)) ||
1918 : : rnp->qsmask != 0) {
1919 : : raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1920 : : return; /* Still need more quiescent states! */
1921 : : }
1922 : :
1923 : : rnp->completedqs = rnp->gp_seq;
1924 : : rnp_p = rnp->parent;
1925 : : if (rnp_p == NULL) {
1926 : : /*
1927 : : * Only one rcu_node structure in the tree, so don't
1928 : : * try to report up to its nonexistent parent!
1929 : : */
1930 : : rcu_report_qs_rsp(flags);
1931 : : return;
1932 : : }
1933 : :
1934 : : /* Report up the rest of the hierarchy, tracking current ->gp_seq. */
1935 : : gps = rnp->gp_seq;
1936 : : mask = rnp->grpmask;
1937 : : raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
1938 : : raw_spin_lock_rcu_node(rnp_p); /* irqs already disabled. */
1939 : : rcu_report_qs_rnp(mask, rnp_p, gps, flags);
1940 : : }
1941 : :
1942 : : /*
1943 : : * Record a quiescent state for the specified CPU to that CPU's rcu_data
1944 : : * structure. This must be called from the specified CPU.
1945 : : */
1946 : : static void
1947 : 903393 : rcu_report_qs_rdp(int cpu, struct rcu_data *rdp)
1948 : : {
1949 : : unsigned long flags;
1950 : : unsigned long mask;
1951 : : bool needwake = false;
1952 : : const bool offloaded = IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
1953 : : rcu_segcblist_is_offloaded(&rdp->cblist);
1954 : : struct rcu_node *rnp;
1955 : :
1956 : 903393 : rnp = rdp->mynode;
1957 : 903393 : raw_spin_lock_irqsave_rcu_node(rnp, flags);
1958 [ + - + + : 1865882 : if (rdp->cpu_no_qs.b.norm || rdp->gp_seq != rnp->gp_seq ||
- + ]
1959 : 927630 : rdp->gpwrap) {
1960 : :
1961 : : /*
1962 : : * The grace period in which this quiescent state was
1963 : : * recorded has ended, so don't report it upwards.
1964 : : * We will instead need a new quiescent state that lies
1965 : : * within the current grace period.
1966 : : */
1967 : 10622 : rdp->cpu_no_qs.b.norm = true; /* need qs for new gp. */
1968 : 10622 : raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1969 : 948861 : return;
1970 : : }
1971 : 927630 : mask = rdp->grpmask;
1972 : 927630 : rdp->core_needs_qs = false;
1973 [ + + ]: 927630 : if ((rnp->qsmask & mask) == 0) {
1974 : 15318 : raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1975 : : } else {
1976 : : /*
1977 : : * This GP can't end until cpu checks in, so all of our
1978 : : * callbacks can be processed during the next GP.
1979 : : */
1980 : : if (!offloaded)
1981 : 912312 : needwake = rcu_accelerate_cbs(rnp, rdp);
1982 : :
1983 : 912312 : rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
1984 : : /* ^^^ Released rnp->lock */
1985 [ - + ]: 912305 : if (needwake)
1986 : 0 : rcu_gp_kthread_wake();
1987 : : }
1988 : : }
1989 : :
1990 : : /*
1991 : : * Check to see if there is a new grace period of which this CPU
1992 : : * is not yet aware, and if so, set up local rcu_data state for it.
1993 : : * Otherwise, see if this CPU has just passed through its first
1994 : : * quiescent state for this grace period, and record that fact if so.
1995 : : */
1996 : : static void
1997 : 3726724 : rcu_check_quiescent_state(struct rcu_data *rdp)
1998 : : {
1999 : : /* Check for grace-period ends and beginnings. */
2000 : 3726724 : note_gp_changes(rdp);
2001 : :
2002 : : /*
2003 : : * Does this CPU still need to do its part for current grace period?
2004 : : * If no, return and let the other CPUs do their part as well.
2005 : : */
2006 [ + + ]: 3852588 : if (!rdp->core_needs_qs)
2007 : : return;
2008 : :
2009 : : /*
2010 : : * Was there a quiescent state since the beginning of the grace
2011 : : * period? If no, then exit and wait for the next call.
2012 : : */
2013 [ + + ]: 2259354 : if (rdp->cpu_no_qs.b.norm)
2014 : : return;
2015 : :
2016 : : /*
2017 : : * Tell RCU we are done (but rcu_report_qs_rdp() will be the
2018 : : * judge of that).
2019 : : */
2020 : 903118 : rcu_report_qs_rdp(rdp->cpu, rdp);
2021 : : }
2022 : :
2023 : : /*
2024 : : * Near the end of the offline process. Trace the fact that this CPU
2025 : : * is going offline.
2026 : : */
2027 : 0 : int rcutree_dying_cpu(unsigned int cpu)
2028 : : {
2029 : : bool blkd;
2030 : 0 : struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
2031 : : struct rcu_node *rnp = rdp->mynode;
2032 : :
2033 : : if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
2034 : : return 0;
2035 : :
2036 : : blkd = !!(rnp->qsmask & rdp->grpmask);
2037 : : trace_rcu_grace_period(rcu_state.name, rnp->gp_seq,
2038 : : blkd ? TPS("cpuofl") : TPS("cpuofl-bgp"));
2039 : : return 0;
2040 : : }
2041 : :
2042 : : /*
2043 : : * All CPUs for the specified rcu_node structure have gone offline,
2044 : : * and all tasks that were preempted within an RCU read-side critical
2045 : : * section while running on one of those CPUs have since exited their RCU
2046 : : * read-side critical section. Some other CPU is reporting this fact with
2047 : : * the specified rcu_node structure's ->lock held and interrupts disabled.
2048 : : * This function therefore goes up the tree of rcu_node structures,
2049 : : * clearing the corresponding bits in the ->qsmaskinit fields. Note that
2050 : : * the leaf rcu_node structure's ->qsmaskinit field has already been
2051 : : * updated.
2052 : : *
2053 : : * This function does check that the specified rcu_node structure has
2054 : : * all CPUs offline and no blocked tasks, so it is OK to invoke it
2055 : : * prematurely. That said, invoking it after the fact will cost you
2056 : : * a needless lock acquisition. So once it has done its work, don't
2057 : : * invoke it again.
2058 : : */
2059 : : static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf)
2060 : : {
2061 : : long mask;
2062 : : struct rcu_node *rnp = rnp_leaf;
2063 : :
2064 : : raw_lockdep_assert_held_rcu_node(rnp_leaf);
2065 : : if (!IS_ENABLED(CONFIG_HOTPLUG_CPU) ||
2066 : : WARN_ON_ONCE(rnp_leaf->qsmaskinit) ||
2067 : : WARN_ON_ONCE(rcu_preempt_has_tasks(rnp_leaf)))
2068 : : return;
2069 : : for (;;) {
2070 : : mask = rnp->grpmask;
2071 : : rnp = rnp->parent;
2072 : : if (!rnp)
2073 : : break;
2074 : : raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
2075 : : rnp->qsmaskinit &= ~mask;
2076 : : /* Between grace periods, so better already be zero! */
2077 : : WARN_ON_ONCE(rnp->qsmask);
2078 : : if (rnp->qsmaskinit) {
2079 : : raw_spin_unlock_rcu_node(rnp);
2080 : : /* irqs remain disabled. */
2081 : : return;
2082 : : }
2083 : : raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
2084 : : }
2085 : : }
2086 : :
2087 : : /*
2088 : : * The CPU has been completely removed, and some other CPU is reporting
2089 : : * this fact from process context. Do the remainder of the cleanup.
2090 : : * There can only be one CPU hotplug operation at a time, so no need for
2091 : : * explicit locking.
2092 : : */
2093 : 0 : int rcutree_dead_cpu(unsigned int cpu)
2094 : : {
2095 : 0 : struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
2096 : : struct rcu_node *rnp = rdp->mynode; /* Outgoing CPU's rdp & rnp. */
2097 : :
2098 : : if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
2099 : : return 0;
2100 : :
2101 : : /* Adjust any no-longer-needed kthreads. */
2102 : : rcu_boost_kthread_setaffinity(rnp, -1);
2103 : : /* Do any needed no-CB deferred wakeups from this CPU. */
2104 : : do_nocb_deferred_wakeup(per_cpu_ptr(&rcu_data, cpu));
2105 : : return 0;
2106 : : }
2107 : :
2108 : : /*
2109 : : * Invoke any RCU callbacks that have made it to the end of their grace
2110 : : * period. Throttle as specified by rdp->blimit.
2111 : : */
2112 : 2767212 : static void rcu_do_batch(struct rcu_data *rdp)
2113 : : {
2114 : : unsigned long flags;
2115 : : const bool offloaded = IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
2116 : : rcu_segcblist_is_offloaded(&rdp->cblist);
2117 : : struct rcu_head *rhp;
2118 : 2767212 : struct rcu_cblist rcl = RCU_CBLIST_INITIALIZER(rcl);
2119 : : long bl, count;
2120 : : long pending, tlimit = 0;
2121 : :
2122 : : /* If no callbacks are ready, just return. */
2123 [ - + ]: 2767212 : if (!rcu_segcblist_ready_cbs(&rdp->cblist)) {
2124 : : trace_rcu_batch_start(rcu_state.name,
2125 : : rcu_segcblist_n_lazy_cbs(&rdp->cblist),
2126 : : rcu_segcblist_n_cbs(&rdp->cblist), 0);
2127 : : trace_rcu_batch_end(rcu_state.name, 0,
2128 : : !rcu_segcblist_empty(&rdp->cblist),
2129 : : need_resched(), is_idle_task(current),
2130 : : rcu_is_callbacks_kthread());
2131 : 2767224 : return;
2132 : : }
2133 : :
2134 : : /*
2135 : : * Extract the list of ready callbacks, disabling interrupts to prevent
2136 : : * races with call_rcu() from interrupt handlers. Leave the
2137 : : * callback counts, as rcu_barrier() needs to be conservative.
2138 : : */
2139 : 2767059 : local_irq_save(flags);
2140 : : rcu_nocb_lock(rdp);
2141 [ - + # # ]: 5534584 : WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));
2142 : : pending = rcu_segcblist_n_cbs(&rdp->cblist);
2143 : 2767238 : bl = max(rdp->blimit, pending >> rcu_divisor);
2144 [ - + ]: 2767238 : if (unlikely(bl > 100))
2145 : 0 : tlimit = local_clock() + rcu_resched_ns;
2146 : : trace_rcu_batch_start(rcu_state.name,
2147 : : rcu_segcblist_n_lazy_cbs(&rdp->cblist),
2148 : : rcu_segcblist_n_cbs(&rdp->cblist), bl);
2149 : 2767238 : rcu_segcblist_extract_done_cbs(&rdp->cblist, &rcl);
2150 : : if (offloaded)
2151 : : rdp->qlen_last_fqs_check = rcu_segcblist_n_cbs(&rdp->cblist);
2152 : 2766428 : rcu_nocb_unlock_irqrestore(rdp, flags);
2153 : :
2154 : : /* Invoke callbacks. */
2155 : 2767238 : rhp = rcu_cblist_dequeue(&rcl);
2156 [ + + ]: 28623800 : for (; rhp; rhp = rcu_cblist_dequeue(&rcl)) {
2157 : : debug_rcu_head_unqueue(rhp);
2158 [ + + ]: 27968597 : if (__rcu_reclaim(rcu_state.name, rhp))
2159 : : rcu_cblist_dequeued_lazy(&rcl);
2160 : : /*
2161 : : * Stop only if limit reached and CPU has something to do.
2162 : : * Note: The rcl structure counts down from zero.
2163 : : */
2164 [ + + + + ]: 33436602 : if (-rcl.len >= bl && !offloaded &&
2165 [ + + ]: 5401568 : (need_resched() ||
2166 : 5401568 : (!is_idle_task(current) && !rcu_is_callbacks_kthread())))
2167 : : break;
2168 [ - + ]: 25856997 : if (unlikely(tlimit)) {
2169 : : /* only call local_clock() every 32 callbacks */
2170 [ # # # # ]: 0 : if (likely((-rcl.len & 31) || local_clock() < tlimit))
2171 : 0 : continue;
2172 : : /* Exceeded the time limit, so leave. */
2173 : : break;
2174 : : }
2175 : : if (offloaded) {
2176 : : WARN_ON_ONCE(in_serving_softirq());
2177 : : local_bh_enable();
2178 : : lockdep_assert_irqs_enabled();
2179 : : cond_resched_tasks_rcu_qs();
2180 : : lockdep_assert_irqs_enabled();
2181 : : local_bh_disable();
2182 : : }
2183 : : }
2184 : :
2185 : 2767097 : local_irq_save(flags);
2186 : : rcu_nocb_lock(rdp);
2187 : : count = -rcl.len;
2188 : : trace_rcu_batch_end(rcu_state.name, count, !!rcl.head, need_resched(),
2189 : : is_idle_task(current), rcu_is_callbacks_kthread());
2190 : :
2191 : : /* Update counts and requeue any remaining callbacks. */
2192 : 2766375 : rcu_segcblist_insert_done_cbs(&rdp->cblist, &rcl);
2193 : 2766799 : smp_mb(); /* List handling before counting for rcu_barrier(). */
2194 : 2767183 : rcu_segcblist_insert_count(&rdp->cblist, &rcl);
2195 : :
2196 : : /* Reinstate batch limit if we have worked down the excess. */
2197 : : count = rcu_segcblist_n_cbs(&rdp->cblist);
2198 [ - + # # ]: 2766797 : if (rdp->blimit >= DEFAULT_MAX_RCU_BLIMIT && count <= qlowmark)
2199 : 0 : rdp->blimit = blimit;
2200 : :
2201 : : /* Reset ->qlen_last_fqs_check trigger if enough CBs have drained. */
2202 [ + + - + ]: 2766797 : if (count == 0 && rdp->qlen_last_fqs_check != 0) {
2203 : 0 : rdp->qlen_last_fqs_check = 0;
2204 : 0 : rdp->n_force_qs_snap = rcu_state.n_force_qs;
2205 [ - + ]: 2766797 : } else if (count < rdp->qlen_last_fqs_check - qhimark)
2206 : 0 : rdp->qlen_last_fqs_check = count;
2207 : :
2208 : : /*
2209 : : * The following usually indicates a double call_rcu(). To track
2210 : : * this down, try building with CONFIG_DEBUG_OBJECTS_RCU_HEAD=y.
2211 : : */
2212 [ + + + + : 2833520 : WARN_ON_ONCE(count == 0 && !rcu_segcblist_empty(&rdp->cblist));
- + # # ]
2213 [ + + + + : 5465440 : WARN_ON_ONCE(!IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
- + # # ]
2214 : : count != 0 && rcu_segcblist_empty(&rdp->cblist));
2215 : :
2216 : 2765167 : rcu_nocb_unlock_irqrestore(rdp, flags);
2217 : :
2218 : : /* Re-invoke RCU core processing if there are callbacks remaining. */
2219 [ + + ]: 2767273 : if (!offloaded && rcu_segcblist_ready_cbs(&rdp->cblist))
2220 : 2083317 : invoke_rcu_core();
2221 : : }
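
/*
 * Illustrative sketch of the callback-batch sizing used by rcu_do_batch()
 * (sketch_batch_limit() is hypothetical; blimit, rcu_divisor, and
 * rcu_resched_ns are the real module parameters it consults): the batch
 * grows with the backlog but never drops below the per-CPU floor, and very
 * large batches additionally get a wall-clock budget that is checked only
 * every 32 callbacks.
 */
static long sketch_batch_limit(long floor, long pending, int divisor)
{
	long bl = pending >> divisor;	/* Scale with the backlog... */

	return bl > floor ? bl : floor;	/* ...but keep the configured floor. */
}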
2222 : :
2223 : : /*
2224 : : * This function is invoked from each scheduling-clock interrupt,
2225 : : * and checks to see if this CPU is in a non-context-switch quiescent
2226 : : * state, for example, user mode or idle loop. It also schedules RCU
2227 : : * core processing. If the current grace period has gone on too long,
2228 : : * it will ask the scheduler to manufacture a context switch for the sole
2229 : : * purpose of providing the needed quiescent state.
2230 : : */
2231 : 2682010 : void rcu_sched_clock_irq(int user)
2232 : : {
2233 : 2682010 : trace_rcu_utilization(TPS("Start scheduler-tick"));
2234 : 5560428 : raw_cpu_inc(rcu_data.ticks_this_gp);
2235 : : /* The load-acquire pairs with the store-release setting to true. */
2236 [ + + ]: 8340642 : if (smp_load_acquire(this_cpu_ptr(&rcu_data.rcu_urgent_qs))) {
2237 : : /* Idle and userspace execution already are quiescent states. */
2238 [ + - + - ]: 20895 : if (!rcu_is_cpu_rrupt_from_idle() && !user) {
2239 : 20896 : set_tsk_need_resched(current);
2240 : : set_preempt_need_resched();
2241 : : }
2242 : 41792 : __this_cpu_write(rcu_data.rcu_urgent_qs, false);
2243 : : }
2244 : 2724583 : rcu_flavor_sched_clock_irq(user);
2245 [ + + ]: 2379131 : if (rcu_pending())
2246 : 1664208 : invoke_rcu_core();
2247 : :
2248 : 2932328 : trace_rcu_utilization(TPS("End scheduler-tick"));
2249 : 2746474 : }
2250 : :
2251 : : /*
2252 : : * Scan the leaf rcu_node structures. For each structure on which all
2253 : : * CPUs have reported a quiescent state and on which there are tasks
2254 : : * blocking the current grace period, initiate RCU priority boosting.
2255 : : * Otherwise, invoke the specified function to check dyntick state for
2256 : : * each CPU that has not yet reported a quiescent state.
2257 : : */
2258 : 303760 : static void force_qs_rnp(int (*f)(struct rcu_data *rdp))
2259 : : {
2260 : : int cpu;
2261 : : unsigned long flags;
2262 : : unsigned long mask;
2263 : : struct rcu_node *rnp;
2264 : :
2265 [ + + ]: 607520 : rcu_for_each_leaf_node(rnp) {
2266 : 303760 : cond_resched_tasks_rcu_qs();
2267 : : mask = 0;
2268 : 303760 : raw_spin_lock_irqsave_rcu_node(rnp, flags);
2269 [ + + ]: 303760 : if (rnp->qsmask == 0) {
2270 : : if (!IS_ENABLED(CONFIG_PREEMPTION) ||
2271 : : rcu_preempt_blocked_readers_cgp(rnp)) {
2272 : : /*
2273 : : * No point in scanning bits because they
2274 : : * are all zero. But we might need to
2275 : : * priority-boost blocked readers.
2276 : : */
2277 : : rcu_initiate_boost(rnp, flags);
2278 : : /* rcu_initiate_boost() releases rnp->lock */
2279 : 523 : continue;
2280 : : }
2281 : : raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2282 : : continue;
2283 : : }
2284 [ + + ]: 1516185 : for_each_leaf_node_possible_cpu(rnp, cpu) {
2285 : 1212948 : unsigned long bit = leaf_node_cpu_bit(rnp, cpu);
2286 [ + + ]: 1212948 : if ((rnp->qsmask & bit) != 0) {
2287 [ + + ]: 470614 : if (f(per_cpu_ptr(&rcu_data, cpu)))
2288 : 207101 : mask |= bit;
2289 : : }
2290 : : }
2291 [ + + ]: 303237 : if (mask != 0) {
2292 : : /* Idle/offline CPUs, report (releases rnp->lock). */
2293 : 136330 : rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
2294 : : } else {
2295 : : /* Nothing to do here, so just drop the lock. */
2296 : 166907 : raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2297 : : }
2298 : : }
2299 : 303760 : }
2300 : :
2301 : : /*
2302 : : * Force quiescent states on reluctant CPUs, and also detect which
2303 : : * CPUs are in dyntick-idle mode.
2304 : : */
2305 : 0 : void rcu_force_quiescent_state(void)
2306 : : {
2307 : : unsigned long flags;
2308 : : bool ret;
2309 : : struct rcu_node *rnp;
2310 : : struct rcu_node *rnp_old = NULL;
2311 : :
2312 : : /* Funnel through hierarchy to reduce memory contention. */
2313 : 0 : rnp = __this_cpu_read(rcu_data.mynode);
2314 [ # # ]: 0 : for (; rnp != NULL; rnp = rnp->parent) {
2315 [ # # # # ]: 0 : ret = (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) ||
2316 : 0 : !raw_spin_trylock(&rnp->fqslock);
2317 [ # # ]: 0 : if (rnp_old != NULL)
2318 : : raw_spin_unlock(&rnp_old->fqslock);
2319 [ # # ]: 0 : if (ret)
2320 : : return;
2321 : : rnp_old = rnp;
2322 : : }
2323 : : /* rnp_old == rcu_get_root(), rnp == NULL. */
2324 : :
2325 : : /* Reached the root of the rcu_node tree, acquire lock. */
2326 : 0 : raw_spin_lock_irqsave_rcu_node(rnp_old, flags);
2327 : : raw_spin_unlock(&rnp_old->fqslock);
2328 [ # # ]: 0 : if (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) {
2329 : 0 : raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags);
2330 : 0 : return; /* Someone beat us to it. */
2331 : : }
2332 : 0 : WRITE_ONCE(rcu_state.gp_flags,
2333 : : READ_ONCE(rcu_state.gp_flags) | RCU_GP_FLAG_FQS);
2334 : 0 : raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags);
2335 : 0 : rcu_gp_kthread_wake();
2336 : : }
2337 : : EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
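
/*
 * Illustrative sketch of the funnel locking used by
 * rcu_force_quiescent_state() (sketch_funnel_to_root() is hypothetical, and
 * the real code also checks RCU_GP_FLAG_FQS at each level): contenders
 * trylock their way up the rcu_node tree, and whoever loses a trylock simply
 * drops out, since some other CPU is already funneling toward the root.
 */
static bool sketch_funnel_to_root(struct rcu_node *rnp)
{
	struct rcu_node *held = NULL;

	for (; rnp; rnp = rnp->parent) {
		if (!raw_spin_trylock(&rnp->fqslock)) {
			if (held)
				raw_spin_unlock(&held->fqslock);
			return false;	/* Someone else will reach the root. */
		}
		if (held)
			raw_spin_unlock(&held->fqslock);
		held = rnp;		/* Hold only the most recent level. */
	}
	if (held)
		raw_spin_unlock(&held->fqslock);
	return true;			/* We made it to the root. */
}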
2338 : :
2339 : : /* Perform RCU core processing work for the current CPU. */
2340 : 3810365 : static __latent_entropy void rcu_core(void)
2341 : : {
2342 : : unsigned long flags;
2343 : 7620730 : struct rcu_data *rdp = raw_cpu_ptr(&rcu_data);
2344 : 3810365 : struct rcu_node *rnp = rdp->mynode;
2345 : : const bool offloaded = IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
2346 : : rcu_segcblist_is_offloaded(&rdp->cblist);
2347 : :
2348 [ + + ]: 7620730 : if (cpu_is_offline(smp_processor_id()))
2349 : 3956939 : return;
2350 : 3675581 : trace_rcu_utilization(TPS("Start RCU core"));
2351 [ - + # # ]: 3693167 : WARN_ON_ONCE(!rdp->beenonline);
2352 : :
2353 : : /* Report any deferred quiescent states if preemption enabled. */
2354 : : if (!(preempt_count() & PREEMPT_MASK)) {
2355 : : rcu_preempt_deferred_qs(current);
2356 : : } else if (rcu_preempt_need_deferred_qs(current)) {
2357 : : set_tsk_need_resched(current);
2358 : : set_preempt_need_resched();
2359 : : }
2360 : :
2361 : : /* Update RCU state based on any recent quiescent states. */
2362 : 3795081 : rcu_check_quiescent_state(rdp);
2363 : :
2364 : : /* No grace period and unregistered callbacks? */
2365 [ + + + + ]: 3880186 : if (!rcu_gp_in_progress() &&
2366 : : rcu_segcblist_is_enabled(&rdp->cblist) && !offloaded) {
2367 : 26605 : local_irq_save(flags);
2368 [ + + ]: 26638 : if (!rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL))
2369 : 7906 : rcu_accelerate_cbs_unlocked(rnp, rdp);
2370 [ - + ]: 26636 : local_irq_restore(flags);
2371 : : }
2372 : :
2373 : : rcu_check_gp_start_stall(rnp, rdp, rcu_jiffies_till_stall_check());
2374 : :
2375 : : /* If there are callbacks ready, invoke them. */
2376 [ + + + + ]: 6621658 : if (!offloaded && rcu_segcblist_ready_cbs(&rdp->cblist) &&
2377 : 2767071 : likely(READ_ONCE(rcu_scheduler_fully_active)))
2378 : 2766935 : rcu_do_batch(rdp);
2379 : :
2380 : : /* Do any needed deferred wakeups of rcuo kthreads. */
2381 : : do_nocb_deferred_wakeup(rdp);
2382 : 3851176 : trace_rcu_utilization(TPS("End RCU core"));
2383 : : }
2384 : :
2385 : 3848354 : static void rcu_core_si(struct softirq_action *h)
2386 : : {
2387 : 3848354 : rcu_core();
2388 : 3847568 : }
2389 : :
2390 : 0 : static void rcu_wake_cond(struct task_struct *t, int status)
2391 : : {
2392 : : /*
2393 : : * If the thread is yielding, only wake it when this
2394 : : * is invoked from idle.
2395 : : */
2396 [ # # # # : 0 : if (t && (status != RCU_KTHREAD_YIELDING || is_idle_task(current)))
# # ]
2397 : 0 : wake_up_process(t);
2398 : 0 : }
2399 : :
2400 : 0 : static void invoke_rcu_core_kthread(void)
2401 : : {
2402 : : struct task_struct *t;
2403 : : unsigned long flags;
2404 : :
2405 : 0 : local_irq_save(flags);
2406 : 0 : __this_cpu_write(rcu_data.rcu_cpu_has_work, 1);
2407 : 0 : t = __this_cpu_read(rcu_data.rcu_cpu_kthread_task);
2408 [ # # # # ]: 0 : if (t != NULL && t != current)
2409 : 0 : rcu_wake_cond(t, __this_cpu_read(rcu_data.rcu_cpu_kthread_status));
2410 [ # # ]: 0 : local_irq_restore(flags);
2411 : 0 : }
2412 : :
2413 : : /*
2414 : : * Wake up this CPU's rcuc kthread to do RCU core processing.
2415 : : */
2416 : 3795721 : static void invoke_rcu_core(void)
2417 : : {
2418 [ + - ]: 7591442 : if (!cpu_online(smp_processor_id()))
2419 : 3696975 : return;
2420 [ + - ]: 3812468 : if (use_softirq)
2421 : 3812468 : raise_softirq(RCU_SOFTIRQ);
2422 : : else
2423 : 0 : invoke_rcu_core_kthread();
2424 : : }
2425 : :
2426 : 0 : static void rcu_cpu_kthread_park(unsigned int cpu)
2427 : : {
2428 : 0 : per_cpu(rcu_data.rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
2429 : 0 : }
2430 : :
2431 : 0 : static int rcu_cpu_kthread_should_run(unsigned int cpu)
2432 : : {
2433 : 0 : return __this_cpu_read(rcu_data.rcu_cpu_has_work);
2434 : : }
2435 : :
2436 : : /*
2437 : : * Per-CPU kernel thread that invokes RCU callbacks. This replaces
2438 : : * the RCU softirq used in configurations of RCU that do not support RCU
2439 : : * priority boosting.
2440 : : */
2441 : 0 : static void rcu_cpu_kthread(unsigned int cpu)
2442 : : {
2443 : 0 : unsigned int *statusp = this_cpu_ptr(&rcu_data.rcu_cpu_kthread_status);
2444 : 0 : char work, *workp = this_cpu_ptr(&rcu_data.rcu_cpu_has_work);
2445 : : int spincnt;
2446 : :
2447 [ # # ]: 0 : for (spincnt = 0; spincnt < 10; spincnt++) {
2448 : 0 : trace_rcu_utilization(TPS("Start CPU kthread@rcu_wait"));
2449 : : local_bh_disable();
2450 : 0 : *statusp = RCU_KTHREAD_RUNNING;
2451 : 0 : local_irq_disable();
2452 : 0 : work = *workp;
2453 : 0 : *workp = 0;
2454 : 0 : local_irq_enable();
2455 [ # # ]: 0 : if (work)
2456 : 0 : rcu_core();
2457 : : local_bh_enable();
2458 [ # # ]: 0 : if (*workp == 0) {
2459 : 0 : trace_rcu_utilization(TPS("End CPU kthread@rcu_wait"));
2460 : 0 : *statusp = RCU_KTHREAD_WAITING;
2461 : 0 : return;
2462 : : }
2463 : : }
2464 : 0 : *statusp = RCU_KTHREAD_YIELDING;
2465 : 0 : trace_rcu_utilization(TPS("Start CPU kthread@rcu_yield"));
2466 : 0 : schedule_timeout_interruptible(2);
2467 : 0 : trace_rcu_utilization(TPS("End CPU kthread@rcu_yield"));
2468 : 0 : *statusp = RCU_KTHREAD_WAITING;
2469 : : }
2470 : :
2471 : : static struct smp_hotplug_thread rcu_cpu_thread_spec = {
2472 : : .store = &rcu_data.rcu_cpu_kthread_task,
2473 : : .thread_should_run = rcu_cpu_kthread_should_run,
2474 : : .thread_fn = rcu_cpu_kthread,
2475 : : .thread_comm = "rcuc/%u",
2476 : : .setup = rcu_cpu_kthread_setup,
2477 : : .park = rcu_cpu_kthread_park,
2478 : : };
2479 : :
2480 : : /*
2481 : : * Spawn per-CPU RCU core processing kthreads.
2482 : : */
2483 : 207 : static int __init rcu_spawn_core_kthreads(void)
2484 : : {
2485 : : int cpu;
2486 : :
2487 [ + + ]: 1242 : for_each_possible_cpu(cpu)
2488 : 828 : per_cpu(rcu_data.rcu_cpu_has_work, cpu) = 0;
2489 [ - + ]: 207 : if (!IS_ENABLED(CONFIG_RCU_BOOST) && use_softirq)
2490 : : return 0;
2491 [ # # # # ]: 0 : WARN_ONCE(smpboot_register_percpu_thread(&rcu_cpu_thread_spec),
2492 : : "%s: Could not start rcuc kthread, OOM is now expected behavior\n", __func__);
2493 : : return 0;
2494 : : }
2495 : : early_initcall(rcu_spawn_core_kthreads);
2496 : :
2497 : : /*
2498 : : * Handle any core-RCU processing required by a call_rcu() invocation.
2499 : : */
2500 : 27990358 : static void __call_rcu_core(struct rcu_data *rdp, struct rcu_head *head,
2501 : : unsigned long flags)
2502 : : {
2503 : : /*
2504 : : * If called from an extended quiescent state, invoke the RCU
2505 : : * core in order to force a re-evaluation of RCU's idleness.
2506 : : */
2507 [ - + ]: 27992404 : if (!rcu_is_watching())
2508 : 0 : invoke_rcu_core();
2509 : :
2510 : : /* If interrupts were disabled or CPU offline, don't invoke RCU core. */
2511 [ + + + + ]: 83044980 : if (irqs_disabled_flags(flags) || cpu_is_offline(smp_processor_id()))
2512 : 27979126 : return;
2513 : :
2514 : : /*
2515 : : * Force the grace period if too many callbacks or too long waiting.
2516 : : * Enforce hysteresis, and don't invoke rcu_force_quiescent_state()
2517 : : * if some other CPU has recently done so. Also, don't bother
2518 : : * invoking rcu_force_quiescent_state() if the newly enqueued callback
2519 : : * is the only one waiting for a grace period to complete.
2520 : : */
2521 [ - + ]: 27533742 : if (unlikely(rcu_segcblist_n_cbs(&rdp->cblist) >
2522 : : rdp->qlen_last_fqs_check + qhimark)) {
2523 : :
2524 : : /* Are we ignoring a completed grace period? */
2525 : 0 : note_gp_changes(rdp);
2526 : :
2527 : : /* Start a new grace period if one not already started. */
2528 [ # # ]: 0 : if (!rcu_gp_in_progress()) {
2529 : 0 : rcu_accelerate_cbs_unlocked(rdp->mynode, rdp);
2530 : : } else {
2531 : : /* Give the grace period a kick. */
2532 : 0 : rdp->blimit = DEFAULT_MAX_RCU_BLIMIT;
2533 [ # # # # ]: 0 : if (rcu_state.n_force_qs == rdp->n_force_qs_snap &&
2534 : 0 : rcu_segcblist_first_pend_cb(&rdp->cblist) != head)
2535 : 0 : rcu_force_quiescent_state();
2536 : 0 : rdp->n_force_qs_snap = rcu_state.n_force_qs;
2537 : 0 : rdp->qlen_last_fqs_check = rcu_segcblist_n_cbs(&rdp->cblist);
2538 : : }
2539 : : }
2540 : : }
2541 : :
2542 : : /*
2543 : : * RCU callback function to leak a callback.
2544 : : */
2545 : : static void rcu_leak_callback(struct rcu_head *rhp)
2546 : : {
2547 : : }
2548 : :
2549 : : /*
2550 : : * Helper function for call_rcu() and friends, which queues the specified
2551 : : * callback on the current CPU's rcu_data structure. The lazy argument
2552 : : * indicates a callback that does nothing but free memory, as queued by
2553 : : * kfree_call_rcu().
2554 : : */
2555 : : static void
2556 : 27985643 : __call_rcu(struct rcu_head *head, rcu_callback_t func, bool lazy)
2557 : : {
2558 : : unsigned long flags;
2559 : : struct rcu_data *rdp;
2560 : : bool was_alldone;
2561 : :
2562 : : /* Misaligned rcu_head! */
2563 [ - + # # ]: 27985643 : WARN_ON_ONCE((unsigned long)head & (sizeof(void *) - 1));
2564 : :
2565 : : if (debug_rcu_head_queue(head)) {
2566 : : /*
2567 : : * Probable double call_rcu(), so leak the callback.
2568 : : * Use rcu:rcu_callback trace event to find the previous
2569 : : * time callback was passed to __call_rcu().
2570 : : */
2571 : : WARN_ONCE(1, "__call_rcu(): Double-freed CB %p->%pS()!!!\n",
2572 : : head, head->func);
2573 : : WRITE_ONCE(head->func, rcu_leak_callback);
2574 : : return;
2575 : : }
2576 : 27985643 : head->func = func;
2577 : 27985643 : head->next = NULL;
2578 : 27983429 : local_irq_save(flags);
2579 : 55986122 : rdp = this_cpu_ptr(&rcu_data);
2580 : :
2581 : : /* Add the callback to our list. */
2582 [ - + ]: 27993061 : if (unlikely(!rcu_segcblist_is_enabled(&rdp->cblist))) {
2583 : : // This can trigger due to call_rcu() from offline CPU:
2584 [ # # # # ]: 0 : WARN_ON_ONCE(rcu_scheduler_active != RCU_SCHEDULER_INACTIVE);
2585 [ # # # # ]: 0 : WARN_ON_ONCE(!rcu_is_watching());
2586 : : // Very early boot, before rcu_init(). Initialize if needed
2587 : : // and then drop through to queue the callback.
2588 [ # # ]: 0 : if (rcu_segcblist_empty(&rdp->cblist))
2589 : 0 : rcu_segcblist_init(&rdp->cblist);
2590 : : }
2591 : : if (rcu_nocb_try_bypass(rdp, head, &was_alldone, flags))
2592 : : return; // Enqueued onto ->nocb_bypass, so just leave.
2593 : : /* If we get here, rcu_nocb_try_bypass() acquired ->nocb_lock. */
2594 : 27993061 : rcu_segcblist_enqueue(&rdp->cblist, head, lazy);
2595 [ + + ]: 27989262 : if (__is_kfree_rcu_offset((unsigned long)func))
2596 : : trace_rcu_kfree_callback(rcu_state.name, head,
2597 : : (unsigned long)func,
2598 : : rcu_segcblist_n_lazy_cbs(&rdp->cblist),
2599 : : rcu_segcblist_n_cbs(&rdp->cblist));
2600 : : else
2601 : : trace_rcu_callback(rcu_state.name, head,
2602 : : rcu_segcblist_n_lazy_cbs(&rdp->cblist),
2603 : : rcu_segcblist_n_cbs(&rdp->cblist));
2604 : :
2605 : : /* Go handle any RCU core processing required. */
2606 : : if (IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
2607 : : unlikely(rcu_segcblist_is_offloaded(&rdp->cblist))) {
2608 : : __call_rcu_nocb_wake(rdp, was_alldone, flags); /* unlocks */
2609 : : } else {
2610 : 27989262 : __call_rcu_core(rdp, head, flags);
2611 [ + + ]: 28428427 : local_irq_restore(flags);
2612 : : }
2613 : : }
2614 : :
2615 : : /**
2616 : : * call_rcu() - Queue an RCU callback for invocation after a grace period.
2617 : : * @head: structure to be used for queueing the RCU updates.
2618 : : * @func: actual callback function to be invoked after the grace period
2619 : : *
2620 : : * The callback function will be invoked some time after a full grace
2621 : : * period elapses, in other words after all pre-existing RCU read-side
2622 : : * critical sections have completed. However, the callback function
2623 : : * might well execute concurrently with RCU read-side critical sections
2624 : : * that started after call_rcu() was invoked. RCU read-side critical
2625 : : * sections are delimited by rcu_read_lock() and rcu_read_unlock(), and
2626 : : * may be nested. In addition, regions of code across which interrupts,
2627 : : * preemption, or softirqs have been disabled also serve as RCU read-side
2628 : : * critical sections. This includes hardware interrupt handlers, softirq
2629 : : * handlers, and NMI handlers.
2630 : : *
2631 : : * Note that all CPUs must agree that the grace period extended beyond
2632 : : * all pre-existing RCU read-side critical sections. On systems with more
2633 : : * than one CPU, this means that when "func()" is invoked, each CPU is
2634 : : * guaranteed to have executed a full memory barrier since the end of its
2635 : : * last RCU read-side critical section whose beginning preceded the call
2636 : : * to call_rcu(). It also means that each CPU executing an RCU read-side
2637 : : * critical section that continues beyond the start of "func()" must have
2638 : : * executed a memory barrier after the call_rcu() but before the beginning
2639 : : * of that RCU read-side critical section. Note that these guarantees
2640 : : * include CPUs that are offline, idle, or executing in user mode, as
2641 : : * well as CPUs that are executing in the kernel.
2642 : : *
2643 : : * Furthermore, if CPU A invoked call_rcu() and CPU B invoked the
2644 : : * resulting RCU callback function "func()", then both CPU A and CPU B are
2645 : : * guaranteed to execute a full memory barrier during the time interval
2646 : : * between the call to call_rcu() and the invocation of "func()" -- even
2647 : : * if CPU A and CPU B are the same CPU (but again only if the system has
2648 : : * more than one CPU).
2649 : : */
2650 : 27909630 : void call_rcu(struct rcu_head *head, rcu_callback_t func)
2651 : : {
2652 : 27909630 : __call_rcu(head, func, 0);
2653 : 27919199 : }
2654 : : EXPORT_SYMBOL_GPL(call_rcu);
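
/*
 * Minimal usage sketch for call_rcu() (struct foo, foo_reclaim(), and
 * foo_replace() are hypothetical, and <linux/slab.h> is assumed for
 * kfree()): embed an rcu_head in the protected object, publish the new
 * version with rcu_assign_pointer(), and defer freeing of the old one.
 */
struct foo {
	int data;
	struct rcu_head rcu;
};

static void foo_reclaim(struct rcu_head *rhp)
{
	struct foo *fp = container_of(rhp, struct foo, rcu);

	kfree(fp);	/* Runs only after a full grace period has elapsed. */
}

static void foo_replace(struct foo __rcu **slot, struct foo *newp)
{
	struct foo *oldp = rcu_dereference_protected(*slot, 1);

	rcu_assign_pointer(*slot, newp);	/* Publish the new version. */
	if (oldp)
		call_rcu(&oldp->rcu, foo_reclaim);	/* Reclaim the old one later. */
}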
2655 : :
2656 : : /*
2657 : : * Queue an RCU callback for lazy invocation after a grace period.
2658 : : * This will likely be later named something like "call_rcu_lazy()",
2659 : : * but this change will require some way of tagging the lazy RCU
2660 : : * callbacks in the list of pending callbacks. Until then, this
2661 : : * function may only be called from __kfree_rcu().
2662 : : */
2663 : 74146 : void kfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
2664 : : {
2665 : 74146 : __call_rcu(head, func, 1);
2666 : 74153 : }
2667 : : EXPORT_SYMBOL_GPL(kfree_call_rcu);
2668 : :
2669 : : /*
2670 : : * During early boot, any blocking grace-period wait automatically
2671 : : * implies a grace period. Later on, this is never the case for PREEMPT.
2672 : : *
2673 : : * However, because a context switch is a grace period for !PREEMPT, any
2674 : : * blocking grace-period wait automatically implies a grace period if
2675 : : * there is only one CPU online at any point in time during execution of
2676 : : * either synchronize_rcu() or synchronize_rcu_expedited(). It is OK to
2677 : : * occasionally incorrectly indicate that there are multiple CPUs online
2678 : : * when there was in fact only one the whole time, as this just adds some
2679 : : * overhead: RCU still operates correctly.
2680 : : */
2681 : : static int rcu_blocking_is_gp(void)
2682 : : {
2683 : : int ret;
2684 : :
2685 : : if (IS_ENABLED(CONFIG_PREEMPTION))
2686 : : return rcu_scheduler_active == RCU_SCHEDULER_INACTIVE;
2687 : 28835 : might_sleep(); /* Check for RCU read-side critical section. */
2688 : 28835 : preempt_disable();
2689 : : ret = num_online_cpus() <= 1;
2690 : 28835 : preempt_enable();
2691 : : return ret;
2692 : : }
2693 : :
2694 : : /**
2695 : : * synchronize_rcu - wait until a grace period has elapsed.
2696 : : *
2697 : : * Control will return to the caller some time after a full grace
2698 : : * period has elapsed, in other words after all currently executing RCU
2699 : : * read-side critical sections have completed. Note, however, that
2700 : : * upon return from synchronize_rcu(), the caller might well be executing
2701 : : * concurrently with new RCU read-side critical sections that began while
2702 : : * synchronize_rcu() was waiting. RCU read-side critical sections are
2703 : : * delimited by rcu_read_lock() and rcu_read_unlock(), and may be nested.
2704 : : * In addition, regions of code across which interrupts, preemption, or
2705 : : * softirqs have been disabled also serve as RCU read-side critical
2706 : : * sections. This includes hardware interrupt handlers, softirq handlers,
2707 : : * and NMI handlers.
2708 : : *
2709 : : * Note that this guarantee implies further memory-ordering guarantees.
2710 : : * On systems with more than one CPU, when synchronize_rcu() returns,
2711 : : * each CPU is guaranteed to have executed a full memory barrier since
2712 : : * the end of its last RCU read-side critical section whose beginning
2713 : : * preceded the call to synchronize_rcu(). In addition, each CPU having
2714 : : * an RCU read-side critical section that extends beyond the return from
2715 : : * synchronize_rcu() is guaranteed to have executed a full memory barrier
2716 : : * after the beginning of synchronize_rcu() and before the beginning of
2717 : : * that RCU read-side critical section. Note that these guarantees include
2718 : : * CPUs that are offline, idle, or executing in user mode, as well as CPUs
2719 : : * that are executing in the kernel.
2720 : : *
2721 : : * Furthermore, if CPU A invoked synchronize_rcu(), which returned
2722 : : * to its caller on CPU B, then both CPU A and CPU B are guaranteed
2723 : : * to have executed a full memory barrier during the execution of
2724 : : * synchronize_rcu() -- even if CPU A and CPU B are the same CPU (but
2725 : : * again only if the system has more than one CPU).
2726 : : */
2727 : 25719 : void synchronize_rcu(void)
2728 : : {
2729 : : RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
2730 : : lock_is_held(&rcu_lock_map) ||
2731 : : lock_is_held(&rcu_sched_lock_map),
2732 : : "Illegal synchronize_rcu() in RCU read-side critical section");
2733 [ + + ]: 25719 : if (rcu_blocking_is_gp())
2734 : 25719 : return;
2735 [ + + ]: 25512 : if (rcu_gp_is_expedited())
2736 : 2070 : synchronize_rcu_expedited();
2737 : : else
2738 : 23442 : wait_rcu_gp(call_rcu);
2739 : : }
2740 : : EXPORT_SYMBOL_GPL(synchronize_rcu);
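As a hedged illustration (not taken from this file), a typical synchronous updater publishes a new version of a structure and then waits for a grace period before freeing the old one. The names cur_cfg, cfg_mutex, struct cfg, and cfg_replace() are assumptions invented for the sketch.

#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct cfg {
	int threshold;
};

static struct cfg __rcu *cur_cfg;	/* readers use rcu_dereference() under rcu_read_lock() */
static DEFINE_MUTEX(cfg_mutex);		/* serializes updaters (assumed convention) */

static void cfg_replace(struct cfg *newp)
{
	struct cfg *oldp;

	mutex_lock(&cfg_mutex);
	oldp = rcu_dereference_protected(cur_cfg,
					 lockdep_is_held(&cfg_mutex));
	rcu_assign_pointer(cur_cfg, newp);	/* publish the new version */
	mutex_unlock(&cfg_mutex);
	synchronize_rcu();	/* wait out all pre-existing readers */
	kfree(oldp);		/* no reader can still reference oldp */
}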
2741 : :
2742 : : /**
2743 : : * get_state_synchronize_rcu - Snapshot current RCU state
2744 : : *
2745 : : * Returns a cookie that is used by a later call to cond_synchronize_rcu()
2746 : : * to determine whether or not a full grace period has elapsed in the
2747 : : * meantime.
2748 : : */
2749 : 0 : unsigned long get_state_synchronize_rcu(void)
2750 : : {
2751 : : /*
2752 : : * Any prior manipulation of RCU-protected data must happen
2753 : : * before the load from ->gp_seq.
2754 : : */
2755 : 0 : smp_mb(); /* ^^^ */
2756 : 0 : return rcu_seq_snap(&rcu_state.gp_seq);
2757 : : }
2758 : : EXPORT_SYMBOL_GPL(get_state_synchronize_rcu);
2759 : :
2760 : : /**
2761 : : * cond_synchronize_rcu - Conditionally wait for an RCU grace period
2762 : : *
2763 : : * @oldstate: return value from earlier call to get_state_synchronize_rcu()
2764 : : *
2765 : : * If a full RCU grace period has elapsed since the earlier call to
2766 : : * get_state_synchronize_rcu(), just return. Otherwise, invoke
2767 : : * synchronize_rcu() to wait for a full grace period.
2768 : : *
2769 : : * Yes, this function does not take counter wrap into account. But
2770 : : * counter wrap is harmless. If the counter wraps, we have waited for
2771 : : * more than 2 billion grace periods (and way more on a 64-bit system!),
2772 : : * so waiting for one additional grace period should be just fine.
2773 : : */
2774 : 0 : void cond_synchronize_rcu(unsigned long oldstate)
2775 : : {
2776 [ # # ]: 0 : if (!rcu_seq_done(&rcu_state.gp_seq, oldstate))
2777 : 0 : synchronize_rcu();
2778 : : else
2779 : 0 : smp_mb(); /* Ensure GP ends before subsequent accesses. */
2780 : 0 : }
2781 : : EXPORT_SYMBOL_GPL(cond_synchronize_rcu);
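A hedged sketch of the intended snapshot/conditional-wait pattern; do_unrelated_work() is a hypothetical stand-in for whatever the caller does between the two calls.

static void update_with_overlap(void)
{
	unsigned long gp_snap;

	gp_snap = get_state_synchronize_rcu();	/* snapshot before other work */
	do_unrelated_work();			/* hypothetical slow operation */
	cond_synchronize_rcu(gp_snap);		/* blocks only if no full GP elapsed */
}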
2782 : :
2783 : : /*
2784 : : * Check to see if there is any immediate RCU-related work to be done by
2785 : : * the current CPU, returning 1 if so and zero otherwise. The checks are
2786 : : * in order of increasing expense: checks that can be carried out against
2787 : : * CPU-local state are performed first. However, we must check for CPU
2788 : : * stalls first, else we might not get a chance.
2789 : : */
2790 : 2806800 : static int rcu_pending(void)
2791 : : {
2792 : 5613600 : struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
2793 : 2806800 : struct rcu_node *rnp = rdp->mynode;
2794 : :
2795 : : /* Check for CPU stalls, if enabled. */
2796 : 2806800 : check_cpu_stall(rdp);
2797 : :
2798 : : /* Does this CPU need a deferred NOCB wakeup? */
2799 : : if (rcu_nocb_need_deferred_wakeup(rdp))
2800 : : return 1;
2801 : :
2802 : : /* Is this CPU a NO_HZ_FULL CPU that should ignore RCU? */
2803 : : if (rcu_nohz_full_cpu())
2804 : : return 0;
2805 : :
2806 : : /* Is the RCU core waiting for a quiescent state from this CPU? */
2807 [ + + + + ]: 2920061 : if (rdp->core_needs_qs && !rdp->cpu_no_qs.b.norm)
2808 : : return 1;
2809 : :
2810 : : /* Does this CPU have callbacks ready to invoke? */
2811 [ + + ]: 2043585 : if (rcu_segcblist_ready_cbs(&rdp->cblist))
2812 : : return 1;
2813 : :
2814 : : /* Has RCU gone idle with this CPU needing another grace period? */
2815 [ + + + + ]: 2178485 : if (!rcu_gp_in_progress() &&
2816 : : rcu_segcblist_is_enabled(&rdp->cblist) &&
2817 : : (!IS_ENABLED(CONFIG_RCU_NOCB_CPU) ||
2818 [ + + ]: 156394 : !rcu_segcblist_is_offloaded(&rdp->cblist)) &&
2819 : : !rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL))
2820 : : return 1;
2821 : :
2822 : : /* Have RCU grace period completed or started? */
2823 [ + + - + ]: 3122607 : if (rcu_seq_current(&rnp->gp_seq) != rdp->gp_seq ||
2824 : 1163870 : unlikely(READ_ONCE(rdp->gpwrap))) /* outside lock */
2825 : : return 1;
2826 : :
2827 : : /* nothing to do */
2828 : : return 0;
2829 : : }
2830 : :
2831 : : /*
2832 : : * Helper function for rcu_barrier() tracing. If tracing is disabled,
2833 : : * the compiler is expected to optimize this away.
2834 : : */
2835 : : static void rcu_barrier_trace(const char *s, int cpu, unsigned long done)
2836 : : {
2837 : : trace_rcu_barrier(rcu_state.name, s, cpu,
2838 : : atomic_read(&rcu_state.barrier_cpu_count), done);
2839 : : }
2840 : :
2841 : : /*
2842 : : * RCU callback function for rcu_barrier(). If we are last, wake
2843 : : * up the task executing rcu_barrier().
2844 : : */
2845 : 107 : static void rcu_barrier_callback(struct rcu_head *rhp)
2846 : : {
2847 [ + + ]: 109 : if (atomic_dec_and_test(&rcu_state.barrier_cpu_count)) {
2848 : : rcu_barrier_trace(TPS("LastCB"), -1,
2849 : : rcu_state.barrier_sequence);
2850 : 95 : complete(&rcu_state.barrier_completion);
2851 : : } else {
2852 : : rcu_barrier_trace(TPS("CB"), -1, rcu_state.barrier_sequence);
2853 : : }
2854 : 109 : }
2855 : :
2856 : : /*
2857 : : * Called with preemption disabled, and from cross-cpu IRQ context.
2858 : : */
2859 : 109 : static void rcu_barrier_func(void *unused)
2860 : : {
2861 : 218 : struct rcu_data *rdp = raw_cpu_ptr(&rcu_data);
2862 : :
2863 : : rcu_barrier_trace(TPS("IRQ"), -1, rcu_state.barrier_sequence);
2864 : 109 : rdp->barrier_head.func = rcu_barrier_callback;
2865 : : debug_rcu_head_queue(&rdp->barrier_head);
2866 : : rcu_nocb_lock(rdp);
2867 : 109 : WARN_ON_ONCE(!rcu_nocb_flush_bypass(rdp, NULL, jiffies));
2868 [ + - ]: 109 : if (rcu_segcblist_entrain(&rdp->cblist, &rdp->barrier_head, 0)) {
2869 : : atomic_inc(&rcu_state.barrier_cpu_count);
2870 : : } else {
2871 : : debug_rcu_head_unqueue(&rdp->barrier_head);
2872 : : rcu_barrier_trace(TPS("IRQNQ"), -1,
2873 : : rcu_state.barrier_sequence);
2874 : : }
2875 : : rcu_nocb_unlock(rdp);
2876 : 109 : }
2877 : :
2878 : : /**
2879 : : * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete.
2880 : : *
2881 : : * Note that this primitive does not necessarily wait for an RCU grace period
2882 : : * to complete. For example, if there are no RCU callbacks queued anywhere
2883 : : * in the system, then rcu_barrier() is within its rights to return
2884 : : * immediately, without waiting for anything, much less an RCU grace period.
2885 : : */
2886 : 207 : void rcu_barrier(void)
2887 : : {
2888 : : int cpu;
2889 : : struct rcu_data *rdp;
2890 : : unsigned long s = rcu_seq_snap(&rcu_state.barrier_sequence);
2891 : :
2892 : : rcu_barrier_trace(TPS("Begin"), -1, s);
2893 : :
2894 : : /* Take mutex to serialize concurrent rcu_barrier() requests. */
2895 : 207 : mutex_lock(&rcu_state.barrier_mutex);
2896 : :
2897 : : /* Did someone else do our work for us? */
2898 [ - + ]: 207 : if (rcu_seq_done(&rcu_state.barrier_sequence, s)) {
2899 : : rcu_barrier_trace(TPS("EarlyExit"), -1,
2900 : : rcu_state.barrier_sequence);
2901 : 0 : smp_mb(); /* caller's subsequent code after above check. */
2902 : 0 : mutex_unlock(&rcu_state.barrier_mutex);
2903 : 207 : return;
2904 : : }
2905 : :
2906 : : /* Mark the start of the barrier operation. */
2907 : 207 : rcu_seq_start(&rcu_state.barrier_sequence);
2908 : : rcu_barrier_trace(TPS("Inc1"), -1, rcu_state.barrier_sequence);
2909 : :
2910 : : /*
2911 : : * Initialize the count to one rather than to zero in order to
2912 : : * avoid a too-soon return to zero in case of a short grace period
2913 : : * (or preemption of this task). Exclude CPU-hotplug operations
2914 : : * to ensure that no offline CPU has callbacks queued.
2915 : : */
2916 : : init_completion(&rcu_state.barrier_completion);
2917 : : atomic_set(&rcu_state.barrier_cpu_count, 1);
2918 : : get_online_cpus();
2919 : :
2920 : : /*
2921 : : * Force each CPU with callbacks to register a new callback.
2922 : : * When that callback is invoked, we will know that all of the
2923 : : * corresponding CPU's preceding callbacks have been invoked.
2924 : : */
2925 [ + + ]: 1242 : for_each_possible_cpu(cpu) {
2926 : 828 : rdp = per_cpu_ptr(&rcu_data, cpu);
2927 [ - + # # ]: 828 : if (!cpu_online(cpu) &&
2928 : : !rcu_segcblist_is_offloaded(&rdp->cblist))
2929 : 0 : continue;
2930 [ + + ]: 828 : if (rcu_segcblist_n_cbs(&rdp->cblist)) {
2931 : : rcu_barrier_trace(TPS("OnlineQ"), cpu,
2932 : : rcu_state.barrier_sequence);
2933 : 109 : smp_call_function_single(cpu, rcu_barrier_func, NULL, 1);
2934 : : } else {
2935 : : rcu_barrier_trace(TPS("OnlineNQ"), cpu,
2936 : : rcu_state.barrier_sequence);
2937 : : }
2938 : : }
2939 : : put_online_cpus();
2940 : :
2941 : : /*
2942 : : * Now that we have an rcu_barrier_callback() callback on each
2943 : : * CPU, and thus each one counted, remove the initial count.
2944 : : */
2945 [ + + ]: 207 : if (atomic_dec_and_test(&rcu_state.barrier_cpu_count))
2946 : 112 : complete(&rcu_state.barrier_completion);
2947 : :
2948 : : /* Wait for all rcu_barrier_callback() callbacks to be invoked. */
2949 : 207 : wait_for_completion(&rcu_state.barrier_completion);
2950 : :
2951 : : /* Mark the end of the barrier operation. */
2952 : : rcu_barrier_trace(TPS("Inc2"), -1, rcu_state.barrier_sequence);
2953 : 207 : rcu_seq_end(&rcu_state.barrier_sequence);
2954 : :
2955 : : /* Other rcu_barrier() invocations can now safely proceed. */
2956 : 207 : mutex_unlock(&rcu_state.barrier_mutex);
2957 : : }
2958 : : EXPORT_SYMBOL_GPL(rcu_barrier);
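A hedged sketch of the classic rcu_barrier() use case, a module unload path. The functions example_exit() and remove_all_entries() are invented, and the latter is assumed to post its frees via call_rcu().

#include <linux/module.h>
#include <linux/rcupdate.h>

static void __exit example_exit(void)
{
	remove_all_entries();	/* hypothetical: queues call_rcu() callbacks */
	rcu_barrier();		/* wait until every queued callback has run */
	/* Only now is it safe for the module text and data to go away. */
}
module_exit(example_exit);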
2959 : :
2960 : : /*
2961 : : * Propagate ->qsmaskinit bits up the rcu_node tree to account for the
2962 : : * first CPU in a given leaf rcu_node structure coming online. The caller
2963 : : * must hold the corresponding leaf rcu_node ->lock with interrupts
2964 : : * disabled.
2965 : : */
2966 : 207 : static void rcu_init_new_rnp(struct rcu_node *rnp_leaf)
2967 : : {
2968 : : long mask;
2969 : : long oldmask;
2970 : : struct rcu_node *rnp = rnp_leaf;
2971 : :
2972 : : raw_lockdep_assert_held_rcu_node(rnp_leaf);
2973 [ - + # # ]: 207 : WARN_ON_ONCE(rnp->wait_blkd_tasks);
2974 : : for (;;) {
2975 : 207 : mask = rnp->grpmask;
2976 : 207 : rnp = rnp->parent;
2977 [ - + ]: 207 : if (rnp == NULL)
2978 : : return;
2979 : 0 : raw_spin_lock_rcu_node(rnp); /* Interrupts already disabled. */
2980 : 0 : oldmask = rnp->qsmaskinit;
2981 : 0 : rnp->qsmaskinit |= mask;
2982 : : raw_spin_unlock_rcu_node(rnp); /* Interrupts remain disabled. */
2983 [ # # ]: 0 : if (oldmask)
2984 : : return;
2985 : : }
2986 : : }
2987 : :
2988 : : /*
2989 : : * Do boot-time initialization of a CPU's per-CPU RCU data.
2990 : : */
2991 : : static void __init
2992 : 828 : rcu_boot_init_percpu_data(int cpu)
2993 : : {
2994 : 828 : struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
2995 : :
2996 : : /* Set up local state, ensuring consistent view of global state. */
2997 : 828 : rdp->grpmask = leaf_node_cpu_bit(rdp->mynode, cpu);
2998 [ - + # # ]: 828 : WARN_ON_ONCE(rdp->dynticks_nesting != 1);
2999 [ - + # # ]: 1656 : WARN_ON_ONCE(rcu_dynticks_in_eqs(rcu_dynticks_snap(rdp)));
3000 : 828 : rdp->rcu_ofl_gp_seq = rcu_state.gp_seq;
3001 : 828 : rdp->rcu_ofl_gp_flags = RCU_GP_CLEANED;
3002 : 828 : rdp->rcu_onl_gp_seq = rcu_state.gp_seq;
3003 : 828 : rdp->rcu_onl_gp_flags = RCU_GP_CLEANED;
3004 : 828 : rdp->cpu = cpu;
3005 : : rcu_boot_init_nocb_percpu_data(rdp);
3006 : 828 : }
3007 : :
3008 : : /*
3009 : : * Invoked early in the CPU-online process, when pretty much all services
3010 : : * are available. The incoming CPU is not present.
3011 : : *
3012 : : * Initializes a CPU's per-CPU RCU data. Note that only one online or
3013 : : * offline event can be happening at a given time. Note also that we can
3014 : : * accept some slop in the rcu_state.gp_seq access due to the fact that this
3015 : : * CPU cannot possibly have any non-offloaded RCU callbacks in flight yet.
3016 : : * And any offloaded callbacks are being numbered elsewhere.
3017 : : */
3018 : 828 : int rcutree_prepare_cpu(unsigned int cpu)
3019 : : {
3020 : : unsigned long flags;
3021 : 828 : struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
3022 : : struct rcu_node *rnp = rcu_get_root();
3023 : :
3024 : : /* Set up local state, ensuring consistent view of global state. */
3025 : 828 : raw_spin_lock_irqsave_rcu_node(rnp, flags);
3026 : 828 : rdp->qlen_last_fqs_check = 0;
3027 : 828 : rdp->n_force_qs_snap = rcu_state.n_force_qs;
3028 : 828 : rdp->blimit = blimit;
3029 [ + - + - ]: 1656 : if (rcu_segcblist_empty(&rdp->cblist) && /* No early-boot CBs? */
3030 : : !rcu_segcblist_is_offloaded(&rdp->cblist))
3031 : 828 : rcu_segcblist_init(&rdp->cblist); /* Re-enable callbacks. */
3032 : 828 : rdp->dynticks_nesting = 1; /* CPU not up, no tearing. */
3033 : 828 : rcu_dynticks_eqs_online();
3034 : : raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
3035 : :
3036 : : /*
3037 : : * Add CPU to leaf rcu_node pending-online bitmask. Any needed
3038 : : * propagation up the rcu_node tree will happen at the beginning
3039 : : * of the next grace period.
3040 : : */
3041 : 828 : rnp = rdp->mynode;
3042 : 828 : raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
3043 : 828 : rdp->beenonline = true; /* We have now been online. */
3044 : 828 : rdp->gp_seq = rnp->gp_seq;
3045 : 828 : rdp->gp_seq_needed = rnp->gp_seq;
3046 : 828 : rdp->cpu_no_qs.b.norm = true;
3047 : 828 : rdp->core_needs_qs = false;
3048 : 828 : rdp->rcu_iw_pending = false;
3049 : 828 : rdp->rcu_iw_gp_seq = rnp->gp_seq - 1;
3050 : : trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuonl"));
3051 : 828 : raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
3052 : : rcu_prepare_kthreads(cpu);
3053 : : rcu_spawn_cpu_nocb_kthread(cpu);
3054 : :
3055 : 828 : return 0;
3056 : : }
3057 : :
3058 : : /*
3059 : : * Update RCU priority boost kthread affinity for CPU-hotplug changes.
3060 : : */
3061 : : static void rcutree_affinity_setting(unsigned int cpu, int outgoing)
3062 : : {
3063 : 621 : struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
3064 : :
3065 : : rcu_boost_kthread_setaffinity(rdp->mynode, outgoing);
3066 : : }
3067 : :
3068 : : /*
3069 : : * Near the end of the CPU-online process. Pretty much all services
3070 : : * enabled, and the CPU is now very much alive.
3071 : : */
3072 : 828 : int rcutree_online_cpu(unsigned int cpu)
3073 : : {
3074 : : unsigned long flags;
3075 : : struct rcu_data *rdp;
3076 : : struct rcu_node *rnp;
3077 : :
3078 : 828 : rdp = per_cpu_ptr(&rcu_data, cpu);
3079 : 828 : rnp = rdp->mynode;
3080 : 828 : raw_spin_lock_irqsave_rcu_node(rnp, flags);
3081 : 828 : rnp->ffmask |= rdp->grpmask;
3082 : 828 : raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
3083 [ + + ]: 828 : if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
3084 : : return 0; /* Too early in boot for scheduler work. */
3085 : 621 : sync_sched_exp_online_cleanup(cpu);
3086 : : rcutree_affinity_setting(cpu, -1);
3087 : 621 : return 0;
3088 : : }
3089 : :
3090 : : /*
3091 : : * Near the beginning of the CPU-offline process. The CPU is still very much
3092 : : * alive with pretty much all services enabled.
3092 : : * with pretty much all services enabled.
3093 : : */
3094 : 0 : int rcutree_offline_cpu(unsigned int cpu)
3095 : : {
3096 : : unsigned long flags;
3097 : : struct rcu_data *rdp;
3098 : : struct rcu_node *rnp;
3099 : :
3100 : 0 : rdp = per_cpu_ptr(&rcu_data, cpu);
3101 : 0 : rnp = rdp->mynode;
3102 : 0 : raw_spin_lock_irqsave_rcu_node(rnp, flags);
3103 : 0 : rnp->ffmask &= ~rdp->grpmask;
3104 : 0 : raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
3105 : :
3106 : : rcutree_affinity_setting(cpu, cpu);
3107 : 0 : return 0;
3108 : : }
3109 : :
3110 : : static DEFINE_PER_CPU(int, rcu_cpu_started);
3111 : :
3112 : : /*
3113 : : * Mark the specified CPU as being online so that subsequent grace periods
3114 : : * (both expedited and normal) will wait on it. Note that this means that
3115 : : * incoming CPUs are not allowed to use RCU read-side critical sections
3116 : : * until this function is called. Failing to observe this restriction
3117 : : * will result in lockdep splats.
3118 : : *
3119 : : * Note that this function is special in that it is invoked directly
3120 : : * from the incoming CPU rather than from the cpuhp_step mechanism.
3121 : : * This is because this function must be invoked at a precise location.
3122 : : */
3123 : 828 : void rcu_cpu_starting(unsigned int cpu)
3124 : : {
3125 : : unsigned long flags;
3126 : : unsigned long mask;
3127 : : int nbits;
3128 : : unsigned long oldmask;
3129 : : struct rcu_data *rdp;
3130 : : struct rcu_node *rnp;
3131 : :
3132 [ + - ]: 828 : if (per_cpu(rcu_cpu_started, cpu))
3133 : 828 : return;
3134 : :
3135 : 828 : per_cpu(rcu_cpu_started, cpu) = 1;
3136 : :
3137 : 828 : rdp = per_cpu_ptr(&rcu_data, cpu);
3138 : 828 : rnp = rdp->mynode;
3139 : 828 : mask = rdp->grpmask;
3140 : 828 : raw_spin_lock_irqsave_rcu_node(rnp, flags);
3141 : 828 : rnp->qsmaskinitnext |= mask;
3142 : 828 : oldmask = rnp->expmaskinitnext;
3143 : 828 : rnp->expmaskinitnext |= mask;
3144 : 828 : oldmask ^= rnp->expmaskinitnext;
3145 : : nbits = bitmap_weight(&oldmask, BITS_PER_LONG);
3146 : : /* Allow lockless access for expedited grace periods. */
3147 : 828 : smp_store_release(&rcu_state.ncpus, rcu_state.ncpus + nbits); /* ^^^ */
3148 : : rcu_gpnum_ovf(rnp, rdp); /* Offline-induced counter wrap? */
3149 : 828 : rdp->rcu_onl_gp_seq = READ_ONCE(rcu_state.gp_seq);
3150 : 828 : rdp->rcu_onl_gp_flags = READ_ONCE(rcu_state.gp_flags);
3151 [ - + ]: 828 : if (rnp->qsmask & mask) { /* RCU waiting on incoming CPU? */
3152 : : /* Report QS -after- changing ->qsmaskinitnext! */
3153 : 0 : rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
3154 : : } else {
3155 : 828 : raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
3156 : : }
3157 : 828 : smp_mb(); /* Ensure RCU read-side usage follows above initialization. */
3158 : : }
3159 : :
3160 : : #ifdef CONFIG_HOTPLUG_CPU
3161 : : /*
3162 : : * The outgoing function has no further need of RCU, so remove it from
3163 : : * the rcu_node tree's ->qsmaskinitnext bit masks.
3164 : : *
3165 : : * Note that this function is special in that it is invoked directly
3166 : : * from the outgoing CPU rather than from the cpuhp_step mechanism.
3167 : : * This is because this function must be invoked at a precise location.
3168 : : */
3169 : : void rcu_report_dead(unsigned int cpu)
3170 : : {
3171 : : unsigned long flags;
3172 : : unsigned long mask;
3173 : : struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
3174 : : struct rcu_node *rnp = rdp->mynode; /* Outgoing CPU's rdp & rnp. */
3175 : :
3176 : : /* QS for any half-done expedited grace period. */
3177 : : preempt_disable();
3178 : : rcu_report_exp_rdp(this_cpu_ptr(&rcu_data));
3179 : : preempt_enable();
3180 : : rcu_preempt_deferred_qs(current);
3181 : :
3182 : : /* Remove outgoing CPU from mask in the leaf rcu_node structure. */
3183 : : mask = rdp->grpmask;
3184 : : raw_spin_lock(&rcu_state.ofl_lock);
3185 : : raw_spin_lock_irqsave_rcu_node(rnp, flags); /* Enforce GP memory-order guarantee. */
3186 : : rdp->rcu_ofl_gp_seq = READ_ONCE(rcu_state.gp_seq);
3187 : : rdp->rcu_ofl_gp_flags = READ_ONCE(rcu_state.gp_flags);
3188 : : if (rnp->qsmask & mask) { /* RCU waiting on outgoing CPU? */
3189 : : /* Report quiescent state -before- changing ->qsmaskinitnext! */
3190 : : rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
3191 : : raw_spin_lock_irqsave_rcu_node(rnp, flags);
3192 : : }
3193 : : rnp->qsmaskinitnext &= ~mask;
3194 : : raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
3195 : : raw_spin_unlock(&rcu_state.ofl_lock);
3196 : :
3197 : : per_cpu(rcu_cpu_started, cpu) = 0;
3198 : : }
3199 : :
3200 : : /*
3201 : : * The outgoing CPU has just passed through the dying-idle state, and we
3202 : : * are being invoked from the CPU that was IPIed to continue the offline
3203 : : * operation. Migrate the outgoing CPU's callbacks to the current CPU.
3204 : : */
3205 : : void rcutree_migrate_callbacks(int cpu)
3206 : : {
3207 : : unsigned long flags;
3208 : : struct rcu_data *my_rdp;
3209 : : struct rcu_node *my_rnp;
3210 : : struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
3211 : : bool needwake;
3212 : :
3213 : : if (rcu_segcblist_is_offloaded(&rdp->cblist) ||
3214 : : rcu_segcblist_empty(&rdp->cblist))
3215 : : return; /* No callbacks to migrate. */
3216 : :
3217 : : local_irq_save(flags);
3218 : : my_rdp = this_cpu_ptr(&rcu_data);
3219 : : my_rnp = my_rdp->mynode;
3220 : : rcu_nocb_lock(my_rdp); /* irqs already disabled. */
3221 : : WARN_ON_ONCE(!rcu_nocb_flush_bypass(my_rdp, NULL, jiffies));
3222 : : raw_spin_lock_rcu_node(my_rnp); /* irqs already disabled. */
3223 : : /* Leverage recent GPs and set GP for new callbacks. */
3224 : : needwake = rcu_advance_cbs(my_rnp, rdp) ||
3225 : : rcu_advance_cbs(my_rnp, my_rdp);
3226 : : rcu_segcblist_merge(&my_rdp->cblist, &rdp->cblist);
3227 : : needwake = needwake || rcu_advance_cbs(my_rnp, my_rdp);
3228 : : rcu_segcblist_disable(&rdp->cblist);
3229 : : WARN_ON_ONCE(rcu_segcblist_empty(&my_rdp->cblist) !=
3230 : : !rcu_segcblist_n_cbs(&my_rdp->cblist));
3231 : : if (rcu_segcblist_is_offloaded(&my_rdp->cblist)) {
3232 : : raw_spin_unlock_rcu_node(my_rnp); /* irqs remain disabled. */
3233 : : __call_rcu_nocb_wake(my_rdp, true, flags);
3234 : : } else {
3235 : : rcu_nocb_unlock(my_rdp); /* irqs remain disabled. */
3236 : : raw_spin_unlock_irqrestore_rcu_node(my_rnp, flags);
3237 : : }
3238 : : if (needwake)
3239 : : rcu_gp_kthread_wake();
3240 : : lockdep_assert_irqs_enabled();
3241 : : WARN_ONCE(rcu_segcblist_n_cbs(&rdp->cblist) != 0 ||
3242 : : !rcu_segcblist_empty(&rdp->cblist),
3243 : : "rcu_cleanup_dead_cpu: Callbacks on offline CPU %d: qlen=%lu, 1stCB=%p\n",
3244 : : cpu, rcu_segcblist_n_cbs(&rdp->cblist),
3245 : : rcu_segcblist_first_cb(&rdp->cblist));
3246 : : }
3247 : : #endif
3248 : :
3249 : : /*
3250 : : * On non-huge systems, use expedited RCU grace periods to make suspend
3251 : : * and hibernation run faster.
3252 : : */
3253 : : static int rcu_pm_notify(struct notifier_block *self,
3254 : : unsigned long action, void *hcpu)
3255 : : {
3256 : : switch (action) {
3257 : : case PM_HIBERNATION_PREPARE:
3258 : : case PM_SUSPEND_PREPARE:
3259 : : rcu_expedite_gp();
3260 : : break;
3261 : : case PM_POST_HIBERNATION:
3262 : : case PM_POST_SUSPEND:
3263 : : rcu_unexpedite_gp();
3264 : : break;
3265 : : default:
3266 : : break;
3267 : : }
3268 : : return NOTIFY_OK;
3269 : : }
3270 : :
3271 : : /*
3272 : : * Spawn the kthreads that handle RCU's grace periods.
3273 : : */
3274 : 207 : static int __init rcu_spawn_gp_kthread(void)
3275 : : {
3276 : : unsigned long flags;
3277 : 207 : int kthread_prio_in = kthread_prio;
3278 : : struct rcu_node *rnp;
3279 : : struct sched_param sp;
3280 : : struct task_struct *t;
3281 : :
3282 : : /* Force priority into range. */
3283 : : if (IS_ENABLED(CONFIG_RCU_BOOST) && kthread_prio < 2
3284 : : && IS_BUILTIN(CONFIG_RCU_TORTURE_TEST))
3285 : : kthread_prio = 2;
3286 : : else if (IS_ENABLED(CONFIG_RCU_BOOST) && kthread_prio < 1)
3287 : : kthread_prio = 1;
3288 [ - + ]: 207 : else if (kthread_prio < 0)
3289 : 0 : kthread_prio = 0;
3290 [ - + ]: 207 : else if (kthread_prio > 99)
3291 : 0 : kthread_prio = 99;
3292 : :
3293 [ - + ]: 207 : if (kthread_prio != kthread_prio_in)
3294 : 0 : pr_alert("rcu_spawn_gp_kthread(): Limited prio to %d from %d\n",
3295 : : kthread_prio, kthread_prio_in);
3296 : :
3297 : 207 : rcu_scheduler_fully_active = 1;
3298 : 207 : t = kthread_create(rcu_gp_kthread, NULL, "%s", rcu_state.name);
3299 [ - + # # : 207 : if (WARN_ONCE(IS_ERR(t), "%s: Could not start grace-period kthread, OOM is now expected behavior\n", __func__))
+ - ]
3300 : : return 0;
3301 [ - + ]: 207 : if (kthread_prio) {
3302 : 0 : sp.sched_priority = kthread_prio;
3303 : 0 : sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
3304 : : }
3305 : : rnp = rcu_get_root();
3306 : 207 : raw_spin_lock_irqsave_rcu_node(rnp, flags);
3307 : 207 : rcu_state.gp_kthread = t;
3308 : 207 : raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
3309 : 207 : wake_up_process(t);
3310 : : rcu_spawn_nocb_kthreads();
3311 : : rcu_spawn_boost_kthreads();
3312 : 207 : return 0;
3313 : : }
3314 : : early_initcall(rcu_spawn_gp_kthread);
3315 : :
3316 : : /*
3317 : : * This function is invoked towards the end of the scheduler's
3318 : : * initialization process. Before this is called, the idle task might
3319 : : * contain synchronous grace-period primitives (during which time, this idle
3320 : : * task is booting the system, and such primitives are no-ops). After this
3321 : : * function is called, any synchronous grace-period primitives are run as
3322 : : * expedited, with the requesting task driving the grace period forward.
3323 : : * A later core_initcall() rcu_set_runtime_mode() will switch to full
3324 : : * runtime RCU functionality.
3325 : : */
3326 : 207 : void rcu_scheduler_starting(void)
3327 : : {
3328 [ - + ]: 207 : WARN_ON(num_online_cpus() != 1);
3329 [ - + ]: 207 : WARN_ON(nr_context_switches() > 0);
3330 : 207 : rcu_test_sync_prims();
3331 : 207 : rcu_scheduler_active = RCU_SCHEDULER_INIT;
3332 : 207 : rcu_test_sync_prims();
3333 : 207 : }
3334 : :
3335 : : /*
3336 : : * Helper function for rcu_init() that initializes the rcu_state structure.
3337 : : */
3338 : 207 : static void __init rcu_init_one(void)
3339 : : {
3340 : : static const char * const buf[] = RCU_NODE_NAME_INIT;
3341 : : static const char * const fqs[] = RCU_FQS_NAME_INIT;
3342 : : static struct lock_class_key rcu_node_class[RCU_NUM_LVLS];
3343 : : static struct lock_class_key rcu_fqs_class[RCU_NUM_LVLS];
3344 : :
3345 : : int levelspread[RCU_NUM_LVLS]; /* kids/node in each level. */
3346 : : int cpustride = 1;
3347 : : int i;
3348 : : int j;
3349 : : struct rcu_node *rnp;
3350 : :
3351 : : BUILD_BUG_ON(RCU_NUM_LVLS > ARRAY_SIZE(buf)); /* Fix buf[] init! */
3352 : :
3353 : : /* Silence gcc 4.8 false positive about array index out of range. */
3354 [ + - ]: 207 : if (rcu_num_lvls <= 0 || rcu_num_lvls > RCU_NUM_LVLS)
3355 : 0 : panic("rcu_init_one: rcu_num_lvls out of range");
3356 : :
3357 : : /* Initialize the level-tracking arrays. */
3358 : :
3359 [ - + ]: 0 : for (i = 1; i < rcu_num_lvls; i++)
3360 : 0 : rcu_state.level[i] =
3361 : 0 : rcu_state.level[i - 1] + num_rcu_lvl[i - 1];
3362 : 207 : rcu_init_levelspread(levelspread, num_rcu_lvl);
3363 : :
3364 : : /* Initialize the elements themselves, starting from the leaves. */
3365 : :
3366 [ + + ]: 414 : for (i = rcu_num_lvls - 1; i >= 0; i--) {
3367 : 207 : cpustride *= levelspread[i];
3368 : 207 : rnp = rcu_state.level[i];
3369 [ + + ]: 414 : for (j = 0; j < num_rcu_lvl[i]; j++, rnp++) {
3370 : 207 : raw_spin_lock_init(&ACCESS_PRIVATE(rnp, lock));
3371 : : lockdep_set_class_and_name(&ACCESS_PRIVATE(rnp, lock),
3372 : : &rcu_node_class[i], buf[i]);
3373 : 207 : raw_spin_lock_init(&rnp->fqslock);
3374 : : lockdep_set_class_and_name(&rnp->fqslock,
3375 : : &rcu_fqs_class[i], fqs[i]);
3376 : 207 : rnp->gp_seq = rcu_state.gp_seq;
3377 : 207 : rnp->gp_seq_needed = rcu_state.gp_seq;
3378 : 207 : rnp->completedqs = rcu_state.gp_seq;
3379 : 207 : rnp->qsmask = 0;
3380 : 207 : rnp->qsmaskinit = 0;
3381 : 207 : rnp->grplo = j * cpustride;
3382 : 207 : rnp->grphi = (j + 1) * cpustride - 1;
3383 [ - + ]: 207 : if (rnp->grphi >= nr_cpu_ids)
3384 : 0 : rnp->grphi = nr_cpu_ids - 1;
3385 [ + - ]: 207 : if (i == 0) {
3386 : 207 : rnp->grpnum = 0;
3387 : 207 : rnp->grpmask = 0;
3388 : 207 : rnp->parent = NULL;
3389 : : } else {
3390 : 0 : rnp->grpnum = j % levelspread[i - 1];
3391 : 0 : rnp->grpmask = BIT(rnp->grpnum);
3392 : 0 : rnp->parent = rcu_state.level[i - 1] +
3393 : 0 : j / levelspread[i - 1];
3394 : : }
3395 : 207 : rnp->level = i;
3396 : 207 : INIT_LIST_HEAD(&rnp->blkd_tasks);
3397 : : rcu_init_one_nocb(rnp);
3398 : 207 : init_waitqueue_head(&rnp->exp_wq[0]);
3399 : 207 : init_waitqueue_head(&rnp->exp_wq[1]);
3400 : 207 : init_waitqueue_head(&rnp->exp_wq[2]);
3401 : 207 : init_waitqueue_head(&rnp->exp_wq[3]);
3402 : 207 : spin_lock_init(&rnp->exp_lock);
3403 : : }
3404 : : }
3405 : :
3406 : 207 : init_swait_queue_head(&rcu_state.gp_wq);
3407 : 207 : init_swait_queue_head(&rcu_state.expedited_wq);
3408 : 207 : rnp = rcu_first_leaf_node();
3409 [ + + ]: 1242 : for_each_possible_cpu(i) {
3410 [ - + ]: 828 : while (i > rnp->grphi)
3411 : 0 : rnp++;
3412 : 828 : per_cpu_ptr(&rcu_data, i)->mynode = rnp;
3413 : 828 : rcu_boot_init_percpu_data(i);
3414 : : }
3415 : 207 : }
3416 : :
3417 : : /*
3418 : : * Compute the rcu_node tree geometry from kernel parameters. This cannot
3419 : : * replace the definitions in tree.h because those are needed to size
3420 : : * the ->node array in the rcu_state structure.
3421 : : */
3422 : 207 : static void __init rcu_init_geometry(void)
3423 : : {
3424 : : ulong d;
3425 : : int i;
3426 : : int rcu_capacity[RCU_NUM_LVLS];
3427 : :
3428 : : /*
3429 : : * Initialize any unspecified boot parameters.
3430 : : * The default values of jiffies_till_first_fqs and
3431 : : * jiffies_till_next_fqs are set to the RCU_JIFFIES_TILL_FORCE_QS
3432 : : * value, which is a function of HZ, then adding one for each
3433 : : * RCU_JIFFIES_FQS_DIV CPUs that might be on the system.
3434 : : */
3435 : 207 : d = RCU_JIFFIES_TILL_FORCE_QS + nr_cpu_ids / RCU_JIFFIES_FQS_DIV;
3436 [ + - ]: 207 : if (jiffies_till_first_fqs == ULONG_MAX)
3437 : 207 : jiffies_till_first_fqs = d;
3438 [ + - ]: 207 : if (jiffies_till_next_fqs == ULONG_MAX)
3439 : 207 : jiffies_till_next_fqs = d;
3440 : 207 : adjust_jiffies_till_sched_qs();
3441 : :
3442 : : /* If the compile-time values are accurate, just leave. */
3443 [ + - - + ]: 414 : if (rcu_fanout_leaf == RCU_FANOUT_LEAF &&
3444 : 207 : nr_cpu_ids == NR_CPUS)
3445 : : return;
3446 : 0 : pr_info("Adjusting geometry for rcu_fanout_leaf=%d, nr_cpu_ids=%u\n",
3447 : : rcu_fanout_leaf, nr_cpu_ids);
3448 : :
3449 : : /*
3450 : : * The boot-time rcu_fanout_leaf parameter must be at least two
3451 : : * and cannot exceed the number of bits in the rcu_node masks.
3452 : : * Complain and fall back to the compile-time values if this
3453 : : * limit is exceeded.
3454 : : */
3455 [ # # ]: 0 : if (rcu_fanout_leaf < 2 ||
3456 : : rcu_fanout_leaf > sizeof(unsigned long) * 8) {
3457 : 0 : rcu_fanout_leaf = RCU_FANOUT_LEAF;
3458 : 0 : WARN_ON(1);
3459 : 0 : return;
3460 : : }
3461 : :
3462 : : /*
3463 : : * Compute number of nodes that can be handled by an rcu_node tree
3464 : : * with the given number of levels.
3465 : : */
3466 : : rcu_capacity[0] = rcu_fanout_leaf;
3467 : : for (i = 1; i < RCU_NUM_LVLS; i++)
3468 : : rcu_capacity[i] = rcu_capacity[i - 1] * RCU_FANOUT;
3469 : :
3470 : : /*
3471 : : * The tree must be able to accommodate the configured number of CPUs.
3472 : : * If this limit is exceeded, fall back to the compile-time values.
3473 : : */
3474 [ # # ]: 0 : if (nr_cpu_ids > rcu_capacity[RCU_NUM_LVLS - 1]) {
3475 : 0 : rcu_fanout_leaf = RCU_FANOUT_LEAF;
3476 : 0 : WARN_ON(1);
3477 : 0 : return;
3478 : : }
3479 : :
3480 : : /* Calculate the number of levels in the tree. */
3481 [ # # ]: 0 : for (i = 0; nr_cpu_ids > rcu_capacity[i]; i++) {
3482 : : }
3483 : 0 : rcu_num_lvls = i + 1;
3484 : :
3485 : : /* Calculate the number of rcu_nodes at each level of the tree. */
3486 [ # # ]: 0 : for (i = 0; i < rcu_num_lvls; i++) {
3487 : : int cap = rcu_capacity[(rcu_num_lvls - 1) - i];
3488 : 0 : num_rcu_lvl[i] = DIV_ROUND_UP(nr_cpu_ids, cap);
3489 : : }
3490 : :
3491 : : /* Calculate the total number of rcu_node structures. */
3492 : 0 : rcu_num_nodes = 0;
3493 [ # # ]: 0 : for (i = 0; i < rcu_num_lvls; i++)
3494 : 0 : rcu_num_nodes += num_rcu_lvl[i];
3495 : : }
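To make the geometry arithmetic concrete, the following stand-alone user-space sketch mirrors the capacity and per-level node computation above. The fanout values and nr_cpu_ids are assumptions chosen purely for illustration.

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define MAX_LVLS		4

int main(void)
{
	long cap[MAX_LVLS];
	int fanout_leaf = 16, fanout = 64, nr_cpu_ids = 100, lvls, i;

	cap[0] = fanout_leaf;			/* CPUs one leaf node can cover */
	for (i = 1; i < MAX_LVLS; i++)
		cap[i] = cap[i - 1] * fanout;	/* CPUs an (i+1)-level tree covers */
	for (i = 0; nr_cpu_ids > cap[i]; i++)
		;				/* smallest tree that fits */
	lvls = i + 1;
	for (i = 0; i < lvls; i++)		/* nodes per level, root first */
		printf("level %d: %ld node(s)\n", i,
		       DIV_ROUND_UP(nr_cpu_ids, cap[(lvls - 1) - i]));
	return 0;				/* 100 CPUs -> 1 root, 7 leaves */
}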
3496 : :
3497 : : /*
3498 : : * Dump out the structure of the rcu_node combining tree associated
3499 : : * with the rcu_state structure.
3500 : : */
3501 : 0 : static void __init rcu_dump_rcu_node_tree(void)
3502 : : {
3503 : : int level = 0;
3504 : : struct rcu_node *rnp;
3505 : :
3506 : 0 : pr_info("rcu_node tree layout dump\n");
3507 : 0 : pr_info(" ");
3508 [ # # ]: 0 : rcu_for_each_node_breadth_first(rnp) {
3509 [ # # ]: 0 : if (rnp->level != level) {
3510 : 0 : pr_cont("\n");
3511 : 0 : pr_info(" ");
3512 : 0 : level = rnp->level;
3513 : : }
3514 : 0 : pr_cont("%d:%d ^%d ", rnp->grplo, rnp->grphi, rnp->grpnum);
3515 : : }
3516 : 0 : pr_cont("\n");
3517 : 0 : }
3518 : :
3519 : : struct workqueue_struct *rcu_gp_wq;
3520 : : struct workqueue_struct *rcu_par_gp_wq;
3521 : :
3522 : 207 : void __init rcu_init(void)
3523 : : {
3524 : : int cpu;
3525 : :
3526 : 207 : rcu_early_boot_tests();
3527 : :
3528 : 207 : rcu_bootup_announce();
3529 : 207 : rcu_init_geometry();
3530 : 207 : rcu_init_one();
3531 [ - + ]: 207 : if (dump_tree)
3532 : 0 : rcu_dump_rcu_node_tree();
3533 [ + - ]: 207 : if (use_softirq)
3534 : 207 : open_softirq(RCU_SOFTIRQ, rcu_core_si);
3535 : :
3536 : : /*
3537 : : * We don't need protection against CPU-hotplug here because
3538 : : * this is called early in boot, before either interrupts
3539 : : * or the scheduler are operational.
3540 : : */
3541 : : pm_notifier(rcu_pm_notify, 0);
3542 [ + + ]: 414 : for_each_online_cpu(cpu) {
3543 : 207 : rcutree_prepare_cpu(cpu);
3544 : 207 : rcu_cpu_starting(cpu);
3545 : 207 : rcutree_online_cpu(cpu);
3546 : : }
3547 : :
3548 : : /* Create workqueue for expedited GPs and for Tree SRCU. */
3549 : 207 : rcu_gp_wq = alloc_workqueue("rcu_gp", WQ_MEM_RECLAIM, 0);
3550 [ - + ]: 207 : WARN_ON(!rcu_gp_wq);
3551 : 207 : rcu_par_gp_wq = alloc_workqueue("rcu_par_gp", WQ_MEM_RECLAIM, 0);
3552 [ - + ]: 207 : WARN_ON(!rcu_par_gp_wq);
3553 : 207 : srcu_init();
3554 : 207 : }
3555 : :
3556 : : #include "tree_stall.h"
3557 : : #include "tree_exp.h"
3558 : : #include "tree_plugin.h"