LCOV - code coverage report
Current view: top level - kernel/rcu - tree_plugin.h (source / functions)
Test: combined.info
Date: 2022-03-28 13:20:08

                 Hit      Total    Coverage
Lines:           110        154      71.4 %
Functions:         8          9      88.9 %
Branches:         33         66      50.0 %

           Branch data     Line data    Source code
       1                 :            : /* SPDX-License-Identifier: GPL-2.0+ */
       2                 :            : /*
       3                 :            :  * Read-Copy Update mechanism for mutual exclusion (tree-based version)
       4                 :            :  * Internal non-public definitions that provide either classic
       5                 :            :  * or preemptible semantics.
       6                 :            :  *
       7                 :            :  * Copyright Red Hat, 2009
       8                 :            :  * Copyright IBM Corporation, 2009
       9                 :            :  *
      10                 :            :  * Author: Ingo Molnar <mingo@elte.hu>
      11                 :            :  *         Paul E. McKenney <paulmck@linux.ibm.com>
      12                 :            :  */
      13                 :            : 
      14                 :            : #include "../locking/rtmutex_common.h"
      15                 :            : 
      16                 :            : #ifdef CONFIG_RCU_NOCB_CPU
      17                 :            : static cpumask_var_t rcu_nocb_mask; /* CPUs to have callbacks offloaded. */
       18                 :            : static bool __read_mostly rcu_nocb_poll;    /* Offload kthreads are to poll. */
      19                 :            : #endif /* #ifdef CONFIG_RCU_NOCB_CPU */
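
Editor's note, not part of the covered source: the two declarations above back the rcu_nocbs= and rcu_nocb_poll kernel boot parameters. A hypothetical command-line fragment offloading callbacks from CPUs 1-7 and asking the offload kthreads to poll (the CPU list is an assumed example, not taken from this report) would be:

	rcu_nocbs=1-7 rcu_nocb_poll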
      20                 :            : 
      21                 :            : /*
      22                 :            :  * Check the RCU kernel configuration parameters and print informative
      23                 :            :  * messages about anything out of the ordinary.
      24                 :            :  */
      25                 :         30 : static void __init rcu_bootup_announce_oddness(void)
      26                 :            : {
      27                 :         30 :         if (IS_ENABLED(CONFIG_RCU_TRACE))
      28                 :         30 :                 pr_info("\tRCU event tracing is enabled.\n");
      29                 :         30 :         if ((IS_ENABLED(CONFIG_64BIT) && RCU_FANOUT != 64) ||
      30                 :            :             (!IS_ENABLED(CONFIG_64BIT) && RCU_FANOUT != 32))
      31                 :            :                 pr_info("\tCONFIG_RCU_FANOUT set to non-default value of %d.\n",
      32                 :            :                         RCU_FANOUT);
      33         [ -  + ]:         30 :         if (rcu_fanout_exact)
      34                 :          0 :                 pr_info("\tHierarchical RCU autobalancing is disabled.\n");
      35                 :         30 :         if (IS_ENABLED(CONFIG_RCU_FAST_NO_HZ))
      36                 :            :                 pr_info("\tRCU dyntick-idle grace-period acceleration is enabled.\n");
      37                 :         30 :         if (IS_ENABLED(CONFIG_PROVE_RCU))
      38                 :            :                 pr_info("\tRCU lockdep checking is enabled.\n");
      39                 :         30 :         if (RCU_NUM_LVLS >= 4)
       40                 :            :                 pr_info("\tFour (or more)-level hierarchy is enabled.\n");
      41                 :         30 :         if (RCU_FANOUT_LEAF != 16)
      42                 :            :                 pr_info("\tBuild-time adjustment of leaf fanout to %d.\n",
      43                 :            :                         RCU_FANOUT_LEAF);
      44         [ -  + ]:         30 :         if (rcu_fanout_leaf != RCU_FANOUT_LEAF)
      45                 :          0 :                 pr_info("\tBoot-time adjustment of leaf fanout to %d.\n",
      46                 :            :                         rcu_fanout_leaf);
      47         [ +  - ]:         30 :         if (nr_cpu_ids != NR_CPUS)
      48                 :         30 :                 pr_info("\tRCU restricting CPUs from NR_CPUS=%d to nr_cpu_ids=%u.\n", NR_CPUS, nr_cpu_ids);
      49                 :            : #ifdef CONFIG_RCU_BOOST
      50                 :            :         pr_info("\tRCU priority boosting: priority %d delay %d ms.\n",
      51                 :            :                 kthread_prio, CONFIG_RCU_BOOST_DELAY);
      52                 :            : #endif
      53         [ -  + ]:         30 :         if (blimit != DEFAULT_RCU_BLIMIT)
      54                 :          0 :                 pr_info("\tBoot-time adjustment of callback invocation limit to %ld.\n", blimit);
      55         [ -  + ]:         30 :         if (qhimark != DEFAULT_RCU_QHIMARK)
      56                 :          0 :                 pr_info("\tBoot-time adjustment of callback high-water mark to %ld.\n", qhimark);
      57         [ -  + ]:         30 :         if (qlowmark != DEFAULT_RCU_QLOMARK)
      58                 :          0 :                 pr_info("\tBoot-time adjustment of callback low-water mark to %ld.\n", qlowmark);
      59         [ -  + ]:         30 :         if (jiffies_till_first_fqs != ULONG_MAX)
      60                 :          0 :                 pr_info("\tBoot-time adjustment of first FQS scan delay to %ld jiffies.\n", jiffies_till_first_fqs);
      61         [ -  + ]:         30 :         if (jiffies_till_next_fqs != ULONG_MAX)
      62                 :          0 :                 pr_info("\tBoot-time adjustment of subsequent FQS scan delay to %ld jiffies.\n", jiffies_till_next_fqs);
      63         [ -  + ]:         30 :         if (jiffies_till_sched_qs != ULONG_MAX)
      64                 :          0 :                 pr_info("\tBoot-time adjustment of scheduler-enlistment delay to %ld jiffies.\n", jiffies_till_sched_qs);
      65         [ -  + ]:         30 :         if (rcu_kick_kthreads)
      66                 :          0 :                 pr_info("\tKick kthreads if too-long grace period.\n");
      67                 :         30 :         if (IS_ENABLED(CONFIG_DEBUG_OBJECTS_RCU_HEAD))
      68                 :            :                 pr_info("\tRCU callback double-/use-after-free debug enabled.\n");
      69         [ -  + ]:         30 :         if (gp_preinit_delay)
      70                 :          0 :                 pr_info("\tRCU debug GP pre-init slowdown %d jiffies.\n", gp_preinit_delay);
      71         [ -  + ]:         30 :         if (gp_init_delay)
      72                 :          0 :                 pr_info("\tRCU debug GP init slowdown %d jiffies.\n", gp_init_delay);
      73         [ -  + ]:         30 :         if (gp_cleanup_delay)
       74                 :          0 :                 pr_info("\tRCU debug GP cleanup slowdown %d jiffies.\n", gp_cleanup_delay);
      75         [ -  + ]:         30 :         if (!use_softirq)
      76                 :          0 :                 pr_info("\tRCU_SOFTIRQ processing moved to rcuc kthreads.\n");
      77                 :         30 :         if (IS_ENABLED(CONFIG_RCU_EQS_DEBUG))
      78                 :            :                 pr_info("\tRCU debug extended QS entry/exit.\n");
      79                 :         30 :         rcupdate_announce_bootup_oddness();
      80                 :         30 : }
      81                 :            : 
      82                 :            : #ifdef CONFIG_PREEMPT_RCU
      83                 :            : 
      84                 :            : static void rcu_report_exp_rnp(struct rcu_node *rnp, bool wake);
      85                 :            : static void rcu_read_unlock_special(struct task_struct *t);
      86                 :            : 
      87                 :            : /*
      88                 :            :  * Tell them what RCU they are running.
      89                 :            :  */
      90                 :            : static void __init rcu_bootup_announce(void)
      91                 :            : {
      92                 :            :         pr_info("Preemptible hierarchical RCU implementation.\n");
      93                 :            :         rcu_bootup_announce_oddness();
      94                 :            : }
      95                 :            : 
      96                 :            : /* Flags for rcu_preempt_ctxt_queue() decision table. */
      97                 :            : #define RCU_GP_TASKS    0x8
      98                 :            : #define RCU_EXP_TASKS   0x4
      99                 :            : #define RCU_GP_BLKD     0x2
     100                 :            : #define RCU_EXP_BLKD    0x1
     101                 :            : 
     102                 :            : /*
     103                 :            :  * Queues a task preempted within an RCU-preempt read-side critical
     104                 :            :  * section into the appropriate location within the ->blkd_tasks list,
     105                 :            :  * depending on the states of any ongoing normal and expedited grace
     106                 :            :  * periods.  The ->gp_tasks pointer indicates which element the normal
     107                 :            :  * grace period is waiting on (NULL if none), and the ->exp_tasks pointer
     108                 :            :  * indicates which element the expedited grace period is waiting on (again,
     109                 :            :  * NULL if none).  If a grace period is waiting on a given element in the
     110                 :            :  * ->blkd_tasks list, it also waits on all subsequent elements.  Thus,
     111                 :            :  * adding a task to the tail of the list blocks any grace period that is
     112                 :            :  * already waiting on one of the elements.  In contrast, adding a task
     113                 :            :  * to the head of the list won't block any grace period that is already
     114                 :            :  * waiting on one of the elements.
     115                 :            :  *
     116                 :            :  * This queuing is imprecise, and can sometimes make an ongoing grace
     117                 :            :  * period wait for a task that is not strictly speaking blocking it.
     118                 :            :  * Given the choice, we needlessly block a normal grace period rather than
     119                 :            :  * blocking an expedited grace period.
     120                 :            :  *
     121                 :            :  * Note that an endless sequence of expedited grace periods still cannot
     122                 :            :  * indefinitely postpone a normal grace period.  Eventually, all of the
     123                 :            :  * fixed number of preempted tasks blocking the normal grace period that are
     124                 :            :  * not also blocking the expedited grace period will resume and complete
     125                 :            :  * their RCU read-side critical sections.  At that point, the ->gp_tasks
     126                 :            :  * pointer will equal the ->exp_tasks pointer, at which point the end of
     127                 :            :  * the corresponding expedited grace period will also be the end of the
     128                 :            :  * normal grace period.
     129                 :            :  */
     130                 :            : static void rcu_preempt_ctxt_queue(struct rcu_node *rnp, struct rcu_data *rdp)
      131                 :            :         __releases(rnp->lock) /* But leaves interrupts disabled. */
     132                 :            : {
     133                 :            :         int blkd_state = (rnp->gp_tasks ? RCU_GP_TASKS : 0) +
     134                 :            :                          (rnp->exp_tasks ? RCU_EXP_TASKS : 0) +
     135                 :            :                          (rnp->qsmask & rdp->grpmask ? RCU_GP_BLKD : 0) +
     136                 :            :                          (rnp->expmask & rdp->grpmask ? RCU_EXP_BLKD : 0);
     137                 :            :         struct task_struct *t = current;
     138                 :            : 
     139                 :            :         raw_lockdep_assert_held_rcu_node(rnp);
     140                 :            :         WARN_ON_ONCE(rdp->mynode != rnp);
     141                 :            :         WARN_ON_ONCE(!rcu_is_leaf_node(rnp));
     142                 :            :         /* RCU better not be waiting on newly onlined CPUs! */
     143                 :            :         WARN_ON_ONCE(rnp->qsmaskinitnext & ~rnp->qsmaskinit & rnp->qsmask &
     144                 :            :                      rdp->grpmask);
     145                 :            : 
     146                 :            :         /*
     147                 :            :          * Decide where to queue the newly blocked task.  In theory,
     148                 :            :          * this could be an if-statement.  In practice, when I tried
     149                 :            :          * that, it was quite messy.
     150                 :            :          */
     151                 :            :         switch (blkd_state) {
     152                 :            :         case 0:
     153                 :            :         case                RCU_EXP_TASKS:
     154                 :            :         case                RCU_EXP_TASKS + RCU_GP_BLKD:
     155                 :            :         case RCU_GP_TASKS:
     156                 :            :         case RCU_GP_TASKS + RCU_EXP_TASKS:
     157                 :            : 
     158                 :            :                 /*
     159                 :            :                  * Blocking neither GP, or first task blocking the normal
     160                 :            :                  * GP but not blocking the already-waiting expedited GP.
     161                 :            :                  * Queue at the head of the list to avoid unnecessarily
     162                 :            :                  * blocking the already-waiting GPs.
     163                 :            :                  */
     164                 :            :                 list_add(&t->rcu_node_entry, &rnp->blkd_tasks);
     165                 :            :                 break;
     166                 :            : 
     167                 :            :         case                                              RCU_EXP_BLKD:
     168                 :            :         case                                RCU_GP_BLKD:
     169                 :            :         case                                RCU_GP_BLKD + RCU_EXP_BLKD:
     170                 :            :         case RCU_GP_TASKS +                               RCU_EXP_BLKD:
     171                 :            :         case RCU_GP_TASKS +                 RCU_GP_BLKD + RCU_EXP_BLKD:
     172                 :            :         case RCU_GP_TASKS + RCU_EXP_TASKS + RCU_GP_BLKD + RCU_EXP_BLKD:
     173                 :            : 
     174                 :            :                 /*
     175                 :            :                  * First task arriving that blocks either GP, or first task
     176                 :            :                  * arriving that blocks the expedited GP (with the normal
     177                 :            :                  * GP already waiting), or a task arriving that blocks
     178                 :            :                  * both GPs with both GPs already waiting.  Queue at the
     179                 :            :                  * tail of the list to avoid any GP waiting on any of the
     180                 :            :                  * already queued tasks that are not blocking it.
     181                 :            :                  */
     182                 :            :                 list_add_tail(&t->rcu_node_entry, &rnp->blkd_tasks);
     183                 :            :                 break;
     184                 :            : 
     185                 :            :         case                RCU_EXP_TASKS +               RCU_EXP_BLKD:
     186                 :            :         case                RCU_EXP_TASKS + RCU_GP_BLKD + RCU_EXP_BLKD:
     187                 :            :         case RCU_GP_TASKS + RCU_EXP_TASKS +               RCU_EXP_BLKD:
     188                 :            : 
     189                 :            :                 /*
     190                 :            :                  * Second or subsequent task blocking the expedited GP.
     191                 :            :                  * The task either does not block the normal GP, or is the
     192                 :            :                  * first task blocking the normal GP.  Queue just after
     193                 :            :                  * the first task blocking the expedited GP.
     194                 :            :                  */
     195                 :            :                 list_add(&t->rcu_node_entry, rnp->exp_tasks);
     196                 :            :                 break;
     197                 :            : 
     198                 :            :         case RCU_GP_TASKS +                 RCU_GP_BLKD:
     199                 :            :         case RCU_GP_TASKS + RCU_EXP_TASKS + RCU_GP_BLKD:
     200                 :            : 
     201                 :            :                 /*
     202                 :            :                  * Second or subsequent task blocking the normal GP.
     203                 :            :                  * The task does not block the expedited GP. Queue just
     204                 :            :                  * after the first task blocking the normal GP.
     205                 :            :                  */
     206                 :            :                 list_add(&t->rcu_node_entry, rnp->gp_tasks);
     207                 :            :                 break;
     208                 :            : 
     209                 :            :         default:
     210                 :            : 
     211                 :            :                 /* Yet another exercise in excessive paranoia. */
     212                 :            :                 WARN_ON_ONCE(1);
     213                 :            :                 break;
     214                 :            :         }
     215                 :            : 
     216                 :            :         /*
     217                 :            :          * We have now queued the task.  If it was the first one to
     218                 :            :          * block either grace period, update the ->gp_tasks and/or
     219                 :            :          * ->exp_tasks pointers, respectively, to reference the newly
     220                 :            :          * blocked tasks.
     221                 :            :          */
     222                 :            :         if (!rnp->gp_tasks && (blkd_state & RCU_GP_BLKD)) {
     223                 :            :                 WRITE_ONCE(rnp->gp_tasks, &t->rcu_node_entry);
     224                 :            :                 WARN_ON_ONCE(rnp->completedqs == rnp->gp_seq);
     225                 :            :         }
     226                 :            :         if (!rnp->exp_tasks && (blkd_state & RCU_EXP_BLKD))
     227                 :            :                 rnp->exp_tasks = &t->rcu_node_entry;
     228                 :            :         WARN_ON_ONCE(!(blkd_state & RCU_GP_BLKD) !=
     229                 :            :                      !(rnp->qsmask & rdp->grpmask));
     230                 :            :         WARN_ON_ONCE(!(blkd_state & RCU_EXP_BLKD) !=
     231                 :            :                      !(rnp->expmask & rdp->grpmask));
     232                 :            :         raw_spin_unlock_rcu_node(rnp); /* interrupts remain disabled. */
     233                 :            : 
     234                 :            :         /*
     235                 :            :          * Report the quiescent state for the expedited GP.  This expedited
     236                 :            :          * GP should not be able to end until we report, so there should be
     237                 :            :          * no need to check for a subsequent expedited GP.  (Though we are
     238                 :            :          * still in a quiescent state in any case.)
     239                 :            :          */
     240                 :            :         if (blkd_state & RCU_EXP_BLKD && rdp->exp_deferred_qs)
     241                 :            :                 rcu_report_exp_rdp(rdp);
     242                 :            :         else
     243                 :            :                 WARN_ON_ONCE(rdp->exp_deferred_qs);
     244                 :            : }
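
Editor's aside: the decision table above can be condensed into a small pure helper. The sketch below is illustrative only; the enum and function names are hypothetical, and the RCU_GP_TASKS/RCU_EXP_TASKS/RCU_GP_BLKD/RCU_EXP_BLKD values defined earlier in this file are assumed.

	/* Illustrative condensation of rcu_preempt_ctxt_queue()'s decision table. */
	enum blkd_queue_pos {
		BLKD_QUEUE_HEAD,	/* head of ->blkd_tasks */
		BLKD_QUEUE_TAIL,	/* tail of ->blkd_tasks */
		BLKD_QUEUE_AFTER_EXP,	/* just after ->exp_tasks */
		BLKD_QUEUE_AFTER_GP,	/* just after ->gp_tasks */
	};

	static inline enum blkd_queue_pos blkd_state_queue_pos(int blkd_state)
	{
		switch (blkd_state) {
		case 0:
		case                RCU_EXP_TASKS:
		case                RCU_EXP_TASKS + RCU_GP_BLKD:
		case RCU_GP_TASKS:
		case RCU_GP_TASKS + RCU_EXP_TASKS:
			return BLKD_QUEUE_HEAD;		/* blocks no already-waiting GP */
		case                                              RCU_EXP_BLKD:
		case                                RCU_GP_BLKD:
		case                                RCU_GP_BLKD + RCU_EXP_BLKD:
		case RCU_GP_TASKS +                               RCU_EXP_BLKD:
		case RCU_GP_TASKS +                 RCU_GP_BLKD + RCU_EXP_BLKD:
		case RCU_GP_TASKS + RCU_EXP_TASKS + RCU_GP_BLKD + RCU_EXP_BLKD:
			return BLKD_QUEUE_TAIL;		/* must block the already-waiting GP(s) */
		case                RCU_EXP_TASKS +               RCU_EXP_BLKD:
		case                RCU_EXP_TASKS + RCU_GP_BLKD + RCU_EXP_BLKD:
		case RCU_GP_TASKS + RCU_EXP_TASKS +               RCU_EXP_BLKD:
			return BLKD_QUEUE_AFTER_EXP;	/* joins tasks blocking the expedited GP */
		case RCU_GP_TASKS +                 RCU_GP_BLKD:
		case RCU_GP_TASKS + RCU_EXP_TASKS + RCU_GP_BLKD:
			return BLKD_QUEUE_AFTER_GP;	/* joins tasks blocking the normal GP */
		default:
			return BLKD_QUEUE_HEAD;		/* all 16 states covered above; not reached */
		}
	}

All sixteen combinations of the four flag bits are enumerated, so the default arm is unreachable, just as the real function's default arm only fires a WARN_ON_ONCE().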
     245                 :            : 
     246                 :            : /*
     247                 :            :  * Record a preemptible-RCU quiescent state for the specified CPU.
     248                 :            :  * Note that this does not necessarily mean that the task currently running
     249                 :            :  * on the CPU is in a quiescent state:  Instead, it means that the current
     250                 :            :  * grace period need not wait on any RCU read-side critical section that
     251                 :            :  * starts later on this CPU.  It also means that if the current task is
     252                 :            :  * in an RCU read-side critical section, it has already added itself to
     253                 :            :  * some leaf rcu_node structure's ->blkd_tasks list.  In addition to the
     254                 :            :  * current task, there might be any number of other tasks blocked while
     255                 :            :  * in an RCU read-side critical section.
     256                 :            :  *
     257                 :            :  * Callers to this function must disable preemption.
     258                 :            :  */
     259                 :            : static void rcu_qs(void)
     260                 :            : {
     261                 :            :         RCU_LOCKDEP_WARN(preemptible(), "rcu_qs() invoked with preemption enabled!!!\n");
     262                 :            :         if (__this_cpu_read(rcu_data.cpu_no_qs.s)) {
     263                 :            :                 trace_rcu_grace_period(TPS("rcu_preempt"),
     264                 :            :                                        __this_cpu_read(rcu_data.gp_seq),
     265                 :            :                                        TPS("cpuqs"));
     266                 :            :                 __this_cpu_write(rcu_data.cpu_no_qs.b.norm, false);
     267                 :            :                 barrier(); /* Coordinate with rcu_flavor_sched_clock_irq(). */
     268                 :            :                 WRITE_ONCE(current->rcu_read_unlock_special.b.need_qs, false);
     269                 :            :         }
     270                 :            : }
     271                 :            : 
     272                 :            : /*
     273                 :            :  * We have entered the scheduler, and the current task might soon be
     274                 :            :  * context-switched away from.  If this task is in an RCU read-side
     275                 :            :  * critical section, we will no longer be able to rely on the CPU to
     276                 :            :  * record that fact, so we enqueue the task on the blkd_tasks list.
     277                 :            :  * The task will dequeue itself when it exits the outermost enclosing
     278                 :            :  * RCU read-side critical section.  Therefore, the current grace period
     279                 :            :  * cannot be permitted to complete until the blkd_tasks list entries
     280                 :            :  * predating the current grace period drain, in other words, until
     281                 :            :  * rnp->gp_tasks becomes NULL.
     282                 :            :  *
     283                 :            :  * Caller must disable interrupts.
     284                 :            :  */
     285                 :            : void rcu_note_context_switch(bool preempt)
     286                 :            : {
     287                 :            :         struct task_struct *t = current;
     288                 :            :         struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
     289                 :            :         struct rcu_node *rnp;
     290                 :            : 
     291                 :            :         trace_rcu_utilization(TPS("Start context switch"));
     292                 :            :         lockdep_assert_irqs_disabled();
     293                 :            :         WARN_ON_ONCE(!preempt && rcu_preempt_depth() > 0);
     294                 :            :         if (rcu_preempt_depth() > 0 &&
     295                 :            :             !t->rcu_read_unlock_special.b.blocked) {
     296                 :            : 
     297                 :            :                 /* Possibly blocking in an RCU read-side critical section. */
     298                 :            :                 rnp = rdp->mynode;
     299                 :            :                 raw_spin_lock_rcu_node(rnp);
     300                 :            :                 t->rcu_read_unlock_special.b.blocked = true;
     301                 :            :                 t->rcu_blocked_node = rnp;
     302                 :            : 
     303                 :            :                 /*
     304                 :            :                  * Verify the CPU's sanity, trace the preemption, and
     305                 :            :                  * then queue the task as required based on the states
      306                 :            :                  * of any ongoing normal and expedited grace periods.
     307                 :            :                  */
     308                 :            :                 WARN_ON_ONCE((rdp->grpmask & rcu_rnp_online_cpus(rnp)) == 0);
     309                 :            :                 WARN_ON_ONCE(!list_empty(&t->rcu_node_entry));
     310                 :            :                 trace_rcu_preempt_task(rcu_state.name,
     311                 :            :                                        t->pid,
     312                 :            :                                        (rnp->qsmask & rdp->grpmask)
     313                 :            :                                        ? rnp->gp_seq
     314                 :            :                                        : rcu_seq_snap(&rnp->gp_seq));
     315                 :            :                 rcu_preempt_ctxt_queue(rnp, rdp);
     316                 :            :         } else {
     317                 :            :                 rcu_preempt_deferred_qs(t);
     318                 :            :         }
     319                 :            : 
     320                 :            :         /*
     321                 :            :          * Either we were not in an RCU read-side critical section to
     322                 :            :          * begin with, or we have now recorded that critical section
     323                 :            :          * globally.  Either way, we can now note a quiescent state
     324                 :            :          * for this CPU.  Again, if we were in an RCU read-side critical
     325                 :            :          * section, and if that critical section was blocking the current
     326                 :            :          * grace period, then the fact that the task has been enqueued
     327                 :            :          * means that we continue to block the current grace period.
     328                 :            :          */
     329                 :            :         rcu_qs();
     330                 :            :         if (rdp->exp_deferred_qs)
     331                 :            :                 rcu_report_exp_rdp(rdp);
     332                 :            :         trace_rcu_utilization(TPS("End context switch"));
     333                 :            : }
     334                 :            : EXPORT_SYMBOL_GPL(rcu_note_context_switch);
     335                 :            : 
     336                 :            : /*
     337                 :            :  * Check for preempted RCU readers blocking the current grace period
     338                 :            :  * for the specified rcu_node structure.  If the caller needs a reliable
     339                 :            :  * answer, it must hold the rcu_node's ->lock.
     340                 :            :  */
     341                 :            : static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
     342                 :            : {
     343                 :            :         return READ_ONCE(rnp->gp_tasks) != NULL;
     344                 :            : }
     345                 :            : 
     346                 :            : /* Bias and limit values for ->rcu_read_lock_nesting. */
     347                 :            : #define RCU_NEST_BIAS INT_MAX
     348                 :            : #define RCU_NEST_NMAX (-INT_MAX / 2)
     349                 :            : #define RCU_NEST_PMAX (INT_MAX / 2)
     350                 :            : 
     351                 :            : static void rcu_preempt_read_enter(void)
     352                 :            : {
     353                 :            :         current->rcu_read_lock_nesting++;
     354                 :            : }
     355                 :            : 
     356                 :            : static void rcu_preempt_read_exit(void)
     357                 :            : {
     358                 :            :         current->rcu_read_lock_nesting--;
     359                 :            : }
     360                 :            : 
     361                 :            : static void rcu_preempt_depth_set(int val)
     362                 :            : {
     363                 :            :         current->rcu_read_lock_nesting = val;
     364                 :            : }
     365                 :            : 
     366                 :            : /*
     367                 :            :  * Preemptible RCU implementation for rcu_read_lock().
      368                 :            :  * Just increment ->rcu_read_lock_nesting; shared state will be updated
     369                 :            :  * if we block.
     370                 :            :  */
     371                 :            : void __rcu_read_lock(void)
     372                 :            : {
     373                 :            :         rcu_preempt_read_enter();
     374                 :            :         if (IS_ENABLED(CONFIG_PROVE_LOCKING))
     375                 :            :                 WARN_ON_ONCE(rcu_preempt_depth() > RCU_NEST_PMAX);
     376                 :            :         barrier();  /* critical section after entry code. */
     377                 :            : }
     378                 :            : EXPORT_SYMBOL_GPL(__rcu_read_lock);
     379                 :            : 
     380                 :            : /*
     381                 :            :  * Preemptible RCU implementation for rcu_read_unlock().
     382                 :            :  * Decrement ->rcu_read_lock_nesting.  If the result is zero (outermost
     383                 :            :  * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then
     384                 :            :  * invoke rcu_read_unlock_special() to clean up after a context switch
     385                 :            :  * in an RCU read-side critical section and other special cases.
     386                 :            :  */
     387                 :            : void __rcu_read_unlock(void)
     388                 :            : {
     389                 :            :         struct task_struct *t = current;
     390                 :            : 
     391                 :            :         if (rcu_preempt_depth() != 1) {
     392                 :            :                 rcu_preempt_read_exit();
     393                 :            :         } else {
     394                 :            :                 barrier();  /* critical section before exit code. */
     395                 :            :                 rcu_preempt_depth_set(-RCU_NEST_BIAS);
     396                 :            :                 barrier();  /* assign before ->rcu_read_unlock_special load */
     397                 :            :                 if (unlikely(READ_ONCE(t->rcu_read_unlock_special.s)))
     398                 :            :                         rcu_read_unlock_special(t);
     399                 :            :                 barrier();  /* ->rcu_read_unlock_special load before assign */
     400                 :            :                 rcu_preempt_depth_set(0);
     401                 :            :         }
     402                 :            :         if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
     403                 :            :                 int rrln = rcu_preempt_depth();
     404                 :            : 
     405                 :            :                 WARN_ON_ONCE(rrln < 0 && rrln > RCU_NEST_NMAX);
     406                 :            :         }
     407                 :            : }
     408                 :            : EXPORT_SYMBOL_GPL(__rcu_read_unlock);
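
Editor's usage sketch, not part of the covered source: under CONFIG_PREEMPT_RCU, rcu_read_lock() and rcu_read_unlock() resolve to the __rcu_read_lock() and __rcu_read_unlock() primitives above. The structure, the RCU-protected pointer, and the function name below are assumptions made for illustration.

	struct foo {
		int val;
	};

	static struct foo __rcu *gbl_foo;	/* hypothetical RCU-protected pointer */

	static int read_foo_val(void)
	{
		struct foo *p;
		int ret = 0;

		rcu_read_lock();		/* increments ->rcu_read_lock_nesting */
		p = rcu_dereference(gbl_foo);	/* fetch the protected pointer */
		if (p)
			ret = p->val;
		rcu_read_unlock();		/* may invoke rcu_read_unlock_special() */
		return ret;
	}

Note that the outermost rcu_read_unlock() briefly biases the nesting count to -RCU_NEST_BIAS so that read-side critical sections entered from interrupt handlers while rcu_read_unlock_special() runs cannot re-enter the special processing.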
     409                 :            : 
     410                 :            : /*
      411                 :            :  * Advance a ->blkd_tasks-list pointer to the next entry, returning
      412                 :            :  * NULL instead if we are at the end of the list.
     413                 :            :  */
     414                 :            : static struct list_head *rcu_next_node_entry(struct task_struct *t,
     415                 :            :                                              struct rcu_node *rnp)
     416                 :            : {
     417                 :            :         struct list_head *np;
     418                 :            : 
     419                 :            :         np = t->rcu_node_entry.next;
     420                 :            :         if (np == &rnp->blkd_tasks)
     421                 :            :                 np = NULL;
     422                 :            :         return np;
     423                 :            : }
     424                 :            : 
     425                 :            : /*
     426                 :            :  * Return true if the specified rcu_node structure has tasks that were
     427                 :            :  * preempted within an RCU read-side critical section.
     428                 :            :  */
     429                 :            : static bool rcu_preempt_has_tasks(struct rcu_node *rnp)
     430                 :            : {
     431                 :            :         return !list_empty(&rnp->blkd_tasks);
     432                 :            : }
     433                 :            : 
     434                 :            : /*
     435                 :            :  * Report deferred quiescent states.  The deferral time can
     436                 :            :  * be quite short, for example, in the case of the call from
     437                 :            :  * rcu_read_unlock_special().
     438                 :            :  */
     439                 :            : static void
     440                 :            : rcu_preempt_deferred_qs_irqrestore(struct task_struct *t, unsigned long flags)
     441                 :            : {
     442                 :            :         bool empty_exp;
     443                 :            :         bool empty_norm;
     444                 :            :         bool empty_exp_now;
     445                 :            :         struct list_head *np;
     446                 :            :         bool drop_boost_mutex = false;
     447                 :            :         struct rcu_data *rdp;
     448                 :            :         struct rcu_node *rnp;
     449                 :            :         union rcu_special special;
     450                 :            : 
     451                 :            :         /*
     452                 :            :          * If RCU core is waiting for this CPU to exit its critical section,
     453                 :            :          * report the fact that it has exited.  Because irqs are disabled,
     454                 :            :          * t->rcu_read_unlock_special cannot change.
     455                 :            :          */
     456                 :            :         special = t->rcu_read_unlock_special;
     457                 :            :         rdp = this_cpu_ptr(&rcu_data);
     458                 :            :         if (!special.s && !rdp->exp_deferred_qs) {
     459                 :            :                 local_irq_restore(flags);
     460                 :            :                 return;
     461                 :            :         }
     462                 :            :         t->rcu_read_unlock_special.s = 0;
     463                 :            :         if (special.b.need_qs)
     464                 :            :                 rcu_qs();
     465                 :            : 
     466                 :            :         /*
     467                 :            :          * Respond to a request by an expedited grace period for a
     468                 :            :          * quiescent state from this CPU.  Note that requests from
     469                 :            :          * tasks are handled when removing the task from the
     470                 :            :          * blocked-tasks list below.
     471                 :            :          */
     472                 :            :         if (rdp->exp_deferred_qs)
     473                 :            :                 rcu_report_exp_rdp(rdp);
     474                 :            : 
     475                 :            :         /* Clean up if blocked during RCU read-side critical section. */
     476                 :            :         if (special.b.blocked) {
     477                 :            : 
     478                 :            :                 /*
     479                 :            :                  * Remove this task from the list it blocked on.  The task
     480                 :            :                  * now remains queued on the rcu_node corresponding to the
     481                 :            :                  * CPU it first blocked on, so there is no longer any need
     482                 :            :                  * to loop.  Retain a WARN_ON_ONCE() out of sheer paranoia.
     483                 :            :                  */
     484                 :            :                 rnp = t->rcu_blocked_node;
     485                 :            :                 raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
     486                 :            :                 WARN_ON_ONCE(rnp != t->rcu_blocked_node);
     487                 :            :                 WARN_ON_ONCE(!rcu_is_leaf_node(rnp));
     488                 :            :                 empty_norm = !rcu_preempt_blocked_readers_cgp(rnp);
     489                 :            :                 WARN_ON_ONCE(rnp->completedqs == rnp->gp_seq &&
     490                 :            :                              (!empty_norm || rnp->qsmask));
     491                 :            :                 empty_exp = sync_rcu_exp_done(rnp);
      492                 :            :                 smp_mb(); /* ensure expedited fastpath sees end of RCU critical section. */
     493                 :            :                 np = rcu_next_node_entry(t, rnp);
     494                 :            :                 list_del_init(&t->rcu_node_entry);
     495                 :            :                 t->rcu_blocked_node = NULL;
     496                 :            :                 trace_rcu_unlock_preempted_task(TPS("rcu_preempt"),
     497                 :            :                                                 rnp->gp_seq, t->pid);
     498                 :            :                 if (&t->rcu_node_entry == rnp->gp_tasks)
     499                 :            :                         WRITE_ONCE(rnp->gp_tasks, np);
     500                 :            :                 if (&t->rcu_node_entry == rnp->exp_tasks)
     501                 :            :                         rnp->exp_tasks = np;
     502                 :            :                 if (IS_ENABLED(CONFIG_RCU_BOOST)) {
     503                 :            :                         /* Snapshot ->boost_mtx ownership w/rnp->lock held. */
     504                 :            :                         drop_boost_mutex = rt_mutex_owner(&rnp->boost_mtx) == t;
     505                 :            :                         if (&t->rcu_node_entry == rnp->boost_tasks)
     506                 :            :                                 rnp->boost_tasks = np;
     507                 :            :                 }
     508                 :            : 
     509                 :            :                 /*
     510                 :            :                  * If this was the last task on the current list, and if
     511                 :            :                  * we aren't waiting on any CPUs, report the quiescent state.
     512                 :            :                  * Note that rcu_report_unblock_qs_rnp() releases rnp->lock,
     513                 :            :                  * so we must take a snapshot of the expedited state.
     514                 :            :                  */
     515                 :            :                 empty_exp_now = sync_rcu_exp_done(rnp);
     516                 :            :                 if (!empty_norm && !rcu_preempt_blocked_readers_cgp(rnp)) {
     517                 :            :                         trace_rcu_quiescent_state_report(TPS("preempt_rcu"),
     518                 :            :                                                          rnp->gp_seq,
     519                 :            :                                                          0, rnp->qsmask,
     520                 :            :                                                          rnp->level,
     521                 :            :                                                          rnp->grplo,
     522                 :            :                                                          rnp->grphi,
     523                 :            :                                                          !!rnp->gp_tasks);
     524                 :            :                         rcu_report_unblock_qs_rnp(rnp, flags);
     525                 :            :                 } else {
     526                 :            :                         raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
     527                 :            :                 }
     528                 :            : 
     529                 :            :                 /* Unboost if we were boosted. */
     530                 :            :                 if (IS_ENABLED(CONFIG_RCU_BOOST) && drop_boost_mutex)
     531                 :            :                         rt_mutex_futex_unlock(&rnp->boost_mtx);
     532                 :            : 
     533                 :            :                 /*
     534                 :            :                  * If this was the last task on the expedited lists,
     535                 :            :                  * then we need to report up the rcu_node hierarchy.
     536                 :            :                  */
     537                 :            :                 if (!empty_exp && empty_exp_now)
     538                 :            :                         rcu_report_exp_rnp(rnp, true);
     539                 :            :         } else {
     540                 :            :                 local_irq_restore(flags);
     541                 :            :         }
     542                 :            : }
     543                 :            : 
     544                 :            : /*
     545                 :            :  * Is a deferred quiescent-state pending, and are we also not in
     546                 :            :  * an RCU read-side critical section?  It is the caller's responsibility
     547                 :            :  * to ensure it is otherwise safe to report any deferred quiescent
     548                 :            :  * states.  The reason for this is that it is safe to report a
     549                 :            :  * quiescent state during context switch even though preemption
     550                 :            :  * is disabled.  This function cannot be expected to understand these
     551                 :            :  * nuances, so the caller must handle them.
     552                 :            :  */
     553                 :            : static bool rcu_preempt_need_deferred_qs(struct task_struct *t)
     554                 :            : {
     555                 :            :         return (__this_cpu_read(rcu_data.exp_deferred_qs) ||
     556                 :            :                 READ_ONCE(t->rcu_read_unlock_special.s)) &&
     557                 :            :                rcu_preempt_depth() <= 0;
     558                 :            : }
     559                 :            : 
     560                 :            : /*
     561                 :            :  * Report a deferred quiescent state if needed and safe to do so.
     562                 :            :  * As with rcu_preempt_need_deferred_qs(), "safe" involves only
     563                 :            :  * not being in an RCU read-side critical section.  The caller must
     564                 :            :  * evaluate safety in terms of interrupt, softirq, and preemption
     565                 :            :  * disabling.
     566                 :            :  */
     567                 :            : static void rcu_preempt_deferred_qs(struct task_struct *t)
     568                 :            : {
     569                 :            :         unsigned long flags;
     570                 :            :         bool couldrecurse = rcu_preempt_depth() >= 0;
     571                 :            : 
     572                 :            :         if (!rcu_preempt_need_deferred_qs(t))
     573                 :            :                 return;
     574                 :            :         if (couldrecurse)
     575                 :            :                 rcu_preempt_depth_set(rcu_preempt_depth() - RCU_NEST_BIAS);
     576                 :            :         local_irq_save(flags);
     577                 :            :         rcu_preempt_deferred_qs_irqrestore(t, flags);
     578                 :            :         if (couldrecurse)
     579                 :            :                 rcu_preempt_depth_set(rcu_preempt_depth() + RCU_NEST_BIAS);
     580                 :            : }
     581                 :            : 
     582                 :            : /*
     583                 :            :  * Minimal handler to give the scheduler a chance to re-evaluate.
     584                 :            :  */
     585                 :            : static void rcu_preempt_deferred_qs_handler(struct irq_work *iwp)
     586                 :            : {
     587                 :            :         struct rcu_data *rdp;
     588                 :            : 
     589                 :            :         rdp = container_of(iwp, struct rcu_data, defer_qs_iw);
     590                 :            :         rdp->defer_qs_iw_pending = false;
     591                 :            : }
     592                 :            : 
     593                 :            : /*
     594                 :            :  * Handle special cases during rcu_read_unlock(), such as needing to
     595                 :            :  * notify RCU core processing or task having blocked during the RCU
     596                 :            :  * read-side critical section.
     597                 :            :  */
     598                 :            : static void rcu_read_unlock_special(struct task_struct *t)
     599                 :            : {
     600                 :            :         unsigned long flags;
     601                 :            :         bool preempt_bh_were_disabled =
     602                 :            :                         !!(preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK));
     603                 :            :         bool irqs_were_disabled;
     604                 :            : 
     605                 :            :         /* NMI handlers cannot block and cannot safely manipulate state. */
     606                 :            :         if (in_nmi())
     607                 :            :                 return;
     608                 :            : 
     609                 :            :         local_irq_save(flags);
     610                 :            :         irqs_were_disabled = irqs_disabled_flags(flags);
     611                 :            :         if (preempt_bh_were_disabled || irqs_were_disabled) {
     612                 :            :                 bool exp;
     613                 :            :                 struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
     614                 :            :                 struct rcu_node *rnp = rdp->mynode;
     615                 :            : 
     616                 :            :                 exp = (t->rcu_blocked_node && t->rcu_blocked_node->exp_tasks) ||
     617                 :            :                       (rdp->grpmask & READ_ONCE(rnp->expmask)) ||
     618                 :            :                       tick_nohz_full_cpu(rdp->cpu);
     619                 :            :                 // Need to defer quiescent state until everything is enabled.
     620                 :            :                 if (irqs_were_disabled && use_softirq &&
     621                 :            :                     (in_interrupt() ||
     622                 :            :                      (exp && !t->rcu_read_unlock_special.b.deferred_qs))) {
     623                 :            :                         // Using softirq, safe to awaken, and we get
     624                 :            :                         // no help from enabling irqs, unlike bh/preempt.
     625                 :            :                         raise_softirq_irqoff(RCU_SOFTIRQ);
     626                 :            :                 } else {
     627                 :            :                         // Enabling BH or preempt does reschedule, so...
     628                 :            :                         // Also if no expediting or NO_HZ_FULL, slow is OK.
     629                 :            :                         set_tsk_need_resched(current);
     630                 :            :                         set_preempt_need_resched();
     631                 :            :                         if (IS_ENABLED(CONFIG_IRQ_WORK) && irqs_were_disabled &&
     632                 :            :                             !rdp->defer_qs_iw_pending && exp) {
     633                 :            :                                 // Get scheduler to re-evaluate and call hooks.
     634                 :            :                                 // If !IRQ_WORK, FQS scan will eventually IPI.
     635                 :            :                                 init_irq_work(&rdp->defer_qs_iw,
     636                 :            :                                               rcu_preempt_deferred_qs_handler);
     637                 :            :                                 rdp->defer_qs_iw_pending = true;
     638                 :            :                                 irq_work_queue_on(&rdp->defer_qs_iw, rdp->cpu);
     639                 :            :                         }
     640                 :            :                 }
     641                 :            :                 t->rcu_read_unlock_special.b.deferred_qs = true;
     642                 :            :                 local_irq_restore(flags);
     643                 :            :                 return;
     644                 :            :         }
     645                 :            :         rcu_preempt_deferred_qs_irqrestore(t, flags);
     646                 :            : }
     647                 :            : 
     648                 :            : /*
     649                 :            :  * Check that the list of blocked tasks for the newly completed grace
     650                 :            :  * period is in fact empty.  It is a serious bug to complete a grace
     651                 :            :  * period that still has RCU readers blocked!  This function must be
     652                 :            :  * invoked -before- updating this rnp's ->gp_seq.
     653                 :            :  *
     654                 :            :  * Also, if there are blocked tasks on the list, they automatically
     655                 :            :  * block the newly created grace period, so set up ->gp_tasks accordingly.
     656                 :            :  */
     657                 :            : static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
     658                 :            : {
     659                 :            :         struct task_struct *t;
     660                 :            : 
     661                 :            :         RCU_LOCKDEP_WARN(preemptible(), "rcu_preempt_check_blocked_tasks() invoked with preemption enabled!!!\n");
     662                 :            :         raw_lockdep_assert_held_rcu_node(rnp);
     663                 :            :         if (WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)))
     664                 :            :                 dump_blkd_tasks(rnp, 10);
     665                 :            :         if (rcu_preempt_has_tasks(rnp) &&
     666                 :            :             (rnp->qsmaskinit || rnp->wait_blkd_tasks)) {
     667                 :            :                 WRITE_ONCE(rnp->gp_tasks, rnp->blkd_tasks.next);
     668                 :            :                 t = container_of(rnp->gp_tasks, struct task_struct,
     669                 :            :                                  rcu_node_entry);
     670                 :            :                 trace_rcu_unlock_preempted_task(TPS("rcu_preempt-GPS"),
     671                 :            :                                                 rnp->gp_seq, t->pid);
     672                 :            :         }
     673                 :            :         WARN_ON_ONCE(rnp->qsmask);
     674                 :            : }
     675                 :            : 
     676                 :            : /*
     677                 :            :  * Check for a quiescent state from the current CPU, including voluntary
     678                 :            :  * context switches for Tasks RCU.  When a task blocks, the task is
     679                 :            :  * recorded in the corresponding CPU's rcu_node structure, which is checked
     680                 :            :  * elsewhere, hence this function need only check for quiescent states
     681                 :            :  * related to the current CPU, not to those related to tasks.
     682                 :            :  */
     683                 :            : static void rcu_flavor_sched_clock_irq(int user)
     684                 :            : {
     685                 :            :         struct task_struct *t = current;
     686                 :            : 
     687                 :            :         if (user || rcu_is_cpu_rrupt_from_idle()) {
     688                 :            :                 rcu_note_voluntary_context_switch(current);
     689                 :            :         }
     690                 :            :         if (rcu_preempt_depth() > 0 ||
     691                 :            :             (preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK))) {
     692                 :            :                 /* No QS, force context switch if deferred. */
     693                 :            :                 if (rcu_preempt_need_deferred_qs(t)) {
     694                 :            :                         set_tsk_need_resched(t);
     695                 :            :                         set_preempt_need_resched();
     696                 :            :                 }
     697                 :            :         } else if (rcu_preempt_need_deferred_qs(t)) {
     698                 :            :                 rcu_preempt_deferred_qs(t); /* Report deferred QS. */
     699                 :            :                 return;
     700                 :            :         } else if (!rcu_preempt_depth()) {
     701                 :            :                 rcu_qs(); /* Report immediate QS. */
     702                 :            :                 return;
     703                 :            :         }
     704                 :            : 
     705                 :            :         /* If GP is oldish, ask for help from rcu_read_unlock_special(). */
     706                 :            :         if (rcu_preempt_depth() > 0 &&
     707                 :            :             __this_cpu_read(rcu_data.core_needs_qs) &&
     708                 :            :             __this_cpu_read(rcu_data.cpu_no_qs.b.norm) &&
     709                 :            :             !t->rcu_read_unlock_special.b.need_qs &&
     710                 :            :             time_after(jiffies, rcu_state.gp_start + HZ))
     711                 :            :                 t->rcu_read_unlock_special.b.need_qs = true;
     712                 :            : }
     713                 :            : 
     714                 :            : /*
     715                 :            :  * Check for a task exiting while in a preemptible-RCU read-side
      716                 :            :  * critical section; clean up if so.  No need to issue warnings, as
     717                 :            :  * debug_check_no_locks_held() already does this if lockdep is enabled.
     718                 :            :  * Besides, if this function does anything other than just immediately
     719                 :            :  * return, there was a bug of some sort.  Spewing warnings from this
      720                 :            :  * function is as likely as not to simply obscure important prior warnings.
     721                 :            :  */
     722                 :            : void exit_rcu(void)
     723                 :            : {
     724                 :            :         struct task_struct *t = current;
     725                 :            : 
     726                 :            :         if (unlikely(!list_empty(&current->rcu_node_entry))) {
     727                 :            :                 rcu_preempt_depth_set(1);
     728                 :            :                 barrier();
     729                 :            :                 WRITE_ONCE(t->rcu_read_unlock_special.b.blocked, true);
     730                 :            :         } else if (unlikely(rcu_preempt_depth())) {
     731                 :            :                 rcu_preempt_depth_set(1);
     732                 :            :         } else {
     733                 :            :                 return;
     734                 :            :         }
     735                 :            :         __rcu_read_unlock();
     736                 :            :         rcu_preempt_deferred_qs(current);
     737                 :            : }
     738                 :            : 
     739                 :            : /*
     740                 :            :  * Dump the blocked-tasks state, but limit the list dump to the
     741                 :            :  * specified number of elements.
     742                 :            :  */
     743                 :            : static void
     744                 :            : dump_blkd_tasks(struct rcu_node *rnp, int ncheck)
     745                 :            : {
     746                 :            :         int cpu;
     747                 :            :         int i;
     748                 :            :         struct list_head *lhp;
     749                 :            :         bool onl;
     750                 :            :         struct rcu_data *rdp;
     751                 :            :         struct rcu_node *rnp1;
     752                 :            : 
     753                 :            :         raw_lockdep_assert_held_rcu_node(rnp);
     754                 :            :         pr_info("%s: grp: %d-%d level: %d ->gp_seq %ld ->completedqs %ld\n",
     755                 :            :                 __func__, rnp->grplo, rnp->grphi, rnp->level,
     756                 :            :                 (long)rnp->gp_seq, (long)rnp->completedqs);
     757                 :            :         for (rnp1 = rnp; rnp1; rnp1 = rnp1->parent)
     758                 :            :                 pr_info("%s: %d:%d ->qsmask %#lx ->qsmaskinit %#lx ->qsmaskinitnext %#lx\n",
     759                 :            :                         __func__, rnp1->grplo, rnp1->grphi, rnp1->qsmask, rnp1->qsmaskinit, rnp1->qsmaskinitnext);
     760                 :            :         pr_info("%s: ->gp_tasks %p ->boost_tasks %p ->exp_tasks %p\n",
     761                 :            :                 __func__, READ_ONCE(rnp->gp_tasks), rnp->boost_tasks,
     762                 :            :                 rnp->exp_tasks);
     763                 :            :         pr_info("%s: ->blkd_tasks", __func__);
     764                 :            :         i = 0;
     765                 :            :         list_for_each(lhp, &rnp->blkd_tasks) {
     766                 :            :                 pr_cont(" %p", lhp);
     767                 :            :                 if (++i >= ncheck)
     768                 :            :                         break;
     769                 :            :         }
     770                 :            :         pr_cont("\n");
     771                 :            :         for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++) {
     772                 :            :                 rdp = per_cpu_ptr(&rcu_data, cpu);
     773                 :            :                 onl = !!(rdp->grpmask & rcu_rnp_online_cpus(rnp));
     774                 :            :                 pr_info("\t%d: %c online: %ld(%d) offline: %ld(%d)\n",
     775                 :            :                         cpu, ".o"[onl],
     776                 :            :                         (long)rdp->rcu_onl_gp_seq, rdp->rcu_onl_gp_flags,
     777                 :            :                         (long)rdp->rcu_ofl_gp_seq, rdp->rcu_ofl_gp_flags);
     778                 :            :         }
     779                 :            : }
     780                 :            : 
     781                 :            : #else /* #ifdef CONFIG_PREEMPT_RCU */
     782                 :            : 
     783                 :            : /*
     784                 :            :  * Tell them what RCU they are running.
     785                 :            :  */
     786                 :         30 : static void __init rcu_bootup_announce(void)
     787                 :            : {
     788                 :         30 :         pr_info("Hierarchical RCU implementation.\n");
     789                 :         30 :         rcu_bootup_announce_oddness();
     790                 :         30 : }
     791                 :            : 
     792                 :            : /*
     793                 :            :  * Note a quiescent state for PREEMPTION=n.  Because we do not need to know
      794                 :            :  * how many quiescent states passed, only whether there was at least one since
      795                 :            :  * the start of the grace period, this simply sets a flag.  The caller must
     796                 :            :  * have disabled preemption.
     797                 :            :  */
     798                 :     519847 : static void rcu_qs(void)
     799                 :            : {
     800                 :     519847 :         RCU_LOCKDEP_WARN(preemptible(), "rcu_qs() invoked with preemption enabled!!!");
     801         [ +  + ]:     519847 :         if (!__this_cpu_read(rcu_data.cpu_no_qs.s))
     802                 :            :                 return;
     803                 :      67577 :         trace_rcu_grace_period(TPS("rcu_sched"),
     804                 :      67577 :                                __this_cpu_read(rcu_data.gp_seq), TPS("cpuqs"));
     805         [ -  + ]:      67577 :         __this_cpu_write(rcu_data.cpu_no_qs.b.norm, false);
     806         [ -  + ]:      67577 :         if (!__this_cpu_read(rcu_data.cpu_no_qs.b.exp))
     807                 :            :                 return;
     808                 :          0 :         __this_cpu_write(rcu_data.cpu_no_qs.b.exp, false);
     809                 :          0 :         rcu_report_exp_rdp(this_cpu_ptr(&rcu_data));
     810                 :            : }
     811                 :            : 
     812                 :            : /*
     813                 :            :  * Register an urgently needed quiescent state.  If there is an
     814                 :            :  * emergency, invoke rcu_momentary_dyntick_idle() to do a heavy-weight
     815                 :            :  * dyntick-idle quiescent state visible to other CPUs, which will in
     816                 :            :  * some cases serve for expedited as well as normal grace periods.
     817                 :            :  * Either way, register a lightweight quiescent state.
     818                 :            :  */
     819                 :   68388520 : void rcu_all_qs(void)
     820                 :            : {
     821                 :   68388520 :         unsigned long flags;
     822                 :            : 
     823         [ -  + ]:   68388520 :         if (!raw_cpu_read(rcu_data.rcu_urgent_qs))
     824                 :            :                 return;
     825                 :          0 :         preempt_disable();
     826                 :            :         /* Load rcu_urgent_qs before other flags. */
     827         [ #  # ]:          0 :         if (!smp_load_acquire(this_cpu_ptr(&rcu_data.rcu_urgent_qs))) {
     828                 :          0 :                 preempt_enable();
     829                 :          0 :                 return;
     830                 :            :         }
     831                 :          0 :         this_cpu_write(rcu_data.rcu_urgent_qs, false);
     832         [ #  # ]:          0 :         if (unlikely(raw_cpu_read(rcu_data.rcu_need_heavy_qs))) {
     833                 :          0 :                 local_irq_save(flags);
     834                 :          0 :                 rcu_momentary_dyntick_idle();
     835                 :          0 :                 local_irq_restore(flags);
     836                 :            :         }
     837                 :          0 :         rcu_qs();
     838                 :          0 :         preempt_enable();
     839                 :            : }
     840                 :            : EXPORT_SYMBOL_GPL(rcu_all_qs);
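
/*
 * Illustrative usage sketch (not from this file): on PREEMPTION=n kernels,
 * cond_resched() is the usual way a long-running kernel loop gives
 * rcu_all_qs() a chance to register the quiescent states described above.
 * The loop body is a hypothetical placeholder.
 */
static void __maybe_unused example_long_running_scan(void)
{
	int i;

	for (i = 0; i < 1000000; i++) {
		/* ... hypothetical per-iteration work ... */
		cond_resched();	/* Typically ends up in rcu_all_qs() on PREEMPTION=n. */
	}
}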
     841                 :            : 
     842                 :            : /*
     843                 :            :  * Note a PREEMPTION=n context switch. The caller must have disabled interrupts.
     844                 :            :  */
     845                 :     315502 : void rcu_note_context_switch(bool preempt)
     846                 :            : {
     847                 :     315502 :         trace_rcu_utilization(TPS("Start context switch"));
     848                 :     315502 :         rcu_qs();
     849                 :            :         /* Load rcu_urgent_qs before other flags. */
     850         [ +  - ]:     315502 :         if (!smp_load_acquire(this_cpu_ptr(&rcu_data.rcu_urgent_qs)))
     851                 :     315502 :                 goto out;
     852                 :          0 :         this_cpu_write(rcu_data.rcu_urgent_qs, false);
     853         [ #  # ]:          0 :         if (unlikely(raw_cpu_read(rcu_data.rcu_need_heavy_qs)))
     854                 :          0 :                 rcu_momentary_dyntick_idle();
     855                 :          0 :         if (!preempt)
     856                 :     315502 :                 rcu_tasks_qs(current);
     857                 :          0 : out:
     858                 :     315502 :         trace_rcu_utilization(TPS("End context switch"));
     859                 :     315502 : }
     860                 :            : EXPORT_SYMBOL_GPL(rcu_note_context_switch);
     861                 :            : 
     862                 :            : /*
     863                 :            :  * Because preemptible RCU does not exist, there are never any preempted
     864                 :            :  * RCU readers.
     865                 :            :  */
     866                 :     202563 : static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
     867                 :            : {
     868   [ -  +  -  + ]:     135042 :         return 0;
     869                 :            : }
     870                 :            : 
     871                 :            : /*
     872                 :            :  * Because there is no preemptible RCU, there can be no readers blocked.
     873                 :            :  */
     874                 :          0 : static bool rcu_preempt_has_tasks(struct rcu_node *rnp)
     875                 :            : {
     876         [ #  # ]:          0 :         return false;
     877                 :            : }
     878                 :            : 
     879                 :            : /*
     880                 :            :  * Because there is no preemptible RCU, there can be no deferred quiescent
     881                 :            :  * states.
     882                 :            :  */
     883                 :         52 : static bool rcu_preempt_need_deferred_qs(struct task_struct *t)
     884                 :            : {
     885                 :         52 :         return false;
     886                 :            : }
     887                 :     258992 : static void rcu_preempt_deferred_qs(struct task_struct *t) { }
     888                 :            : 
     889                 :            : /*
     890                 :            :  * Because there is no preemptible RCU, there can be no readers blocked,
      891                 :            :  * so there is no need to check for blocked tasks.  Check only for
     892                 :            :  * bogus qsmask values.
     893                 :            :  */
     894                 :      67551 : static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
     895                 :            : {
     896   [ -  +  +  - ]:      67551 :         WARN_ON_ONCE(rnp->qsmask);
     897                 :            : }
     898                 :            : 
     899                 :            : /*
     900                 :            :  * Check to see if this CPU is in a non-context-switch quiescent state,
      901                 :            :  * namely user mode or the idle loop.
     902                 :            :  */
     903                 :      97198 : static void rcu_flavor_sched_clock_irq(int user)
     904                 :            : {
     905   [ +  +  +  - ]:      99291 :         if (user || rcu_is_cpu_rrupt_from_idle()) {
     906                 :            : 
     907                 :            :                 /*
     908                 :            :                  * Get here if this CPU took its interrupt from user
     909                 :            :                  * mode or from the idle loop, and if this is not a
     910                 :            :                  * nested interrupt.  In this case, the CPU is in
     911                 :            :                  * a quiescent state, so note it.
     912                 :            :                  *
     913                 :            :                  * No memory barrier is required here because rcu_qs()
     914                 :            :                  * references only CPU-local variables that other CPUs
     915                 :            :                  * neither access nor modify, at least not while the
     916                 :            :                  * corresponding CPU is online.
     917                 :            :                  */
     918                 :            : 
     919                 :      10305 :                 rcu_qs();
     920                 :            :         }
     921                 :      97198 : }
     922                 :            : 
     923                 :            : /*
     924                 :            :  * Because preemptible RCU does not exist, tasks cannot possibly exit
     925                 :            :  * while in preemptible RCU read-side critical sections.
     926                 :            :  */
     927                 :      17730 : void exit_rcu(void)
     928                 :            : {
     929                 :      17730 : }
     930                 :            : 
     931                 :            : /*
     932                 :            :  * Dump the guaranteed-empty blocked-tasks state.  Trust but verify.
     933                 :            :  */
     934                 :            : static void
     935                 :            : dump_blkd_tasks(struct rcu_node *rnp, int ncheck)
     936                 :            : {
     937                 :            :         WARN_ON_ONCE(!list_empty(&rnp->blkd_tasks));
     938                 :            : }
     939                 :            : 
     940                 :            : #endif /* #else #ifdef CONFIG_PREEMPT_RCU */
     941                 :            : 
     942                 :            : /*
     943                 :            :  * If boosting, set rcuc kthreads to realtime priority.
     944                 :            :  */
     945                 :          0 : static void rcu_cpu_kthread_setup(unsigned int cpu)
     946                 :            : {
     947                 :            : #ifdef CONFIG_RCU_BOOST
     948                 :            :         struct sched_param sp;
     949                 :            : 
     950                 :            :         sp.sched_priority = kthread_prio;
     951                 :            :         sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
     952                 :            : #endif /* #ifdef CONFIG_RCU_BOOST */
     953                 :          0 : }
     954                 :            : 
     955                 :            : #ifdef CONFIG_RCU_BOOST
     956                 :            : 
     957                 :            : /*
     958                 :            :  * Carry out RCU priority boosting on the task indicated by ->exp_tasks
     959                 :            :  * or ->boost_tasks, advancing the pointer to the next task in the
     960                 :            :  * ->blkd_tasks list.
     961                 :            :  *
     962                 :            :  * Note that irqs must be enabled: boosting the task can block.
     963                 :            :  * Returns 1 if there are more tasks needing to be boosted.
     964                 :            :  */
     965                 :            : static int rcu_boost(struct rcu_node *rnp)
     966                 :            : {
     967                 :            :         unsigned long flags;
     968                 :            :         struct task_struct *t;
     969                 :            :         struct list_head *tb;
     970                 :            : 
     971                 :            :         if (READ_ONCE(rnp->exp_tasks) == NULL &&
     972                 :            :             READ_ONCE(rnp->boost_tasks) == NULL)
     973                 :            :                 return 0;  /* Nothing left to boost. */
     974                 :            : 
     975                 :            :         raw_spin_lock_irqsave_rcu_node(rnp, flags);
     976                 :            : 
     977                 :            :         /*
     978                 :            :          * Recheck under the lock: all tasks in need of boosting
     979                 :            :          * might exit their RCU read-side critical sections on their own.
     980                 :            :          */
     981                 :            :         if (rnp->exp_tasks == NULL && rnp->boost_tasks == NULL) {
     982                 :            :                 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
     983                 :            :                 return 0;
     984                 :            :         }
     985                 :            : 
     986                 :            :         /*
     987                 :            :          * Preferentially boost tasks blocking expedited grace periods.
     988                 :            :          * This cannot starve the normal grace periods because a second
     989                 :            :          * expedited grace period must boost all blocked tasks, including
     990                 :            :          * those blocking the pre-existing normal grace period.
     991                 :            :          */
     992                 :            :         if (rnp->exp_tasks != NULL)
     993                 :            :                 tb = rnp->exp_tasks;
     994                 :            :         else
     995                 :            :                 tb = rnp->boost_tasks;
     996                 :            : 
     997                 :            :         /*
     998                 :            :          * We boost task t by manufacturing an rt_mutex that appears to
     999                 :            :          * be held by task t.  We leave a pointer to that rt_mutex where
    1000                 :            :          * task t can find it, and task t will release the mutex when it
    1001                 :            :          * exits its outermost RCU read-side critical section.  Then
    1002                 :            :          * simply acquiring this artificial rt_mutex will boost task
    1003                 :            :          * t's priority.  (Thanks to tglx for suggesting this approach!)
    1004                 :            :          *
    1005                 :            :          * Note that task t must acquire rnp->lock to remove itself from
    1006                 :            :          * the ->blkd_tasks list, which it will do from exit() if from
    1007                 :            :          * nowhere else.  We therefore are guaranteed that task t will
    1008                 :            :          * stay around at least until we drop rnp->lock.  Note that
    1009                 :            :          * rnp->lock also resolves races between our priority boosting
    1010                 :            :          * and task t's exiting its outermost RCU read-side critical
    1011                 :            :          * section.
    1012                 :            :          */
    1013                 :            :         t = container_of(tb, struct task_struct, rcu_node_entry);
    1014                 :            :         rt_mutex_init_proxy_locked(&rnp->boost_mtx, t);
    1015                 :            :         raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
    1016                 :            :         /* Lock only for side effect: boosts task t's priority. */
    1017                 :            :         rt_mutex_lock(&rnp->boost_mtx);
    1018                 :            :         rt_mutex_unlock(&rnp->boost_mtx);  /* Then keep lockdep happy. */
    1019                 :            : 
    1020                 :            :         return READ_ONCE(rnp->exp_tasks) != NULL ||
    1021                 :            :                READ_ONCE(rnp->boost_tasks) != NULL;
    1022                 :            : }
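
/*
 * Sequence sketch of the proxy-lock trick above (illustrative only), for a
 * reader task R blocked in an RCU read-side critical section and the boost
 * kthread B running rcu_boost():
 *
 *   B: rt_mutex_init_proxy_locked(&rnp->boost_mtx, R)  - R "owns" the mutex
 *   B: rt_mutex_lock(&rnp->boost_mtx)                  - B blocks, PI boosts R
 *   R: runs at boosted priority until it leaves its outermost
 *      rcu_read_unlock(), then releases boost_mtx from the deferred
 *      quiescent-state path earlier in this file, deboosting itself
 *   B: acquires and immediately releases boost_mtx to keep lockdep happy
 */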
    1023                 :            : 
    1024                 :            : /*
    1025                 :            :  * Priority-boosting kthread, one per leaf rcu_node.
    1026                 :            :  */
    1027                 :            : static int rcu_boost_kthread(void *arg)
    1028                 :            : {
    1029                 :            :         struct rcu_node *rnp = (struct rcu_node *)arg;
    1030                 :            :         int spincnt = 0;
    1031                 :            :         int more2boost;
    1032                 :            : 
    1033                 :            :         trace_rcu_utilization(TPS("Start boost kthread@init"));
    1034                 :            :         for (;;) {
    1035                 :            :                 rnp->boost_kthread_status = RCU_KTHREAD_WAITING;
    1036                 :            :                 trace_rcu_utilization(TPS("End boost kthread@rcu_wait"));
    1037                 :            :                 rcu_wait(rnp->boost_tasks || rnp->exp_tasks);
    1038                 :            :                 trace_rcu_utilization(TPS("Start boost kthread@rcu_wait"));
    1039                 :            :                 rnp->boost_kthread_status = RCU_KTHREAD_RUNNING;
    1040                 :            :                 more2boost = rcu_boost(rnp);
    1041                 :            :                 if (more2boost)
    1042                 :            :                         spincnt++;
    1043                 :            :                 else
    1044                 :            :                         spincnt = 0;
    1045                 :            :                 if (spincnt > 10) {
    1046                 :            :                         rnp->boost_kthread_status = RCU_KTHREAD_YIELDING;
    1047                 :            :                         trace_rcu_utilization(TPS("End boost kthread@rcu_yield"));
    1048                 :            :                         schedule_timeout_interruptible(2);
    1049                 :            :                         trace_rcu_utilization(TPS("Start boost kthread@rcu_yield"));
    1050                 :            :                         spincnt = 0;
    1051                 :            :                 }
    1052                 :            :         }
    1053                 :            :         /* NOTREACHED */
    1054                 :            :         trace_rcu_utilization(TPS("End boost kthread@notreached"));
    1055                 :            :         return 0;
    1056                 :            : }
    1057                 :            : 
    1058                 :            : /*
    1059                 :            :  * Check to see if it is time to start boosting RCU readers that are
    1060                 :            :  * blocking the current grace period, and, if so, tell the per-rcu_node
    1061                 :            :  * kthread to start boosting them.  If there is an expedited grace
    1062                 :            :  * period in progress, it is always time to boost.
    1063                 :            :  *
    1064                 :            :  * The caller must hold rnp->lock, which this function releases.
    1065                 :            :  * The ->boost_kthread_task is immortal, so we don't need to worry
    1066                 :            :  * about it going away.
    1067                 :            :  */
    1068                 :            : static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
    1069                 :            :         __releases(rnp->lock)
    1070                 :            : {
    1071                 :            :         raw_lockdep_assert_held_rcu_node(rnp);
    1072                 :            :         if (!rcu_preempt_blocked_readers_cgp(rnp) && rnp->exp_tasks == NULL) {
    1073                 :            :                 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
    1074                 :            :                 return;
    1075                 :            :         }
    1076                 :            :         if (rnp->exp_tasks != NULL ||
    1077                 :            :             (rnp->gp_tasks != NULL &&
    1078                 :            :              rnp->boost_tasks == NULL &&
    1079                 :            :              rnp->qsmask == 0 &&
    1080                 :            :              ULONG_CMP_GE(jiffies, rnp->boost_time))) {
    1081                 :            :                 if (rnp->exp_tasks == NULL)
    1082                 :            :                         rnp->boost_tasks = rnp->gp_tasks;
    1083                 :            :                 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
    1084                 :            :                 rcu_wake_cond(rnp->boost_kthread_task,
    1085                 :            :                               rnp->boost_kthread_status);
    1086                 :            :         } else {
    1087                 :            :                 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
    1088                 :            :         }
    1089                 :            : }
    1090                 :            : 
    1091                 :            : /*
    1092                 :            :  * Is the current CPU running the RCU-callbacks kthread?
    1093                 :            :  * Caller must have preemption disabled.
    1094                 :            :  */
    1095                 :            : static bool rcu_is_callbacks_kthread(void)
    1096                 :            : {
    1097                 :            :         return __this_cpu_read(rcu_data.rcu_cpu_kthread_task) == current;
    1098                 :            : }
    1099                 :            : 
    1100                 :            : #define RCU_BOOST_DELAY_JIFFIES DIV_ROUND_UP(CONFIG_RCU_BOOST_DELAY * HZ, 1000)
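
/*
 * Worked example (assuming HZ=250 and the Kconfig default
 * CONFIG_RCU_BOOST_DELAY=500 milliseconds):
 *
 *   RCU_BOOST_DELAY_JIFFIES = DIV_ROUND_UP(500 * 250, 1000) = 125 jiffies,
 *
 * so rcu_preempt_boost_start_gp() below arms ->boost_time roughly 500ms
 * after the grace period starts, and rcu_initiate_boost() will not wake the
 * boost kthread before then unless an expedited grace period is blocked.
 */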
    1101                 :            : 
    1102                 :            : /*
    1103                 :            :  * Do priority-boost accounting for the start of a new grace period.
    1104                 :            :  */
    1105                 :            : static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
    1106                 :            : {
    1107                 :            :         rnp->boost_time = jiffies + RCU_BOOST_DELAY_JIFFIES;
    1108                 :            : }
    1109                 :            : 
    1110                 :            : /*
    1111                 :            :  * Create an RCU-boost kthread for the specified node if one does not
    1112                 :            :  * already exist.  We only create this kthread for preemptible RCU.
     1113                 :            :  * The kthread runs at SCHED_FIFO priority kthread_prio.
    1114                 :            :  */
    1115                 :            : static void rcu_spawn_one_boost_kthread(struct rcu_node *rnp)
    1116                 :            : {
    1117                 :            :         int rnp_index = rnp - rcu_get_root();
    1118                 :            :         unsigned long flags;
    1119                 :            :         struct sched_param sp;
    1120                 :            :         struct task_struct *t;
    1121                 :            : 
    1122                 :            :         if (!IS_ENABLED(CONFIG_PREEMPT_RCU))
    1123                 :            :                 return;
    1124                 :            : 
    1125                 :            :         if (!rcu_scheduler_fully_active || rcu_rnp_online_cpus(rnp) == 0)
    1126                 :            :                 return;
    1127                 :            : 
    1128                 :            :         rcu_state.boost = 1;
    1129                 :            : 
    1130                 :            :         if (rnp->boost_kthread_task != NULL)
    1131                 :            :                 return;
    1132                 :            : 
    1133                 :            :         t = kthread_create(rcu_boost_kthread, (void *)rnp,
    1134                 :            :                            "rcub/%d", rnp_index);
    1135                 :            :         if (WARN_ON_ONCE(IS_ERR(t)))
    1136                 :            :                 return;
    1137                 :            : 
    1138                 :            :         raw_spin_lock_irqsave_rcu_node(rnp, flags);
    1139                 :            :         rnp->boost_kthread_task = t;
    1140                 :            :         raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
    1141                 :            :         sp.sched_priority = kthread_prio;
    1142                 :            :         sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
    1143                 :            :         wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */
    1144                 :            : }
    1145                 :            : 
    1146                 :            : /*
    1147                 :            :  * Set the per-rcu_node kthread's affinity to cover all CPUs that are
    1148                 :            :  * served by the rcu_node in question.  The CPU hotplug lock is still
    1149                 :            :  * held, so the value of rnp->qsmaskinit will be stable.
    1150                 :            :  *
     1151                 :            :  * We don't include outgoingcpu in the affinity set; use -1 if there is
    1152                 :            :  * no outgoing CPU.  If there are no CPUs left in the affinity set,
    1153                 :            :  * this function allows the kthread to execute on any CPU.
    1154                 :            :  */
    1155                 :            : static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
    1156                 :            : {
    1157                 :            :         struct task_struct *t = rnp->boost_kthread_task;
    1158                 :            :         unsigned long mask = rcu_rnp_online_cpus(rnp);
    1159                 :            :         cpumask_var_t cm;
    1160                 :            :         int cpu;
    1161                 :            : 
    1162                 :            :         if (!t)
    1163                 :            :                 return;
    1164                 :            :         if (!zalloc_cpumask_var(&cm, GFP_KERNEL))
    1165                 :            :                 return;
    1166                 :            :         for_each_leaf_node_possible_cpu(rnp, cpu)
    1167                 :            :                 if ((mask & leaf_node_cpu_bit(rnp, cpu)) &&
    1168                 :            :                     cpu != outgoingcpu)
    1169                 :            :                         cpumask_set_cpu(cpu, cm);
    1170                 :            :         if (cpumask_weight(cm) == 0)
    1171                 :            :                 cpumask_setall(cm);
    1172                 :            :         set_cpus_allowed_ptr(t, cm);
    1173                 :            :         free_cpumask_var(cm);
    1174                 :            : }
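
/*
 * Illustrative caller sketch (hedged; the real CPU-hotplug hooks live in
 * tree.c): an online/offline notifier would pass -1 when no CPU is going
 * away and the departing CPU's number otherwise, so that CPU's bit is left
 * out of the boost kthread's affinity mask.
 */
static void __maybe_unused example_boost_affinity_update(struct rcu_node *rnp,
							 int cpu, bool outgoing)
{
	rcu_boost_kthread_setaffinity(rnp, outgoing ? cpu : -1);
}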
    1175                 :            : 
    1176                 :            : /*
    1177                 :            :  * Spawn boost kthreads -- called as soon as the scheduler is running.
    1178                 :            :  */
    1179                 :            : static void __init rcu_spawn_boost_kthreads(void)
    1180                 :            : {
    1181                 :            :         struct rcu_node *rnp;
    1182                 :            : 
    1183                 :            :         rcu_for_each_leaf_node(rnp)
    1184                 :            :                 rcu_spawn_one_boost_kthread(rnp);
    1185                 :            : }
    1186                 :            : 
    1187                 :            : static void rcu_prepare_kthreads(int cpu)
    1188                 :            : {
    1189                 :            :         struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
    1190                 :            :         struct rcu_node *rnp = rdp->mynode;
    1191                 :            : 
    1192                 :            :         /* Fire up the incoming CPU's kthread and leaf rcu_node kthread. */
    1193                 :            :         if (rcu_scheduler_fully_active)
    1194                 :            :                 rcu_spawn_one_boost_kthread(rnp);
    1195                 :            : }
    1196                 :            : 
    1197                 :            : #else /* #ifdef CONFIG_RCU_BOOST */
    1198                 :            : 
    1199                 :          0 : static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
    1200                 :            :         __releases(rnp->lock)
    1201                 :            : {
    1202                 :          0 :         raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
    1203                 :          0 : }
    1204                 :            : 
    1205                 :     238839 : static bool rcu_is_callbacks_kthread(void)
    1206                 :            : {
    1207                 :     238839 :         return false;
    1208                 :            : }
    1209                 :            : 
    1210                 :      67551 : static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
    1211                 :            : {
    1212                 :      67551 : }
    1213                 :            : 
    1214                 :          0 : static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
    1215                 :            : {
    1216                 :          0 : }
    1217                 :            : 
    1218                 :         30 : static void __init rcu_spawn_boost_kthreads(void)
    1219                 :            : {
    1220                 :         30 : }
    1221                 :            : 
    1222                 :         30 : static void rcu_prepare_kthreads(int cpu)
    1223                 :            : {
    1224                 :         30 : }
    1225                 :            : 
    1226                 :            : #endif /* #else #ifdef CONFIG_RCU_BOOST */
    1227                 :            : 
    1228                 :            : #if !defined(CONFIG_RCU_FAST_NO_HZ)
    1229                 :            : 
    1230                 :            : /*
    1231                 :            :  * Check to see if any future non-offloaded RCU-related work will need
    1232                 :            :  * to be done by the current CPU, even if none need be done immediately,
    1233                 :            :  * returning 1 if so.  This function is part of the RCU implementation;
    1234                 :            :  * it is -not- an exported member of the RCU API.
    1235                 :            :  *
     1236                 :            :  * Because we do not have RCU_FAST_NO_HZ, just check whether or not this
    1237                 :            :  * CPU has RCU callbacks queued.
    1238                 :            :  */
    1239                 :      11058 : int rcu_needs_cpu(u64 basemono, u64 *nextevt)
    1240                 :            : {
    1241                 :      11058 :         *nextevt = KTIME_MAX;
    1242   [ +  +  -  + ]:      21281 :         return !rcu_segcblist_empty(&this_cpu_ptr(&rcu_data)->cblist) &&
    1243         [ -  + ]:      10223 :                !rcu_segcblist_is_offloaded(&this_cpu_ptr(&rcu_data)->cblist);
    1244                 :            : }
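
/*
 * Hedged caller sketch: roughly how the NO_HZ idle path consults
 * rcu_needs_cpu() before stopping the scheduling-clock tick.  The real
 * decision lives in kernel/time/tick-sched.c and also folds in timer-wheel
 * and irq_work deadlines; this is only an outline.
 */
static bool __maybe_unused example_can_stop_tick(u64 basemono, u64 *next_event)
{
	u64 next_rcu;

	if (rcu_needs_cpu(basemono, &next_rcu))
		return false;	/* Non-offloaded callbacks queued: keep the tick. */
	*next_event = min(*next_event, next_rcu);	/* RCU may still want a later wakeup. */
	return true;
}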
    1245                 :            : 
    1246                 :            : /*
    1247                 :            :  * Because we do not have RCU_FAST_NO_HZ, don't bother cleaning up
    1248                 :            :  * after it.
    1249                 :            :  */
    1250                 :      11058 : static void rcu_cleanup_after_idle(void)
    1251                 :            : {
    1252                 :      11058 : }
    1253                 :            : 
    1254                 :            : /*
    1255                 :            :  * Do the idle-entry grace-period work, which, because CONFIG_RCU_FAST_NO_HZ=n,
    1256                 :            :  * is nothing.
    1257                 :            :  */
    1258                 :      22123 : static void rcu_prepare_for_idle(void)
    1259                 :            : {
    1260                 :      22123 : }
    1261                 :            : 
    1262                 :            : #else /* #if !defined(CONFIG_RCU_FAST_NO_HZ) */
    1263                 :            : 
    1264                 :            : /*
    1265                 :            :  * This code is invoked when a CPU goes idle, at which point we want
    1266                 :            :  * to have the CPU do everything required for RCU so that it can enter
    1267                 :            :  * the energy-efficient dyntick-idle mode.
    1268                 :            :  *
    1269                 :            :  * The following preprocessor symbol controls this:
    1270                 :            :  *
    1271                 :            :  * RCU_IDLE_GP_DELAY gives the number of jiffies that a CPU is permitted
    1272                 :            :  *      to sleep in dyntick-idle mode with RCU callbacks pending.  This
    1273                 :            :  *      is sized to be roughly one RCU grace period.  Those energy-efficiency
    1274                 :            :  *      benchmarkers who might otherwise be tempted to set this to a large
    1275                 :            :  *      number, be warned: Setting RCU_IDLE_GP_DELAY too high can hang your
    1276                 :            :  *      system.  And if you are -that- concerned about energy efficiency,
    1277                 :            :  *      just power the system down and be done with it!
    1278                 :            :  *
    1279                 :            :  * The value below works well in practice.  If future workloads require
     1280                 :            :  * adjustment, it can be converted into a kernel config parameter, though
    1281                 :            :  * making the state machine smarter might be a better option.
    1282                 :            :  */
    1283                 :            : #define RCU_IDLE_GP_DELAY 4             /* Roughly one grace period. */
    1284                 :            : 
    1285                 :            : static int rcu_idle_gp_delay = RCU_IDLE_GP_DELAY;
    1286                 :            : module_param(rcu_idle_gp_delay, int, 0644);
    1287                 :            : 
    1288                 :            : /*
    1289                 :            :  * Try to advance callbacks on the current CPU, but only if it has been
    1290                 :            :  * awhile since the last time we did so.  Afterwards, if there are any
    1291                 :            :  * callbacks ready for immediate invocation, return true.
    1292                 :            :  */
    1293                 :            : static bool __maybe_unused rcu_try_advance_all_cbs(void)
    1294                 :            : {
    1295                 :            :         bool cbs_ready = false;
    1296                 :            :         struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
    1297                 :            :         struct rcu_node *rnp;
    1298                 :            : 
    1299                 :            :         /* Exit early if we advanced recently. */
    1300                 :            :         if (jiffies == rdp->last_advance_all)
    1301                 :            :                 return false;
    1302                 :            :         rdp->last_advance_all = jiffies;
    1303                 :            : 
    1304                 :            :         rnp = rdp->mynode;
    1305                 :            : 
    1306                 :            :         /*
    1307                 :            :          * Don't bother checking unless a grace period has
    1308                 :            :          * completed since we last checked and there are
    1309                 :            :          * callbacks not yet ready to invoke.
    1310                 :            :          */
    1311                 :            :         if ((rcu_seq_completed_gp(rdp->gp_seq,
    1312                 :            :                                   rcu_seq_current(&rnp->gp_seq)) ||
    1313                 :            :              unlikely(READ_ONCE(rdp->gpwrap))) &&
    1314                 :            :             rcu_segcblist_pend_cbs(&rdp->cblist))
    1315                 :            :                 note_gp_changes(rdp);
    1316                 :            : 
    1317                 :            :         if (rcu_segcblist_ready_cbs(&rdp->cblist))
    1318                 :            :                 cbs_ready = true;
    1319                 :            :         return cbs_ready;
    1320                 :            : }
    1321                 :            : 
    1322                 :            : /*
    1323                 :            :  * Allow the CPU to enter dyntick-idle mode unless it has callbacks ready
    1324                 :            :  * to invoke.  If the CPU has callbacks, try to advance them.  Tell the
     1325                 :            :  * caller what to set the timeout to (via *nextevt).
    1326                 :            :  *
    1327                 :            :  * The caller must have disabled interrupts.
    1328                 :            :  */
    1329                 :            : int rcu_needs_cpu(u64 basemono, u64 *nextevt)
    1330                 :            : {
    1331                 :            :         struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
    1332                 :            :         unsigned long dj;
    1333                 :            : 
    1334                 :            :         lockdep_assert_irqs_disabled();
    1335                 :            : 
    1336                 :            :         /* If no non-offloaded callbacks, RCU doesn't need the CPU. */
    1337                 :            :         if (rcu_segcblist_empty(&rdp->cblist) ||
    1338                 :            :             rcu_segcblist_is_offloaded(&this_cpu_ptr(&rcu_data)->cblist)) {
    1339                 :            :                 *nextevt = KTIME_MAX;
    1340                 :            :                 return 0;
    1341                 :            :         }
    1342                 :            : 
    1343                 :            :         /* Attempt to advance callbacks. */
    1344                 :            :         if (rcu_try_advance_all_cbs()) {
    1345                 :            :                 /* Some ready to invoke, so initiate later invocation. */
    1346                 :            :                 invoke_rcu_core();
    1347                 :            :                 return 1;
    1348                 :            :         }
    1349                 :            :         rdp->last_accelerate = jiffies;
    1350                 :            : 
     1351                 :            :         /* Request a timer, rounded up to the next rcu_idle_gp_delay boundary. */
    1352                 :            :         dj = round_up(rcu_idle_gp_delay + jiffies, rcu_idle_gp_delay) - jiffies;
    1353                 :            : 
    1354                 :            :         *nextevt = basemono + dj * TICK_NSEC;
    1355                 :            :         return 0;
    1356                 :            : }
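
/*
 * Worked example of the rounding above, assuming rcu_idle_gp_delay == 4:
 *
 *   jiffies == 1000:  dj = round_up(1004, 4) - 1000 = 4
 *   jiffies == 1001:  dj = round_up(1005, 4) - 1001 = 7
 *
 * so the requested wakeup always lands on a multiple of rcu_idle_gp_delay,
 * between one and two delay periods out, which tends to batch the wakeups
 * of multiple idle CPUs onto the same jiffy.
 */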
    1357                 :            : 
    1358                 :            : /*
    1359                 :            :  * Prepare a CPU for idle from an RCU perspective.  The first major task is to
    1360                 :            :  * sense whether nohz mode has been enabled or disabled via sysfs.  The second
    1361                 :            :  * major task is to accelerate (that is, assign grace-period numbers to) any
    1362                 :            :  * recently arrived callbacks.
    1363                 :            :  *
    1364                 :            :  * The caller must have disabled interrupts.
    1365                 :            :  */
    1366                 :            : static void rcu_prepare_for_idle(void)
    1367                 :            : {
    1368                 :            :         bool needwake;
    1369                 :            :         struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
    1370                 :            :         struct rcu_node *rnp;
    1371                 :            :         int tne;
    1372                 :            : 
    1373                 :            :         lockdep_assert_irqs_disabled();
    1374                 :            :         if (rcu_segcblist_is_offloaded(&rdp->cblist))
    1375                 :            :                 return;
    1376                 :            : 
    1377                 :            :         /* Handle nohz enablement switches conservatively. */
    1378                 :            :         tne = READ_ONCE(tick_nohz_active);
    1379                 :            :         if (tne != rdp->tick_nohz_enabled_snap) {
    1380                 :            :                 if (!rcu_segcblist_empty(&rdp->cblist))
    1381                 :            :                         invoke_rcu_core(); /* force nohz to see update. */
    1382                 :            :                 rdp->tick_nohz_enabled_snap = tne;
    1383                 :            :                 return;
    1384                 :            :         }
    1385                 :            :         if (!tne)
    1386                 :            :                 return;
    1387                 :            : 
    1388                 :            :         /*
    1389                 :            :          * If we have not yet accelerated this jiffy, accelerate all
    1390                 :            :          * callbacks on this CPU.
    1391                 :            :          */
    1392                 :            :         if (rdp->last_accelerate == jiffies)
    1393                 :            :                 return;
    1394                 :            :         rdp->last_accelerate = jiffies;
    1395                 :            :         if (rcu_segcblist_pend_cbs(&rdp->cblist)) {
    1396                 :            :                 rnp = rdp->mynode;
    1397                 :            :                 raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
    1398                 :            :                 needwake = rcu_accelerate_cbs(rnp, rdp);
    1399                 :            :                 raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
    1400                 :            :                 if (needwake)
    1401                 :            :                         rcu_gp_kthread_wake();
    1402                 :            :         }
    1403                 :            : }
    1404                 :            : 
    1405                 :            : /*
    1406                 :            :  * Clean up for exit from idle.  Attempt to advance callbacks based on
    1407                 :            :  * any grace periods that elapsed while the CPU was idle, and if any
    1408                 :            :  * callbacks are now ready to invoke, initiate invocation.
    1409                 :            :  */
    1410                 :            : static void rcu_cleanup_after_idle(void)
    1411                 :            : {
    1412                 :            :         struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
    1413                 :            : 
    1414                 :            :         lockdep_assert_irqs_disabled();
    1415                 :            :         if (rcu_segcblist_is_offloaded(&rdp->cblist))
    1416                 :            :                 return;
    1417                 :            :         if (rcu_try_advance_all_cbs())
    1418                 :            :                 invoke_rcu_core();
    1419                 :            : }
    1420                 :            : 
    1421                 :            : #endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */
    1422                 :            : 
    1423                 :            : #ifdef CONFIG_RCU_NOCB_CPU
    1424                 :            : 
    1425                 :            : /*
    1426                 :            :  * Offload callback processing from the boot-time-specified set of CPUs
    1427                 :            :  * specified by rcu_nocb_mask.  For the CPUs in the set, there are kthreads
    1428                 :            :  * created that pull the callbacks from the corresponding CPU, wait for
    1429                 :            :  * a grace period to elapse, and invoke the callbacks.  These kthreads
    1430                 :            :  * are organized into GP kthreads, which manage incoming callbacks, wait for
    1431                 :            :  * grace periods, and awaken CB kthreads, and the CB kthreads, which only
    1432                 :            :  * invoke callbacks.  Each GP kthread invokes its own CBs.  The no-CBs CPUs
    1433                 :            :  * do a wake_up() on their GP kthread when they insert a callback into any
    1434                 :            :  * empty list, unless the rcu_nocb_poll boot parameter has been specified,
    1435                 :            :  * in which case each kthread actively polls its CPU.  (Which isn't so great
    1436                 :            :  * for energy efficiency, but which does reduce RCU's overhead on that CPU.)
    1437                 :            :  *
    1438                 :            :  * This is intended to be used in conjunction with Frederic Weisbecker's
    1439                 :            :  * adaptive-idle work, which would seriously reduce OS jitter on CPUs
    1440                 :            :  * running CPU-bound user-mode computations.
    1441                 :            :  *
    1442                 :            :  * Offloading of callbacks can also be used as an energy-efficiency
    1443                 :            :  * measure because CPUs with no RCU callbacks queued are more aggressive
    1444                 :            :  * about entering dyntick-idle mode.
    1445                 :            :  */
    1446                 :            : 
    1447                 :            : 
    1448                 :            : /*
    1449                 :            :  * Parse the boot-time rcu_nocb_mask CPU list from the kernel parameters.
    1450                 :            :  * The string after the "rcu_nocbs=" is either "all" for all CPUs, or a
    1451                 :            :  * comma-separated list of CPUs and/or CPU ranges.  If an invalid list is
    1452                 :            :  * given, a warning is emitted and all CPUs are offloaded.
    1453                 :            :  */
    1454                 :            : static int __init rcu_nocb_setup(char *str)
    1455                 :            : {
    1456                 :            :         alloc_bootmem_cpumask_var(&rcu_nocb_mask);
    1457                 :            :         if (!strcasecmp(str, "all"))
    1458                 :            :                 cpumask_setall(rcu_nocb_mask);
    1459                 :            :         else
    1460                 :            :                 if (cpulist_parse(str, rcu_nocb_mask)) {
    1461                 :            :                         pr_warn("rcu_nocbs= bad CPU range, all CPUs set\n");
    1462                 :            :                         cpumask_setall(rcu_nocb_mask);
    1463                 :            :                 }
    1464                 :            :         return 1;
    1465                 :            : }
    1466                 :            : __setup("rcu_nocbs=", rcu_nocb_setup);
    1467                 :            : 
    1468                 :            : static int __init parse_rcu_nocb_poll(char *arg)
    1469                 :            : {
    1470                 :            :         rcu_nocb_poll = true;
    1471                 :            :         return 0;
    1472                 :            : }
    1473                 :            : early_param("rcu_nocb_poll", parse_rcu_nocb_poll);
    1474                 :            : 
    1475                 :            : /*
    1476                 :            :  * Don't bother bypassing ->cblist if the call_rcu() rate is low.
    1477                 :            :  * After all, the main point of bypassing is to avoid lock contention
    1478                 :            :  * on ->nocb_lock, which only can happen at high call_rcu() rates.
    1479                 :            :  */
    1480                 :            : int nocb_nobypass_lim_per_jiffy = 16 * 1000 / HZ;
    1481                 :            : module_param(nocb_nobypass_lim_per_jiffy, int, 0);
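
A quick worked check of the default above, which is scaled by HZ so that the per-second budget stays constant (values illustrative of common HZ settings):

    nocb_nobypass_lim_per_jiffy = 16 * 1000 / HZ
        HZ = 1000:   16 direct enqueues per jiffy
        HZ =  250:   64 direct enqueues per jiffy
        HZ =  100:  160 direct enqueues per jiffy

In each case this works out to roughly 16,000 direct ->cblist enqueues per second per CPU; call_rcu() rates beyond that start being diverted into ->nocb_bypass by rcu_nocb_try_bypass() below.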
    1482                 :            : 
    1483                 :            : /*
    1484                 :            :  * Acquire the specified rcu_data structure's ->nocb_bypass_lock.  If the
    1485                 :            :  * lock isn't immediately available, increment ->nocb_lock_contended to
    1486                 :            :  * flag the contention.
    1487                 :            :  */
    1488                 :            : static void rcu_nocb_bypass_lock(struct rcu_data *rdp)
    1489                 :            : {
    1490                 :            :         lockdep_assert_irqs_disabled();
    1491                 :            :         if (raw_spin_trylock(&rdp->nocb_bypass_lock))
    1492                 :            :                 return;
    1493                 :            :         atomic_inc(&rdp->nocb_lock_contended);
    1494                 :            :         WARN_ON_ONCE(smp_processor_id() != rdp->cpu);
    1495                 :            :         smp_mb__after_atomic(); /* atomic_inc() before lock. */
    1496                 :            :         raw_spin_lock(&rdp->nocb_bypass_lock);
    1497                 :            :         smp_mb__before_atomic(); /* atomic_dec() after lock. */
    1498                 :            :         atomic_dec(&rdp->nocb_lock_contended);
    1499                 :            : }
    1500                 :            : 
    1501                 :            : /*
    1502                 :            :  * Spinwait until the specified rcu_data structure's ->nocb_lock is
    1503                 :            :  * not contended.  Please note that this is extremely special-purpose,
    1504                 :            :  * relying on the fact that at most two kthreads and one CPU contend for
    1505                 :            :  * this lock, and also that the two kthreads are guaranteed to have frequent
    1506                 :            :  * grace-period-duration time intervals between successive acquisitions
    1507                 :            :  * of the lock.  This allows us to use an extremely simple throttling
    1508                 :            :  * mechanism, and further to apply it only to the CPU doing floods of
    1509                 :            :  * call_rcu() invocations.  Don't try this at home!
    1510                 :            :  */
    1511                 :            : static void rcu_nocb_wait_contended(struct rcu_data *rdp)
    1512                 :            : {
    1513                 :            :         WARN_ON_ONCE(smp_processor_id() != rdp->cpu);
    1514                 :            :         while (WARN_ON_ONCE(atomic_read(&rdp->nocb_lock_contended)))
    1515                 :            :                 cpu_relax();
    1516                 :            : }
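
The two helpers above form a small pattern in their own right: try the lock, advertise contention through an atomic counter if the trylock fails, then block; a flooding caller later spins in rcu_nocb_wait_contended() until that counter drains.  A rough user-space analogue, offered only as a sketch (the struct and function names here are invented for illustration, and C11 atomics plus POSIX spinlocks stand in for the kernel's raw spinlocks and smp_mb__{before,after}_atomic() ordering):

    #include <pthread.h>
    #include <stdatomic.h>

    struct flagged_lock {
            pthread_spinlock_t lock;        /* stands in for ->nocb_bypass_lock */
            atomic_int contended;           /* stands in for ->nocb_lock_contended */
    };

    static int flagged_lock_init(struct flagged_lock *fl)
    {
            atomic_init(&fl->contended, 0);
            return pthread_spin_init(&fl->lock, PTHREAD_PROCESS_PRIVATE);
    }

    /* Analogue of rcu_nocb_bypass_lock(): note contention before blocking. */
    static void flagged_lock_acquire(struct flagged_lock *fl)
    {
            if (pthread_spin_trylock(&fl->lock) == 0)
                    return;                          /* fast path: uncontended */
            atomic_fetch_add(&fl->contended, 1);     /* advertise the contention */
            pthread_spin_lock(&fl->lock);            /* slow path: really wait */
            atomic_fetch_sub(&fl->contended, 1);
    }

    /* Analogue of rcu_nocb_bypass_unlock(). */
    static void flagged_lock_release(struct flagged_lock *fl)
    {
            pthread_spin_unlock(&fl->lock);
    }

    /* Analogue of rcu_nocb_wait_contended(): back off while anyone is blocked. */
    static void flagged_lock_wait_uncontended(struct flagged_lock *fl)
    {
            while (atomic_load(&fl->contended))
                    ;       /* the kernel inserts cpu_relax() here */
    }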
    1517                 :            : 
    1518                 :            : /*
    1519                 :            :  * Conditionally acquire the specified rcu_data structure's
    1520                 :            :  * ->nocb_bypass_lock.
    1521                 :            :  */
    1522                 :            : static bool rcu_nocb_bypass_trylock(struct rcu_data *rdp)
    1523                 :            : {
    1524                 :            :         lockdep_assert_irqs_disabled();
    1525                 :            :         return raw_spin_trylock(&rdp->nocb_bypass_lock);
    1526                 :            : }
    1527                 :            : 
    1528                 :            : /*
    1529                 :            :  * Release the specified rcu_data structure's ->nocb_bypass_lock.
    1530                 :            :  */
    1531                 :            : static void rcu_nocb_bypass_unlock(struct rcu_data *rdp)
    1532                 :            : {
    1533                 :            :         lockdep_assert_irqs_disabled();
    1534                 :            :         raw_spin_unlock(&rdp->nocb_bypass_lock);
    1535                 :            : }
    1536                 :            : 
    1537                 :            : /*
    1538                 :            :  * Acquire the specified rcu_data structure's ->nocb_lock, but only
    1539                 :            :  * if it corresponds to a no-CBs CPU.
    1540                 :            :  */
    1541                 :            : static void rcu_nocb_lock(struct rcu_data *rdp)
    1542                 :            : {
    1543                 :            :         lockdep_assert_irqs_disabled();
    1544                 :            :         if (!rcu_segcblist_is_offloaded(&rdp->cblist))
    1545                 :            :                 return;
    1546                 :            :         raw_spin_lock(&rdp->nocb_lock);
    1547                 :            : }
    1548                 :            : 
    1549                 :            : /*
    1550                 :            :  * Release the specified rcu_data structure's ->nocb_lock, but only
    1551                 :            :  * if it corresponds to a no-CBs CPU.
    1552                 :            :  */
    1553                 :            : static void rcu_nocb_unlock(struct rcu_data *rdp)
    1554                 :            : {
    1555                 :            :         if (rcu_segcblist_is_offloaded(&rdp->cblist)) {
    1556                 :            :                 lockdep_assert_irqs_disabled();
    1557                 :            :                 raw_spin_unlock(&rdp->nocb_lock);
    1558                 :            :         }
    1559                 :            : }
    1560                 :            : 
    1561                 :            : /*
    1562                 :            :  * Release the specified rcu_data structure's ->nocb_lock and restore
    1563                 :            :  * interrupts, but only if it corresponds to a no-CBs CPU.
    1564                 :            :  */
    1565                 :            : static void rcu_nocb_unlock_irqrestore(struct rcu_data *rdp,
    1566                 :            :                                        unsigned long flags)
    1567                 :            : {
    1568                 :            :         if (rcu_segcblist_is_offloaded(&rdp->cblist)) {
    1569                 :            :                 lockdep_assert_irqs_disabled();
    1570                 :            :                 raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
    1571                 :            :         } else {
    1572                 :            :                 local_irq_restore(flags);
    1573                 :            :         }
    1574                 :            : }
    1575                 :            : 
    1576                 :            : /* Lockdep check that ->cblist may be safely accessed. */
    1577                 :            : static void rcu_lockdep_assert_cblist_protected(struct rcu_data *rdp)
    1578                 :            : {
    1579                 :            :         lockdep_assert_irqs_disabled();
    1580                 :            :         if (rcu_segcblist_is_offloaded(&rdp->cblist) &&
    1581                 :            :             cpu_online(rdp->cpu))
    1582                 :            :                 lockdep_assert_held(&rdp->nocb_lock);
    1583                 :            : }
    1584                 :            : 
    1585                 :            : /*
    1586                 :            :  * Wake up any no-CBs CPUs' kthreads that were waiting on the just-ended
    1587                 :            :  * grace period.
    1588                 :            :  */
    1589                 :            : static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq)
    1590                 :            : {
    1591                 :            :         swake_up_all(sq);
    1592                 :            : }
    1593                 :            : 
    1594                 :            : static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp)
    1595                 :            : {
    1596                 :            :         return &rnp->nocb_gp_wq[rcu_seq_ctr(rnp->gp_seq) & 0x1];
    1597                 :            : }
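
The single-bit index above relies on how RCU encodes ->gp_seq: assuming the usual scheme from kernel/rcu/rcu.h, in which the low two bits hold grace-period state and the remaining bits count grace periods, rcu_seq_ctr() strips the state bits, so successive grace periods alternate between the two wait queues.  A minimal model of the computation, with illustrative values:

    /* Model only: the real RCU_SEQ_CTR_SHIFT and rcu_seq_ctr() live in rcu.h. */
    #define MODEL_SEQ_CTR_SHIFT 2

    static unsigned long model_seq_ctr(unsigned long s)
    {
            return s >> MODEL_SEQ_CTR_SHIFT;
    }

    /*
     * model_seq_ctr(0x4) & 0x1 == 1   -> nocb_gp_wq[1]
     * model_seq_ctr(0x8) & 0x1 == 0   -> nocb_gp_wq[0]
     * model_seq_ctr(0xc) & 0x1 == 1   -> nocb_gp_wq[1]
     *
     * Waiters for grace period N + 1 therefore never share a queue with
     * waiters for grace period N, which is why rcu_init_one_nocb() below
     * initializes exactly two wait queues per rcu_node.
     */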
    1598                 :            : 
    1599                 :            : static void rcu_init_one_nocb(struct rcu_node *rnp)
    1600                 :            : {
    1601                 :            :         init_swait_queue_head(&rnp->nocb_gp_wq[0]);
    1602                 :            :         init_swait_queue_head(&rnp->nocb_gp_wq[1]);
    1603                 :            : }
    1604                 :            : 
    1605                 :            : /* Is the specified CPU a no-CBs CPU? */
    1606                 :            : bool rcu_is_nocb_cpu(int cpu)
    1607                 :            : {
    1608                 :            :         if (cpumask_available(rcu_nocb_mask))
    1609                 :            :                 return cpumask_test_cpu(cpu, rcu_nocb_mask);
    1610                 :            :         return false;
    1611                 :            : }
    1612                 :            : 
    1613                 :            : /*
    1614                 :            :  * Kick the GP kthread for this NOCB group.  Caller holds ->nocb_lock
    1615                 :            :  * and this function releases it.
    1616                 :            :  */
    1617                 :            : static void wake_nocb_gp(struct rcu_data *rdp, bool force,
    1618                 :            :                            unsigned long flags)
    1619                 :            :         __releases(rdp->nocb_lock)
    1620                 :            : {
    1621                 :            :         bool needwake = false;
    1622                 :            :         struct rcu_data *rdp_gp = rdp->nocb_gp_rdp;
    1623                 :            : 
    1624                 :            :         lockdep_assert_held(&rdp->nocb_lock);
    1625                 :            :         if (!READ_ONCE(rdp_gp->nocb_gp_kthread)) {
    1626                 :            :                 trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
    1627                 :            :                                     TPS("AlreadyAwake"));
    1628                 :            :                 rcu_nocb_unlock_irqrestore(rdp, flags);
    1629                 :            :                 return;
    1630                 :            :         }
    1631                 :            :         del_timer(&rdp->nocb_timer);
    1632                 :            :         rcu_nocb_unlock_irqrestore(rdp, flags);
    1633                 :            :         raw_spin_lock_irqsave(&rdp_gp->nocb_gp_lock, flags);
    1634                 :            :         if (force || READ_ONCE(rdp_gp->nocb_gp_sleep)) {
    1635                 :            :                 WRITE_ONCE(rdp_gp->nocb_gp_sleep, false);
    1636                 :            :                 needwake = true;
    1637                 :            :                 trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("DoWake"));
    1638                 :            :         }
    1639                 :            :         raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags);
    1640                 :            :         if (needwake)
    1641                 :            :                 wake_up_process(rdp_gp->nocb_gp_kthread);
    1642                 :            : }
    1643                 :            : 
    1644                 :            : /*
    1645                 :            :  * Arrange to wake the GP kthread for this NOCB group at some future
    1646                 :            :  * time when it is safe to do so.
    1647                 :            :  */
    1648                 :            : static void wake_nocb_gp_defer(struct rcu_data *rdp, int waketype,
    1649                 :            :                                const char *reason)
    1650                 :            : {
    1651                 :            :         if (rdp->nocb_defer_wakeup == RCU_NOCB_WAKE_NOT)
    1652                 :            :                 mod_timer(&rdp->nocb_timer, jiffies + 1);
    1653                 :            :         if (rdp->nocb_defer_wakeup < waketype)
    1654                 :            :                 WRITE_ONCE(rdp->nocb_defer_wakeup, waketype);
    1655                 :            :         trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, reason);
    1656                 :            : }
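
The "<" comparison above implies a strictly increasing urgency ordering among the deferred-wakeup states, with the one-jiffy timer armed only on the transition out of the idle state.  A sketch of that ordering (the enum name and comments here are illustrative; the real constants are defined in kernel/rcu/tree.h):

    /* Model of the deferred-wakeup escalation: requests only raise urgency. */
    enum model_nocb_wake {
            MODEL_NOCB_WAKE_NOT,    /* no deferred wakeup pending             */
            MODEL_NOCB_WAKE,        /* ordinary wakeup, e.g. first callback   */
            MODEL_NOCB_WAKE_FORCE,  /* forced wakeup, e.g. callback overload  */
    };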
    1657                 :            : 
    1658                 :            : /*
    1659                 :            :  * Flush the ->nocb_bypass queue into ->cblist, enqueuing rhp if non-NULL.
    1660                 :            :  * However, if there is a callback to be enqueued and if ->nocb_bypass
    1661                 :            :  * proves to be initially empty, just return false because the no-CB GP
    1662                 :            :  * kthread may need to be awakened in this case.
    1663                 :            :  *
    1664                 :            :  * Note that this function always returns true if rhp is NULL.
    1665                 :            :  */
    1666                 :            : static bool rcu_nocb_do_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
    1667                 :            :                                      unsigned long j)
    1668                 :            : {
    1669                 :            :         struct rcu_cblist rcl;
    1670                 :            : 
    1671                 :            :         WARN_ON_ONCE(!rcu_segcblist_is_offloaded(&rdp->cblist));
    1672                 :            :         rcu_lockdep_assert_cblist_protected(rdp);
    1673                 :            :         lockdep_assert_held(&rdp->nocb_bypass_lock);
    1674                 :            :         if (rhp && !rcu_cblist_n_cbs(&rdp->nocb_bypass)) {
    1675                 :            :                 raw_spin_unlock(&rdp->nocb_bypass_lock);
    1676                 :            :                 return false;
    1677                 :            :         }
    1678                 :            :         /* Note: ->cblist.len already accounts for ->nocb_bypass contents. */
    1679                 :            :         if (rhp)
    1680                 :            :                 rcu_segcblist_inc_len(&rdp->cblist); /* Must precede enqueue. */
    1681                 :            :         rcu_cblist_flush_enqueue(&rcl, &rdp->nocb_bypass, rhp);
    1682                 :            :         rcu_segcblist_insert_pend_cbs(&rdp->cblist, &rcl);
    1683                 :            :         WRITE_ONCE(rdp->nocb_bypass_first, j);
    1684                 :            :         rcu_nocb_bypass_unlock(rdp);
    1685                 :            :         return true;
    1686                 :            : }
    1687                 :            : 
    1688                 :            : /*
    1689                 :            :  * Flush the ->nocb_bypass queue into ->cblist, enqueuing rhp if non-NULL.
    1690                 :            :  * However, if there is a callback to be enqueued and if ->nocb_bypass
    1691                 :            :  * proves to be initially empty, just return false because the no-CB GP
    1692                 :            :  * kthread may need to be awakened in this case.
    1693                 :            :  *
    1694                 :            :  * Note that this function always returns true if rhp is NULL.
    1695                 :            :  */
    1696                 :            : static bool rcu_nocb_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
    1697                 :            :                                   unsigned long j)
    1698                 :            : {
    1699                 :            :         if (!rcu_segcblist_is_offloaded(&rdp->cblist))
    1700                 :            :                 return true;
    1701                 :            :         rcu_lockdep_assert_cblist_protected(rdp);
    1702                 :            :         rcu_nocb_bypass_lock(rdp);
    1703                 :            :         return rcu_nocb_do_flush_bypass(rdp, rhp, j);
    1704                 :            : }
    1705                 :            : 
    1706                 :            : /*
    1707                 :            :  * If the ->nocb_bypass_lock is immediately available, flush the
    1708                 :            :  * ->nocb_bypass queue into ->cblist.
    1709                 :            :  */
    1710                 :            : static void rcu_nocb_try_flush_bypass(struct rcu_data *rdp, unsigned long j)
    1711                 :            : {
    1712                 :            :         rcu_lockdep_assert_cblist_protected(rdp);
    1713                 :            :         if (!rcu_segcblist_is_offloaded(&rdp->cblist) ||
    1714                 :            :             !rcu_nocb_bypass_trylock(rdp))
    1715                 :            :                 return;
    1716                 :            :         WARN_ON_ONCE(!rcu_nocb_do_flush_bypass(rdp, NULL, j));
    1717                 :            : }
    1718                 :            : 
    1719                 :            : /*
    1720                 :            :  * See whether it is appropriate to use the ->nocb_bypass list in order
    1721                 :            :  * to control contention on ->nocb_lock.  A limited number of direct
    1722                 :            :  * enqueues are permitted into ->cblist per jiffy.  If ->nocb_bypass
    1723                 :            :  * is non-empty, further callbacks must be placed into ->nocb_bypass,
    1724                 :            :  * otherwise rcu_barrier() breaks.  Use rcu_nocb_flush_bypass() to switch
    1725                 :            :  * back to direct use of ->cblist.  However, ->nocb_bypass should not be
    1726                 :            :  * used if ->cblist is empty, because otherwise callbacks can be stranded
    1727                 :            :  * on ->nocb_bypass because we cannot count on the current CPU ever again
    1728                 :            :  * invoking call_rcu().  The general rule is that if ->nocb_bypass is
    1729                 :            :  * non-empty, the corresponding no-CBs grace-period kthread must not be
    1730                 :            :  * in an indefinite sleep state.
    1731                 :            :  *
    1732                 :            :  * Finally, it is not permitted to use the bypass during early boot,
    1733                 :            :  * as doing so would confuse the auto-initialization code.  Besides
    1734                 :            :  * which, there is no point in worrying about lock contention while
    1735                 :            :  * there is only one CPU in operation.
    1736                 :            :  */
    1737                 :            : static bool rcu_nocb_try_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
    1738                 :            :                                 bool *was_alldone, unsigned long flags)
    1739                 :            : {
    1740                 :            :         unsigned long c;
    1741                 :            :         unsigned long cur_gp_seq;
    1742                 :            :         unsigned long j = jiffies;
    1743                 :            :         long ncbs = rcu_cblist_n_cbs(&rdp->nocb_bypass);
    1744                 :            : 
    1745                 :            :         if (!rcu_segcblist_is_offloaded(&rdp->cblist)) {
    1746                 :            :                 *was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist);
    1747                 :            :                 return false; /* Not offloaded, no bypassing. */
    1748                 :            :         }
    1749                 :            :         lockdep_assert_irqs_disabled();
    1750                 :            : 
    1751                 :            :         // Don't use ->nocb_bypass during early boot.
    1752                 :            :         if (rcu_scheduler_active != RCU_SCHEDULER_RUNNING) {
    1753                 :            :                 rcu_nocb_lock(rdp);
    1754                 :            :                 WARN_ON_ONCE(rcu_cblist_n_cbs(&rdp->nocb_bypass));
    1755                 :            :                 *was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist);
    1756                 :            :                 return false;
    1757                 :            :         }
    1758                 :            : 
    1759                 :            :         // If we have advanced to a new jiffy, reset counts to allow
    1760                 :            :         // moving back from ->nocb_bypass to ->cblist.
    1761                 :            :         if (j == rdp->nocb_nobypass_last) {
    1762                 :            :                 c = rdp->nocb_nobypass_count + 1;
    1763                 :            :         } else {
    1764                 :            :                 WRITE_ONCE(rdp->nocb_nobypass_last, j);
    1765                 :            :                 c = rdp->nocb_nobypass_count - nocb_nobypass_lim_per_jiffy;
    1766                 :            :                 if (ULONG_CMP_LT(rdp->nocb_nobypass_count,
    1767                 :            :                                  nocb_nobypass_lim_per_jiffy))
    1768                 :            :                         c = 0;
    1769                 :            :                 else if (c > nocb_nobypass_lim_per_jiffy)
    1770                 :            :                         c = nocb_nobypass_lim_per_jiffy;
    1771                 :            :         }
    1772                 :            :         WRITE_ONCE(rdp->nocb_nobypass_count, c);
    1773                 :            : 
    1774                 :            :         // If there hasn't yet been all that many ->cblist enqueues
    1775                 :            :         // this jiffy, tell the caller to enqueue onto ->cblist.  But flush
    1776                 :            :         // ->nocb_bypass first.
    1777                 :            :         if (rdp->nocb_nobypass_count < nocb_nobypass_lim_per_jiffy) {
    1778                 :            :                 rcu_nocb_lock(rdp);
    1779                 :            :                 *was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist);
    1780                 :            :                 if (*was_alldone)
    1781                 :            :                         trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
    1782                 :            :                                             TPS("FirstQ"));
    1783                 :            :                 WARN_ON_ONCE(!rcu_nocb_flush_bypass(rdp, NULL, j));
    1784                 :            :                 WARN_ON_ONCE(rcu_cblist_n_cbs(&rdp->nocb_bypass));
    1785                 :            :                 return false; // Caller must enqueue the callback.
    1786                 :            :         }
    1787                 :            : 
    1788                 :            :         // If ->nocb_bypass has been used too long or is too full,
    1789                 :            :         // flush ->nocb_bypass to ->cblist.
    1790                 :            :         if ((ncbs && j != READ_ONCE(rdp->nocb_bypass_first)) ||
    1791                 :            :             ncbs >= qhimark) {
    1792                 :            :                 rcu_nocb_lock(rdp);
    1793                 :            :                 if (!rcu_nocb_flush_bypass(rdp, rhp, j)) {
    1794                 :            :                         *was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist);
    1795                 :            :                         if (*was_alldone)
    1796                 :            :                                 trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
    1797                 :            :                                                     TPS("FirstQ"));
    1798                 :            :                         WARN_ON_ONCE(rcu_cblist_n_cbs(&rdp->nocb_bypass));
    1799                 :            :                         return false; // Caller must enqueue the callback.
    1800                 :            :                 }
    1801                 :            :                 if (j != rdp->nocb_gp_adv_time &&
    1802                 :            :                     rcu_segcblist_nextgp(&rdp->cblist, &cur_gp_seq) &&
    1803                 :            :                     rcu_seq_done(&rdp->mynode->gp_seq, cur_gp_seq)) {
    1804                 :            :                         rcu_advance_cbs_nowake(rdp->mynode, rdp);
    1805                 :            :                         rdp->nocb_gp_adv_time = j;
    1806                 :            :                 }
    1807                 :            :                 rcu_nocb_unlock_irqrestore(rdp, flags);
    1808                 :            :                 return true; // Callback already enqueued.
    1809                 :            :         }
    1810                 :            : 
    1811                 :            :         // We need to use the bypass.
    1812                 :            :         rcu_nocb_wait_contended(rdp);
    1813                 :            :         rcu_nocb_bypass_lock(rdp);
    1814                 :            :         ncbs = rcu_cblist_n_cbs(&rdp->nocb_bypass);
    1815                 :            :         rcu_segcblist_inc_len(&rdp->cblist); /* Must precede enqueue. */
    1816                 :            :         rcu_cblist_enqueue(&rdp->nocb_bypass, rhp);
    1817                 :            :         if (!ncbs) {
    1818                 :            :                 WRITE_ONCE(rdp->nocb_bypass_first, j);
    1819                 :            :                 trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("FirstBQ"));
    1820                 :            :         }
    1821                 :            :         rcu_nocb_bypass_unlock(rdp);
    1822                 :            :         smp_mb(); /* Order enqueue before wake. */
    1823                 :            :         if (ncbs) {
    1824                 :            :                 local_irq_restore(flags);
    1825                 :            :         } else {
    1826                 :            :                 // No-CBs GP kthread might be indefinitely asleep; if so, wake it.
    1827                 :            :                 rcu_nocb_lock(rdp); // Rare during call_rcu() flood.
    1828                 :            :                 if (!rcu_segcblist_pend_cbs(&rdp->cblist)) {
    1829                 :            :                         trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
    1830                 :            :                                             TPS("FirstBQwake"));
    1831                 :            :                         __call_rcu_nocb_wake(rdp, true, flags);
    1832                 :            :                 } else {
    1833                 :            :                         trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
    1834                 :            :                                             TPS("FirstBQnoWake"));
    1835                 :            :                         rcu_nocb_unlock_irqrestore(rdp, flags);
    1836                 :            :                 }
    1837                 :            :         }
    1838                 :            :         return true; // Callback already enqueued.
    1839                 :            : }
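
Putting numbers on the decision above may help.  Assuming HZ=1000 (so nocb_nobypass_lim_per_jiffy is 16) and the usual qhimark default of 10000 (both illustrative; each is tunable), a sustained call_rcu() flood on one offloaded CPU behaves roughly as follows:

    - the first 16 callbacks in a given jiffy are enqueued directly onto
      ->cblist (flushing any stale ->nocb_bypass contents first);
    - callbacks beyond that per-jiffy budget are appended to ->nocb_bypass
      under ->nocb_bypass_lock, avoiding ->nocb_lock contention;
    - the bypass is flushed back into ->cblist once it grows to qhimark
      callbacks or ages past a jiffy, or when the flood subsides and the
      per-jiffy count decays back below the limit.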
    1840                 :            : 
    1841                 :            : /*
    1842                 :            :  * Awaken the no-CBs grace-period kthread if needed, either due to it
    1843                 :            :  * legitimately being asleep or due to overload conditions.
    1844                 :            :  *
    1845                 :            :  * If warranted, also wake up the kthread servicing this CPU's queues.
    1846                 :            :  */
    1847                 :            : static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_alldone,
    1848                 :            :                                  unsigned long flags)
    1849                 :            :                                  __releases(rdp->nocb_lock)
    1850                 :            : {
    1851                 :            :         unsigned long cur_gp_seq;
    1852                 :            :         unsigned long j;
    1853                 :            :         long len;
    1854                 :            :         struct task_struct *t;
    1855                 :            : 
    1856                 :            :         // If we are being polled or there is no kthread, just leave.
    1857                 :            :         t = READ_ONCE(rdp->nocb_gp_kthread);
    1858                 :            :         if (rcu_nocb_poll || !t) {
    1859                 :            :                 trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
    1860                 :            :                                     TPS("WakeNotPoll"));
    1861                 :            :                 rcu_nocb_unlock_irqrestore(rdp, flags);
    1862                 :            :                 return;
    1863                 :            :         }
    1864                 :            :         // Need to actually do a wakeup.
    1865                 :            :         len = rcu_segcblist_n_cbs(&rdp->cblist);
    1866                 :            :         if (was_alldone) {
    1867                 :            :                 rdp->qlen_last_fqs_check = len;
    1868                 :            :                 if (!irqs_disabled_flags(flags)) {
    1869                 :            :                         /* ... if queue was empty ... */
    1870                 :            :                         wake_nocb_gp(rdp, false, flags);
    1871                 :            :                         trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
    1872                 :            :                                             TPS("WakeEmpty"));
    1873                 :            :                 } else {
    1874                 :            :                         wake_nocb_gp_defer(rdp, RCU_NOCB_WAKE,
    1875                 :            :                                            TPS("WakeEmptyIsDeferred"));
    1876                 :            :                         rcu_nocb_unlock_irqrestore(rdp, flags);
    1877                 :            :                 }
    1878                 :            :         } else if (len > rdp->qlen_last_fqs_check + qhimark) {
    1879                 :            :                 /* ... or if many callbacks queued. */
    1880                 :            :                 rdp->qlen_last_fqs_check = len;
    1881                 :            :                 j = jiffies;
    1882                 :            :                 if (j != rdp->nocb_gp_adv_time &&
    1883                 :            :                     rcu_segcblist_nextgp(&rdp->cblist, &cur_gp_seq) &&
    1884                 :            :                     rcu_seq_done(&rdp->mynode->gp_seq, cur_gp_seq)) {
    1885                 :            :                         rcu_advance_cbs_nowake(rdp->mynode, rdp);
    1886                 :            :                         rdp->nocb_gp_adv_time = j;
    1887                 :            :                 }
    1888                 :            :                 smp_mb(); /* Enqueue before timer_pending(). */
    1889                 :            :                 if ((rdp->nocb_cb_sleep ||
    1890                 :            :                      !rcu_segcblist_ready_cbs(&rdp->cblist)) &&
    1891                 :            :                     !timer_pending(&rdp->nocb_bypass_timer))
    1892                 :            :                         wake_nocb_gp_defer(rdp, RCU_NOCB_WAKE_FORCE,
    1893                 :            :                                            TPS("WakeOvfIsDeferred"));
    1894                 :            :                 rcu_nocb_unlock_irqrestore(rdp, flags);
    1895                 :            :         } else {
    1896                 :            :                 trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WakeNot"));
    1897                 :            :                 rcu_nocb_unlock_irqrestore(rdp, flags);
    1898                 :            :         }
    1899                 :            :         return;
    1900                 :            : }
    1901                 :            : 
    1902                 :            : /* Wake up the no-CBs GP kthread to flush ->nocb_bypass. */
    1903                 :            : static void do_nocb_bypass_wakeup_timer(struct timer_list *t)
    1904                 :            : {
    1905                 :            :         unsigned long flags;
    1906                 :            :         struct rcu_data *rdp = from_timer(rdp, t, nocb_bypass_timer);
    1907                 :            : 
    1908                 :            :         trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("Timer"));
    1909                 :            :         rcu_nocb_lock_irqsave(rdp, flags);
    1910                 :            :         smp_mb__after_spinlock(); /* Timer expire before wakeup. */
    1911                 :            :         __call_rcu_nocb_wake(rdp, true, flags);
    1912                 :            : }
    1913                 :            : 
    1914                 :            : /*
    1915                 :            :  * No-CBs GP kthreads come here to wait for additional callbacks to show up
    1916                 :            :  * or for grace periods to end.
    1917                 :            :  */
    1918                 :            : static void nocb_gp_wait(struct rcu_data *my_rdp)
    1919                 :            : {
    1920                 :            :         bool bypass = false;
    1921                 :            :         long bypass_ncbs;
    1922                 :            :         int __maybe_unused cpu = my_rdp->cpu;
    1923                 :            :         unsigned long cur_gp_seq;
    1924                 :            :         unsigned long flags;
    1925                 :            :         bool gotcbs = false;
    1926                 :            :         unsigned long j = jiffies;
    1927                 :            :         bool needwait_gp = false; // This prevents actual uninitialized use.
    1928                 :            :         bool needwake;
    1929                 :            :         bool needwake_gp;
    1930                 :            :         struct rcu_data *rdp;
    1931                 :            :         struct rcu_node *rnp;
    1932                 :            :         unsigned long wait_gp_seq = 0; // Suppress "use uninitialized" warning.
    1933                 :            : 
    1934                 :            :         /*
    1935                 :            :          * Each pass through the following loop checks for CBs and for the
    1936                 :            :          * nearest grace period (if any) to wait for next.  The CB kthreads
    1937                 :            :          * and the global grace-period kthread are awakened if needed.
    1938                 :            :          */
    1939                 :            :         for (rdp = my_rdp; rdp; rdp = rdp->nocb_next_cb_rdp) {
    1940                 :            :                 trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("Check"));
    1941                 :            :                 rcu_nocb_lock_irqsave(rdp, flags);
    1942                 :            :                 bypass_ncbs = rcu_cblist_n_cbs(&rdp->nocb_bypass);
    1943                 :            :                 if (bypass_ncbs &&
    1944                 :            :                     (time_after(j, READ_ONCE(rdp->nocb_bypass_first) + 1) ||
    1945                 :            :                      bypass_ncbs > 2 * qhimark)) {
    1946                 :            :                         // Bypass full or old, so flush it.
    1947                 :            :                         (void)rcu_nocb_try_flush_bypass(rdp, j);
    1948                 :            :                         bypass_ncbs = rcu_cblist_n_cbs(&rdp->nocb_bypass);
    1949                 :            :                 } else if (!bypass_ncbs && rcu_segcblist_empty(&rdp->cblist)) {
    1950                 :            :                         rcu_nocb_unlock_irqrestore(rdp, flags);
    1951                 :            :                         continue; /* No callbacks here, try next. */
    1952                 :            :                 }
    1953                 :            :                 if (bypass_ncbs) {
    1954                 :            :                         trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
    1955                 :            :                                             TPS("Bypass"));
    1956                 :            :                         bypass = true;
    1957                 :            :                 }
    1958                 :            :                 rnp = rdp->mynode;
    1959                 :            :                 if (bypass) {  // Avoid race with first bypass CB.
    1960                 :            :                         WRITE_ONCE(my_rdp->nocb_defer_wakeup,
    1961                 :            :                                    RCU_NOCB_WAKE_NOT);
    1962                 :            :                         del_timer(&my_rdp->nocb_timer);
    1963                 :            :                 }
    1964                 :            :                 // Advance callbacks if helpful and low contention.
    1965                 :            :                 needwake_gp = false;
    1966                 :            :                 if (!rcu_segcblist_restempty(&rdp->cblist,
    1967                 :            :                                              RCU_NEXT_READY_TAIL) ||
    1968                 :            :                     (rcu_segcblist_nextgp(&rdp->cblist, &cur_gp_seq) &&
    1969                 :            :                      rcu_seq_done(&rnp->gp_seq, cur_gp_seq))) {
    1970                 :            :                         raw_spin_lock_rcu_node(rnp); /* irqs disabled. */
    1971                 :            :                         needwake_gp = rcu_advance_cbs(rnp, rdp);
    1972                 :            :                         raw_spin_unlock_rcu_node(rnp); /* irqs disabled. */
    1973                 :            :                 }
    1974                 :            :                 // Need to wait on some grace period?
    1975                 :            :                 WARN_ON_ONCE(!rcu_segcblist_restempty(&rdp->cblist,
    1976                 :            :                                                       RCU_NEXT_READY_TAIL));
    1977                 :            :                 if (rcu_segcblist_nextgp(&rdp->cblist, &cur_gp_seq)) {
    1978                 :            :                         if (!needwait_gp ||
    1979                 :            :                             ULONG_CMP_LT(cur_gp_seq, wait_gp_seq))
    1980                 :            :                                 wait_gp_seq = cur_gp_seq;
    1981                 :            :                         needwait_gp = true;
    1982                 :            :                         trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
    1983                 :            :                                             TPS("NeedWaitGP"));
    1984                 :            :                 }
    1985                 :            :                 if (rcu_segcblist_ready_cbs(&rdp->cblist)) {
    1986                 :            :                         needwake = rdp->nocb_cb_sleep;
    1987                 :            :                         WRITE_ONCE(rdp->nocb_cb_sleep, false);
    1988                 :            :                         smp_mb(); /* CB invocation -after- GP end. */
    1989                 :            :                 } else {
    1990                 :            :                         needwake = false;
    1991                 :            :                 }
    1992                 :            :                 rcu_nocb_unlock_irqrestore(rdp, flags);
    1993                 :            :                 if (needwake) {
    1994                 :            :                         swake_up_one(&rdp->nocb_cb_wq);
    1995                 :            :                         gotcbs = true;
    1996                 :            :                 }
    1997                 :            :                 if (needwake_gp)
    1998                 :            :                         rcu_gp_kthread_wake();
    1999                 :            :         }
    2000                 :            : 
    2001                 :            :         my_rdp->nocb_gp_bypass = bypass;
    2002                 :            :         my_rdp->nocb_gp_gp = needwait_gp;
    2003                 :            :         my_rdp->nocb_gp_seq = needwait_gp ? wait_gp_seq : 0;
    2004                 :            :         if (bypass && !rcu_nocb_poll) {
    2005                 :            :                 // At least one child with non-empty ->nocb_bypass, so set
    2006                 :            :                 // timer in order to avoid stranding its callbacks.
    2007                 :            :                 raw_spin_lock_irqsave(&my_rdp->nocb_gp_lock, flags);
    2008                 :            :                 mod_timer(&my_rdp->nocb_bypass_timer, j + 2);
    2009                 :            :                 raw_spin_unlock_irqrestore(&my_rdp->nocb_gp_lock, flags);
    2010                 :            :         }
    2011                 :            :         if (rcu_nocb_poll) {
    2012                 :            :                 /* Polling, so trace if first poll in the series. */
    2013                 :            :                 if (gotcbs)
    2014                 :            :                         trace_rcu_nocb_wake(rcu_state.name, cpu, TPS("Poll"));
    2015                 :            :                 schedule_timeout_interruptible(1);
    2016                 :            :         } else if (!needwait_gp) {
    2017                 :            :                 /* Wait for callbacks to appear. */
    2018                 :            :                 trace_rcu_nocb_wake(rcu_state.name, cpu, TPS("Sleep"));
    2019                 :            :                 swait_event_interruptible_exclusive(my_rdp->nocb_gp_wq,
    2020                 :            :                                 !READ_ONCE(my_rdp->nocb_gp_sleep));
    2021                 :            :                 trace_rcu_nocb_wake(rcu_state.name, cpu, TPS("EndSleep"));
    2022                 :            :         } else {
    2023                 :            :                 rnp = my_rdp->mynode;
    2024                 :            :                 trace_rcu_this_gp(rnp, my_rdp, wait_gp_seq, TPS("StartWait"));
    2025                 :            :                 swait_event_interruptible_exclusive(
    2026                 :            :                         rnp->nocb_gp_wq[rcu_seq_ctr(wait_gp_seq) & 0x1],
    2027                 :            :                         rcu_seq_done(&rnp->gp_seq, wait_gp_seq) ||
    2028                 :            :                         !READ_ONCE(my_rdp->nocb_gp_sleep));
    2029                 :            :                 trace_rcu_this_gp(rnp, my_rdp, wait_gp_seq, TPS("EndWait"));
    2030                 :            :         }
    2031                 :            :         if (!rcu_nocb_poll) {
    2032                 :            :                 raw_spin_lock_irqsave(&my_rdp->nocb_gp_lock, flags);
    2033                 :            :                 if (bypass)
    2034                 :            :                         del_timer(&my_rdp->nocb_bypass_timer);
    2035                 :            :                 WRITE_ONCE(my_rdp->nocb_gp_sleep, true);
    2036                 :            :                 raw_spin_unlock_irqrestore(&my_rdp->nocb_gp_lock, flags);
    2037                 :            :         }
    2038                 :            :         my_rdp->nocb_gp_seq = -1;
    2039                 :            :         WARN_ON(signal_pending(current));
    2040                 :            : }
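
To summarize the tail of the function above (a restatement of the code, not additional behavior), each pass through nocb_gp_wait() ends in one of three waits:

    - rcu_nocb_poll set:       sleep for one jiffy, then rescan;
    - no grace period needed:  block on my_rdp->nocb_gp_wq until a new
                               callback clears ->nocb_gp_sleep via wake_nocb_gp();
    - grace period needed:     block on the rcu_node's nocb_gp_wq[] entry for
                               wait_gp_seq until that grace period completes
                               (or an explicit wakeup arrives).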
    2041                 :            : 
    2042                 :            : /*
    2043                 :            :  * No-CBs grace-period-wait kthread.  There is one of these per group
    2044                 :            :  * of CPUs, but it is created only after at least one CPU in that group
    2045                 :            :  * has come online since boot.  This kthread checks for newly posted
    2046                 :            :  * callbacks from any of the CPUs it is responsible for, waits for a
    2047                 :            :  * grace period, then awakens all of the rcu_nocb_cb_kthread() instances
    2048                 :            :  * that then have callback-invocation work to do.
    2049                 :            :  */
    2050                 :            : static int rcu_nocb_gp_kthread(void *arg)
    2051                 :            : {
    2052                 :            :         struct rcu_data *rdp = arg;
    2053                 :            : 
    2054                 :            :         for (;;) {
    2055                 :            :                 WRITE_ONCE(rdp->nocb_gp_loops, rdp->nocb_gp_loops + 1);
    2056                 :            :                 nocb_gp_wait(rdp);
    2057                 :            :                 cond_resched_tasks_rcu_qs();
    2058                 :            :         }
    2059                 :            :         return 0;
    2060                 :            : }
    2061                 :            : 
    2062                 :            : /*
    2063                 :            :  * Invoke any ready callbacks from the corresponding no-CBs CPU,
    2064                 :            :  * then, if there are no more, wait for more to appear.
    2065                 :            :  */
    2066                 :            : static void nocb_cb_wait(struct rcu_data *rdp)
    2067                 :            : {
    2068                 :            :         unsigned long cur_gp_seq;
    2069                 :            :         unsigned long flags;
    2070                 :            :         bool needwake_gp = false;
    2071                 :            :         struct rcu_node *rnp = rdp->mynode;
    2072                 :            : 
    2073                 :            :         local_irq_save(flags);
    2074                 :            :         rcu_momentary_dyntick_idle();
    2075                 :            :         local_irq_restore(flags);
    2076                 :            :         local_bh_disable();
    2077                 :            :         rcu_do_batch(rdp);
    2078                 :            :         local_bh_enable();
    2079                 :            :         lockdep_assert_irqs_enabled();
    2080                 :            :         rcu_nocb_lock_irqsave(rdp, flags);
    2081                 :            :         if (rcu_segcblist_nextgp(&rdp->cblist, &cur_gp_seq) &&
    2082                 :            :             rcu_seq_done(&rnp->gp_seq, cur_gp_seq) &&
    2083                 :            :             raw_spin_trylock_rcu_node(rnp)) { /* irqs already disabled. */
    2084                 :            :                 needwake_gp = rcu_advance_cbs(rdp->mynode, rdp);
    2085                 :            :                 raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
    2086                 :            :         }
    2087                 :            :         if (rcu_segcblist_ready_cbs(&rdp->cblist)) {
    2088                 :            :                 rcu_nocb_unlock_irqrestore(rdp, flags);
    2089                 :            :                 if (needwake_gp)
    2090                 :            :                         rcu_gp_kthread_wake();
    2091                 :            :                 return;
    2092                 :            :         }
    2093                 :            : 
    2094                 :            :         trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("CBSleep"));
    2095                 :            :         WRITE_ONCE(rdp->nocb_cb_sleep, true);
    2096                 :            :         rcu_nocb_unlock_irqrestore(rdp, flags);
    2097                 :            :         if (needwake_gp)
    2098                 :            :                 rcu_gp_kthread_wake();
    2099                 :            :         swait_event_interruptible_exclusive(rdp->nocb_cb_wq,
    2100                 :            :                                  !READ_ONCE(rdp->nocb_cb_sleep));
    2101                 :            :         if (!smp_load_acquire(&rdp->nocb_cb_sleep)) { /* VVV */
    2102                 :            :                 /* ^^^ Ensure CB invocation follows _sleep test. */
    2103                 :            :                 return;
    2104                 :            :         }
    2105                 :            :         WARN_ON(signal_pending(current));
    2106                 :            :         trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WokeEmpty"));
    2107                 :            : }
    2108                 :            : 
    2109                 :            : /*
    2110                 :            :  * Per-rcu_data kthread, but only for no-CBs CPUs.  Repeatedly invoke
    2111                 :            :  * nocb_cb_wait() to do the dirty work.
    2112                 :            :  */
    2113                 :            : static int rcu_nocb_cb_kthread(void *arg)
    2114                 :            : {
    2115                 :            :         struct rcu_data *rdp = arg;
    2116                 :            : 
    2117                 :            :         // Each pass through this loop does one callback batch, and,
    2118                 :            :         // if there are no more ready callbacks, waits for them.
    2119                 :            :         for (;;) {
    2120                 :            :                 nocb_cb_wait(rdp);
    2121                 :            :                 cond_resched_tasks_rcu_qs();
    2122                 :            :         }
    2123                 :            :         return 0;
    2124                 :            : }
    2125                 :            : 
    2126                 :            : /* Is a deferred wakeup of rcu_nocb_kthread() required? */
    2127                 :            : static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp)
    2128                 :            : {
    2129                 :            :         return READ_ONCE(rdp->nocb_defer_wakeup);
    2130                 :            : }
    2131                 :            : 
    2132                 :            : /* Do a deferred wakeup of rcu_nocb_kthread(). */
    2133                 :            : static void do_nocb_deferred_wakeup_common(struct rcu_data *rdp)
    2134                 :            : {
    2135                 :            :         unsigned long flags;
    2136                 :            :         int ndw;
    2137                 :            : 
    2138                 :            :         rcu_nocb_lock_irqsave(rdp, flags);
    2139                 :            :         if (!rcu_nocb_need_deferred_wakeup(rdp)) {
    2140                 :            :                 rcu_nocb_unlock_irqrestore(rdp, flags);
    2141                 :            :                 return;
    2142                 :            :         }
    2143                 :            :         ndw = READ_ONCE(rdp->nocb_defer_wakeup);
    2144                 :            :         WRITE_ONCE(rdp->nocb_defer_wakeup, RCU_NOCB_WAKE_NOT);
    2145                 :            :         wake_nocb_gp(rdp, ndw == RCU_NOCB_WAKE_FORCE, flags);
    2146                 :            :         trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("DeferredWake"));
    2147                 :            : }
    2148                 :            : 
    2149                 :            : /* Do a deferred wakeup of rcu_nocb_kthread() from a timer handler. */
    2150                 :            : static void do_nocb_deferred_wakeup_timer(struct timer_list *t)
    2151                 :            : {
    2152                 :            :         struct rcu_data *rdp = from_timer(rdp, t, nocb_timer);
    2153                 :            : 
    2154                 :            :         do_nocb_deferred_wakeup_common(rdp);
    2155                 :            : }
    2156                 :            : 
    2157                 :            : /*
    2158                 :            :  * Do a deferred wakeup of rcu_nocb_kthread() from fastpath.
    2159                 :            :  * This means we do an inexact common-case check.  Note that if
    2160                 :            :  * we miss, ->nocb_timer will eventually clean things up.
    2161                 :            :  */
    2162                 :            : static void do_nocb_deferred_wakeup(struct rcu_data *rdp)
    2163                 :            : {
    2164                 :            :         if (rcu_nocb_need_deferred_wakeup(rdp))
    2165                 :            :                 do_nocb_deferred_wakeup_common(rdp);
    2166                 :            : }
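/*
 * Illustrative stand-alone model (not part of the kernel source): the two
 * entry points above, the timer handler and the fastpath check, funnel into
 * the same common routine, so a wakeup request that the fastpath happens to
 * miss is still honored when ->nocb_timer fires.  The wake levels below
 * mirror RCU_NOCB_WAKE_NOT and RCU_NOCB_WAKE_FORCE used above; the
 * intermediate level and every identifier prefixed "model_" are assumptions
 * made for this sketch only.
 */
#include <stdio.h>

enum model_wake_level { MODEL_WAKE_NOT, MODEL_WAKE, MODEL_WAKE_FORCE };

struct model_rdp {
	enum model_wake_level nocb_defer_wakeup;
};

/* Mirrors do_nocb_deferred_wakeup_common(): consume the request, then wake. */
static void model_deferred_wakeup(struct model_rdp *rdp, const char *path)
{
	if (rdp->nocb_defer_wakeup == MODEL_WAKE_NOT)
		return;					/* nothing was deferred */
	printf("%s: wake GP kthread%s\n", path,
	       rdp->nocb_defer_wakeup == MODEL_WAKE_FORCE ? " (forced)" : "");
	rdp->nocb_defer_wakeup = MODEL_WAKE_NOT;	/* models the WRITE_ONCE() reset */
}

int main(void)
{
	struct model_rdp rdp = { .nocb_defer_wakeup = MODEL_WAKE_FORCE };

	model_deferred_wakeup(&rdp, "fastpath");	/* like do_nocb_deferred_wakeup() */
	model_deferred_wakeup(&rdp, "timer");		/* finds MODEL_WAKE_NOT, does nothing */
	return 0;
}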
    2167                 :            : 
    2168                 :            : void __init rcu_init_nohz(void)
    2169                 :            : {
    2170                 :            :         int cpu;
    2171                 :            :         bool need_rcu_nocb_mask = false;
    2172                 :            :         struct rcu_data *rdp;
    2173                 :            : 
    2174                 :            : #if defined(CONFIG_NO_HZ_FULL)
    2175                 :            :         if (tick_nohz_full_running && cpumask_weight(tick_nohz_full_mask))
    2176                 :            :                 need_rcu_nocb_mask = true;
    2177                 :            : #endif /* #if defined(CONFIG_NO_HZ_FULL) */
    2178                 :            : 
    2179                 :            :         if (!cpumask_available(rcu_nocb_mask) && need_rcu_nocb_mask) {
    2180                 :            :                 if (!zalloc_cpumask_var(&rcu_nocb_mask, GFP_KERNEL)) {
    2181                 :            :                         pr_info("rcu_nocb_mask allocation failed, callback offloading disabled.\n");
    2182                 :            :                         return;
    2183                 :            :                 }
    2184                 :            :         }
    2185                 :            :         if (!cpumask_available(rcu_nocb_mask))
    2186                 :            :                 return;
    2187                 :            : 
    2188                 :            : #if defined(CONFIG_NO_HZ_FULL)
    2189                 :            :         if (tick_nohz_full_running)
    2190                 :            :                 cpumask_or(rcu_nocb_mask, rcu_nocb_mask, tick_nohz_full_mask);
    2191                 :            : #endif /* #if defined(CONFIG_NO_HZ_FULL) */
    2192                 :            : 
    2193                 :            :         if (!cpumask_subset(rcu_nocb_mask, cpu_possible_mask)) {
    2194                 :            :                 pr_info("\tNote: kernel parameter 'rcu_nocbs=', 'nohz_full', or 'isolcpus=' contains nonexistent CPUs.\n");
    2195                 :            :                 cpumask_and(rcu_nocb_mask, cpu_possible_mask,
    2196                 :            :                             rcu_nocb_mask);
    2197                 :            :         }
    2198                 :            :         if (cpumask_empty(rcu_nocb_mask))
    2199                 :            :                 pr_info("\tOffload RCU callbacks from CPUs: (none).\n");
    2200                 :            :         else
    2201                 :            :                 pr_info("\tOffload RCU callbacks from CPUs: %*pbl.\n",
    2202                 :            :                         cpumask_pr_args(rcu_nocb_mask));
    2203                 :            :         if (rcu_nocb_poll)
    2204                 :            :                 pr_info("\tPoll for callbacks from no-CBs CPUs.\n");
    2205                 :            : 
    2206                 :            :         for_each_cpu(cpu, rcu_nocb_mask) {
    2207                 :            :                 rdp = per_cpu_ptr(&rcu_data, cpu);
    2208                 :            :                 if (rcu_segcblist_empty(&rdp->cblist))
    2209                 :            :                         rcu_segcblist_init(&rdp->cblist);
    2210                 :            :                 rcu_segcblist_offload(&rdp->cblist);
    2211                 :            :         }
    2212                 :            :         rcu_organize_nocb_kthreads();
    2213                 :            : }
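/*
 * Example (illustrative, not taken from this report): the rcu_nocb_mask
 * consumed above is normally populated from the boot command line before
 * this function runs, for instance
 *
 *	nohz_full=2-7 rcu_nocbs=2-7
 *
 * With CONFIG_NO_HZ_FULL, the nohz_full CPUs are OR-ed into rcu_nocb_mask
 * here even when rcu_nocbs= was not given, which is why the function will
 * allocate the mask itself in that case.  The CPU list 2-7 is only an
 * example.
 */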
    2214                 :            : 
    2215                 :            : /* Initialize per-rcu_data variables for no-CBs CPUs. */
    2216                 :            : static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)
    2217                 :            : {
    2218                 :            :         init_swait_queue_head(&rdp->nocb_cb_wq);
    2219                 :            :         init_swait_queue_head(&rdp->nocb_gp_wq);
    2220                 :            :         raw_spin_lock_init(&rdp->nocb_lock);
    2221                 :            :         raw_spin_lock_init(&rdp->nocb_bypass_lock);
    2222                 :            :         raw_spin_lock_init(&rdp->nocb_gp_lock);
    2223                 :            :         timer_setup(&rdp->nocb_timer, do_nocb_deferred_wakeup_timer, 0);
    2224                 :            :         timer_setup(&rdp->nocb_bypass_timer, do_nocb_bypass_wakeup_timer, 0);
    2225                 :            :         rcu_cblist_init(&rdp->nocb_bypass);
    2226                 :            : }
    2227                 :            : 
    2228                 :            : /*
    2229                 :            :  * If the specified CPU is a no-CBs CPU that does not already have its
    2230                 :            :  * rcuo CB kthread, spawn it.  Additionally, if the rcuo GP kthread
    2231                 :            :  * for this CPU's group has not yet been created, spawn it as well.
    2232                 :            :  */
    2233                 :            : static void rcu_spawn_one_nocb_kthread(int cpu)
    2234                 :            : {
    2235                 :            :         struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
    2236                 :            :         struct rcu_data *rdp_gp;
    2237                 :            :         struct task_struct *t;
    2238                 :            : 
    2239                 :            :         /*
    2240                 :            :          * If this isn't a no-CBs CPU or if it already has an rcuo kthread,
    2241                 :            :          * then nothing to do.
    2242                 :            :          */
    2243                 :            :         if (!rcu_is_nocb_cpu(cpu) || rdp->nocb_cb_kthread)
    2244                 :            :                 return;
    2245                 :            : 
    2246                 :            :         /* If we didn't spawn the GP kthread first, reorganize! */
    2247                 :            :         rdp_gp = rdp->nocb_gp_rdp;
    2248                 :            :         if (!rdp_gp->nocb_gp_kthread) {
    2249                 :            :                 t = kthread_run(rcu_nocb_gp_kthread, rdp_gp,
    2250                 :            :                                 "rcuog/%d", rdp_gp->cpu);
    2251                 :            :                 if (WARN_ONCE(IS_ERR(t), "%s: Could not start rcuo GP kthread, OOM is now expected behavior\n", __func__))
    2252                 :            :                         return;
    2253                 :            :                 WRITE_ONCE(rdp_gp->nocb_gp_kthread, t);
    2254                 :            :         }
    2255                 :            : 
    2256                 :            :         /* Spawn the kthread for this CPU. */
    2257                 :            :         t = kthread_run(rcu_nocb_cb_kthread, rdp,
    2258                 :            :                         "rcuo%c/%d", rcu_state.abbr, cpu);
    2259                 :            :         if (WARN_ONCE(IS_ERR(t), "%s: Could not start rcuo CB kthread, OOM is now expected behavior\n", __func__))
    2260                 :            :                 return;
    2261                 :            :         WRITE_ONCE(rdp->nocb_cb_kthread, t);
    2262                 :            :         WRITE_ONCE(rdp->nocb_gp_kthread, rdp_gp->nocb_gp_kthread);
    2263                 :            : }
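/*
 * Example (illustrative): booting with rcu_nocbs=0-3 on a small system,
 * the code above typically produces one grace-period kthread named
 * "rcuog/0" for the group led by CPU 0 plus one callback kthread per
 * offloaded CPU, "rcuop/0" through "rcuop/3" on a preemptible kernel
 * (rcu_state.abbr is 'p' there, 's' otherwise).  The exact grouping is
 * decided by rcu_organize_nocb_kthreads() below, so the single-group
 * layout assumed here reflects a small CPU count only.
 */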
    2264                 :            : 
    2265                 :            : /*
    2266                 :            :  * If the specified CPU is a no-CBs CPU that does not already have its
    2267                 :            :  * rcuo kthread, spawn it.
    2268                 :            :  */
    2269                 :            : static void rcu_spawn_cpu_nocb_kthread(int cpu)
    2270                 :            : {
    2271                 :            :         if (rcu_scheduler_fully_active)
    2272                 :            :                 rcu_spawn_one_nocb_kthread(cpu);
    2273                 :            : }
    2274                 :            : 
    2275                 :            : /*
    2276                 :            :  * Once the scheduler is running, spawn rcuo kthreads for all online
    2277                 :            :  * no-CBs CPUs.  This assumes that the early_initcall()s happen before
    2278                 :            :  * non-boot CPUs come online -- if this changes, we will need to add
    2279                 :            :  * some mutual exclusion.
    2280                 :            :  */
    2281                 :            : static void __init rcu_spawn_nocb_kthreads(void)
    2282                 :            : {
    2283                 :            :         int cpu;
    2284                 :            : 
    2285                 :            :         for_each_online_cpu(cpu)
    2286                 :            :                 rcu_spawn_cpu_nocb_kthread(cpu);
    2287                 :            : }
    2288                 :            : 
    2289                 :            : /* How many CB CPU IDs per GP kthread?  Default of -1 for sqrt(nr_cpu_ids). */
    2290                 :            : static int rcu_nocb_gp_stride = -1;
    2291                 :            : module_param(rcu_nocb_gp_stride, int, 0444);
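/*
 * Usage note (hedged): this stride is a module parameter of the RCU tree
 * code, so it can typically be overridden at boot with something like
 * "rcutree.rcu_nocb_gp_stride=16"; the "rcutree." prefix is an assumption
 * about how this file is built into tree.c.  Leaving the value at -1 yields
 * a stride of roughly sqrt(nr_cpu_ids), as computed in
 * rcu_organize_nocb_kthreads() below.
 */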
    2292                 :            : 
    2293                 :            : /*
     2294                 :            :  * Initialize GP-CB relationships for all no-CBs CPUs.
    2295                 :            :  */
    2296                 :            : static void __init rcu_organize_nocb_kthreads(void)
    2297                 :            : {
    2298                 :            :         int cpu;
    2299                 :            :         bool firsttime = true;
    2300                 :            :         bool gotnocbs = false;
    2301                 :            :         bool gotnocbscbs = true;
    2302                 :            :         int ls = rcu_nocb_gp_stride;
    2303                 :            :         int nl = 0;  /* Next GP kthread. */
    2304                 :            :         struct rcu_data *rdp;
    2305                 :            :         struct rcu_data *rdp_gp = NULL;  /* Suppress misguided gcc warn. */
    2306                 :            :         struct rcu_data *rdp_prev = NULL;
    2307                 :            : 
    2308                 :            :         if (!cpumask_available(rcu_nocb_mask))
    2309                 :            :                 return;
    2310                 :            :         if (ls == -1) {
    2311                 :            :                 ls = nr_cpu_ids / int_sqrt(nr_cpu_ids);
    2312                 :            :                 rcu_nocb_gp_stride = ls;
    2313                 :            :         }
    2314                 :            : 
    2315                 :            :         /*
    2316                 :            :          * Each pass through this loop sets up one rcu_data structure.
    2317                 :            :          * Should the corresponding CPU come online in the future, then
    2318                 :            :          * we will spawn the needed set of rcu_nocb_kthread() kthreads.
    2319                 :            :          */
    2320                 :            :         for_each_cpu(cpu, rcu_nocb_mask) {
    2321                 :            :                 rdp = per_cpu_ptr(&rcu_data, cpu);
    2322                 :            :                 if (rdp->cpu >= nl) {
    2323                 :            :                         /* New GP kthread, set up for CBs & next GP. */
    2324                 :            :                         gotnocbs = true;
    2325                 :            :                         nl = DIV_ROUND_UP(rdp->cpu + 1, ls) * ls;
    2326                 :            :                         rdp->nocb_gp_rdp = rdp;
    2327                 :            :                         rdp_gp = rdp;
    2328                 :            :                         if (dump_tree) {
    2329                 :            :                                 if (!firsttime)
    2330                 :            :                                         pr_cont("%s\n", gotnocbscbs
    2331                 :            :                                                         ? "" : " (self only)");
    2332                 :            :                                 gotnocbscbs = false;
    2333                 :            :                                 firsttime = false;
    2334                 :            :                                 pr_alert("%s: No-CB GP kthread CPU %d:",
    2335                 :            :                                          __func__, cpu);
    2336                 :            :                         }
    2337                 :            :                 } else {
    2338                 :            :                         /* Another CB kthread, link to previous GP kthread. */
    2339                 :            :                         gotnocbscbs = true;
    2340                 :            :                         rdp->nocb_gp_rdp = rdp_gp;
    2341                 :            :                         rdp_prev->nocb_next_cb_rdp = rdp;
    2342                 :            :                         if (dump_tree)
    2343                 :            :                                 pr_cont(" %d", cpu);
    2344                 :            :                 }
    2345                 :            :                 rdp_prev = rdp;
    2346                 :            :         }
    2347                 :            :         if (gotnocbs && dump_tree)
    2348                 :            :                 pr_cont("%s\n", gotnocbscbs ? "" : " (self only)");
    2349                 :            : }
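/*
 * Worked example (stand-alone sketch, not kernel code): with nr_cpu_ids = 16
 * and the default stride ls = 16 / int_sqrt(16) = 4, the loop above puts
 * CPUs 0-3 under the rcuog kthread led by CPU 0, CPUs 4-7 under CPU 4, and
 * so on.  The snippet below recomputes only that boundary arithmetic in
 * userspace; MODEL_DIV_ROUND_UP stands in for the kernel's DIV_ROUND_UP and
 * all offloadable CPUs are assumed present.
 */
#include <stdio.h>

#define MODEL_DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	int nr_cpu_ids = 16;
	int ls = nr_cpu_ids / 4;	/* int_sqrt(16) == 4, so ls == 4 */
	int nl = 0;			/* next group boundary, as above */
	int cpu;

	for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
		if (cpu >= nl) {	/* this CPU leads a new group */
			nl = MODEL_DIV_ROUND_UP(cpu + 1, ls) * ls;
			printf("%srcuog leader CPU %d:", cpu ? "\n" : "", cpu);
		}
		printf(" %d", cpu);	/* CB kthread linked to that leader */
	}
	printf("\n");
	return 0;
}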
    2350                 :            : 
    2351                 :            : /*
    2352                 :            :  * Bind the current task to the offloaded CPUs.  If there are no offloaded
    2353                 :            :  * CPUs, leave the task unbound.  Splat if the bind attempt fails.
    2354                 :            :  */
    2355                 :            : void rcu_bind_current_to_nocb(void)
    2356                 :            : {
    2357                 :            :         if (cpumask_available(rcu_nocb_mask) && cpumask_weight(rcu_nocb_mask))
    2358                 :            :                 WARN_ON(sched_setaffinity(current->pid, rcu_nocb_mask));
    2359                 :            : }
    2360                 :            : EXPORT_SYMBOL_GPL(rcu_bind_current_to_nocb);
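/*
 * Usage sketch (hypothetical): a kernel thread that wants to run only on
 * the offloaded CPUs could call the helper once at startup, for example:
 *
 *	static int my_nocb_bound_kthread(void *unused)
 *	{
 *		rcu_bind_current_to_nocb();	// no-op when nothing is offloaded
 *		while (!kthread_should_stop())
 *			schedule_timeout_interruptible(HZ);
 *		return 0;
 *	}
 *
 * The kthread and its name are invented for this illustration; only
 * rcu_bind_current_to_nocb() comes from the code above.
 */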
    2361                 :            : 
    2362                 :            : /*
    2363                 :            :  * Dump out nocb grace-period kthread state for the specified rcu_data
    2364                 :            :  * structure.
    2365                 :            :  */
    2366                 :            : static void show_rcu_nocb_gp_state(struct rcu_data *rdp)
    2367                 :            : {
    2368                 :            :         struct rcu_node *rnp = rdp->mynode;
    2369                 :            : 
    2370                 :            :         pr_info("nocb GP %d %c%c%c%c%c%c %c[%c%c] %c%c:%ld rnp %d:%d %lu\n",
    2371                 :            :                 rdp->cpu,
    2372                 :            :                 "kK"[!!rdp->nocb_gp_kthread],
    2373                 :            :                 "lL"[raw_spin_is_locked(&rdp->nocb_gp_lock)],
    2374                 :            :                 "dD"[!!rdp->nocb_defer_wakeup],
    2375                 :            :                 "tT"[timer_pending(&rdp->nocb_timer)],
    2376                 :            :                 "bB"[timer_pending(&rdp->nocb_bypass_timer)],
    2377                 :            :                 "sS"[!!rdp->nocb_gp_sleep],
    2378                 :            :                 ".W"[swait_active(&rdp->nocb_gp_wq)],
    2379                 :            :                 ".W"[swait_active(&rnp->nocb_gp_wq[0])],
    2380                 :            :                 ".W"[swait_active(&rnp->nocb_gp_wq[1])],
    2381                 :            :                 ".B"[!!rdp->nocb_gp_bypass],
    2382                 :            :                 ".G"[!!rdp->nocb_gp_gp],
    2383                 :            :                 (long)rdp->nocb_gp_seq,
    2384                 :            :                 rnp->grplo, rnp->grphi, READ_ONCE(rdp->nocb_gp_loops));
    2385                 :            : }
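/*
 * Reading the line above (the sample values are made up for illustration):
 * each "xX"[cond] pair prints the lower-case letter when the condition is
 * false and the upper-case letter when it is true, and ".Y"[cond] prints
 * '.' or the letter.  So a dump such as
 *
 *	nocb GP 4 KldtbS W[..] .G:12 rnp 0:15 3708
 *
 * reads roughly as: CPU 4's rcuog kthread exists (K), ->nocb_gp_lock is not
 * held (l), no deferred wakeup is pending (d), neither ->nocb_timer (t) nor
 * ->nocb_bypass_timer (b) is armed, the kthread intends to sleep (S) and is
 * waiting on its own queue (W), neither rcu_node wait queue is in use (..),
 * no bypass work (.), and it is waiting on grace-period sequence 12 (G) for
 * CPUs 0-15, after 3708 wakeup loops.
 */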
    2386                 :            : 
    2387                 :            : /* Dump out nocb kthread state for the specified rcu_data structure. */
    2388                 :            : static void show_rcu_nocb_state(struct rcu_data *rdp)
    2389                 :            : {
    2390                 :            :         struct rcu_segcblist *rsclp = &rdp->cblist;
    2391                 :            :         bool waslocked;
    2392                 :            :         bool wastimer;
    2393                 :            :         bool wassleep;
    2394                 :            : 
    2395                 :            :         if (rdp->nocb_gp_rdp == rdp)
    2396                 :            :                 show_rcu_nocb_gp_state(rdp);
    2397                 :            : 
    2398                 :            :         pr_info("   CB %d->%d %c%c%c%c%c%c F%ld L%ld C%d %c%c%c%c%c q%ld\n",
    2399                 :            :                 rdp->cpu, rdp->nocb_gp_rdp->cpu,
    2400                 :            :                 "kK"[!!rdp->nocb_cb_kthread],
    2401                 :            :                 "bB"[raw_spin_is_locked(&rdp->nocb_bypass_lock)],
    2402                 :            :                 "cC"[!!atomic_read(&rdp->nocb_lock_contended)],
    2403                 :            :                 "lL"[raw_spin_is_locked(&rdp->nocb_lock)],
    2404                 :            :                 "sS"[!!rdp->nocb_cb_sleep],
    2405                 :            :                 ".W"[swait_active(&rdp->nocb_cb_wq)],
    2406                 :            :                 jiffies - rdp->nocb_bypass_first,
    2407                 :            :                 jiffies - rdp->nocb_nobypass_last,
    2408                 :            :                 rdp->nocb_nobypass_count,
    2409                 :            :                 ".D"[rcu_segcblist_ready_cbs(rsclp)],
    2410                 :            :                 ".W"[!rcu_segcblist_restempty(rsclp, RCU_DONE_TAIL)],
    2411                 :            :                 ".R"[!rcu_segcblist_restempty(rsclp, RCU_WAIT_TAIL)],
    2412                 :            :                 ".N"[!rcu_segcblist_restempty(rsclp, RCU_NEXT_READY_TAIL)],
    2413                 :            :                 ".B"[!!rcu_cblist_n_cbs(&rdp->nocb_bypass)],
    2414                 :            :                 rcu_segcblist_n_cbs(&rdp->cblist));
    2415                 :            : 
    2416                 :            :         /* It is OK for GP kthreads to have GP state. */
    2417                 :            :         if (rdp->nocb_gp_rdp == rdp)
    2418                 :            :                 return;
    2419                 :            : 
    2420                 :            :         waslocked = raw_spin_is_locked(&rdp->nocb_gp_lock);
    2421                 :            :         wastimer = timer_pending(&rdp->nocb_timer);
    2422                 :            :         wassleep = swait_active(&rdp->nocb_gp_wq);
    2423                 :            :         if (!rdp->nocb_defer_wakeup && !rdp->nocb_gp_sleep &&
    2424                 :            :             !waslocked && !wastimer && !wassleep)
     2425                 :            :                 return;  /* Nothing untoward. */
    2426                 :            : 
    2427                 :            :         pr_info("   !!! %c%c%c%c %c\n",
    2428                 :            :                 "lL"[waslocked],
    2429                 :            :                 "dD"[!!rdp->nocb_defer_wakeup],
    2430                 :            :                 "tT"[wastimer],
    2431                 :            :                 "sS"[!!rdp->nocb_gp_sleep],
    2432                 :            :                 ".W"[wassleep]);
    2433                 :            : }
    2434                 :            : 
    2435                 :            : #else /* #ifdef CONFIG_RCU_NOCB_CPU */
    2436                 :            : 
    2437                 :            : /* No ->nocb_lock to acquire.  */
    2438                 :     477704 : static void rcu_nocb_lock(struct rcu_data *rdp)
    2439                 :            : {
    2440                 :     477704 : }
    2441                 :            : 
    2442                 :            : /* No ->nocb_lock to release.  */
    2443                 :         26 : static void rcu_nocb_unlock(struct rcu_data *rdp)
    2444                 :            : {
    2445                 :          0 : }
    2446                 :            : 
    2447                 :            : /* No ->nocb_lock to release.  */
    2448                 :     477678 : static void rcu_nocb_unlock_irqrestore(struct rcu_data *rdp,
    2449                 :            :                                        unsigned long flags)
    2450                 :            : {
    2451                 :     477678 :         local_irq_restore(flags);
    2452                 :            : }
    2453                 :            : 
    2454                 :            : /* Lockdep check that ->cblist may be safely accessed. */
    2455                 :     340208 : static void rcu_lockdep_assert_cblist_protected(struct rcu_data *rdp)
    2456                 :            : {
    2457                 :     340208 :         lockdep_assert_irqs_disabled();
    2458                 :            : }
    2459                 :            : 
    2460                 :      67521 : static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq)
    2461                 :            : {
    2462                 :      67521 : }
    2463                 :            : 
    2464                 :      67521 : static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp)
    2465                 :            : {
    2466                 :      67521 :         return NULL;
    2467                 :            : }
    2468                 :            : 
    2469                 :         30 : static void rcu_init_one_nocb(struct rcu_node *rnp)
    2470                 :            : {
    2471                 :         30 : }
    2472                 :            : 
    2473                 :         26 : static bool rcu_nocb_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
    2474                 :            :                                   unsigned long j)
    2475                 :            : {
    2476                 :         26 :         return true;
    2477                 :            : }
    2478                 :            : 
    2479                 :    2219838 : static bool rcu_nocb_try_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
    2480                 :            :                                 bool *was_alldone, unsigned long flags)
    2481                 :            : {
    2482                 :    2219838 :         return false;
    2483                 :            : }
    2484                 :            : 
    2485                 :          0 : static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_empty,
    2486                 :            :                                  unsigned long flags)
    2487                 :            : {
    2488                 :          0 :         WARN_ON_ONCE(1);  /* Should be dead code! */
    2489                 :          0 : }
    2490                 :            : 
    2491                 :         30 : static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)
    2492                 :            : {
    2493                 :         30 : }
    2494                 :            : 
    2495                 :      97200 : static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp)
    2496                 :            : {
    2497         [ +  + ]:      97200 :         return false;
    2498                 :            : }
    2499                 :            : 
    2500                 :     259044 : static void do_nocb_deferred_wakeup(struct rcu_data *rdp)
    2501                 :            : {
    2502                 :     259044 : }
    2503                 :            : 
    2504                 :         30 : static void rcu_spawn_cpu_nocb_kthread(int cpu)
    2505                 :            : {
    2506                 :         30 : }
    2507                 :            : 
    2508                 :         30 : static void __init rcu_spawn_nocb_kthreads(void)
    2509                 :            : {
    2510                 :         30 : }
    2511                 :            : 
    2512                 :            : static void show_rcu_nocb_state(struct rcu_data *rdp)
    2513                 :            : {
    2514                 :            : }
    2515                 :            : 
    2516                 :            : #endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
    2517                 :            : 
    2518                 :            : /*
    2519                 :            :  * Is this CPU a NO_HZ_FULL CPU that should ignore RCU so that the
    2520                 :            :  * grace-period kthread will do force_quiescent_state() processing?
    2521                 :            :  * The idea is to avoid waking up RCU core processing on such a
    2522                 :            :  * CPU unless the grace period has extended for too long.
    2523                 :            :  *
    2524                 :            :  * This code relies on the fact that all NO_HZ_FULL CPUs are also
    2525                 :            :  * CONFIG_RCU_NOCB_CPU CPUs.
    2526                 :            :  */
    2527                 :            : static bool rcu_nohz_full_cpu(void)
    2528                 :            : {
    2529                 :            : #ifdef CONFIG_NO_HZ_FULL
    2530                 :            :         if (tick_nohz_full_cpu(smp_processor_id()) &&
    2531                 :            :             (!rcu_gp_in_progress() ||
    2532                 :            :              ULONG_CMP_LT(jiffies, READ_ONCE(rcu_state.gp_start) + HZ)))
    2533                 :            :                 return true;
    2534                 :            : #endif /* #ifdef CONFIG_NO_HZ_FULL */
    2535                 :            :         return false;
    2536                 :            : }
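/*
 * Stand-alone sketch (not kernel code): the ULONG_CMP_LT() test above is a
 * wrap-safe "jiffies is still within HZ ticks of the grace-period start"
 * check, so a nohz_full CPU only resumes taking RCU-core work once the
 * current grace period is at least about one second old.  The macro below
 * paraphrases the modular-arithmetic idea rather than copying the kernel
 * definition; HZ and the jiffies values are invented for the example.
 */
#include <stdio.h>
#include <limits.h>

#define MODEL_CMP_LT(a, b)	(ULONG_MAX / 2 < (unsigned long)((a) - (b)))

int main(void)
{
	unsigned long hz = 1000;			/* pretend HZ == 1000 */
	unsigned long gp_start = ULONG_MAX - 10;	/* GP began just before jiffies wrapped */
	unsigned long now = 5;				/* jiffies has since wrapped */

	/* Only ~16 ticks have elapsed: still "young", so leave the CPU alone. */
	printf("young grace period: %d\n", MODEL_CMP_LT(now, gp_start + hz));
	/* More than hz ticks later: "old", so RCU core processing resumes. */
	printf("old grace period:   %d\n", MODEL_CMP_LT(now + 2 * hz, gp_start + hz));
	return 0;
}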
    2537                 :            : 
    2538                 :            : /*
    2539                 :            :  * Bind the RCU grace-period kthreads to the housekeeping CPU.
    2540                 :            :  */
    2541                 :         30 : static void rcu_bind_gp_kthread(void)
    2542                 :            : {
    2543                 :         30 :         if (!tick_nohz_full_enabled())
    2544                 :         30 :                 return;
    2545                 :            :         housekeeping_affine(current, HK_FLAG_RCU);
    2546                 :            : }
    2547                 :            : 
    2548                 :            : /* Record the current task on dyntick-idle entry. */
    2549                 :            : static void rcu_dynticks_task_enter(void)
    2550                 :            : {
    2551                 :            : #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL)
    2552                 :            :         WRITE_ONCE(current->rcu_tasks_idle_cpu, smp_processor_id());
    2553                 :            : #endif /* #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL) */
    2554                 :            : }
    2555                 :            : 
    2556                 :            : /* Record no current task on dyntick-idle exit. */
    2557                 :      22123 : static void rcu_dynticks_task_exit(void)
    2558                 :            : {
    2559                 :            : #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL)
    2560                 :            :         WRITE_ONCE(current->rcu_tasks_idle_cpu, -1);
    2561                 :            : #endif /* #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL) */
    2562                 :      22123 : }

Generated by: LCOV version 1.14