LCOV - code coverage report
Current view: top level - kernel - workqueue.c (source / functions)
Test: combined.info
Date: 2022-04-01 14:17:54
                     Hit     Total   Coverage
Lines:              1115      1998     55.8 %
Functions:            82       133     61.7 %
Branches:            447      1232     36.3 %

           Branch data     Line data    Source code
       1                 :            : // SPDX-License-Identifier: GPL-2.0-only
       2                 :            : /*
       3                 :            :  * kernel/workqueue.c - generic async execution with shared worker pool
       4                 :            :  *
       5                 :            :  * Copyright (C) 2002           Ingo Molnar
       6                 :            :  *
       7                 :            :  *   Derived from the taskqueue/keventd code by:
       8                 :            :  *     David Woodhouse <dwmw2@infradead.org>
       9                 :            :  *     Andrew Morton
      10                 :            :  *     Kai Petzke <wpp@marie.physik.tu-berlin.de>
      11                 :            :  *     Theodore Ts'o <tytso@mit.edu>
      12                 :            :  *
      13                 :            :  * Made to use alloc_percpu by Christoph Lameter.
      14                 :            :  *
      15                 :            :  * Copyright (C) 2010           SUSE Linux Products GmbH
      16                 :            :  * Copyright (C) 2010           Tejun Heo <tj@kernel.org>
      17                 :            :  *
       18                 :            :  * This is the generic async execution mechanism.  Work items are
      19                 :            :  * executed in process context.  The worker pool is shared and
      20                 :            :  * automatically managed.  There are two worker pools for each CPU (one for
      21                 :            :  * normal work items and the other for high priority ones) and some extra
      22                 :            :  * pools for workqueues which are not bound to any specific CPU - the
      23                 :            :  * number of these backing pools is dynamic.
      24                 :            :  *
      25                 :            :  * Please read Documentation/core-api/workqueue.rst for details.
      26                 :            :  */
      27                 :            : 
      28                 :            : #include <linux/export.h>
      29                 :            : #include <linux/kernel.h>
      30                 :            : #include <linux/sched.h>
      31                 :            : #include <linux/init.h>
      32                 :            : #include <linux/signal.h>
      33                 :            : #include <linux/completion.h>
      34                 :            : #include <linux/workqueue.h>
      35                 :            : #include <linux/slab.h>
      36                 :            : #include <linux/cpu.h>
      37                 :            : #include <linux/notifier.h>
      38                 :            : #include <linux/kthread.h>
      39                 :            : #include <linux/hardirq.h>
      40                 :            : #include <linux/mempolicy.h>
      41                 :            : #include <linux/freezer.h>
      42                 :            : #include <linux/debug_locks.h>
      43                 :            : #include <linux/lockdep.h>
      44                 :            : #include <linux/idr.h>
      45                 :            : #include <linux/jhash.h>
      46                 :            : #include <linux/hashtable.h>
      47                 :            : #include <linux/rculist.h>
      48                 :            : #include <linux/nodemask.h>
      49                 :            : #include <linux/moduleparam.h>
      50                 :            : #include <linux/uaccess.h>
      51                 :            : #include <linux/sched/isolation.h>
      52                 :            : #include <linux/nmi.h>
      53                 :            : 
      54                 :            : #include "workqueue_internal.h"
      55                 :            : 
      56                 :            : enum {
      57                 :            :         /*
      58                 :            :          * worker_pool flags
      59                 :            :          *
      60                 :            :          * A bound pool is either associated or disassociated with its CPU.
      61                 :            :          * While associated (!DISASSOCIATED), all workers are bound to the
      62                 :            :          * CPU and none has %WORKER_UNBOUND set and concurrency management
      63                 :            :          * is in effect.
      64                 :            :          *
      65                 :            :          * While DISASSOCIATED, the cpu may be offline and all workers have
      66                 :            :          * %WORKER_UNBOUND set and concurrency management disabled, and may
      67                 :            :          * be executing on any CPU.  The pool behaves as an unbound one.
      68                 :            :          *
      69                 :            :          * Note that DISASSOCIATED should be flipped only while holding
      70                 :            :          * wq_pool_attach_mutex to avoid changing binding state while
      71                 :            :          * worker_attach_to_pool() is in progress.
      72                 :            :          */
      73                 :            :         POOL_MANAGER_ACTIVE     = 1 << 0, /* being managed */
      74                 :            :         POOL_DISASSOCIATED      = 1 << 2, /* cpu can't serve workers */
      75                 :            : 
      76                 :            :         /* worker flags */
      77                 :            :         WORKER_DIE              = 1 << 1, /* die die die */
      78                 :            :         WORKER_IDLE             = 1 << 2, /* is idle */
      79                 :            :         WORKER_PREP             = 1 << 3, /* preparing to run works */
      80                 :            :         WORKER_CPU_INTENSIVE    = 1 << 6, /* cpu intensive */
      81                 :            :         WORKER_UNBOUND          = 1 << 7, /* worker is unbound */
      82                 :            :         WORKER_REBOUND          = 1 << 8, /* worker was rebound */
      83                 :            : 
      84                 :            :         WORKER_NOT_RUNNING      = WORKER_PREP | WORKER_CPU_INTENSIVE |
      85                 :            :                                   WORKER_UNBOUND | WORKER_REBOUND,
      86                 :            : 
      87                 :            :         NR_STD_WORKER_POOLS     = 2,            /* # standard pools per cpu */
      88                 :            : 
      89                 :            :         UNBOUND_POOL_HASH_ORDER = 6,            /* hashed by pool->attrs */
      90                 :            :         BUSY_WORKER_HASH_ORDER  = 6,            /* 64 pointers */
      91                 :            : 
      92                 :            :         MAX_IDLE_WORKERS_RATIO  = 4,            /* 1/4 of busy can be idle */
      93                 :            :         IDLE_WORKER_TIMEOUT     = 300 * HZ,     /* keep idle ones for 5 mins */
      94                 :            : 
      95                 :            :         MAYDAY_INITIAL_TIMEOUT  = HZ / 100 >= 2 ? HZ / 100 : 2,
      96                 :            :                                                 /* call for help after 10ms
      97                 :            :                                                    (min two ticks) */
      98                 :            :         MAYDAY_INTERVAL         = HZ / 10,      /* and then every 100ms */
       99                 :            :         CREATE_COOLDOWN         = HZ,           /* time to breathe after fail */
     100                 :            : 
     101                 :            :         /*
      102                 :            :          * Rescue workers are used only in emergencies and shared by
     103                 :            :          * all cpus.  Give MIN_NICE.
     104                 :            :          */
     105                 :            :         RESCUER_NICE_LEVEL      = MIN_NICE,
     106                 :            :         HIGHPRI_NICE_LEVEL      = MIN_NICE,
     107                 :            : 
     108                 :            :         WQ_NAME_LEN             = 24,
     109                 :            : };
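
/*
 * Illustrative sketch (not in the original workqueue.c): the worker flags
 * above are arranged so that a single mask test decides whether a worker
 * counts toward pool->nr_running for concurrency management.  The helper
 * name example_worker_is_running() is hypothetical.
 */
static inline bool example_worker_is_running(struct worker *worker)
{
	/* PREP, CPU_INTENSIVE, UNBOUND and REBOUND all exclude the worker */
	return !(worker->flags & WORKER_NOT_RUNNING);
}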
     110                 :            : 
     111                 :            : /*
     112                 :            :  * Structure fields follow one of the following exclusion rules.
     113                 :            :  *
     114                 :            :  * I: Modifiable by initialization/destruction paths and read-only for
     115                 :            :  *    everyone else.
     116                 :            :  *
     117                 :            :  * P: Preemption protected.  Disabling preemption is enough and should
     118                 :            :  *    only be modified and accessed from the local cpu.
     119                 :            :  *
     120                 :            :  * L: pool->lock protected.  Access with pool->lock held.
     121                 :            :  *
     122                 :            :  * X: During normal operation, modification requires pool->lock and should
     123                 :            :  *    be done only from local cpu.  Either disabling preemption on local
     124                 :            :  *    cpu or grabbing pool->lock is enough for read access.  If
     125                 :            :  *    POOL_DISASSOCIATED is set, it's identical to L.
     126                 :            :  *
     127                 :            :  * A: wq_pool_attach_mutex protected.
     128                 :            :  *
     129                 :            :  * PL: wq_pool_mutex protected.
     130                 :            :  *
     131                 :            :  * PR: wq_pool_mutex protected for writes.  RCU protected for reads.
     132                 :            :  *
     133                 :            :  * PW: wq_pool_mutex and wq->mutex protected for writes.  Either for reads.
     134                 :            :  *
     135                 :            :  * PWR: wq_pool_mutex and wq->mutex protected for writes.  Either or
     136                 :            :  *      RCU for reads.
     137                 :            :  *
     138                 :            :  * WQ: wq->mutex protected.
     139                 :            :  *
     140                 :            :  * WR: wq->mutex protected for writes.  RCU protected for reads.
     141                 :            :  *
     142                 :            :  * MD: wq_mayday_lock protected.
     143                 :            :  */
     144                 :            : 
     145                 :            : /* struct worker is defined in workqueue_internal.h */
     146                 :            : 
     147                 :            : struct worker_pool {
     148                 :            :         spinlock_t              lock;           /* the pool lock */
     149                 :            :         int                     cpu;            /* I: the associated cpu */
     150                 :            :         int                     node;           /* I: the associated node ID */
     151                 :            :         int                     id;             /* I: pool ID */
     152                 :            :         unsigned int            flags;          /* X: flags */
     153                 :            : 
     154                 :            :         unsigned long           watchdog_ts;    /* L: watchdog timestamp */
     155                 :            : 
     156                 :            :         struct list_head        worklist;       /* L: list of pending works */
     157                 :            : 
     158                 :            :         int                     nr_workers;     /* L: total number of workers */
     159                 :            :         int                     nr_idle;        /* L: currently idle workers */
     160                 :            : 
     161                 :            :         struct list_head        idle_list;      /* X: list of idle workers */
     162                 :            :         struct timer_list       idle_timer;     /* L: worker idle timeout */
     163                 :            :         struct timer_list       mayday_timer;   /* L: SOS timer for workers */
     164                 :            : 
      165                 :            :         /* a worker is either on busy_hash or idle_list, or is the manager */
     166                 :            :         DECLARE_HASHTABLE(busy_hash, BUSY_WORKER_HASH_ORDER);
     167                 :            :                                                 /* L: hash of busy workers */
     168                 :            : 
     169                 :            :         struct worker           *manager;       /* L: purely informational */
     170                 :            :         struct list_head        workers;        /* A: attached workers */
     171                 :            :         struct completion       *detach_completion; /* all workers detached */
     172                 :            : 
     173                 :            :         struct ida              worker_ida;     /* worker IDs for task name */
     174                 :            : 
     175                 :            :         struct workqueue_attrs  *attrs;         /* I: worker attributes */
     176                 :            :         struct hlist_node       hash_node;      /* PL: unbound_pool_hash node */
     177                 :            :         int                     refcnt;         /* PL: refcnt for unbound pools */
     178                 :            : 
     179                 :            :         /*
     180                 :            :          * The current concurrency level.  As it's likely to be accessed
     181                 :            :          * from other CPUs during try_to_wake_up(), put it in a separate
     182                 :            :          * cacheline.
     183                 :            :          */
     184                 :            :         atomic_t                nr_running ____cacheline_aligned_in_smp;
     185                 :            : 
     186                 :            :         /*
     187                 :            :          * Destruction of pool is RCU protected to allow dereferences
     188                 :            :          * from get_work_pool().
     189                 :            :          */
     190                 :            :         struct rcu_head         rcu;
     191                 :            : } ____cacheline_aligned_in_smp;
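
/*
 * Illustrative sketch (not in the original workqueue.c): per the locking
 * legend above, an "L:" field such as pool->worklist may only be touched
 * with pool->lock held.  The helper name example_count_pending() is
 * hypothetical.
 */
static int example_count_pending(struct worker_pool *pool)
{
	struct work_struct *work;
	int n = 0;

	spin_lock_irq(&pool->lock);		/* worklist is "L:" protected */
	list_for_each_entry(work, &pool->worklist, entry)
		n++;
	spin_unlock_irq(&pool->lock);

	return n;
}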
     192                 :            : 
     193                 :            : /*
     194                 :            :  * The per-pool workqueue.  While queued, the lower WORK_STRUCT_FLAG_BITS
     195                 :            :  * of work_struct->data are used for flags and the remaining high bits
      196                 :            :  * point to the pwq; thus, pwqs need to be aligned to 1 << WORK_STRUCT_FLAG_BITS
      197                 :            :  * so that the low flag bits of the pointer are always zero.
     198                 :            :  */
     199                 :            : struct pool_workqueue {
     200                 :            :         struct worker_pool      *pool;          /* I: the associated pool */
     201                 :            :         struct workqueue_struct *wq;            /* I: the owning workqueue */
     202                 :            :         int                     work_color;     /* L: current color */
     203                 :            :         int                     flush_color;    /* L: flushing color */
     204                 :            :         int                     refcnt;         /* L: reference count */
     205                 :            :         int                     nr_in_flight[WORK_NR_COLORS];
     206                 :            :                                                 /* L: nr of in_flight works */
     207                 :            :         int                     nr_active;      /* L: nr of active works */
     208                 :            :         int                     max_active;     /* L: max active works */
     209                 :            :         struct list_head        delayed_works;  /* L: delayed works */
     210                 :            :         struct list_head        pwqs_node;      /* WR: node on wq->pwqs */
     211                 :            :         struct list_head        mayday_node;    /* MD: node on wq->maydays */
     212                 :            : 
     213                 :            :         /*
     214                 :            :          * Release of unbound pwq is punted to system_wq.  See put_pwq()
     215                 :            :          * and pwq_unbound_release_workfn() for details.  pool_workqueue
     216                 :            :          * itself is also RCU protected so that the first pwq can be
     217                 :            :          * determined without grabbing wq->mutex.
     218                 :            :          */
     219                 :            :         struct work_struct      unbound_release_work;
     220                 :            :         struct rcu_head         rcu;
     221                 :            : } __aligned(1 << WORK_STRUCT_FLAG_BITS);
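
/*
 * Illustrative note (not in the original workqueue.c): the __aligned()
 * above guarantees that the low WORK_STRUCT_FLAG_BITS of every pwq address
 * are zero, so one data word can carry both the pointer and the flags:
 *
 *	data = (unsigned long)pwq | WORK_STRUCT_PWQ | WORK_STRUCT_PENDING;
 *	pwq  = (void *)(data & WORK_STRUCT_WQ_DATA_MASK);
 *
 * set_work_pwq() and get_work_pwq() below implement exactly this packing.
 */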
     222                 :            : 
     223                 :            : /*
     224                 :            :  * Structure used to wait for workqueue flush.
     225                 :            :  */
     226                 :            : struct wq_flusher {
     227                 :            :         struct list_head        list;           /* WQ: list of flushers */
     228                 :            :         int                     flush_color;    /* WQ: flush color waiting for */
     229                 :            :         struct completion       done;           /* flush completion */
     230                 :            : };
     231                 :            : 
     232                 :            : struct wq_device;
     233                 :            : 
     234                 :            : /*
     235                 :            :  * The externally visible workqueue.  It relays the issued work items to
     236                 :            :  * the appropriate worker_pool through its pool_workqueues.
     237                 :            :  */
     238                 :            : struct workqueue_struct {
     239                 :            :         struct list_head        pwqs;           /* WR: all pwqs of this wq */
     240                 :            :         struct list_head        list;           /* PR: list of all workqueues */
     241                 :            : 
     242                 :            :         struct mutex            mutex;          /* protects this wq */
     243                 :            :         int                     work_color;     /* WQ: current work color */
     244                 :            :         int                     flush_color;    /* WQ: current flush color */
     245                 :            :         atomic_t                nr_pwqs_to_flush; /* flush in progress */
     246                 :            :         struct wq_flusher       *first_flusher; /* WQ: first flusher */
     247                 :            :         struct list_head        flusher_queue;  /* WQ: flush waiters */
     248                 :            :         struct list_head        flusher_overflow; /* WQ: flush overflow list */
     249                 :            : 
     250                 :            :         struct list_head        maydays;        /* MD: pwqs requesting rescue */
     251                 :            :         struct worker           *rescuer;       /* MD: rescue worker */
     252                 :            : 
     253                 :            :         int                     nr_drainers;    /* WQ: drain in progress */
     254                 :            :         int                     saved_max_active; /* WQ: saved pwq max_active */
     255                 :            : 
     256                 :            :         struct workqueue_attrs  *unbound_attrs; /* PW: only for unbound wqs */
     257                 :            :         struct pool_workqueue   *dfl_pwq;       /* PW: only for unbound wqs */
     258                 :            : 
     259                 :            : #ifdef CONFIG_SYSFS
     260                 :            :         struct wq_device        *wq_dev;        /* I: for sysfs interface */
     261                 :            : #endif
     262                 :            : #ifdef CONFIG_LOCKDEP
     263                 :            :         char                    *lock_name;
     264                 :            :         struct lock_class_key   key;
     265                 :            :         struct lockdep_map      lockdep_map;
     266                 :            : #endif
     267                 :            :         char                    name[WQ_NAME_LEN]; /* I: workqueue name */
     268                 :            : 
     269                 :            :         /*
     270                 :            :          * Destruction of workqueue_struct is RCU protected to allow walking
     271                 :            :          * the workqueues list without grabbing wq_pool_mutex.
     272                 :            :          * This is used to dump all workqueues from sysrq.
     273                 :            :          */
     274                 :            :         struct rcu_head         rcu;
     275                 :            : 
     276                 :            :         /* hot fields used during command issue, aligned to cacheline */
     277                 :            :         unsigned int            flags ____cacheline_aligned; /* WQ: WQ_* flags */
     278                 :            :         struct pool_workqueue __percpu *cpu_pwqs; /* I: per-cpu pwqs */
     279                 :            :         struct pool_workqueue __rcu *numa_pwq_tbl[]; /* PWR: unbound pwqs indexed by node */
     280                 :            : };
     281                 :            : 
     282                 :            : static struct kmem_cache *pwq_cache;
     283                 :            : 
     284                 :            : static cpumask_var_t *wq_numa_possible_cpumask;
     285                 :            :                                         /* possible CPUs of each node */
     286                 :            : 
     287                 :            : static bool wq_disable_numa;
     288                 :            : module_param_named(disable_numa, wq_disable_numa, bool, 0444);
     289                 :            : 
     290                 :            : /* see the comment above the definition of WQ_POWER_EFFICIENT */
     291                 :            : static bool wq_power_efficient = IS_ENABLED(CONFIG_WQ_POWER_EFFICIENT_DEFAULT);
     292                 :            : module_param_named(power_efficient, wq_power_efficient, bool, 0444);
     293                 :            : 
     294                 :            : static bool wq_online;                  /* can kworkers be created yet? */
     295                 :            : 
     296                 :            : static bool wq_numa_enabled;            /* unbound NUMA affinity enabled */
     297                 :            : 
     298                 :            : /* buf for wq_update_unbound_numa_attrs(), protected by CPU hotplug exclusion */
     299                 :            : static struct workqueue_attrs *wq_update_unbound_numa_attrs_buf;
     300                 :            : 
     301                 :            : static DEFINE_MUTEX(wq_pool_mutex);     /* protects pools and workqueues list */
     302                 :            : static DEFINE_MUTEX(wq_pool_attach_mutex); /* protects worker attach/detach */
     303                 :            : static DEFINE_SPINLOCK(wq_mayday_lock); /* protects wq->maydays list */
     304                 :            : static DECLARE_WAIT_QUEUE_HEAD(wq_manager_wait); /* wait for manager to go away */
     305                 :            : 
     306                 :            : static LIST_HEAD(workqueues);           /* PR: list of all workqueues */
     307                 :            : static bool workqueue_freezing;         /* PL: have wqs started freezing? */
     308                 :            : 
     309                 :            : /* PL: allowable cpus for unbound wqs and work items */
     310                 :            : static cpumask_var_t wq_unbound_cpumask;
     311                 :            : 
     312                 :            : /* CPU where unbound work was last round robin scheduled from this CPU */
     313                 :            : static DEFINE_PER_CPU(int, wq_rr_cpu_last);
     314                 :            : 
     315                 :            : /*
     316                 :            :  * Local execution of unbound work items is no longer guaranteed.  The
     317                 :            :  * following always forces round-robin CPU selection on unbound work items
     318                 :            :  * to uncover usages which depend on it.
     319                 :            :  */
     320                 :            : #ifdef CONFIG_DEBUG_WQ_FORCE_RR_CPU
     321                 :            : static bool wq_debug_force_rr_cpu = true;
     322                 :            : #else
     323                 :            : static bool wq_debug_force_rr_cpu = false;
     324                 :            : #endif
     325                 :            : module_param_named(debug_force_rr_cpu, wq_debug_force_rr_cpu, bool, 0644);
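
/*
 * Illustrative note (not in the original workqueue.c): since workqueue.c is
 * built in, the module parameters above are normally set on the kernel
 * command line with the "workqueue." prefix, e.g.
 * "workqueue.debug_force_rr_cpu=1" or "workqueue.power_efficient=1"; see
 * Documentation/admin-guide/kernel-parameters.txt.
 */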
     326                 :            : 
     327                 :            : /* the per-cpu worker pools */
     328                 :            : static DEFINE_PER_CPU_SHARED_ALIGNED(struct worker_pool [NR_STD_WORKER_POOLS], cpu_worker_pools);
     329                 :            : 
     330                 :            : static DEFINE_IDR(worker_pool_idr);     /* PR: idr of all pools */
     331                 :            : 
     332                 :            : /* PL: hash of all unbound pools keyed by pool->attrs */
     333                 :            : static DEFINE_HASHTABLE(unbound_pool_hash, UNBOUND_POOL_HASH_ORDER);
     334                 :            : 
     335                 :            : /* I: attributes used when instantiating standard unbound pools on demand */
     336                 :            : static struct workqueue_attrs *unbound_std_wq_attrs[NR_STD_WORKER_POOLS];
     337                 :            : 
     338                 :            : /* I: attributes used when instantiating ordered pools on demand */
     339                 :            : static struct workqueue_attrs *ordered_wq_attrs[NR_STD_WORKER_POOLS];
     340                 :            : 
     341                 :            : struct workqueue_struct *system_wq __read_mostly;
     342                 :            : EXPORT_SYMBOL(system_wq);
     343                 :            : struct workqueue_struct *system_highpri_wq __read_mostly;
     344                 :            : EXPORT_SYMBOL_GPL(system_highpri_wq);
     345                 :            : struct workqueue_struct *system_long_wq __read_mostly;
     346                 :            : EXPORT_SYMBOL_GPL(system_long_wq);
     347                 :            : struct workqueue_struct *system_unbound_wq __read_mostly;
     348                 :            : EXPORT_SYMBOL_GPL(system_unbound_wq);
     349                 :            : struct workqueue_struct *system_freezable_wq __read_mostly;
     350                 :            : EXPORT_SYMBOL_GPL(system_freezable_wq);
     351                 :            : struct workqueue_struct *system_power_efficient_wq __read_mostly;
     352                 :            : EXPORT_SYMBOL_GPL(system_power_efficient_wq);
     353                 :            : struct workqueue_struct *system_freezable_power_efficient_wq __read_mostly;
     354                 :            : EXPORT_SYMBOL_GPL(system_freezable_power_efficient_wq);
     355                 :            : 
     356                 :            : static int worker_thread(void *__worker);
     357                 :            : static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
     358                 :            : static void show_pwq(struct pool_workqueue *pwq);
     359                 :            : 
     360                 :            : #define CREATE_TRACE_POINTS
     361                 :            : #include <trace/events/workqueue.h>
     362                 :            : 
     363                 :            : #define assert_rcu_or_pool_mutex()                                      \
     364                 :            :         RCU_LOCKDEP_WARN(!rcu_read_lock_held() &&                       \
     365                 :            :                          !lockdep_is_held(&wq_pool_mutex),          \
     366                 :            :                          "RCU or wq_pool_mutex should be held")
     367                 :            : 
     368                 :            : #define assert_rcu_or_wq_mutex_or_pool_mutex(wq)                        \
     369                 :            :         RCU_LOCKDEP_WARN(!rcu_read_lock_held() &&                       \
     370                 :            :                          !lockdep_is_held(&wq->mutex) &&         \
     371                 :            :                          !lockdep_is_held(&wq_pool_mutex),          \
     372                 :            :                          "RCU, wq->mutex or wq_pool_mutex should be held")
     373                 :            : 
     374                 :            : #define for_each_cpu_worker_pool(pool, cpu)                             \
     375                 :            :         for ((pool) = &per_cpu(cpu_worker_pools, cpu)[0];           \
     376                 :            :              (pool) < &per_cpu(cpu_worker_pools, cpu)[NR_STD_WORKER_POOLS]; \
     377                 :            :              (pool)++)
     378                 :            : 
     379                 :            : /**
     380                 :            :  * for_each_pool - iterate through all worker_pools in the system
     381                 :            :  * @pool: iteration cursor
     382                 :            :  * @pi: integer used for iteration
     383                 :            :  *
     384                 :            :  * This must be called either with wq_pool_mutex held or RCU read
     385                 :            :  * locked.  If the pool needs to be used beyond the locking in effect, the
     386                 :            :  * caller is responsible for guaranteeing that the pool stays online.
     387                 :            :  *
     388                 :            :  * The if/else clause exists only for the lockdep assertion and can be
     389                 :            :  * ignored.
     390                 :            :  */
     391                 :            : #define for_each_pool(pool, pi)                                         \
     392                 :            :         idr_for_each_entry(&worker_pool_idr, pool, pi)                      \
     393                 :            :                 if (({ assert_rcu_or_pool_mutex(); false; })) { }       \
     394                 :            :                 else
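
/*
 * Illustrative usage sketch (not in the original workqueue.c): iterating
 * all pools under the RCU read lock, one of the two locking choices the
 * comment above allows.  The function example_dump_pool_ids() is
 * hypothetical.
 */
static void example_dump_pool_ids(void)
{
	struct worker_pool *pool;
	int pi;

	rcu_read_lock();
	for_each_pool(pool, pi)
		pr_info("pool %d (cpu %d)\n", pool->id, pool->cpu);
	rcu_read_unlock();
}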
     395                 :            : 
     396                 :            : /**
     397                 :            :  * for_each_pool_worker - iterate through all workers of a worker_pool
     398                 :            :  * @worker: iteration cursor
     399                 :            :  * @pool: worker_pool to iterate workers of
     400                 :            :  *
      401                 :            :  * This must be called with wq_pool_attach_mutex held.
     402                 :            :  *
     403                 :            :  * The if/else clause exists only for the lockdep assertion and can be
     404                 :            :  * ignored.
     405                 :            :  */
     406                 :            : #define for_each_pool_worker(worker, pool)                              \
     407                 :            :         list_for_each_entry((worker), &(pool)->workers, node)            \
     408                 :            :                 if (({ lockdep_assert_held(&wq_pool_attach_mutex); false; })) { } \
     409                 :            :                 else
     410                 :            : 
     411                 :            : /**
     412                 :            :  * for_each_pwq - iterate through all pool_workqueues of the specified workqueue
     413                 :            :  * @pwq: iteration cursor
     414                 :            :  * @wq: the target workqueue
     415                 :            :  *
     416                 :            :  * This must be called either with wq->mutex held or RCU read locked.
     417                 :            :  * If the pwq needs to be used beyond the locking in effect, the caller is
     418                 :            :  * responsible for guaranteeing that the pwq stays online.
     419                 :            :  *
     420                 :            :  * The if/else clause exists only for the lockdep assertion and can be
     421                 :            :  * ignored.
     422                 :            :  */
     423                 :            : #define for_each_pwq(pwq, wq)                                           \
     424                 :            :         list_for_each_entry_rcu((pwq), &(wq)->pwqs, pwqs_node,           \
     425                 :            :                                  lockdep_is_held(&(wq->mutex)))
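
/*
 * Illustrative usage sketch (not in the original workqueue.c): walking a
 * workqueue's pwqs under wq->mutex, the other locking choice the comment
 * above allows besides RCU.  The function example_dump_pwq_depths() is
 * hypothetical.
 */
static void example_dump_pwq_depths(struct workqueue_struct *wq)
{
	struct pool_workqueue *pwq;

	mutex_lock(&wq->mutex);
	for_each_pwq(pwq, wq)
		pr_info("%s: %d active of %d max\n",
			wq->name, pwq->nr_active, pwq->max_active);
	mutex_unlock(&wq->mutex);
}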
     426                 :            : 
     427                 :            : #ifdef CONFIG_DEBUG_OBJECTS_WORK
     428                 :            : 
     429                 :            : static struct debug_obj_descr work_debug_descr;
     430                 :            : 
     431                 :            : static void *work_debug_hint(void *addr)
     432                 :            : {
     433                 :            :         return ((struct work_struct *) addr)->func;
     434                 :            : }
     435                 :            : 
     436                 :            : static bool work_is_static_object(void *addr)
     437                 :            : {
     438                 :            :         struct work_struct *work = addr;
     439                 :            : 
     440                 :            :         return test_bit(WORK_STRUCT_STATIC_BIT, work_data_bits(work));
     441                 :            : }
     442                 :            : 
     443                 :            : /*
     444                 :            :  * fixup_init is called when:
     445                 :            :  * - an active object is initialized
     446                 :            :  */
     447                 :            : static bool work_fixup_init(void *addr, enum debug_obj_state state)
     448                 :            : {
     449                 :            :         struct work_struct *work = addr;
     450                 :            : 
     451                 :            :         switch (state) {
     452                 :            :         case ODEBUG_STATE_ACTIVE:
     453                 :            :                 cancel_work_sync(work);
     454                 :            :                 debug_object_init(work, &work_debug_descr);
     455                 :            :                 return true;
     456                 :            :         default:
     457                 :            :                 return false;
     458                 :            :         }
     459                 :            : }
     460                 :            : 
     461                 :            : /*
     462                 :            :  * fixup_free is called when:
     463                 :            :  * - an active object is freed
     464                 :            :  */
     465                 :            : static bool work_fixup_free(void *addr, enum debug_obj_state state)
     466                 :            : {
     467                 :            :         struct work_struct *work = addr;
     468                 :            : 
     469                 :            :         switch (state) {
     470                 :            :         case ODEBUG_STATE_ACTIVE:
     471                 :            :                 cancel_work_sync(work);
     472                 :            :                 debug_object_free(work, &work_debug_descr);
     473                 :            :                 return true;
     474                 :            :         default:
     475                 :            :                 return false;
     476                 :            :         }
     477                 :            : }
     478                 :            : 
     479                 :            : static struct debug_obj_descr work_debug_descr = {
     480                 :            :         .name           = "work_struct",
     481                 :            :         .debug_hint     = work_debug_hint,
     482                 :            :         .is_static_object = work_is_static_object,
     483                 :            :         .fixup_init     = work_fixup_init,
     484                 :            :         .fixup_free     = work_fixup_free,
     485                 :            : };
     486                 :            : 
     487                 :            : static inline void debug_work_activate(struct work_struct *work)
     488                 :            : {
     489                 :            :         debug_object_activate(work, &work_debug_descr);
     490                 :            : }
     491                 :            : 
     492                 :            : static inline void debug_work_deactivate(struct work_struct *work)
     493                 :            : {
     494                 :            :         debug_object_deactivate(work, &work_debug_descr);
     495                 :            : }
     496                 :            : 
     497                 :            : void __init_work(struct work_struct *work, int onstack)
     498                 :            : {
     499                 :            :         if (onstack)
     500                 :            :                 debug_object_init_on_stack(work, &work_debug_descr);
     501                 :            :         else
     502                 :            :                 debug_object_init(work, &work_debug_descr);
     503                 :            : }
     504                 :            : EXPORT_SYMBOL_GPL(__init_work);
     505                 :            : 
     506                 :            : void destroy_work_on_stack(struct work_struct *work)
     507                 :            : {
     508                 :            :         debug_object_free(work, &work_debug_descr);
     509                 :            : }
     510                 :            : EXPORT_SYMBOL_GPL(destroy_work_on_stack);
     511                 :            : 
     512                 :            : void destroy_delayed_work_on_stack(struct delayed_work *work)
     513                 :            : {
     514                 :            :         destroy_timer_on_stack(&work->timer);
     515                 :            :         debug_object_free(&work->work, &work_debug_descr);
     516                 :            : }
     517                 :            : EXPORT_SYMBOL_GPL(destroy_delayed_work_on_stack);
     518                 :            : 
     519                 :            : #else
     520                 :       9274 : static inline void debug_work_activate(struct work_struct *work) { }
     521                 :       9274 : static inline void debug_work_deactivate(struct work_struct *work) { }
     522                 :            : #endif
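
/*
 * Illustrative usage sketch (not in the original workqueue.c): the on-stack
 * debug-object hooks above pair with INIT_WORK_ONSTACK() and
 * destroy_work_on_stack() in callers, roughly as follows (some_work_fn is a
 * hypothetical handler):
 *
 *	struct work_struct work;
 *
 *	INIT_WORK_ONSTACK(&work, some_work_fn);
 *	schedule_work(&work);
 *	flush_work(&work);
 *	destroy_work_on_stack(&work);
 */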
     523                 :            : 
     524                 :            : /**
      525                 :            :  * worker_pool_assign_id - allocate ID and assign it to @pool
     526                 :            :  * @pool: the pool pointer of interest
     527                 :            :  *
      528                 :            :  * Returns 0 if an ID in [0, WORK_OFFQ_POOL_NONE) is allocated and assigned
     529                 :            :  * successfully, -errno on failure.
     530                 :            :  */
     531                 :         44 : static int worker_pool_assign_id(struct worker_pool *pool)
     532                 :            : {
     533                 :         44 :         int ret;
     534                 :            : 
     535                 :         44 :         lockdep_assert_held(&wq_pool_mutex);
     536                 :            : 
     537                 :         44 :         ret = idr_alloc(&worker_pool_idr, pool, 0, WORK_OFFQ_POOL_NONE,
     538                 :            :                         GFP_KERNEL);
     539   [ +  -  +  - ]:         44 :         if (ret >= 0) {
     540                 :         44 :                 pool->id = ret;
     541                 :         22 :                 return 0;
     542                 :            :         }
     543                 :            :         return ret;
     544                 :            : }
     545                 :            : 
     546                 :            : /**
     547                 :            :  * unbound_pwq_by_node - return the unbound pool_workqueue for the given node
     548                 :            :  * @wq: the target workqueue
     549                 :            :  * @node: the node ID
     550                 :            :  *
     551                 :            :  * This must be called with any of wq_pool_mutex, wq->mutex or RCU
     552                 :            :  * read locked.
     553                 :            :  * If the pwq needs to be used beyond the locking in effect, the caller is
     554                 :            :  * responsible for guaranteeing that the pwq stays online.
     555                 :            :  *
     556                 :            :  * Return: The unbound pool_workqueue for @node.
     557                 :            :  */
     558                 :        941 : static struct pool_workqueue *unbound_pwq_by_node(struct workqueue_struct *wq,
     559                 :            :                                                   int node)
     560                 :            : {
     561                 :        941 :         assert_rcu_or_wq_mutex_or_pool_mutex(wq);
     562                 :            : 
     563                 :            :         /*
     564                 :            :          * XXX: @node can be NUMA_NO_NODE if CPU goes offline while a
     565                 :            :          * delayed item is pending.  The plan is to keep CPU -> NODE
     566                 :            :          * mapping valid and stable across CPU on/offlines.  Once that
     567                 :            :          * happens, this workaround can be removed.
     568                 :            :          */
     569   [ -  -  -  + ]:        941 :         if (unlikely(node == NUMA_NO_NODE))
     570                 :          0 :                 return wq->dfl_pwq;
     571                 :            : 
     572                 :        941 :         return rcu_dereference_raw(wq->numa_pwq_tbl[node]);
     573                 :            : }
     574                 :            : 
     575                 :       9274 : static unsigned int work_color_to_flags(int color)
     576                 :            : {
     577                 :       9274 :         return color << WORK_STRUCT_COLOR_SHIFT;
     578                 :            : }
     579                 :            : 
     580                 :       9274 : static int get_work_color(struct work_struct *work)
     581                 :            : {
     582                 :       9274 :         return (*work_data_bits(work) >> WORK_STRUCT_COLOR_SHIFT) &
     583                 :            :                 ((1 << WORK_STRUCT_COLOR_BITS) - 1);
     584                 :            : }
     585                 :            : 
     586                 :       3784 : static int work_next_color(int color)
     587                 :            : {
     588                 :       3784 :         return (color + 1) % WORK_NR_COLORS;
     589                 :            : }
     590                 :            : 
     591                 :            : /*
     592                 :            :  * While queued, %WORK_STRUCT_PWQ is set and non flag bits of a work's data
     593                 :            :  * contain the pointer to the queued pwq.  Once execution starts, the flag
     594                 :            :  * is cleared and the high bits contain OFFQ flags and pool ID.
     595                 :            :  *
     596                 :            :  * set_work_pwq(), set_work_pool_and_clear_pending(), mark_work_canceling()
     597                 :            :  * and clear_work_data() can be used to set the pwq, pool or clear
     598                 :            :  * work->data.  These functions should only be called while the work is
     599                 :            :  * owned - ie. while the PENDING bit is set.
     600                 :            :  *
     601                 :            :  * get_work_pool() and get_work_pwq() can be used to obtain the pool or pwq
     602                 :            :  * corresponding to a work.  Pool is available once the work has been
     603                 :            :  * queued anywhere after initialization until it is sync canceled.  pwq is
     604                 :            :  * available only while the work item is queued.
     605                 :            :  *
     606                 :            :  * %WORK_OFFQ_CANCELING is used to mark a work item which is being
     607                 :            :  * canceled.  While being canceled, a work item may have its PENDING set
     608                 :            :  * but stay off timer and worklist for arbitrarily long and nobody should
     609                 :            :  * try to steal the PENDING bit.
     610                 :            :  */
     611                 :      22002 : static inline void set_work_data(struct work_struct *work, unsigned long data,
     612                 :            :                                  unsigned long flags)
     613                 :            : {
     614         [ -  + ]:      22002 :         WARN_ON_ONCE(!work_pending(work));
     615                 :      22002 :         atomic_long_set(&work->data, data | flags | work_static(work));
     616                 :      22002 : }
     617                 :            : 
     618                 :       9274 : static void set_work_pwq(struct work_struct *work, struct pool_workqueue *pwq,
     619                 :            :                          unsigned long extra_flags)
     620                 :            : {
     621                 :       9274 :         set_work_data(work, (unsigned long)pwq,
     622                 :            :                       WORK_STRUCT_PENDING | WORK_STRUCT_PWQ | extra_flags);
     623                 :            : }
     624                 :            : 
     625                 :         59 : static void set_work_pool_and_keep_pending(struct work_struct *work,
     626                 :            :                                            int pool_id)
     627                 :            : {
     628                 :         59 :         set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT,
     629                 :            :                       WORK_STRUCT_PENDING);
     630                 :            : }
     631                 :            : 
     632                 :       9259 : static void set_work_pool_and_clear_pending(struct work_struct *work,
     633                 :            :                                             int pool_id)
     634                 :            : {
     635                 :            :         /*
     636                 :            :          * The following wmb is paired with the implied mb in
     637                 :            :          * test_and_set_bit(PENDING) and ensures all updates to @work made
     638                 :            :          * here are visible to and precede any updates by the next PENDING
     639                 :            :          * owner.
     640                 :            :          */
     641                 :         44 :         smp_wmb();
     642                 :       9259 :         set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT, 0);
     643                 :            :         /*
     644                 :            :          * The following mb guarantees that previous clear of a PENDING bit
     645                 :            :          * will not be reordered with any speculative LOADS or STORES from
     646                 :            :          * work->current_func, which is executed afterwards.  This possible
     647                 :            :          * reordering can lead to a missed execution on attempt to queue
     648                 :            :          * the same @work.  E.g. consider this case:
     649                 :            :          *
     650                 :            :          *   CPU#0                         CPU#1
     651                 :            :          *   ----------------------------  --------------------------------
     652                 :            :          *
     653                 :            :          * 1  STORE event_indicated
     654                 :            :          * 2  queue_work_on() {
     655                 :            :          * 3    test_and_set_bit(PENDING)
     656                 :            :          * 4 }                             set_..._and_clear_pending() {
     657                 :            :          * 5                                 set_work_data() # clear bit
     658                 :            :          * 6                                 smp_mb()
     659                 :            :          * 7                               work->current_func() {
     660                 :            :          * 8                                  LOAD event_indicated
     661                 :            :          *                                 }
     662                 :            :          *
     663                 :            :          * Without an explicit full barrier speculative LOAD on line 8 can
     664                 :            :          * be executed before CPU#0 does STORE on line 1.  If that happens,
     665                 :            :          * CPU#0 observes the PENDING bit is still set and new execution of
      666                 :            :          * a @work is not queued in the hope that CPU#1 will eventually
     667                 :            :          * finish the queued @work.  Meanwhile CPU#1 does not see
     668                 :            :          * event_indicated is set, because speculative LOAD was executed
     669                 :            :          * before actual STORE.
     670                 :            :          */
     671                 :       9259 :         smp_mb();
     672                 :            : }
     673                 :            : 
     674                 :       1705 : static void clear_work_data(struct work_struct *work)
     675                 :            : {
     676                 :       1705 :         smp_wmb();      /* see set_work_pool_and_clear_pending() */
     677                 :       1705 :         set_work_data(work, WORK_STRUCT_NO_POOL, 0);
     678                 :            : }
     679                 :            : 
     680                 :      10013 : static struct pool_workqueue *get_work_pwq(struct work_struct *work)
     681                 :            : {
     682                 :      10013 :         unsigned long data = atomic_long_read(&work->data);
     683                 :            : 
      684   [ -  -  -  -  +  +  -  -  +  -  -  -  +  -  +  - ]:      10013 :         if (data & WORK_STRUCT_PWQ)
     685                 :       9525 :                 return (void *)(data & WORK_STRUCT_WQ_DATA_MASK);
     686                 :            :         else
     687                 :            :                 return NULL;
     688                 :            : }
     689                 :            : 
     690                 :            : /**
     691                 :            :  * get_work_pool - return the worker_pool a given work was associated with
     692                 :            :  * @work: the work item of interest
     693                 :            :  *
      694                 :            :  * Pools are created and destroyed under wq_pool_mutex, and read access
      695                 :            :  * is allowed under the RCU read lock.  As such, this function should be
     696                 :            :  * called under wq_pool_mutex or inside of a rcu_read_lock() region.
     697                 :            :  *
     698                 :            :  * All fields of the returned pool are accessible as long as the above
     699                 :            :  * mentioned locking is in effect.  If the returned pool needs to be used
     700                 :            :  * beyond the critical section, the caller is responsible for ensuring the
     701                 :            :  * returned pool is and stays online.
     702                 :            :  *
     703                 :            :  * Return: The worker_pool @work was last associated with.  %NULL if none.
     704                 :            :  */
     705                 :      13046 : static struct worker_pool *get_work_pool(struct work_struct *work)
     706                 :            : {
     707                 :      13046 :         unsigned long data = atomic_long_read(&work->data);
     708                 :      13046 :         int pool_id;
     709                 :            : 
     710                 :      13046 :         assert_rcu_or_pool_mutex();
     711                 :            : 
     712         [ +  + ]:      13046 :         if (data & WORK_STRUCT_PWQ)
     713                 :         84 :                 return ((struct pool_workqueue *)
     714                 :         84 :                         (data & WORK_STRUCT_WQ_DATA_MASK))->pool;
     715                 :            : 
     716                 :      12962 :         pool_id = data >> WORK_OFFQ_POOL_SHIFT;
     717         [ +  + ]:      12962 :         if (pool_id == WORK_OFFQ_POOL_NONE)
     718                 :            :                 return NULL;
     719                 :            : 
     720                 :       6382 :         return idr_find(&worker_pool_idr, pool_id);
     721                 :            : }
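
/*
 * Illustrative usage sketch (not in the original workqueue.c): callers
 * typically wrap get_work_pool() in an RCU read-side critical section, as
 * the comment above requires.  The function example_report_last_pool() is
 * hypothetical.
 */
static void example_report_last_pool(struct work_struct *work)
{
	struct worker_pool *pool;

	rcu_read_lock();
	pool = get_work_pool(work);
	if (pool)
		pr_debug("work %ps last associated with pool %d\n",
			 work->func, pool->id);
	rcu_read_unlock();
}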
     722                 :            : 
     723                 :            : /**
     724                 :            :  * get_work_pool_id - return the worker pool ID a given work is associated with
     725                 :            :  * @work: the work item of interest
     726                 :            :  *
     727                 :            :  * Return: The worker_pool ID @work was last associated with.
     728                 :            :  * %WORK_OFFQ_POOL_NONE if none.
     729                 :            :  */
     730                 :       1749 : static int get_work_pool_id(struct work_struct *work)
     731                 :            : {
     732                 :       1749 :         unsigned long data = atomic_long_read(&work->data);
     733                 :            : 
     734         [ -  + ]:       1749 :         if (data & WORK_STRUCT_PWQ)
     735                 :          0 :                 return ((struct pool_workqueue *)
     736                 :          0 :                         (data & WORK_STRUCT_WQ_DATA_MASK))->pool->id;
     737                 :            : 
     738                 :       1749 :         return data >> WORK_OFFQ_POOL_SHIFT;
     739                 :            : }
     740                 :            : 
     741                 :       1705 : static void mark_work_canceling(struct work_struct *work)
     742                 :            : {
     743                 :       1705 :         unsigned long pool_id = get_work_pool_id(work);
     744                 :            : 
     745                 :       1705 :         pool_id <<= WORK_OFFQ_POOL_SHIFT;
     746                 :       1705 :         set_work_data(work, pool_id | WORK_OFFQ_CANCELING, WORK_STRUCT_PENDING);
     747                 :       1705 : }
     748                 :            : 
     749                 :          0 : static bool work_is_canceling(struct work_struct *work)
     750                 :            : {
     751                 :          0 :         unsigned long data = atomic_long_read(&work->data);
     752                 :            : 
     753                 :          0 :         return !(data & WORK_STRUCT_PWQ) && (data & WORK_OFFQ_CANCELING);
     754                 :            : }
     755                 :            : 
     756                 :            : /*
     757                 :            :  * Policy functions.  These define the policies on how the global worker
     758                 :            :  * pools are managed.  Unless noted otherwise, these functions assume that
     759                 :            :  * they're being called with pool->lock held.
     760                 :            :  */
     761                 :            : 
     762                 :      17829 : static bool __need_more_worker(struct worker_pool *pool)
     763                 :            : {
     764                 :       8555 :         return !atomic_read(&pool->nr_running);
     765                 :            : }
     766                 :            : 
     767                 :            : /*
     768                 :            :  * Need to wake up a worker?  Called from anything but currently
     769                 :            :  * running workers.
     770                 :            :  *
     771                 :            :  * Note that, because unbound workers never contribute to nr_running, this
     772                 :            :  * function will always return %true for unbound pools as long as the
     773                 :            :  * worklist isn't empty.
     774                 :            :  */
     775                 :      17418 : static bool need_more_worker(struct worker_pool *pool)
     776                 :            : {
     777   [ -  -  -  +  :       8555 :         return !list_empty(&pool->worklist) && __need_more_worker(pool);
             +  +  -  + ]
     778                 :            : }
     779                 :            : 
     780                 :            : /* Can I start working?  Called from busy but !running workers. */
     781                 :       7664 : static bool may_start_working(struct worker_pool *pool)
     782                 :            : {
     783                 :       7664 :         return pool->nr_idle;
     784                 :            : }
     785                 :            : 
     786                 :            : /* Do I need to keep working?  Called from currently running workers. */
     787                 :       9177 : static bool keep_working(struct worker_pool *pool)
     788                 :            : {
     789         [ -  + ]:       1678 :         return !list_empty(&pool->worklist) &&
     790                 :       1678 :                 atomic_read(&pool->nr_running) <= 1;
     791                 :            : }
     792                 :            : 
     793                 :            : /* Do we need a new worker?  Called from manager. */
     794                 :         88 : static bool need_to_create_worker(struct worker_pool *pool)
     795                 :            : {
     796   [ +  +  +  +  :        165 :         return need_more_worker(pool) && !may_start_working(pool);
                   +  - ]
     797                 :            : }
     798                 :            : 
     799                 :            : /* Do we have too many workers and should some go away? */
     800                 :       8159 : static bool too_many_workers(struct worker_pool *pool)
     801                 :            : {
     802                 :       8159 :         bool managing = pool->flags & POOL_MANAGER_ACTIVE;
     803                 :       8159 :         int nr_idle = pool->nr_idle + managing; /* manager is considered idle */
     804                 :       8159 :         int nr_busy = pool->nr_workers - nr_idle;
     805                 :            : 
     806   [ -  -  -  + ]:       3886 :         return nr_idle > 2 && (nr_idle - 2) * MAX_IDLE_WORKERS_RATIO >= nr_busy;
     807                 :            : }
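
The check above tolerates two idle workers unconditionally and otherwise keeps idle workers to a fixed fraction of the busy ones. A worked example, assuming the ratio constant is 4 (its value in mainline, used here only as an assumption): with 10 busy workers, a 4th idle worker is still tolerated, but a 5th trips the check, since (5 - 2) * 4 = 12 >= 10 while (4 - 2) * 4 = 8 < 10. A small user-space check of that arithmetic:

/*
 * Worked check of the idle-worker threshold.  The ratio constant below is
 * a stand-in for MAX_IDLE_WORKERS_RATIO (4 in mainline at the time of
 * writing); treat it as an assumption, not a definition.
 */
#include <assert.h>
#include <stdbool.h>

#define MODEL_MAX_IDLE_WORKERS_RATIO 4

static bool model_too_many_workers(int nr_idle, int nr_busy)
{
        return nr_idle > 2 &&
               (nr_idle - 2) * MODEL_MAX_IDLE_WORKERS_RATIO >= nr_busy;
}

int main(void)
{
        /* With 10 busy workers, a 4th idle worker is still tolerated ... */
        assert(!model_too_many_workers(4, 10));   /* (4 - 2) * 4 = 8  < 10 */
        /* ... but a 5th idle worker trips the "too many" check. */
        assert(model_too_many_workers(5, 10));    /* (5 - 2) * 4 = 12 >= 10 */
        return 0;
}
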
     808                 :            : 
     809                 :            : /*
     810                 :            :  * Wake up functions.
     811                 :            :  */
     812                 :            : 
     813                 :            : /* Return the first idle worker.  Safe with preemption disabled */
     814                 :       8878 : static struct worker *first_idle_worker(struct worker_pool *pool)
     815                 :            : {
     816                 :       8878 :         if (unlikely(list_empty(&pool->idle_list)))
     817                 :            :                 return NULL;
     818                 :            : 
     819                 :       8618 :         return list_first_entry(&pool->idle_list, struct worker, entry);
     820                 :            : }
     821                 :            : 
     822                 :            : /**
     823                 :            :  * wake_up_worker - wake up an idle worker
     824                 :            :  * @pool: worker pool to wake worker from
     825                 :            :  *
     826                 :            :  * Wake up the first idle worker of @pool.
     827                 :            :  *
     828                 :            :  * CONTEXT:
     829                 :            :  * spin_lock_irq(pool->lock).
     830                 :            :  */
     831                 :       8789 : static void wake_up_worker(struct worker_pool *pool)
     832                 :            : {
     833                 :       8789 :         struct worker *worker = first_idle_worker(pool);
     834                 :            : 
     835   [ -  -  +  +  :       8789 :         if (likely(worker))
          -  -  +  -  +  
                      + ]
     836                 :       8618 :                 wake_up_process(worker->task);
     837                 :            : }
     838                 :            : 
     839                 :            : /**
     840                 :            :  * wq_worker_running - a worker is running again
     841                 :            :  * @task: task waking up
     842                 :            :  *
     843                 :            :  * This function is called when a worker returns from schedule()
     844                 :            :  */
     845                 :       9223 : void wq_worker_running(struct task_struct *task)
     846                 :            : {
     847                 :       9223 :         struct worker *worker = kthread_data(task);
     848                 :            : 
     849         [ +  + ]:       9223 :         if (!worker->sleeping)
     850                 :            :                 return;
     851         [ +  - ]:        134 :         if (!(worker->flags & WORKER_NOT_RUNNING))
     852                 :        134 :                 atomic_inc(&worker->pool->nr_running);
     853                 :        134 :         worker->sleeping = 0;
     854                 :            : }
     855                 :            : 
     856                 :            : /**
     857                 :            :  * wq_worker_sleeping - a worker is going to sleep
     858                 :            :  * @task: task going to sleep
     859                 :            :  *
     860                 :            :  * This function is called from schedule() when a busy worker is
     861                 :            :  * going to sleep.
     862                 :            :  */
     863                 :       9570 : void wq_worker_sleeping(struct task_struct *task)
     864                 :            : {
     865                 :       9570 :         struct worker *next, *worker = kthread_data(task);
     866                 :       9570 :         struct worker_pool *pool;
     867                 :            : 
     868                 :            :         /*
      869                 :            :          * Rescuers, which may not have all the fields set up like normal
      870                 :            :          * workers, also reach here; don't access anything before
      871                 :            :          * checking NOT_RUNNING.
     872                 :            :          */
     873         [ +  + ]:       9570 :         if (worker->flags & WORKER_NOT_RUNNING)
     874                 :            :                 return;
     875                 :            : 
     876                 :        134 :         pool = worker->pool;
     877                 :            : 
     878   [ -  +  +  - ]:        134 :         if (WARN_ON_ONCE(worker->sleeping))
     879                 :            :                 return;
     880                 :            : 
     881                 :        134 :         worker->sleeping = 1;
     882                 :        134 :         spin_lock_irq(&pool->lock);
     883                 :            : 
     884                 :            :         /*
      885                 :            :          * The counterpart of the following dec_and_test / implied mb /
      886                 :            :          * worklist-not-empty test sequence is in insert_work().
      887                 :            :          * Please read the comment there.
      888                 :            :          *
      889                 :            :          * NOT_RUNNING is clear.  This means that we're bound to and
      890                 :            :          * running on the local cpu w/ rq lock held and preemption
      891                 :            :          * disabled, which in turn means that no one else could be
      892                 :            :          * manipulating idle_list, so dereferencing idle_list without pool
      893                 :            :          * lock is safe.
     894                 :            :          */
     895   [ +  -  +  + ]:        134 :         if (atomic_dec_and_test(&pool->nr_running) &&
     896         [ +  + ]:        134 :             !list_empty(&pool->worklist)) {
     897         [ +  - ]:         89 :                 next = first_idle_worker(pool);
     898         [ +  - ]:         89 :                 if (next)
     899                 :         89 :                         wake_up_process(next->task);
     900                 :            :         }
     901                 :        134 :         spin_unlock_irq(&pool->lock);
     902                 :            : }
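
The pairing described in the comment above is what prevents a lost wakeup between a queuer and the last running worker going to sleep: the queuer publishes the work and then checks nr_running, while the sleeper retires from nr_running and then rechecks the worklist, so at least one side performs the wakeup. The following user-space model with C11 atomics sketches that protocol; the names and the use of sequentially consistent atomics in place of smp_mb()/atomic_dec_and_test() are simplifications, not kernel code.

/*
 * User-space model (C11 atomics) of the lost-wakeup pairing described in
 * the comment above.  Names are placeholders; this is a sketch only.
 */
#include <assert.h>
#include <stdatomic.h>

static atomic_int worklist_len;   /* stands in for pool->worklist      */
static atomic_int nr_running;     /* stands in for pool->nr_running    */
static atomic_int wakeups;        /* times an idle worker was woken    */

/* Queuer side: publish the work first, then check for running workers. */
static void model_insert_work(void)
{
        atomic_fetch_add(&worklist_len, 1);        /* list_add_tail()      */
        if (atomic_load(&nr_running) == 0)         /* __need_more_worker() */
                atomic_fetch_add(&wakeups, 1);     /* wake_up_worker()     */
}

/* Worker going to sleep: retire from nr_running, then recheck the list. */
static void model_worker_sleeping(void)
{
        if (atomic_fetch_sub(&nr_running, 1) == 1 &&   /* dec_and_test()   */
            atomic_load(&worklist_len) > 0)
                atomic_fetch_add(&wakeups, 1);     /* wake_up_process()    */
}

int main(void)
{
        /* Order A: work is queued before the last runner goes to sleep. */
        atomic_store(&nr_running, 1);
        model_insert_work();        /* sees nr_running == 1, skips wakeup  */
        model_worker_sleeping();    /* sees the queued work, wakes someone */
        assert(atomic_load(&wakeups) == 1);

        /* Order B: the runner goes to sleep first, then work is queued. */
        atomic_store(&nr_running, 1);
        atomic_store(&worklist_len, 0);
        atomic_store(&wakeups, 0);
        model_worker_sleeping();    /* worklist empty, no wakeup yet       */
        model_insert_work();        /* sees nr_running == 0, wakes someone */
        assert(atomic_load(&wakeups) == 1);
        return 0;
}
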
     903                 :            : 
     904                 :            : /**
     905                 :            :  * wq_worker_last_func - retrieve worker's last work function
     906                 :            :  * @task: Task to retrieve last work function of.
     907                 :            :  *
     908                 :            :  * Determine the last function a worker executed. This is called from
     909                 :            :  * the scheduler to get a worker's last known identity.
     910                 :            :  *
     911                 :            :  * CONTEXT:
     912                 :            :  * spin_lock_irq(rq->lock)
     913                 :            :  *
     914                 :            :  * This function is called during schedule() when a kworker is going
     915                 :            :  * to sleep. It's used by psi to identify aggregation workers during
      916                 :            :  * dequeuing, to allow periodic aggregation to shut off when that
     917                 :            :  * worker is the last task in the system or cgroup to go to sleep.
     918                 :            :  *
     919                 :            :  * As this function doesn't involve any workqueue-related locking, it
     920                 :            :  * only returns stable values when called from inside the scheduler's
     921                 :            :  * queuing and dequeuing paths, when @task, which must be a kworker,
      922                 :            :  * is guaranteed to not be processing any work items.
     923                 :            :  *
     924                 :            :  * Return:
     925                 :            :  * The last work function %current executed as a worker, NULL if it
     926                 :            :  * hasn't executed any work yet.
     927                 :            :  */
     928                 :          0 : work_func_t wq_worker_last_func(struct task_struct *task)
     929                 :            : {
     930                 :          0 :         struct worker *worker = kthread_data(task);
     931                 :            : 
     932                 :          0 :         return worker->last_func;
     933                 :            : }
     934                 :            : 
     935                 :            : /**
     936                 :            :  * worker_set_flags - set worker flags and adjust nr_running accordingly
     937                 :            :  * @worker: self
     938                 :            :  * @flags: flags to set
     939                 :            :  *
     940                 :            :  * Set @flags in @worker->flags and adjust nr_running accordingly.
     941                 :            :  *
     942                 :            :  * CONTEXT:
     943                 :            :  * spin_lock_irq(pool->lock)
     944                 :            :  */
     945                 :       7499 : static inline void worker_set_flags(struct worker *worker, unsigned int flags)
     946                 :            : {
     947                 :       7499 :         struct worker_pool *pool = worker->pool;
     948                 :            : 
     949         [ -  + ]:       7499 :         WARN_ON_ONCE(worker->task != current);
     950                 :            : 
     951                 :            :         /* If transitioning into NOT_RUNNING, adjust nr_running. */
     952         [ +  - ]:       7499 :         if ((flags & WORKER_NOT_RUNNING) &&
     953         [ +  + ]:       7499 :             !(worker->flags & WORKER_NOT_RUNNING)) {
     954                 :       6570 :                 atomic_dec(&pool->nr_running);
     955                 :            :         }
     956                 :            : 
     957                 :       7499 :         worker->flags |= flags;
     958                 :       7499 : }
     959                 :            : 
     960                 :            : /**
     961                 :            :  * worker_clr_flags - clear worker flags and adjust nr_running accordingly
     962                 :            :  * @worker: self
     963                 :            :  * @flags: flags to clear
     964                 :            :  *
     965                 :            :  * Clear @flags in @worker->flags and adjust nr_running accordingly.
     966                 :            :  *
     967                 :            :  * CONTEXT:
     968                 :            :  * spin_lock_irq(pool->lock)
     969                 :            :  */
     970                 :      15526 : static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
     971                 :            : {
     972                 :      15526 :         struct worker_pool *pool = worker->pool;
     973                 :      15526 :         unsigned int oflags = worker->flags;
     974                 :            : 
     975         [ -  + ]:      15526 :         WARN_ON_ONCE(worker->task != current);
     976                 :            : 
     977                 :      15526 :         worker->flags &= ~flags;
     978                 :            : 
     979                 :            :         /*
     980                 :            :          * If transitioning out of NOT_RUNNING, increment nr_running.  Note
      981                 :            :          * that the nested NOT_RUNNING is not a noop.  NOT_RUNNING is a mask
      982                 :            :          * of multiple flags, not a single flag.
     983                 :            :          */
     984   [ +  +  +  - ]:      15526 :         if ((flags & WORKER_NOT_RUNNING) && (oflags & WORKER_NOT_RUNNING))
     985         [ +  + ]:       7499 :                 if (!(worker->flags & WORKER_NOT_RUNNING))
     986                 :       6570 :                         atomic_inc(&pool->nr_running);
     987                 :      15526 : }
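
As the comment notes, NOT_RUNNING is a mask, so clearing one contributing flag must not bump nr_running while another is still set. A small user-space sketch of that check; the flag names and values are placeholders rather than the kernel's WORKER_* constants.

/*
 * Sketch of why the nested NOT_RUNNING check matters.  The flag values
 * and the composition of NOT_RUNNING are illustrative placeholders.
 */
#include <assert.h>

#define MODEL_PREP              (1 << 0)
#define MODEL_CPU_INTENSIVE     (1 << 1)
#define MODEL_NOT_RUNNING       (MODEL_PREP | MODEL_CPU_INTENSIVE)

static int nr_running;

static void model_clr_flags(unsigned int *wflags, unsigned int clear)
{
        unsigned int oflags = *wflags;

        *wflags &= ~clear;

        /* Only count the worker as running again once *every*
         * NOT_RUNNING-class flag has been cleared. */
        if ((clear & MODEL_NOT_RUNNING) && (oflags & MODEL_NOT_RUNNING))
                if (!(*wflags & MODEL_NOT_RUNNING))
                        nr_running++;
}

int main(void)
{
        unsigned int flags = MODEL_PREP | MODEL_CPU_INTENSIVE;

        model_clr_flags(&flags, MODEL_CPU_INTENSIVE);
        assert(nr_running == 0);        /* PREP still set: not a noop check */

        model_clr_flags(&flags, MODEL_PREP);
        assert(nr_running == 1);        /* last NOT_RUNNING flag gone */
        return 0;
}
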
     988                 :            : 
     989                 :            : /**
     990                 :            :  * find_worker_executing_work - find worker which is executing a work
     991                 :            :  * @pool: pool of interest
     992                 :            :  * @work: work to find worker for
     993                 :            :  *
     994                 :            :  * Find a worker which is executing @work on @pool by searching
     995                 :            :  * @pool->busy_hash which is keyed by the address of @work.  For a worker
     996                 :            :  * to match, its current execution should match the address of @work and
     997                 :            :  * its work function.  This is to avoid unwanted dependency between
     998                 :            :  * unrelated work executions through a work item being recycled while still
     999                 :            :  * being executed.
    1000                 :            :  *
    1001                 :            :  * This is a bit tricky.  A work item may be freed once its execution
    1002                 :            :  * starts and nothing prevents the freed area from being recycled for
    1003                 :            :  * another work item.  If the same work item address ends up being reused
    1004                 :            :  * before the original execution finishes, workqueue will identify the
    1005                 :            :  * recycled work item as currently executing and make it wait until the
    1006                 :            :  * current execution finishes, introducing an unwanted dependency.
    1007                 :            :  *
    1008                 :            :  * This function checks the work item address and work function to avoid
    1009                 :            :  * false positives.  Note that this isn't complete as one may construct a
    1010                 :            :  * work function which can introduce dependency onto itself through a
     1011                 :            :  * recycled work item.  Well, if somebody wants to shoot themselves in the
    1012                 :            :  * foot that badly, there's only so much we can do, and if such deadlock
    1013                 :            :  * actually occurs, it should be easy to locate the culprit work function.
    1014                 :            :  *
    1015                 :            :  * CONTEXT:
    1016                 :            :  * spin_lock_irq(pool->lock).
    1017                 :            :  *
    1018                 :            :  * Return:
    1019                 :            :  * Pointer to worker which is executing @work if found, %NULL
    1020                 :            :  * otherwise.
    1021                 :            :  */
    1022                 :       9619 : static struct worker *find_worker_executing_work(struct worker_pool *pool,
    1023                 :            :                                                  struct work_struct *work)
    1024                 :            : {
    1025                 :       9619 :         struct worker *worker;
    1026                 :            : 
    1027   [ -  -  -  -  :       9623 :         hash_for_each_possible(pool->busy_hash, worker, hentry,
          -  -  +  +  -  
          +  +  +  -  -  
                   -  - ]
    1028                 :            :                                (unsigned long)work)
    1029   [ -  -  +  -  :         17 :                 if (worker->current_work == work &&
             -  +  -  - ]
    1030   [ -  -  -  +  :         13 :                     worker->current_func == work->func)
             -  -  -  - ]
    1031                 :            :                         return worker;
    1032                 :            : 
    1033                 :            :         return NULL;
    1034                 :            : }
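
The double check above is the whole defence against recycled addresses: both the pointer and the function must match before a work item is treated as already executing. A simplified user-space sketch of that predicate; the types below are stand-ins, not the kernel structures.

/*
 * Sketch of the double check against recycled work addresses.  Types and
 * helpers are simplified stand-ins for the kernel structures.
 */
#include <assert.h>
#include <stddef.h>

typedef void (*work_fn_t)(void *);

struct model_work { work_fn_t func; };
struct model_worker {
        struct model_work *current_work;   /* address being executed   */
        work_fn_t current_func;            /* snapshot of its function */
};

static void fn_old(void *p) { (void)p; }
static void fn_new(void *p) { (void)p; }

/* Matches only when both the address and the function agree, so a freed
 * work item recycled at the same address for a different function does
 * not look like it is "still executing". */
static int model_matches(const struct model_worker *w,
                         const struct model_work *work)
{
        return w->current_work == work && w->current_func == work->func;
}

int main(void)
{
        struct model_work slot = { .func = fn_old };
        struct model_worker busy = { .current_work = &slot,
                                     .current_func = fn_old };

        assert(model_matches(&busy, &slot));    /* same item, still running */

        slot.func = fn_new;     /* same address recycled for other work */
        assert(!model_matches(&busy, &slot));   /* no false dependency    */
        return 0;
}
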
    1035                 :            : 
    1036                 :            : /**
    1037                 :            :  * move_linked_works - move linked works to a list
    1038                 :            :  * @work: start of series of works to be scheduled
    1039                 :            :  * @head: target list to append @work to
    1040                 :            :  * @nextp: out parameter for nested worklist walking
    1041                 :            :  *
    1042                 :            :  * Schedule linked works starting from @work to @head.  Work series to
    1043                 :            :  * be scheduled starts at @work and includes any consecutive work with
    1044                 :            :  * WORK_STRUCT_LINKED set in its predecessor.
    1045                 :            :  *
    1046                 :            :  * If @nextp is not NULL, it's updated to point to the next work of
    1047                 :            :  * the last scheduled work.  This allows move_linked_works() to be
    1048                 :            :  * nested inside outer list_for_each_entry_safe().
    1049                 :            :  *
    1050                 :            :  * CONTEXT:
    1051                 :            :  * spin_lock_irq(pool->lock).
    1052                 :            :  */
    1053                 :        335 : static void move_linked_works(struct work_struct *work, struct list_head *head,
    1054                 :            :                               struct work_struct **nextp)
    1055                 :            : {
    1056                 :        335 :         struct work_struct *n;
    1057                 :            : 
    1058                 :            :         /*
     1059                 :            :          * A linked worklist always ends before the end of the list,
     1060                 :            :          * so use NULL as the list head.
    1061                 :            :          */
    1062   [ -  -  +  -  :        360 :         list_for_each_entry_safe_from(work, n, NULL, entry) {
             -  -  +  - ]
    1063   [ -  -  +  +  :        360 :                 list_move_tail(&work->entry, head);
             -  -  -  + ]
    1064   [ -  -  +  +  :        360 :                 if (!(*work_data_bits(work) & WORK_STRUCT_LINKED))
             -  -  -  + ]
    1065                 :            :                         break;
    1066                 :            :         }
    1067                 :            : 
    1068                 :            :         /*
    1069                 :            :          * If we're already inside safe list traversal and have moved
    1070                 :            :          * multiple works to the scheduled queue, the next position
    1071                 :            :          * needs to be updated.
    1072                 :            :          */
    1073                 :        310 :         if (nextp)
    1074                 :            :                 *nextp = n;
    1075                 :            : }
    1076                 :            : 
    1077                 :            : /**
    1078                 :            :  * get_pwq - get an extra reference on the specified pool_workqueue
    1079                 :            :  * @pwq: pool_workqueue to get
    1080                 :            :  *
    1081                 :            :  * Obtain an extra reference on @pwq.  The caller should guarantee that
    1082                 :            :  * @pwq has positive refcnt and be holding the matching pool->lock.
    1083                 :            :  */
    1084                 :       9274 : static void get_pwq(struct pool_workqueue *pwq)
    1085                 :            : {
    1086                 :       9274 :         lockdep_assert_held(&pwq->pool->lock);
    1087                 :          0 :         WARN_ON_ONCE(pwq->refcnt <= 0);
    1088                 :       9274 :         pwq->refcnt++;
    1089                 :            : }
    1090                 :            : 
    1091                 :            : /**
    1092                 :            :  * put_pwq - put a pool_workqueue reference
    1093                 :            :  * @pwq: pool_workqueue to put
    1094                 :            :  *
    1095                 :            :  * Drop a reference of @pwq.  If its refcnt reaches zero, schedule its
    1096                 :            :  * destruction.  The caller should be holding the matching pool->lock.
    1097                 :            :  */
    1098                 :       9296 : static void put_pwq(struct pool_workqueue *pwq)
    1099                 :            : {
    1100                 :       9296 :         lockdep_assert_held(&pwq->pool->lock);
    1101         [ +  + ]:       9296 :         if (likely(--pwq->refcnt))
    1102                 :            :                 return;
    1103   [ -  +  +  - ]:         11 :         if (WARN_ON_ONCE(!(pwq->wq->flags & WQ_UNBOUND)))
    1104                 :            :                 return;
    1105                 :            :         /*
    1106                 :            :          * @pwq can't be released under pool->lock, bounce to
    1107                 :            :          * pwq_unbound_release_workfn().  This never recurses on the same
    1108                 :            :          * pool->lock as this path is taken only for unbound workqueues and
    1109                 :            :          * the release work item is scheduled on a per-cpu workqueue.  To
    1110                 :            :          * avoid lockdep warning, unbound pool->locks are given lockdep
    1111                 :            :          * subclass of 1 in get_unbound_pool().
    1112                 :            :          */
    1113                 :         11 :         schedule_work(&pwq->unbound_release_work);
    1114                 :            : }
    1115                 :            : 
    1116                 :            : /**
    1117                 :            :  * put_pwq_unlocked - put_pwq() with surrounding pool lock/unlock
    1118                 :            :  * @pwq: pool_workqueue to put (can be %NULL)
    1119                 :            :  *
    1120                 :            :  * put_pwq() with locking.  This function also allows %NULL @pwq.
    1121                 :            :  */
    1122                 :        440 : static void put_pwq_unlocked(struct pool_workqueue *pwq)
    1123                 :            : {
    1124         [ +  + ]:        440 :         if (pwq) {
    1125                 :            :                 /*
    1126                 :            :                  * As both pwqs and pools are RCU protected, the
    1127                 :            :                  * following lock operations are safe.
    1128                 :            :                  */
    1129                 :         22 :                 spin_lock_irq(&pwq->pool->lock);
    1130                 :         22 :                 put_pwq(pwq);
    1131                 :         22 :                 spin_unlock_irq(&pwq->pool->lock);
    1132                 :            :         }
    1133                 :        440 : }
    1134                 :            : 
    1135                 :        310 : static void pwq_activate_delayed_work(struct work_struct *work)
    1136                 :            : {
    1137                 :        310 :         struct pool_workqueue *pwq = get_work_pwq(work);
    1138                 :            : 
    1139                 :        310 :         trace_workqueue_activate_work(work);
    1140         [ +  + ]:        310 :         if (list_empty(&pwq->pool->worklist))
    1141                 :        175 :                 pwq->pool->watchdog_ts = jiffies;
    1142                 :        310 :         move_linked_works(work, &pwq->pool->worklist, NULL);
    1143                 :        310 :         __clear_bit(WORK_STRUCT_DELAYED_BIT, work_data_bits(work));
    1144                 :        310 :         pwq->nr_active++;
    1145                 :        310 : }
    1146                 :            : 
    1147                 :        310 : static void pwq_activate_first_delayed(struct pool_workqueue *pwq)
    1148                 :            : {
    1149                 :        310 :         struct work_struct *work = list_first_entry(&pwq->delayed_works,
    1150                 :            :                                                     struct work_struct, entry);
    1151                 :            : 
    1152                 :        310 :         pwq_activate_delayed_work(work);
    1153                 :        310 : }
    1154                 :            : 
    1155                 :            : /**
    1156                 :            :  * pwq_dec_nr_in_flight - decrement pwq's nr_in_flight
    1157                 :            :  * @pwq: pwq of interest
    1158                 :            :  * @color: color of work which left the queue
    1159                 :            :  *
     1160                 :            :  * A work item has either completed or been removed from the pending queue;
     1161                 :            :  * decrement nr_in_flight of its pwq and handle workqueue flushing.
    1162                 :            :  *
    1163                 :            :  * CONTEXT:
    1164                 :            :  * spin_lock_irq(pool->lock).
    1165                 :            :  */
    1166                 :       9274 : static void pwq_dec_nr_in_flight(struct pool_workqueue *pwq, int color)
    1167                 :            : {
    1168                 :            :         /* uncolored work items don't participate in flushing or nr_active */
    1169         [ +  + ]:       9274 :         if (color == WORK_NO_COLOR)
    1170                 :         38 :                 goto out_put;
    1171                 :            : 
    1172                 :       9236 :         pwq->nr_in_flight[color]--;
    1173                 :            : 
    1174                 :       9236 :         pwq->nr_active--;
    1175         [ +  + ]:       9236 :         if (!list_empty(&pwq->delayed_works)) {
    1176                 :            :                 /* one down, submit a delayed one */
    1177         [ +  - ]:        310 :                 if (pwq->nr_active < pwq->max_active)
    1178                 :        310 :                         pwq_activate_first_delayed(pwq);
    1179                 :            :         }
    1180                 :            : 
    1181                 :            :         /* is flush in progress and are we at the flushing tip? */
    1182         [ +  - ]:       9236 :         if (likely(pwq->flush_color != color))
    1183                 :       9236 :                 goto out_put;
    1184                 :            : 
    1185                 :            :         /* are there still in-flight works? */
    1186         [ #  # ]:          0 :         if (pwq->nr_in_flight[color])
    1187                 :          0 :                 goto out_put;
    1188                 :            : 
    1189                 :            :         /* this pwq is done, clear flush_color */
    1190                 :          0 :         pwq->flush_color = -1;
    1191                 :            : 
    1192                 :            :         /*
    1193                 :            :          * If this was the last pwq, wake up the first flusher.  It
    1194                 :            :          * will handle the rest.
    1195                 :            :          */
    1196         [ #  # ]:          0 :         if (atomic_dec_and_test(&pwq->wq->nr_pwqs_to_flush))
    1197                 :          0 :                 complete(&pwq->wq->first_flusher->done);
    1198                 :          0 : out_put:
    1199                 :       9274 :         put_pwq(pwq);
    1200                 :       9274 : }
    1201                 :            : 
    1202                 :            : /**
    1203                 :            :  * try_to_grab_pending - steal work item from worklist and disable irq
    1204                 :            :  * @work: work item to steal
    1205                 :            :  * @is_dwork: @work is a delayed_work
    1206                 :            :  * @flags: place to store irq state
    1207                 :            :  *
    1208                 :            :  * Try to grab PENDING bit of @work.  This function can handle @work in any
    1209                 :            :  * stable state - idle, on timer or on worklist.
    1210                 :            :  *
    1211                 :            :  * Return:
    1212                 :            :  *  1           if @work was pending and we successfully stole PENDING
    1213                 :            :  *  0           if @work was idle and we claimed PENDING
    1214                 :            :  *  -EAGAIN     if PENDING couldn't be grabbed at the moment, safe to busy-retry
    1215                 :            :  *  -ENOENT     if someone else is canceling @work, this state may persist
    1216                 :            :  *              for arbitrarily long
    1217                 :            :  *
    1218                 :            :  * Note:
    1219                 :            :  * On >= 0 return, the caller owns @work's PENDING bit.  To avoid getting
    1220                 :            :  * interrupted while holding PENDING and @work off queue, irq must be
    1221                 :            :  * disabled on entry.  This, combined with delayed_work->timer being
     1222                 :            :  * irqsafe, ensures that we return -EAGAIN only for a finite, short period of time.
    1223                 :            :  *
    1224                 :            :  * On successful return, >= 0, irq is disabled and the caller is
    1225                 :            :  * responsible for releasing it using local_irq_restore(*@flags).
    1226                 :            :  *
    1227                 :            :  * This function is safe to call from any context including IRQ handler.
    1228                 :            :  */
    1229                 :       4759 : static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
    1230                 :            :                                unsigned long *flags)
    1231                 :            : {
    1232                 :       4759 :         struct worker_pool *pool;
    1233                 :       4759 :         struct pool_workqueue *pwq;
    1234                 :            : 
    1235                 :       4759 :         local_irq_save(*flags);
    1236                 :            : 
    1237                 :            :         /* try to steal the timer if it exists */
    1238         [ +  + ]:       4759 :         if (is_dwork) {
    1239                 :       3450 :                 struct delayed_work *dwork = to_delayed_work(work);
    1240                 :            : 
    1241                 :            :                 /*
    1242                 :            :                  * dwork->timer is irqsafe.  If del_timer() fails, it's
    1243                 :            :                  * guaranteed that the timer is not queued anywhere and not
    1244                 :            :                  * running on the local CPU.
    1245                 :            :                  */
    1246         [ +  + ]:       3450 :                 if (likely(del_timer(&dwork->timer)))
    1247                 :            :                         return 1;
    1248                 :            :         }
    1249                 :            : 
    1250                 :            :         /* try to claim PENDING the normal way */
    1251         [ +  + ]:       4661 :         if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
    1252                 :            :                 return 0;
    1253                 :            : 
    1254                 :         59 :         rcu_read_lock();
    1255                 :            :         /*
    1256                 :            :          * The queueing is in progress, or it is already queued. Try to
    1257                 :            :          * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
    1258                 :            :          */
    1259                 :         59 :         pool = get_work_pool(work);
    1260         [ -  + ]:         59 :         if (!pool)
    1261                 :          0 :                 goto fail;
    1262                 :            : 
    1263                 :         59 :         spin_lock(&pool->lock);
    1264                 :            :         /*
    1265                 :            :          * work->data is guaranteed to point to pwq only while the work
    1266                 :            :          * item is queued on pwq->wq, and both updating work->data to point
    1267                 :            :          * to pwq on queueing and to pool on dequeueing are done under
    1268                 :            :          * pwq->pool->lock.  This in turn guarantees that, if work->data
    1269                 :            :          * points to pwq which is associated with a locked pool, the work
    1270                 :            :          * item is currently queued on that pool.
    1271                 :            :          */
    1272                 :         59 :         pwq = get_work_pwq(work);
    1273   [ +  -  +  - ]:         59 :         if (pwq && pwq->pool == pool) {
    1274                 :         59 :                 debug_work_deactivate(work);
    1275                 :            : 
    1276                 :            :                 /*
    1277                 :            :                  * A delayed work item cannot be grabbed directly because
    1278                 :            :                  * it might have linked NO_COLOR work items which, if left
    1279                 :            :                  * on the delayed_list, will confuse pwq->nr_active
     1280                 :            :                  * management later on and cause a stall.  Make sure the work
    1281                 :            :                  * item is activated before grabbing.
    1282                 :            :                  */
    1283         [ -  + ]:         59 :                 if (*work_data_bits(work) & WORK_STRUCT_DELAYED)
    1284                 :          0 :                         pwq_activate_delayed_work(work);
    1285                 :            : 
    1286                 :         59 :                 list_del_init(&work->entry);
    1287                 :         59 :                 pwq_dec_nr_in_flight(pwq, get_work_color(work));
    1288                 :            : 
    1289                 :            :                 /* work->data points to pwq iff queued, point to pool */
    1290                 :         59 :                 set_work_pool_and_keep_pending(work, pool->id);
    1291                 :            : 
    1292                 :         59 :                 spin_unlock(&pool->lock);
    1293                 :         59 :                 rcu_read_unlock();
    1294                 :         59 :                 return 1;
    1295                 :            :         }
    1296                 :          0 :         spin_unlock(&pool->lock);
    1297                 :          0 : fail:
    1298                 :          0 :         rcu_read_unlock();
    1299                 :          0 :         local_irq_restore(*flags);
    1300         [ #  # ]:          0 :         if (work_is_canceling(work))
    1301                 :            :                 return -ENOENT;
    1302                 :          0 :         cpu_relax();
    1303                 :          0 :         return -EAGAIN;
    1304                 :            : }
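
The return codes documented above are consumed by retry loops such as the cancel paths later in this file. The following is a condensed, hypothetical sketch of such a caller (it is not this file's actual cancel implementation): retry on -EAGAIN, fall back to waiting on -ENOENT, and on >= 0 finish up with PENDING owned and IRQs still disabled.

/*
 * Condensed, hypothetical caller of try_to_grab_pending(), sketching how
 * the documented return codes are meant to be consumed.  It mirrors the
 * shape of a cancel path but is NOT this file's actual implementation.
 */
static bool example_cancel_work(struct work_struct *work, bool is_dwork)
{
        unsigned long flags;
        int ret;

        do {
                ret = try_to_grab_pending(work, is_dwork, &flags);
                /*
                 * -ENOENT: someone else is canceling and that state may
                 * persist, so wait for the work item instead of spinning.
                 */
                if (ret == -ENOENT)
                        flush_work(work);
        } while (unlikely(ret < 0));            /* -EAGAIN: safe to retry */

        /* ret >= 0: we own PENDING and IRQs are still disabled here. */
        mark_work_canceling(work);
        local_irq_restore(flags);

        flush_work(work);
        clear_work_data(work);
        return ret;             /* 1 if @work was pending, 0 if idle */
}
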
    1305                 :            : 
    1306                 :            : /**
    1307                 :            :  * insert_work - insert a work into a pool
    1308                 :            :  * @pwq: pwq @work belongs to
    1309                 :            :  * @work: work to insert
    1310                 :            :  * @head: insertion point
    1311                 :            :  * @extra_flags: extra WORK_STRUCT_* flags to set
    1312                 :            :  *
    1313                 :            :  * Insert @work which belongs to @pwq after @head.  @extra_flags is or'd to
    1314                 :            :  * work_struct flags.
    1315                 :            :  *
    1316                 :            :  * CONTEXT:
    1317                 :            :  * spin_lock_irq(pool->lock).
    1318                 :            :  */
    1319                 :       9274 : static void insert_work(struct pool_workqueue *pwq, struct work_struct *work,
    1320                 :            :                         struct list_head *head, unsigned int extra_flags)
    1321                 :            : {
    1322                 :       9274 :         struct worker_pool *pool = pwq->pool;
    1323                 :            : 
    1324                 :            :         /* we own @work, set data and link */
    1325                 :       9274 :         set_work_pwq(work, pwq, extra_flags);
    1326         [ -  + ]:       9274 :         list_add_tail(&work->entry, head);
    1327         [ -  + ]:       9274 :         get_pwq(pwq);
    1328                 :            : 
    1329                 :            :         /*
    1330                 :            :          * Ensure either wq_worker_sleeping() sees the above
    1331                 :            :          * list_add_tail() or we see zero nr_running to avoid workers lying
    1332                 :            :          * around lazily while there are works to be processed.
    1333                 :            :          */
    1334                 :       9274 :         smp_mb();
    1335                 :            : 
    1336         [ +  + ]:       9274 :         if (__need_more_worker(pool))
    1337         [ +  + ]:       8236 :                 wake_up_worker(pool);
    1338                 :       9274 : }
    1339                 :            : 
    1340                 :            : /*
    1341                 :            :  * Test whether @work is being queued from another work executing on the
    1342                 :            :  * same workqueue.
    1343                 :            :  */
    1344                 :          0 : static bool is_chained_work(struct workqueue_struct *wq)
    1345                 :            : {
    1346                 :          0 :         struct worker *worker;
    1347                 :            : 
    1348                 :          0 :         worker = current_wq_worker();
    1349                 :            :         /*
    1350                 :            :          * Return %true iff I'm a worker executing a work item on @wq.  If
    1351                 :            :          * I'm @worker, it's safe to dereference it without locking.
    1352                 :            :          */
    1353   [ #  #  #  # ]:          0 :         return worker && worker->current_pwq->wq == wq;
    1354                 :            : }
    1355                 :            : 
    1356                 :            : /*
     1357                 :            :  * When queueing an unbound work item to a wq, prefer the local CPU if allowed
     1358                 :            :  * by wq_unbound_cpumask.  Otherwise, round-robin among the allowed ones to
    1359                 :            :  * avoid perturbing sensitive tasks.
    1360                 :            :  */
    1361                 :        941 : static int wq_select_unbound_cpu(int cpu)
    1362                 :            : {
    1363                 :        941 :         static bool printed_dbg_warning;
    1364                 :        941 :         int new_cpu;
    1365                 :            : 
    1366         [ +  - ]:        941 :         if (likely(!wq_debug_force_rr_cpu)) {
    1367         [ -  + ]:        941 :                 if (cpumask_test_cpu(cpu, wq_unbound_cpumask))
    1368                 :            :                         return cpu;
    1369         [ #  # ]:          0 :         } else if (!printed_dbg_warning) {
    1370                 :          0 :                 pr_warn("workqueue: round-robin CPU selection forced, expect performance impact\n");
    1371                 :          0 :                 printed_dbg_warning = true;
    1372                 :            :         }
    1373                 :            : 
    1374         [ #  # ]:          0 :         if (cpumask_empty(wq_unbound_cpumask))
    1375                 :            :                 return cpu;
    1376                 :            : 
    1377                 :          0 :         new_cpu = __this_cpu_read(wq_rr_cpu_last);
    1378                 :          0 :         new_cpu = cpumask_next_and(new_cpu, wq_unbound_cpumask, cpu_online_mask);
    1379         [ #  # ]:          0 :         if (unlikely(new_cpu >= nr_cpu_ids)) {
    1380                 :          0 :                 new_cpu = cpumask_first_and(wq_unbound_cpumask, cpu_online_mask);
    1381         [ #  # ]:          0 :                 if (unlikely(new_cpu >= nr_cpu_ids))
    1382                 :            :                         return cpu;
    1383                 :            :         }
    1384                 :          0 :         __this_cpu_write(wq_rr_cpu_last, new_cpu);
    1385                 :            : 
    1386                 :          0 :         return new_cpu;
    1387                 :            : }
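
The wrap-around logic above ("next CPU in the intersection, else start over from the first") can be modelled in a few lines of user-space C; plain bitmasks stand in for cpumasks and a file-scope int for the per-CPU wq_rr_cpu_last cursor.

/*
 * User-space model of the round-robin selection above.  Masks, CPU count
 * and helper names are illustrative stand-ins only.
 */
#include <assert.h>

#define MODEL_NR_CPUS 8

/* Next set bit strictly after @prev in (a & b), or MODEL_NR_CPUS if none. */
static int next_and(int prev, unsigned int a, unsigned int b)
{
        for (int cpu = prev + 1; cpu < MODEL_NR_CPUS; cpu++)
                if ((a & b) & (1u << cpu))
                        return cpu;
        return MODEL_NR_CPUS;
}

static int rr_last = -1;        /* models the per-CPU wq_rr_cpu_last */

static int model_select_unbound_cpu(unsigned int allowed, unsigned int online,
                                    int fallback)
{
        int cpu = next_and(rr_last, allowed, online);

        if (cpu >= MODEL_NR_CPUS)
                cpu = next_and(-1, allowed, online);    /* wrap around    */
        if (cpu >= MODEL_NR_CPUS)
                return fallback;                        /* nothing usable */
        rr_last = cpu;
        return cpu;
}

int main(void)
{
        unsigned int allowed = 0x2a;    /* CPUs 1, 3 and 5 */
        unsigned int online  = 0xff;

        assert(model_select_unbound_cpu(allowed, online, 0) == 1);
        assert(model_select_unbound_cpu(allowed, online, 0) == 3);
        assert(model_select_unbound_cpu(allowed, online, 0) == 5);
        assert(model_select_unbound_cpu(allowed, online, 0) == 1); /* wrapped */
        return 0;
}
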
    1388                 :            : 
    1389                 :       9236 : static void __queue_work(int cpu, struct workqueue_struct *wq,
    1390                 :            :                          struct work_struct *work)
    1391                 :            : {
    1392                 :       9236 :         struct pool_workqueue *pwq;
    1393                 :       9236 :         struct worker_pool *last_pool;
    1394                 :       9236 :         struct list_head *worklist;
    1395                 :       9236 :         unsigned int work_flags;
    1396                 :       9236 :         unsigned int req_cpu = cpu;
    1397                 :            : 
    1398                 :            :         /*
    1399                 :            :          * While a work item is PENDING && off queue, a task trying to
    1400                 :            :          * steal the PENDING will busy-loop waiting for it to either get
    1401                 :            :          * queued or lose PENDING.  Grabbing PENDING and queueing should
    1402                 :            :          * happen with IRQ disabled.
    1403                 :            :          */
    1404                 :       9236 :         lockdep_assert_irqs_disabled();
    1405                 :            : 
    1406                 :       9236 :         debug_work_activate(work);
    1407                 :            : 
    1408                 :            :         /* if draining, only works from the same workqueue are allowed */
    1409         [ -  + ]:       9236 :         if (unlikely(wq->flags & __WQ_DRAINING) &&
    1410   [ #  #  #  # ]:          0 :             WARN_ON_ONCE(!is_chained_work(wq)))
    1411                 :            :                 return;
    1412                 :       9236 :         rcu_read_lock();
    1413                 :       9236 : retry:
    1414                 :            :         /* pwq which will be used unless @work is executing elsewhere */
    1415         [ +  + ]:       9236 :         if (wq->flags & WQ_UNBOUND) {
    1416         [ +  - ]:        941 :                 if (req_cpu == WORK_CPU_UNBOUND)
    1417                 :        941 :                         cpu = wq_select_unbound_cpu(raw_smp_processor_id());
    1418         [ -  + ]:        941 :                 pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu));
    1419                 :            :         } else {
    1420         [ +  + ]:       8295 :                 if (req_cpu == WORK_CPU_UNBOUND)
    1421                 :       7870 :                         cpu = raw_smp_processor_id();
    1422                 :       8295 :                 pwq = per_cpu_ptr(wq->cpu_pwqs, cpu);
    1423                 :            :         }
    1424                 :            : 
    1425                 :            :         /*
    1426                 :            :          * If @work was previously on a different pool, it might still be
    1427                 :            :          * running there, in which case the work needs to be queued on that
    1428                 :            :          * pool to guarantee non-reentrancy.
    1429                 :            :          */
    1430                 :       9236 :         last_pool = get_work_pool(work);
    1431   [ +  +  -  + ]:       9236 :         if (last_pool && last_pool != pwq->pool) {
    1432                 :          0 :                 struct worker *worker;
    1433                 :            : 
    1434                 :          0 :                 spin_lock(&last_pool->lock);
    1435                 :            : 
    1436         [ #  # ]:          0 :                 worker = find_worker_executing_work(last_pool, work);
    1437                 :            : 
    1438   [ #  #  #  # ]:          0 :                 if (worker && worker->current_pwq->wq == wq) {
    1439                 :            :                         pwq = worker->current_pwq;
    1440                 :            :                 } else {
    1441                 :            :                         /* meh... not running there, queue here */
    1442                 :          0 :                         spin_unlock(&last_pool->lock);
    1443                 :          0 :                         spin_lock(&pwq->pool->lock);
    1444                 :            :                 }
    1445                 :            :         } else {
    1446                 :       9236 :                 spin_lock(&pwq->pool->lock);
    1447                 :            :         }
    1448                 :            : 
    1449                 :            :         /*
    1450                 :            :          * pwq is determined and locked.  For unbound pools, we could have
    1451                 :            :          * raced with pwq release and it could already be dead.  If its
    1452                 :            :          * refcnt is zero, repeat pwq selection.  Note that pwqs never die
    1453                 :            :          * without another pwq replacing it in the numa_pwq_tbl or while
    1454                 :            :          * work items are executing on it, so the retrying is guaranteed to
    1455                 :            :          * make forward-progress.
    1456                 :            :          */
    1457         [ -  + ]:       9236 :         if (unlikely(!pwq->refcnt)) {
    1458         [ #  # ]:          0 :                 if (wq->flags & WQ_UNBOUND) {
    1459                 :          0 :                         spin_unlock(&pwq->pool->lock);
    1460                 :          0 :                         cpu_relax();
    1461                 :          0 :                         goto retry;
    1462                 :            :                 }
    1463                 :            :                 /* oops */
    1464         [ #  # ]:          0 :                 WARN_ONCE(true, "workqueue: per-cpu pwq for %s on cpu%d has 0 refcnt",
    1465                 :            :                           wq->name, cpu);
    1466                 :            :         }
    1467                 :            : 
    1468                 :            :         /* pwq determined, queue */
    1469                 :       9236 :         trace_workqueue_queue_work(req_cpu, pwq, work);
    1470                 :            : 
    1471   [ -  +  -  + ]:       9236 :         if (WARN_ON(!list_empty(&work->entry)))
    1472                 :          0 :                 goto out;
    1473                 :            : 
    1474                 :       9236 :         pwq->nr_in_flight[pwq->work_color]++;
    1475                 :       9236 :         work_flags = work_color_to_flags(pwq->work_color);
    1476                 :            : 
    1477         [ +  + ]:       9236 :         if (likely(pwq->nr_active < pwq->max_active)) {
    1478                 :       8926 :                 trace_workqueue_activate_work(work);
    1479                 :       8926 :                 pwq->nr_active++;
    1480                 :       8926 :                 worklist = &pwq->pool->worklist;
    1481         [ +  + ]:       8926 :                 if (list_empty(worklist))
    1482                 :       8159 :                         pwq->pool->watchdog_ts = jiffies;
    1483                 :            :         } else {
    1484                 :        310 :                 work_flags |= WORK_STRUCT_DELAYED;
    1485                 :        310 :                 worklist = &pwq->delayed_works;
    1486                 :            :         }
    1487                 :            : 
    1488                 :       9236 :         insert_work(pwq, work, worklist, work_flags);
    1489                 :            : 
    1490                 :       9236 : out:
    1491                 :       9236 :         spin_unlock(&pwq->pool->lock);
    1492                 :       9236 :         rcu_read_unlock();
    1493                 :            : }
    1494                 :            : 
    1495                 :            : /**
    1496                 :            :  * queue_work_on - queue work on specific cpu
    1497                 :            :  * @cpu: CPU number to execute work on
    1498                 :            :  * @wq: workqueue to use
    1499                 :            :  * @work: work to queue
    1500                 :            :  *
     1501                 :            :  * We queue the work to a specific CPU; the caller must ensure it
    1502                 :            :  * can't go away.
    1503                 :            :  *
    1504                 :            :  * Return: %false if @work was already on a queue, %true otherwise.
    1505                 :            :  */
    1506                 :       4163 : bool queue_work_on(int cpu, struct workqueue_struct *wq,
    1507                 :            :                    struct work_struct *work)
    1508                 :            : {
    1509                 :       4163 :         bool ret = false;
    1510                 :       4163 :         unsigned long flags;
    1511                 :            : 
    1512                 :       4163 :         local_irq_save(flags);
    1513                 :            : 
    1514         [ +  + ]:       4163 :         if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
    1515                 :       3900 :                 __queue_work(cpu, wq, work);
    1516                 :       3900 :                 ret = true;
    1517                 :            :         }
    1518                 :            : 
    1519                 :       4163 :         local_irq_restore(flags);
    1520                 :       4163 :         return ret;
    1521                 :            : }
    1522                 :            : EXPORT_SYMBOL(queue_work_on);
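
A minimal usage sketch of queue_work_on() (the work function, the choice of
CPU 1, and the use of system_wq are hypothetical, not taken from this file);
holding cpus_read_lock() is one way to meet the "CPU can't go away"
requirement in the kernel-doc above:

#include <linux/cpu.h>
#include <linux/printk.h>
#include <linux/smp.h>
#include <linux/workqueue.h>

static void example_work_fn(struct work_struct *work)
{
        pr_info("example work ran on cpu %d\n", raw_smp_processor_id());
}

static DECLARE_WORK(example_work, example_work_fn);

static void example_queue_on_cpu1(void)
{
        /* Hold off CPU hotplug so the target CPU cannot go away while queued. */
        cpus_read_lock();
        if (cpu_online(1))
                queue_work_on(1, system_wq, &example_work); /* false if already pending */
        cpus_read_unlock();
}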
    1523                 :            : 
    1524                 :            : /**
    1525                 :            :  * workqueue_select_cpu_near - Select a CPU based on NUMA node
    1526                 :            :  * @node: NUMA node ID that we want to select a CPU from
    1527                 :            :  *
     1528                 :            :  * This function will attempt to find a "random" CPU available on a given
     1529                 :            :  * node. If there are no CPUs available on the given node, it will return
     1530                 :            :  * WORK_CPU_UNBOUND, indicating that we should just schedule to any
     1531                 :            :  * available CPU if we need to schedule this work.
    1532                 :            :  */
    1533                 :         55 : static int workqueue_select_cpu_near(int node)
    1534                 :            : {
    1535                 :         55 :         int cpu;
    1536                 :            : 
    1537                 :            :         /* No point in doing this if NUMA isn't enabled for workqueues */
    1538         [ -  + ]:         55 :         if (!wq_numa_enabled)
    1539                 :            :                 return WORK_CPU_UNBOUND;
    1540                 :            : 
    1541                 :            :         /* Delay binding to CPU if node is not valid or online */
    1542   [ #  #  #  # ]:          0 :         if (node < 0 || node >= MAX_NUMNODES || !node_online(node))
    1543                 :          0 :                 return WORK_CPU_UNBOUND;
    1544                 :            : 
    1545                 :            :         /* Use local node/cpu if we are already there */
    1546                 :          0 :         cpu = raw_smp_processor_id();
    1547         [ #  # ]:          0 :         if (node == cpu_to_node(cpu))
    1548                 :            :                 return cpu;
    1549                 :            : 
    1550                 :            :         /* Use "random" otherwise know as "first" online CPU of node */
    1551                 :          0 :         cpu = cpumask_any_and(cpumask_of_node(node), cpu_online_mask);
    1552                 :            : 
    1553                 :            :         /* If CPU is valid return that, otherwise just defer */
    1554         [ #  # ]:          0 :         return cpu < nr_cpu_ids ? cpu : WORK_CPU_UNBOUND;
    1555                 :            : }
    1556                 :            : 
    1557                 :            : /**
    1558                 :            :  * queue_work_node - queue work on a "random" cpu for a given NUMA node
    1559                 :            :  * @node: NUMA node that we are targeting the work for
    1560                 :            :  * @wq: workqueue to use
    1561                 :            :  * @work: work to queue
    1562                 :            :  *
    1563                 :            :  * We queue the work to a "random" CPU within a given NUMA node. The basic
    1564                 :            :  * idea here is to provide a way to somehow associate work with a given
    1565                 :            :  * NUMA node.
    1566                 :            :  *
    1567                 :            :  * This function will only make a best effort attempt at getting this onto
    1568                 :            :  * the right NUMA node. If no node is requested or the requested node is
    1569                 :            :  * offline then we just fall back to standard queue_work behavior.
    1570                 :            :  *
    1571                 :            :  * Currently the "random" CPU ends up being the first available CPU in the
    1572                 :            :  * intersection of cpu_online_mask and the cpumask of the node, unless we
    1573                 :            :  * are running on the node. In that case we just use the current CPU.
    1574                 :            :  *
    1575                 :            :  * Return: %false if @work was already on a queue, %true otherwise.
    1576                 :            :  */
    1577                 :         55 : bool queue_work_node(int node, struct workqueue_struct *wq,
    1578                 :            :                      struct work_struct *work)
    1579                 :            : {
    1580                 :         55 :         unsigned long flags;
    1581                 :         55 :         bool ret = false;
    1582                 :            : 
    1583                 :            :         /*
    1584                 :            :          * This current implementation is specific to unbound workqueues.
    1585                 :            :          * Specifically we only return the first available CPU for a given
    1586                 :            :          * node instead of cycling through individual CPUs within the node.
    1587                 :            :          *
    1588                 :            :          * If this is used with a per-cpu workqueue then the logic in
    1589                 :            :          * workqueue_select_cpu_near would need to be updated to allow for
    1590                 :            :          * some round robin type logic.
    1591                 :            :          */
    1592         [ -  + ]:         55 :         WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND));
    1593                 :            : 
    1594                 :         55 :         local_irq_save(flags);
    1595                 :            : 
    1596         [ +  - ]:         55 :         if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
    1597                 :         55 :                 int cpu = workqueue_select_cpu_near(node);
    1598                 :            : 
    1599                 :         55 :                 __queue_work(cpu, wq, work);
    1600                 :         55 :                 ret = true;
    1601                 :            :         }
    1602                 :            : 
    1603                 :         55 :         local_irq_restore(flags);
    1604                 :         55 :         return ret;
    1605                 :            : }
    1606                 :            : EXPORT_SYMBOL_GPL(queue_work_node);
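
An illustrative sketch of queue_work_node() (function and variable names are
hypothetical); note that the target must be an unbound workqueue, otherwise
the WARN_ON_ONCE() above fires:

#include <linux/workqueue.h>

static void example_node_work_fn(struct work_struct *work)
{
        /* Runs near the requested NUMA node when a CPU there is online. */
}

static DECLARE_WORK(example_node_work, example_node_work_fn);

static void example_queue_near_node(int node)
{
        /* e.g. node = dev_to_node(dev) for device-adjacent processing */
        queue_work_node(node, system_unbound_wq, &example_node_work);
}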
    1607                 :            : 
    1608                 :        732 : void delayed_work_timer_fn(struct timer_list *t)
    1609                 :            : {
    1610                 :        732 :         struct delayed_work *dwork = from_timer(dwork, t, timer);
    1611                 :            : 
    1612                 :            :         /* should have been called from irqsafe timer with irq already off */
    1613                 :        732 :         __queue_work(dwork->cpu, dwork->wq, &dwork->work);
    1614                 :        732 : }
    1615                 :            : EXPORT_SYMBOL(delayed_work_timer_fn);
    1616                 :            : 
    1617                 :       4985 : static void __queue_delayed_work(int cpu, struct workqueue_struct *wq,
    1618                 :            :                                 struct delayed_work *dwork, unsigned long delay)
    1619                 :            : {
    1620                 :       4985 :         struct timer_list *timer = &dwork->timer;
    1621                 :       4985 :         struct work_struct *work = &dwork->work;
    1622                 :            : 
    1623         [ -  + ]:       4985 :         WARN_ON_ONCE(!wq);
    1624         [ -  + ]:       4985 :         WARN_ON_ONCE(timer->function != delayed_work_timer_fn);
    1625         [ -  + ]:       4985 :         WARN_ON_ONCE(timer_pending(timer));
    1626         [ -  + ]:       4985 :         WARN_ON_ONCE(!list_empty(&work->entry));
    1627                 :            : 
    1628                 :            :         /*
    1629                 :            :          * If @delay is 0, queue @dwork->work immediately.  This is for
    1630                 :            :          * both optimization and correctness.  The earliest @timer can
     1631                 :            :          * expire is on the closest next tick, and delayed_work users depend
     1632                 :            :          * on there being no such delay when @delay is 0.
    1633                 :            :          */
    1634         [ +  + ]:       4985 :         if (!delay) {
    1635                 :       3977 :                 __queue_work(cpu, wq, &dwork->work);
    1636                 :       3977 :                 return;
    1637                 :            :         }
    1638                 :            : 
    1639                 :       1008 :         dwork->wq = wq;
    1640                 :       1008 :         dwork->cpu = cpu;
    1641                 :       1008 :         timer->expires = jiffies + delay;
    1642                 :            : 
    1643         [ +  + ]:       1008 :         if (unlikely(cpu != WORK_CPU_UNBOUND))
    1644                 :         12 :                 add_timer_on(timer, cpu);
    1645                 :            :         else
    1646                 :        996 :                 add_timer(timer);
    1647                 :            : }
    1648                 :            : 
    1649                 :            : /**
    1650                 :            :  * queue_delayed_work_on - queue work on specific CPU after delay
    1651                 :            :  * @cpu: CPU number to execute work on
    1652                 :            :  * @wq: workqueue to use
    1653                 :            :  * @dwork: work to queue
    1654                 :            :  * @delay: number of jiffies to wait before queueing
    1655                 :            :  *
     1656                 :            :  * Return: %false if @dwork was already on a queue, %true otherwise.  If
    1657                 :            :  * @delay is zero and @dwork is idle, it will be scheduled for immediate
    1658                 :            :  * execution.
    1659                 :            :  */
    1660                 :       2041 : bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
    1661                 :            :                            struct delayed_work *dwork, unsigned long delay)
    1662                 :            : {
    1663                 :       2041 :         struct work_struct *work = &dwork->work;
    1664                 :       2041 :         bool ret = false;
    1665                 :       2041 :         unsigned long flags;
    1666                 :            : 
    1667                 :            :         /* read the comment in __queue_work() */
    1668                 :       2041 :         local_irq_save(flags);
    1669                 :            : 
    1670         [ +  + ]:       2041 :         if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
    1671                 :       1975 :                 __queue_delayed_work(cpu, wq, dwork, delay);
    1672                 :       1975 :                 ret = true;
    1673                 :            :         }
    1674                 :            : 
    1675                 :       2041 :         local_irq_restore(flags);
    1676                 :       2041 :         return ret;
    1677                 :            : }
    1678                 :            : EXPORT_SYMBOL(queue_delayed_work_on);
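
A sketch of the usual delayed-work pattern (struct and function names are
hypothetical): INIT_DELAYED_WORK() sets timer->function to
delayed_work_timer_fn, which is exactly what the WARN_ON_ONCE() in
__queue_delayed_work() above verifies:

#include <linux/jiffies.h>
#include <linux/workqueue.h>

struct example_ctx {
        struct delayed_work dwork;
};

static void example_dwork_fn(struct work_struct *work)
{
        struct example_ctx *ctx =
                container_of(to_delayed_work(work), struct example_ctx, dwork);

        /* ... deferred processing using ctx ... */
        (void)ctx;
}

static void example_start_deferred(struct example_ctx *ctx)
{
        INIT_DELAYED_WORK(&ctx->dwork, example_dwork_fn);

        /* delay == 0 queues immediately; otherwise the timer is armed. */
        queue_delayed_work_on(WORK_CPU_UNBOUND, system_wq, &ctx->dwork,
                              msecs_to_jiffies(100));
}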
    1679                 :            : 
    1680                 :            : /**
    1681                 :            :  * mod_delayed_work_on - modify delay of or queue a delayed work on specific CPU
    1682                 :            :  * @cpu: CPU number to execute work on
    1683                 :            :  * @wq: workqueue to use
    1684                 :            :  * @dwork: work to queue
    1685                 :            :  * @delay: number of jiffies to wait before queueing
    1686                 :            :  *
    1687                 :            :  * If @dwork is idle, equivalent to queue_delayed_work_on(); otherwise,
    1688                 :            :  * modify @dwork's timer so that it expires after @delay.  If @delay is
    1689                 :            :  * zero, @work is guaranteed to be scheduled immediately regardless of its
    1690                 :            :  * current state.
    1691                 :            :  *
    1692                 :            :  * Return: %false if @dwork was idle and queued, %true if @dwork was
    1693                 :            :  * pending and its timer was modified.
    1694                 :            :  *
    1695                 :            :  * This function is safe to call from any context including IRQ handler.
    1696                 :            :  * See try_to_grab_pending() for details.
    1697                 :            :  */
    1698                 :       3010 : bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
    1699                 :            :                          struct delayed_work *dwork, unsigned long delay)
    1700                 :            : {
    1701                 :       3010 :         unsigned long flags;
    1702                 :       3010 :         int ret;
    1703                 :            : 
    1704                 :       3010 :         do {
    1705                 :       3010 :                 ret = try_to_grab_pending(&dwork->work, true, &flags);
    1706         [ -  + ]:       3010 :         } while (unlikely(ret == -EAGAIN));
    1707                 :            : 
    1708         [ +  - ]:       3010 :         if (likely(ret >= 0)) {
    1709                 :       3010 :                 __queue_delayed_work(cpu, wq, dwork, delay);
    1710                 :       3010 :                 local_irq_restore(flags);
    1711                 :            :         }
    1712                 :            : 
    1713                 :            :         /* -ENOENT from try_to_grab_pending() becomes %true */
    1714                 :       3010 :         return ret;
    1715                 :            : }
    1716                 :            : EXPORT_SYMBOL_GPL(mod_delayed_work_on);
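
A debounce-style sketch of mod_delayed_work_on() (names hypothetical): each
call pushes the deadline out to "now + 50ms" whether or not the work was
already pending, and, per the kernel-doc above, it is safe from IRQ context:

#include <linux/jiffies.h>
#include <linux/workqueue.h>

static void example_flush_fn(struct work_struct *work)
{
        /* ... flush accumulated state ... */
}

static DECLARE_DELAYED_WORK(example_flush_dwork, example_flush_fn);

static void example_kick_flush(void)
{
        mod_delayed_work_on(WORK_CPU_UNBOUND, system_wq, &example_flush_dwork,
                            msecs_to_jiffies(50));
}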
    1717                 :            : 
    1718                 :        550 : static void rcu_work_rcufn(struct rcu_head *rcu)
    1719                 :            : {
    1720                 :        550 :         struct rcu_work *rwork = container_of(rcu, struct rcu_work, rcu);
    1721                 :            : 
    1722                 :            :         /* read the comment in __queue_work() */
    1723                 :        550 :         local_irq_disable();
    1724                 :        550 :         __queue_work(WORK_CPU_UNBOUND, rwork->wq, &rwork->work);
    1725                 :        550 :         local_irq_enable();
    1726                 :        550 : }
    1727                 :            : 
    1728                 :            : /**
    1729                 :            :  * queue_rcu_work - queue work after a RCU grace period
    1730                 :            :  * @wq: workqueue to use
    1731                 :            :  * @rwork: work to queue
    1732                 :            :  *
    1733                 :            :  * Return: %false if @rwork was already pending, %true otherwise.  Note
    1734                 :            :  * that a full RCU grace period is guaranteed only after a %true return.
    1735                 :            :  * While @rwork is guaranteed to be executed after a %false return, the
    1736                 :            :  * execution may happen before a full RCU grace period has passed.
    1737                 :            :  */
    1738                 :        550 : bool queue_rcu_work(struct workqueue_struct *wq, struct rcu_work *rwork)
    1739                 :            : {
    1740                 :        550 :         struct work_struct *work = &rwork->work;
    1741                 :            : 
    1742         [ +  - ]:        550 :         if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
    1743                 :        550 :                 rwork->wq = wq;
    1744                 :        550 :                 call_rcu(&rwork->rcu, rcu_work_rcufn);
    1745                 :        550 :                 return true;
    1746                 :            :         }
    1747                 :            : 
    1748                 :            :         return false;
    1749                 :            : }
    1750                 :            : EXPORT_SYMBOL(queue_rcu_work);
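
A common use of queue_rcu_work() is freeing an RCU-protected object from
process context after a grace period; a sketch with hypothetical names
(for a %true return, the full grace period is guaranteed as noted above):

#include <linux/slab.h>
#include <linux/workqueue.h>

struct example_obj {
        struct rcu_work rwork;
        /* ... RCU-protected payload ... */
};

static void example_obj_free_fn(struct work_struct *work)
{
        struct example_obj *obj =
                container_of(to_rcu_work(work), struct example_obj, rwork);

        /* Process context: sleeping is allowed here, unlike in call_rcu(). */
        kfree(obj);
}

static void example_obj_schedule_free(struct example_obj *obj)
{
        INIT_RCU_WORK(&obj->rwork, example_obj_free_fn);
        queue_rcu_work(system_wq, &obj->rwork);
}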
    1751                 :            : 
    1752                 :            : /**
    1753                 :            :  * worker_enter_idle - enter idle state
    1754                 :            :  * @worker: worker which is entering idle state
    1755                 :            :  *
    1756                 :            :  * @worker is entering idle state.  Update stats and idle timer if
    1757                 :            :  * necessary.
    1758                 :            :  *
    1759                 :            :  * LOCKING:
    1760                 :            :  * spin_lock_irq(pool->lock).
    1761                 :            :  */
    1762                 :       8159 : static void worker_enter_idle(struct worker *worker)
    1763                 :            : {
    1764                 :       8159 :         struct worker_pool *pool = worker->pool;
    1765                 :            : 
    1766   [ -  +  +  - ]:       8159 :         if (WARN_ON_ONCE(worker->flags & WORKER_IDLE) ||
     1767   [ +  +  +  -  +  -  -  +  +  - ]:      16318 :             WARN_ON_ONCE(!list_empty(&worker->entry) &&
    1768                 :            :                          (worker->hentry.next || worker->hentry.pprev)))
    1769                 :            :                 return;
    1770                 :            : 
    1771                 :            :         /* can't use worker_set_flags(), also called from create_worker() */
    1772                 :       8159 :         worker->flags |= WORKER_IDLE;
    1773                 :       8159 :         pool->nr_idle++;
    1774                 :       8159 :         worker->last_active = jiffies;
    1775                 :            : 
    1776                 :            :         /* idle_list is LIFO */
    1777         [ +  + ]:       8159 :         list_add(&worker->entry, &pool->idle_list);
    1778                 :            : 
     1779   [ +  +  +  +  +  + ]:      16318 :         if (too_many_workers(pool) && !timer_pending(&pool->idle_timer))
    1780                 :         22 :                 mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT);
    1781                 :            : 
    1782                 :            :         /*
    1783                 :            :          * Sanity check nr_running.  Because unbind_workers() releases
    1784                 :            :          * pool->lock between setting %WORKER_UNBOUND and zapping
    1785                 :            :          * nr_running, the warning may trigger spuriously.  Check iff
    1786                 :            :          * unbind is not in progress.
    1787                 :            :          */
     1788   [ +  +  +  +  +  -  -  + ]:      14511 :         WARN_ON_ONCE(!(pool->flags & POOL_DISASSOCIATED) &&
    1789                 :            :                      pool->nr_workers == pool->nr_idle &&
    1790                 :            :                      atomic_read(&pool->nr_running));
    1791                 :            : }
    1792                 :            : 
    1793                 :            : /**
    1794                 :            :  * worker_leave_idle - leave idle state
    1795                 :            :  * @worker: worker which is leaving idle state
    1796                 :            :  *
    1797                 :            :  * @worker is leaving idle state.  Update stats.
    1798                 :            :  *
    1799                 :            :  * LOCKING:
    1800                 :            :  * spin_lock_irq(pool->lock).
    1801                 :            :  */
    1802                 :       8027 : static void worker_leave_idle(struct worker *worker)
    1803                 :            : {
    1804                 :       8027 :         struct worker_pool *pool = worker->pool;
    1805                 :            : 
    1806   [ -  +  +  - ]:       8027 :         if (WARN_ON_ONCE(!(worker->flags & WORKER_IDLE)))
    1807                 :            :                 return;
    1808                 :       8027 :         worker_clr_flags(worker, WORKER_IDLE);
    1809                 :       8027 :         pool->nr_idle--;
    1810                 :       8027 :         list_del_init(&worker->entry);
    1811                 :            : }
    1812                 :            : 
    1813                 :        363 : static struct worker *alloc_worker(int node)
    1814                 :            : {
    1815                 :        363 :         struct worker *worker;
    1816                 :            : 
    1817                 :        363 :         worker = kzalloc_node(sizeof(*worker), GFP_KERNEL, node);
    1818         [ +  - ]:        363 :         if (worker) {
    1819                 :        363 :                 INIT_LIST_HEAD(&worker->entry);
    1820                 :        363 :                 INIT_LIST_HEAD(&worker->scheduled);
    1821                 :        363 :                 INIT_LIST_HEAD(&worker->node);
    1822                 :            :                 /* on creation a worker is in !idle && prep state */
    1823                 :        363 :                 worker->flags = WORKER_PREP;
    1824                 :            :         }
    1825                 :        363 :         return worker;
    1826                 :            : }
    1827                 :            : 
    1828                 :            : /**
    1829                 :            :  * worker_attach_to_pool() - attach a worker to a pool
    1830                 :            :  * @worker: worker to be attached
    1831                 :            :  * @pool: the target pool
    1832                 :            :  *
    1833                 :            :  * Attach @worker to @pool.  Once attached, the %WORKER_UNBOUND flag and
    1834                 :            :  * cpu-binding of @worker are kept coordinated with the pool across
    1835                 :            :  * cpu-[un]hotplugs.
    1836                 :            :  */
    1837                 :        132 : static void worker_attach_to_pool(struct worker *worker,
    1838                 :            :                                    struct worker_pool *pool)
    1839                 :            : {
    1840                 :        132 :         mutex_lock(&wq_pool_attach_mutex);
    1841                 :            : 
    1842                 :            :         /*
    1843                 :            :          * set_cpus_allowed_ptr() will fail if the cpumask doesn't have any
    1844                 :            :          * online CPUs.  It'll be re-applied when any of the CPUs come up.
    1845                 :            :          */
    1846                 :        132 :         set_cpus_allowed_ptr(worker->task, pool->attrs->cpumask);
    1847                 :            : 
    1848                 :            :         /*
    1849                 :            :          * The wq_pool_attach_mutex ensures %POOL_DISASSOCIATED remains
    1850                 :            :          * stable across this function.  See the comments above the flag
    1851                 :            :          * definition for details.
    1852                 :            :          */
    1853         [ +  + ]:        132 :         if (pool->flags & POOL_DISASSOCIATED)
    1854                 :         66 :                 worker->flags |= WORKER_UNBOUND;
    1855                 :            : 
    1856                 :        132 :         list_add_tail(&worker->node, &pool->workers);
    1857                 :        132 :         worker->pool = pool;
    1858                 :            : 
    1859                 :        132 :         mutex_unlock(&wq_pool_attach_mutex);
    1860                 :        132 : }
    1861                 :            : 
    1862                 :            : /**
    1863                 :            :  * worker_detach_from_pool() - detach a worker from its pool
    1864                 :            :  * @worker: worker which is attached to its pool
    1865                 :            :  *
    1866                 :            :  * Undo the attaching which had been done in worker_attach_to_pool().  The
    1867                 :            :  * caller worker shouldn't access to the pool after detached except it has
    1868                 :            :  * other reference to the pool.
    1869                 :            :  */
    1870                 :          0 : static void worker_detach_from_pool(struct worker *worker)
    1871                 :            : {
    1872                 :          0 :         struct worker_pool *pool = worker->pool;
    1873                 :          0 :         struct completion *detach_completion = NULL;
    1874                 :            : 
    1875                 :          0 :         mutex_lock(&wq_pool_attach_mutex);
    1876                 :            : 
    1877         [ #  # ]:          0 :         list_del(&worker->node);
    1878                 :          0 :         worker->pool = NULL;
    1879                 :            : 
    1880         [ #  # ]:          0 :         if (list_empty(&pool->workers))
    1881                 :          0 :                 detach_completion = pool->detach_completion;
    1882                 :          0 :         mutex_unlock(&wq_pool_attach_mutex);
    1883                 :            : 
    1884                 :            :         /* clear leftover flags without pool->lock after it is detached */
    1885                 :          0 :         worker->flags &= ~(WORKER_UNBOUND | WORKER_REBOUND);
    1886                 :            : 
    1887         [ #  # ]:          0 :         if (detach_completion)
    1888                 :          0 :                 complete(detach_completion);
    1889                 :          0 : }
    1890                 :            : 
    1891                 :            : /**
    1892                 :            :  * create_worker - create a new workqueue worker
    1893                 :            :  * @pool: pool the new worker will belong to
    1894                 :            :  *
    1895                 :            :  * Create and start a new worker which is attached to @pool.
    1896                 :            :  *
    1897                 :            :  * CONTEXT:
    1898                 :            :  * Might sleep.  Does GFP_KERNEL allocations.
    1899                 :            :  *
    1900                 :            :  * Return:
    1901                 :            :  * Pointer to the newly created worker.
    1902                 :            :  */
    1903                 :        132 : static struct worker *create_worker(struct worker_pool *pool)
    1904                 :            : {
    1905                 :        132 :         struct worker *worker = NULL;
    1906                 :        132 :         int id = -1;
    1907                 :        132 :         char id_buf[16];
    1908                 :            : 
    1909                 :            :         /* ID is needed to determine kthread name */
    1910                 :        132 :         id = ida_simple_get(&pool->worker_ida, 0, 0, GFP_KERNEL);
    1911         [ -  + ]:        132 :         if (id < 0)
    1912                 :          0 :                 goto fail;
    1913                 :            : 
    1914                 :        132 :         worker = alloc_worker(pool->node);
    1915         [ -  + ]:        132 :         if (!worker)
    1916                 :          0 :                 goto fail;
    1917                 :            : 
    1918                 :        132 :         worker->id = id;
    1919                 :            : 
    1920         [ +  + ]:        132 :         if (pool->cpu >= 0)
    1921                 :         66 :                 snprintf(id_buf, sizeof(id_buf), "%d:%d%s", pool->cpu, id,
    1922         [ +  + ]:         66 :                          pool->attrs->nice < 0  ? "H" : "");
    1923                 :            :         else
    1924                 :         66 :                 snprintf(id_buf, sizeof(id_buf), "u%d:%d", pool->id, id);
    1925                 :            : 
    1926                 :        132 :         worker->task = kthread_create_on_node(worker_thread, worker, pool->node,
    1927                 :            :                                               "kworker/%s", id_buf);
    1928         [ -  + ]:        132 :         if (IS_ERR(worker->task))
    1929                 :          0 :                 goto fail;
    1930                 :            : 
    1931                 :        132 :         set_user_nice(worker->task, pool->attrs->nice);
    1932                 :        132 :         kthread_bind_mask(worker->task, pool->attrs->cpumask);
    1933                 :            : 
    1934                 :            :         /* successful, attach the worker to the pool */
    1935                 :        132 :         worker_attach_to_pool(worker, pool);
    1936                 :            : 
    1937                 :            :         /* start the newly created worker */
    1938                 :        132 :         spin_lock_irq(&pool->lock);
    1939                 :        132 :         worker->pool->nr_workers++;
    1940                 :        132 :         worker_enter_idle(worker);
    1941                 :        132 :         wake_up_process(worker->task);
    1942                 :        132 :         spin_unlock_irq(&pool->lock);
    1943                 :            : 
    1944                 :        132 :         return worker;
    1945                 :            : 
    1946                 :          0 : fail:
    1947         [ #  # ]:          0 :         if (id >= 0)
    1948                 :          0 :                 ida_simple_remove(&pool->worker_ida, id);
    1949                 :          0 :         kfree(worker);
    1950                 :          0 :         return NULL;
    1951                 :            : }
    1952                 :            : 
    1953                 :            : /**
    1954                 :            :  * destroy_worker - destroy a workqueue worker
    1955                 :            :  * @worker: worker to be destroyed
    1956                 :            :  *
    1957                 :            :  * Destroy @worker and adjust @pool stats accordingly.  The worker should
    1958                 :            :  * be idle.
    1959                 :            :  *
    1960                 :            :  * CONTEXT:
    1961                 :            :  * spin_lock_irq(pool->lock).
    1962                 :            :  */
    1963                 :          0 : static void destroy_worker(struct worker *worker)
    1964                 :            : {
    1965                 :          0 :         struct worker_pool *pool = worker->pool;
    1966                 :            : 
    1967                 :          0 :         lockdep_assert_held(&pool->lock);
    1968                 :            : 
    1969                 :            :         /* sanity check frenzy */
    1970   [ #  #  #  # ]:          0 :         if (WARN_ON(worker->current_work) ||
    1971   [ #  #  #  # ]:          0 :             WARN_ON(!list_empty(&worker->scheduled)) ||
    1972   [ #  #  #  # ]:          0 :             WARN_ON(!(worker->flags & WORKER_IDLE)))
    1973                 :            :                 return;
    1974                 :            : 
    1975                 :          0 :         pool->nr_workers--;
    1976                 :          0 :         pool->nr_idle--;
    1977                 :            : 
    1978                 :          0 :         list_del_init(&worker->entry);
    1979                 :          0 :         worker->flags |= WORKER_DIE;
    1980                 :          0 :         wake_up_process(worker->task);
    1981                 :            : }
    1982                 :            : 
    1983                 :          0 : static void idle_worker_timeout(struct timer_list *t)
    1984                 :            : {
    1985                 :          0 :         struct worker_pool *pool = from_timer(pool, t, idle_timer);
    1986                 :            : 
    1987                 :          0 :         spin_lock_irq(&pool->lock);
    1988                 :            : 
    1989   [ #  #  #  # ]:          0 :         while (too_many_workers(pool)) {
    1990                 :          0 :                 struct worker *worker;
    1991                 :          0 :                 unsigned long expires;
    1992                 :            : 
    1993                 :            :                 /* idle_list is kept in LIFO order, check the last one */
    1994                 :          0 :                 worker = list_entry(pool->idle_list.prev, struct worker, entry);
    1995                 :          0 :                 expires = worker->last_active + IDLE_WORKER_TIMEOUT;
    1996                 :            : 
    1997         [ #  # ]:          0 :                 if (time_before(jiffies, expires)) {
    1998                 :          0 :                         mod_timer(&pool->idle_timer, expires);
    1999                 :          0 :                         break;
    2000                 :            :                 }
    2001                 :            : 
    2002                 :          0 :                 destroy_worker(worker);
    2003                 :            :         }
    2004                 :            : 
    2005                 :          0 :         spin_unlock_irq(&pool->lock);
    2006                 :          0 : }
    2007                 :            : 
    2008                 :          0 : static void send_mayday(struct work_struct *work)
    2009                 :            : {
    2010                 :          0 :         struct pool_workqueue *pwq = get_work_pwq(work);
    2011                 :          0 :         struct workqueue_struct *wq = pwq->wq;
    2012                 :            : 
    2013                 :          0 :         lockdep_assert_held(&wq_mayday_lock);
    2014                 :            : 
    2015         [ #  # ]:          0 :         if (!wq->rescuer)
    2016                 :            :                 return;
    2017                 :            : 
    2018                 :            :         /* mayday mayday mayday */
    2019         [ #  # ]:          0 :         if (list_empty(&pwq->mayday_node)) {
    2020                 :            :                 /*
    2021                 :            :                  * If @pwq is for an unbound wq, its base ref may be put at
    2022                 :            :                  * any time due to an attribute change.  Pin @pwq until the
    2023                 :            :                  * rescuer is done with it.
    2024                 :            :                  */
    2025         [ #  # ]:          0 :                 get_pwq(pwq);
    2026                 :          0 :                 list_add_tail(&pwq->mayday_node, &wq->maydays);
    2027                 :          0 :                 wake_up_process(wq->rescuer->task);
    2028                 :            :         }
    2029                 :            : }
    2030                 :            : 
    2031                 :          0 : static void pool_mayday_timeout(struct timer_list *t)
    2032                 :            : {
    2033                 :          0 :         struct worker_pool *pool = from_timer(pool, t, mayday_timer);
    2034                 :          0 :         struct work_struct *work;
    2035                 :            : 
    2036                 :          0 :         spin_lock_irq(&pool->lock);
    2037                 :          0 :         spin_lock(&wq_mayday_lock);         /* for wq->maydays */
    2038                 :            : 
    2039         [ #  # ]:          0 :         if (need_to_create_worker(pool)) {
    2040                 :            :                 /*
    2041                 :            :                  * We've been trying to create a new worker but
    2042                 :            :                  * haven't been successful.  We might be hitting an
    2043                 :            :                  * allocation deadlock.  Send distress signals to
    2044                 :            :                  * rescuers.
    2045                 :            :                  */
    2046         [ #  # ]:          0 :                 list_for_each_entry(work, &pool->worklist, entry)
    2047                 :          0 :                         send_mayday(work);
    2048                 :            :         }
    2049                 :            : 
    2050                 :          0 :         spin_unlock(&wq_mayday_lock);
    2051                 :          0 :         spin_unlock_irq(&pool->lock);
    2052                 :            : 
    2053                 :          0 :         mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INTERVAL);
    2054                 :          0 : }
    2055                 :            : 
    2056                 :            : /**
    2057                 :            :  * maybe_create_worker - create a new worker if necessary
    2058                 :            :  * @pool: pool to create a new worker for
    2059                 :            :  *
    2060                 :            :  * Create a new worker for @pool if necessary.  @pool is guaranteed to
    2061                 :            :  * have at least one idle worker on return from this function.  If
    2062                 :            :  * creating a new worker takes longer than MAYDAY_INTERVAL, mayday is
    2063                 :            :  * sent to all rescuers with works scheduled on @pool to resolve
    2064                 :            :  * possible allocation deadlock.
    2065                 :            :  *
    2066                 :            :  * On return, need_to_create_worker() is guaranteed to be %false and
    2067                 :            :  * may_start_working() %true.
    2068                 :            :  *
    2069                 :            :  * LOCKING:
    2070                 :            :  * spin_lock_irq(pool->lock) which may be released and regrabbed
    2071                 :            :  * multiple times.  Does GFP_KERNEL allocations.  Called only from
    2072                 :            :  * manager.
    2073                 :            :  */
    2074                 :         88 : static void maybe_create_worker(struct worker_pool *pool)
    2075                 :            : __releases(&pool->lock)
    2076                 :            : __acquires(&pool->lock)
    2077                 :            : {
    2078                 :         88 : restart:
    2079                 :         88 :         spin_unlock_irq(&pool->lock);
    2080                 :            : 
    2081                 :            :         /* if we don't make progress in MAYDAY_INITIAL_TIMEOUT, call for help */
    2082                 :         88 :         mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT);
    2083                 :            : 
    2084                 :         88 :         while (true) {
    2085   [ -  +  -  - ]:         88 :                 if (create_worker(pool) || !need_to_create_worker(pool))
    2086                 :            :                         break;
    2087                 :            : 
    2088                 :          0 :                 schedule_timeout_interruptible(CREATE_COOLDOWN);
    2089                 :            : 
    2090         [ #  # ]:          0 :                 if (!need_to_create_worker(pool))
    2091                 :            :                         break;
    2092                 :            :         }
    2093                 :            : 
    2094                 :         88 :         del_timer_sync(&pool->mayday_timer);
    2095                 :         88 :         spin_lock_irq(&pool->lock);
    2096                 :            :         /*
    2097                 :            :          * This is necessary even after a new worker was just successfully
    2098                 :            :          * created as @pool->lock was dropped and the new worker might have
    2099                 :            :          * already become busy.
    2100                 :            :          */
    2101         [ -  + ]:         88 :         if (need_to_create_worker(pool))
    2102                 :          0 :                 goto restart;
    2103                 :         88 : }
    2104                 :            : 
    2105                 :            : /**
    2106                 :            :  * manage_workers - manage worker pool
    2107                 :            :  * @worker: self
    2108                 :            :  *
    2109                 :            :  * Assume the manager role and manage the worker pool @worker belongs
    2110                 :            :  * to.  At any given time, there can be only zero or one manager per
    2111                 :            :  * pool.  The exclusion is handled automatically by this function.
    2112                 :            :  *
    2113                 :            :  * The caller can safely start processing works on false return.  On
    2114                 :            :  * true return, it's guaranteed that need_to_create_worker() is false
    2115                 :            :  * and may_start_working() is true.
    2116                 :            :  *
    2117                 :            :  * CONTEXT:
    2118                 :            :  * spin_lock_irq(pool->lock) which may be released and regrabbed
    2119                 :            :  * multiple times.  Does GFP_KERNEL allocations.
    2120                 :            :  *
    2121                 :            :  * Return:
    2122                 :            :  * %false if the pool doesn't need management and the caller can safely
    2123                 :            :  * start processing works, %true if management function was performed and
    2124                 :            :  * the conditions that the caller verified before calling the function may
    2125                 :            :  * no longer be true.
    2126                 :            :  */
    2127                 :         88 : static bool manage_workers(struct worker *worker)
    2128                 :            : {
    2129                 :         88 :         struct worker_pool *pool = worker->pool;
    2130                 :            : 
    2131         [ +  - ]:         88 :         if (pool->flags & POOL_MANAGER_ACTIVE)
    2132                 :            :                 return false;
    2133                 :            : 
    2134                 :         88 :         pool->flags |= POOL_MANAGER_ACTIVE;
    2135                 :         88 :         pool->manager = worker;
    2136                 :            : 
    2137                 :         88 :         maybe_create_worker(pool);
    2138                 :            : 
    2139                 :         88 :         pool->manager = NULL;
    2140                 :         88 :         pool->flags &= ~POOL_MANAGER_ACTIVE;
    2141                 :         88 :         wake_up(&wq_manager_wait);
    2142                 :         88 :         return true;
    2143                 :            : }
    2144                 :            : 
    2145                 :            : /**
    2146                 :            :  * process_one_work - process single work
    2147                 :            :  * @worker: self
    2148                 :            :  * @work: work to process
    2149                 :            :  *
     2150                 :            :  * Process @work.  This function contains all the logic necessary to
     2151                 :            :  * process a single work item, including synchronization against and
     2152                 :            :  * interaction with other workers on the same cpu, queueing and
     2153                 :            :  * flushing.  As long as the context requirement is met, any worker can
    2154                 :            :  * call this function to process a work.
    2155                 :            :  *
    2156                 :            :  * CONTEXT:
    2157                 :            :  * spin_lock_irq(pool->lock) which is released and regrabbed.
    2158                 :            :  */
    2159                 :       9215 : static void process_one_work(struct worker *worker, struct work_struct *work)
    2160                 :            : __releases(&pool->lock)
    2161                 :            : __acquires(&pool->lock)
    2162                 :            : {
    2163                 :       9215 :         struct pool_workqueue *pwq = get_work_pwq(work);
    2164                 :       9215 :         struct worker_pool *pool = worker->pool;
    2165                 :       9215 :         bool cpu_intensive = pwq->wq->flags & WQ_CPU_INTENSIVE;
    2166                 :       9215 :         int work_color;
    2167                 :       9215 :         struct worker *collision;
    2168                 :            : #ifdef CONFIG_LOCKDEP
    2169                 :            :         /*
    2170                 :            :          * It is permissible to free the struct work_struct from
     2171                 :            :          * inside the function that is called from it; we need to take
     2172                 :            :          * this into account for lockdep too.  To avoid bogus "held
    2173                 :            :          * lock freed" warnings as well as problems when looking into
    2174                 :            :          * work->lockdep_map, make a copy and use that here.
    2175                 :            :          */
    2176                 :            :         struct lockdep_map lockdep_map;
    2177                 :            : 
    2178                 :            :         lockdep_copy_map(&lockdep_map, &work->lockdep_map);
    2179                 :            : #endif
    2180                 :            :         /* ensure we're on the correct CPU */
     2181   [ +  +  +  -  -  + ]:      18430 :         WARN_ON_ONCE(!(pool->flags & POOL_DISASSOCIATED) &&
    2182                 :            :                      raw_smp_processor_id() != pool->cpu);
    2183                 :            : 
    2184                 :            :         /*
    2185                 :            :          * A single work shouldn't be executed concurrently by
    2186                 :            :          * multiple workers on a single cpu.  Check whether anyone is
    2187                 :            :          * already processing the work.  If so, defer the work to the
    2188                 :            :          * currently executing one.
    2189                 :            :          */
    2190         [ +  + ]:       9215 :         collision = find_worker_executing_work(pool, work);
    2191         [ -  + ]:       9215 :         if (unlikely(collision)) {
    2192                 :          0 :                 move_linked_works(work, &collision->scheduled, NULL);
    2193                 :          0 :                 return;
    2194                 :            :         }
    2195                 :            : 
    2196                 :            :         /* claim and dequeue */
    2197                 :       9215 :         debug_work_deactivate(work);
    2198         [ +  + ]:       9215 :         hash_add(pool->busy_hash, &worker->hentry, (unsigned long)work);
    2199                 :       9215 :         worker->current_work = work;
    2200                 :       9215 :         worker->current_func = work->func;
    2201                 :       9215 :         worker->current_pwq = pwq;
    2202                 :       9215 :         work_color = get_work_color(work);
    2203                 :            : 
    2204                 :            :         /*
    2205                 :            :          * Record wq name for cmdline and debug reporting, may get
    2206                 :            :          * overridden through set_worker_desc().
    2207                 :            :          */
    2208                 :       9215 :         strscpy(worker->desc, pwq->wq->name, WORKER_DESC_LEN);
    2209                 :            : 
    2210         [ -  + ]:       9215 :         list_del_init(&work->entry);
    2211                 :            : 
    2212                 :            :         /*
    2213                 :            :          * CPU intensive works don't participate in concurrency management.
    2214                 :            :          * They're the scheduler's responsibility.  This takes @worker out
    2215                 :            :          * of concurrency management and the next code block will chain
    2216                 :            :          * execution of the pending work items.
    2217                 :            :          */
    2218         [ -  + ]:       9215 :         if (unlikely(cpu_intensive))
    2219                 :          0 :                 worker_set_flags(worker, WORKER_CPU_INTENSIVE);
    2220                 :            : 
    2221                 :            :         /*
    2222                 :            :          * Wake up another worker if necessary.  The condition is always
    2223                 :            :          * false for normal per-cpu workers since nr_running would always
    2224                 :            :          * be >= 1 at this point.  This is used to chain execution of the
    2225                 :            :          * pending work items for WORKER_NOT_RUNNING workers such as the
    2226                 :            :          * UNBOUND and CPU_INTENSIVE ones.
    2227                 :            :          */
    2228   [ +  +  +  + ]:      10106 :         if (need_more_worker(pool))
    2229         [ +  - ]:         25 :                 wake_up_worker(pool);
    2230                 :            : 
    2231                 :            :         /*
    2232                 :            :          * Record the last pool and clear PENDING which should be the last
    2233                 :            :          * update to @work.  Also, do this inside @pool->lock so that
    2234                 :            :          * PENDING and queued state changes happen together while IRQ is
    2235                 :            :          * disabled.
    2236                 :            :          */
    2237                 :       9215 :         set_work_pool_and_clear_pending(work, pool->id);
    2238                 :            : 
    2239                 :       9215 :         spin_unlock_irq(&pool->lock);
    2240                 :            : 
    2241                 :       9215 :         lock_map_acquire(&pwq->wq->lockdep_map);
    2242                 :       9215 :         lock_map_acquire(&lockdep_map);
    2243                 :            :         /*
    2244                 :            :          * Strictly speaking we should mark the invariant state without holding
    2245                 :            :          * any locks, that is, before these two lock_map_acquire()'s.
    2246                 :            :          *
    2247                 :            :          * However, that would result in:
    2248                 :            :          *
    2249                 :            :          *   A(W1)
    2250                 :            :          *   WFC(C)
    2251                 :            :          *              A(W1)
    2252                 :            :          *              C(C)
    2253                 :            :          *
    2254                 :            :          * Which would create W1->C->W1 dependencies, even though there is no
    2255                 :            :          * actual deadlock possible. There are two solutions, using a
    2256                 :            :          * read-recursive acquire on the work(queue) 'locks', but this will then
    2257                 :            :          * hit the lockdep limitation on recursive locks, or simply discard
    2258                 :            :          * these locks.
    2259                 :            :          *
    2260                 :            :          * AFAICT there is no possible deadlock scenario between the
    2261                 :            :          * flush_work() and complete() primitives (except for single-threaded
    2262                 :            :          * workqueues), so hiding them isn't a problem.
    2263                 :            :          */
    2264                 :       9215 :         lockdep_invariant_state(true);
    2265                 :       9215 :         trace_workqueue_execute_start(work);
    2266                 :       9215 :         worker->current_func(work);
    2267                 :            :         /*
    2268                 :            :          * While we must be careful to not use "work" after this, the trace
    2269                 :            :          * point will only record its address.
    2270                 :            :          */
    2271                 :       9215 :         trace_workqueue_execute_end(work, worker->current_func);
    2272                 :       9215 :         lock_map_release(&lockdep_map);
    2273                 :       9215 :         lock_map_release(&pwq->wq->lockdep_map);
    2274                 :            : 
    2275         [ -  + ]:       9215 :         if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
    2276                 :          0 :                 pr_err("BUG: workqueue leaked lock or atomic: %s/0x%08x/%d\n"
    2277                 :            :                        "     last function: %ps\n",
    2278                 :            :                        current->comm, preempt_count(), task_pid_nr(current),
    2279                 :            :                        worker->current_func);
    2280                 :          0 :                 debug_show_held_locks(current);
    2281                 :          0 :                 dump_stack();
    2282                 :            :         }
    2283                 :            : 
    2284                 :            :         /*
    2285                 :            :          * The following prevents a kworker from hogging CPU on !PREEMPTION
    2286                 :            :          * kernels, where a requeueing work item waiting for something to
    2287                 :            :          * happen could deadlock with stop_machine as such work item could
    2288                 :            :          * indefinitely requeue itself while all other CPUs are trapped in
    2289                 :            :          * stop_machine. At the same time, report a quiescent RCU state so
    2290                 :            :          * the same condition doesn't freeze RCU.
    2291                 :            :          */
    2292                 :       9215 :         cond_resched();
    2293                 :            : 
    2294                 :       9215 :         spin_lock_irq(&pool->lock);
    2295                 :            : 
    2296                 :            :         /* clear cpu intensive status */
    2297         [ -  + ]:       9215 :         if (unlikely(cpu_intensive))
    2298                 :          0 :                 worker_clr_flags(worker, WORKER_CPU_INTENSIVE);
    2299                 :            : 
    2300                 :            :         /* tag the worker for identification in schedule() */
    2301                 :       9215 :         worker->last_func = worker->current_func;
    2302                 :            : 
    2303                 :            :         /* we're done with it, release */
    2304         [ +  - ]:       9215 :         hash_del(&worker->hentry);
    2305                 :       9215 :         worker->current_work = NULL;
    2306                 :       9215 :         worker->current_func = NULL;
    2307                 :       9215 :         worker->current_pwq = NULL;
    2308                 :       9215 :         pwq_dec_nr_in_flight(pwq, work_color);
    2309                 :            : }
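/*
 * Illustrative sketch (not part of workqueue.c): the cond_resched() above
 * exists because a work item may keep requeueing itself, as in the
 * hypothetical poller below, and could otherwise hog its kworker on
 * !PREEMPTION kernels.  Assumes a module context; all names are made up.
 */
#include <linux/workqueue.h>

static void poll_fn(struct work_struct *work);
static DECLARE_WORK(poll_work, poll_fn);

static void poll_fn(struct work_struct *work)
{
        /* ... check some device or state ... */

        /* requeue ourselves; process_one_work() yields between executions */
        schedule_work(&poll_work);
}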
    2310                 :            : 
    2311                 :            : /**
    2312                 :            :  * process_scheduled_works - process scheduled works
    2313                 :            :  * @worker: self
    2314                 :            :  *
    2315                 :            :  * Process all scheduled works.  Please note that the scheduled list
    2316                 :            :  * may change while processing a work, so this function repeatedly
    2317                 :            :  * fetches a work from the top and executes it.
    2318                 :            :  *
    2319                 :            :  * CONTEXT:
    2320                 :            :  * spin_lock_irq(pool->lock) which may be released and regrabbed
    2321                 :            :  * multiple times.
    2322                 :            :  */
    2323                 :            : static void process_scheduled_works(struct worker *worker)
    2324                 :            : {
    2325   [ -  -  +  +  :        101 :         while (!list_empty(&worker->scheduled)) {
                   +  + ]
    2326                 :         63 :                 struct work_struct *work = list_first_entry(&worker->scheduled,
    2327                 :            :                                                 struct work_struct, entry);
    2328                 :         63 :                 process_one_work(worker, work);
    2329                 :            :         }
    2330                 :            : }
    2331                 :            : 
    2332                 :        374 : static void set_pf_worker(bool val)
    2333                 :            : {
    2334                 :        374 :         mutex_lock(&wq_pool_attach_mutex);
    2335         [ +  + ]:        374 :         if (val)
    2336                 :        363 :                 current->flags |= PF_WQ_WORKER;
    2337                 :            :         else
    2338                 :         11 :                 current->flags &= ~PF_WQ_WORKER;
    2339                 :        374 :         mutex_unlock(&wq_pool_attach_mutex);
    2340                 :        374 : }
    2341                 :            : 
    2342                 :            : /**
    2343                 :            :  * worker_thread - the worker thread function
    2344                 :            :  * @__worker: self
    2345                 :            :  *
    2346                 :            :  * The worker thread function.  All workers belong to a worker_pool -
    2347                 :            :  * either a per-cpu one or dynamic unbound one.  These workers process all
    2348                 :            :  * work items regardless of their specific target workqueue.  The only
    2349                 :            :  * exception is work items which belong to workqueues with a rescuer which
    2350                 :            :  * will be explained in rescuer_thread().
    2351                 :            :  *
    2352                 :            :  * Return: 0
    2353                 :            :  */
    2354                 :        132 : static int worker_thread(void *__worker)
    2355                 :            : {
    2356                 :        132 :         struct worker *worker = __worker;
    2357                 :        132 :         struct worker_pool *pool = worker->pool;
    2358                 :            : 
    2359                 :            :         /* tell the scheduler that this is a workqueue worker */
    2360                 :        132 :         set_pf_worker(true);
    2361                 :       8027 : woke_up:
    2362                 :       8027 :         spin_lock_irq(&pool->lock);
    2363                 :            : 
    2364                 :            :         /* am I supposed to die? */
    2365         [ -  + ]:       8027 :         if (unlikely(worker->flags & WORKER_DIE)) {
    2366                 :          0 :                 spin_unlock_irq(&pool->lock);
    2367         [ #  # ]:          0 :                 WARN_ON_ONCE(!list_empty(&worker->entry));
    2368                 :          0 :                 set_pf_worker(false);
    2369                 :            : 
    2370                 :          0 :                 set_task_comm(worker->task, "kworker/dying");
    2371                 :          0 :                 ida_simple_remove(&pool->worker_ida, worker->id);
    2372                 :          0 :                 worker_detach_from_pool(worker);
    2373                 :          0 :                 kfree(worker);
    2374                 :          0 :                 return 0;
    2375                 :            :         }
    2376                 :            : 
    2377                 :       8027 :         worker_leave_idle(worker);
    2378                 :       8115 : recheck:
    2379                 :            :         /* no more worker necessary? */
    2380   [ +  +  +  + ]:      15702 :         if (!need_more_worker(pool))
    2381                 :        528 :                 goto sleep;
    2382                 :            : 
    2383                 :            :         /* do we need to manage? */
    2384   [ +  +  +  - ]:       7587 :         if (unlikely(!may_start_working(pool)) && manage_workers(worker))
    2385                 :         88 :                 goto recheck;
    2386                 :            : 
    2387                 :            :         /*
    2388                 :            :          * ->scheduled list can only be filled while a worker is
    2389                 :            :          * preparing to process a work or actually processing it.
    2390                 :            :          * Make sure nobody diddled with it while I was sleeping.
    2391                 :            :          */
    2392         [ -  + ]:       7499 :         WARN_ON_ONCE(!list_empty(&worker->scheduled));
    2393                 :            : 
    2394                 :            :         /*
    2395                 :            :          * Finish PREP stage.  We're guaranteed to have at least one idle
    2396                 :            :          * worker or that someone else has already assumed the manager
    2397                 :            :          * role.  This is where @worker starts participating in concurrency
    2398                 :            :          * management if applicable and concurrency management is restored
    2399                 :            :          * after being rebound.  See rebind_workers() for details.
    2400                 :            :          */
    2401                 :       7499 :         worker_clr_flags(worker, WORKER_PREP | WORKER_REBOUND);
    2402                 :            : 
    2403                 :       9177 :         do {
    2404                 :       9177 :                 struct work_struct *work =
    2405                 :       9177 :                         list_first_entry(&pool->worklist,
    2406                 :            :                                          struct work_struct, entry);
    2407                 :            : 
    2408                 :       9177 :                 pool->watchdog_ts = jiffies;
    2409                 :            : 
    2410         [ +  + ]:       9177 :                 if (likely(!(*work_data_bits(work) & WORK_STRUCT_LINKED))) {
    2411                 :            :                         /* optimization path, not strictly necessary */
    2412                 :       9152 :                         process_one_work(worker, work);
    2413         [ +  + ]:       9152 :                         if (unlikely(!list_empty(&worker->scheduled)))
    2414                 :            :                                 process_scheduled_works(worker);
    2415                 :            :                 } else {
    2416                 :         25 :                         move_linked_works(work, &worker->scheduled, NULL);
    2417                 :            :                         process_scheduled_works(worker);
    2418                 :            :                 }
    2419   [ +  +  +  + ]:      10855 :         } while (keep_working(pool));
    2420                 :            : 
    2421                 :       7499 :         worker_set_flags(worker, WORKER_PREP);
    2422                 :       8027 : sleep:
    2423                 :            :         /*
    2424                 :            :          * pool->lock is held and there's no work to process and no need to
    2425                 :            :          * manage, sleep.  Workers are woken up only while holding
    2426                 :            :          * pool->lock or from local cpu, so setting the current state
    2427                 :            :          * before releasing pool->lock is enough to prevent losing any
    2428                 :            :          * event.
    2429                 :            :          */
    2430                 :       8027 :         worker_enter_idle(worker);
    2431                 :       8027 :         __set_current_state(TASK_IDLE);
    2432                 :       8027 :         spin_unlock_irq(&pool->lock);
    2433                 :       8027 :         schedule();
    2434                 :       7895 :         goto woke_up;
    2435                 :            : }
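/*
 * Illustrative sketch (not part of workqueue.c): the workers above serve
 * either per-cpu pools or dynamic unbound pools; which one handles a work
 * item is decided by the workqueue it is queued on.  Assumes a module
 * context; the workqueue names are made up.
 */
#include <linux/workqueue.h>
#include <linux/errno.h>

static struct workqueue_struct *percpu_wq, *unbound_wq;

static int example_wq_init(void)
{
        /* work queued here is run by the per-cpu worker pools */
        percpu_wq = alloc_workqueue("example_percpu", 0, 0);

        /* work queued here is run by the dynamic unbound worker pools */
        unbound_wq = alloc_workqueue("example_unbound", WQ_UNBOUND, 0);

        if (!percpu_wq || !unbound_wq) {
                if (percpu_wq)
                        destroy_workqueue(percpu_wq);
                if (unbound_wq)
                        destroy_workqueue(unbound_wq);
                return -ENOMEM;
        }
        return 0;
}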
    2436                 :            : 
    2437                 :            : /**
    2438                 :            :  * rescuer_thread - the rescuer thread function
    2439                 :            :  * @__rescuer: self
    2440                 :            :  *
    2441                 :            :  * Workqueue rescuer thread function.  There's one rescuer for each
    2442                 :            :  * workqueue which has WQ_MEM_RECLAIM set.
    2443                 :            :  *
    2444                 :            :  * Regular work processing on a pool may block trying to create a new
    2445                 :            :  * worker, which uses a GFP_KERNEL allocation that has a slight chance
    2446                 :            :  * of developing into a deadlock if some works currently on the same
    2447                 :            :  * queue need to be processed to satisfy that GFP_KERNEL allocation.
    2448                 :            :  * This is the problem the rescuer solves.
    2449                 :            :  *
    2450                 :            :  * When such a condition is possible, the pool summons the rescuers of
    2451                 :            :  * all workqueues which have works queued on the pool and lets them
    2452                 :            :  * process those works so that forward progress can be guaranteed.
    2453                 :            :  *
    2454                 :            :  * This should happen rarely.
    2455                 :            :  *
    2456                 :            :  * Return: 0
    2457                 :            :  */
    2458                 :        231 : static int rescuer_thread(void *__rescuer)
    2459                 :            : {
    2460                 :        231 :         struct worker *rescuer = __rescuer;
    2461                 :        231 :         struct workqueue_struct *wq = rescuer->rescue_wq;
    2462                 :        231 :         struct list_head *scheduled = &rescuer->scheduled;
    2463                 :        231 :         bool should_stop;
    2464                 :            : 
    2465                 :        231 :         set_user_nice(current, RESCUER_NICE_LEVEL);
    2466                 :            : 
    2467                 :            :         /*
    2468                 :            :          * Mark rescuer as worker too.  As WORKER_PREP is never cleared, it
    2469                 :            :          * doesn't participate in concurrency management.
    2470                 :            :          */
    2471                 :        231 :         set_pf_worker(true);
    2472                 :        242 : repeat:
    2473                 :        242 :         set_current_state(TASK_IDLE);
    2474                 :            : 
    2475                 :            :         /*
    2476                 :            :          * By the time the rescuer is requested to stop, the workqueue
    2477                 :            :          * shouldn't have any work pending, but @wq->maydays may still have
    2478                 :            :          * pwq(s) queued.  This can happen when non-rescuer workers consume
    2479                 :            :          * all the work items before the rescuer gets to them.  Go through
    2480                 :            :          * @wq->maydays processing before acting on should_stop so that the
    2481                 :            :          * list is always empty on exit.
    2482                 :            :          */
    2483                 :        242 :         should_stop = kthread_should_stop();
    2484                 :            : 
    2485                 :            :         /* see whether any pwq is asking for help */
    2486                 :        242 :         spin_lock_irq(&wq_mayday_lock);
    2487                 :            : 
    2488         [ -  + ]:        242 :         while (!list_empty(&wq->maydays)) {
    2489                 :          0 :                 struct pool_workqueue *pwq = list_first_entry(&wq->maydays,
    2490                 :            :                                         struct pool_workqueue, mayday_node);
    2491                 :          0 :                 struct worker_pool *pool = pwq->pool;
    2492                 :          0 :                 struct work_struct *work, *n;
    2493                 :          0 :                 bool first = true;
    2494                 :            : 
    2495                 :          0 :                 __set_current_state(TASK_RUNNING);
    2496                 :          0 :                 list_del_init(&pwq->mayday_node);
    2497                 :            : 
    2498                 :          0 :                 spin_unlock_irq(&wq_mayday_lock);
    2499                 :            : 
    2500                 :          0 :                 worker_attach_to_pool(rescuer, pool);
    2501                 :            : 
    2502                 :          0 :                 spin_lock_irq(&pool->lock);
    2503                 :            : 
    2504                 :            :                 /*
    2505                 :            :                  * Slurp in all works issued via this workqueue and
    2506                 :            :                  * process them.
    2507                 :            :                  */
    2508         [ #  # ]:          0 :                 WARN_ON_ONCE(!list_empty(scheduled));
    2509         [ #  # ]:          0 :                 list_for_each_entry_safe(work, n, &pool->worklist, entry) {
    2510         [ #  # ]:          0 :                         if (get_work_pwq(work) == pwq) {
    2511         [ #  # ]:          0 :                                 if (first)
    2512                 :          0 :                                         pool->watchdog_ts = jiffies;
    2513                 :          0 :                                 move_linked_works(work, scheduled, &n);
    2514                 :            :                         }
    2515                 :          0 :                         first = false;
    2516                 :            :                 }
    2517                 :            : 
    2518         [ #  # ]:          0 :                 if (!list_empty(scheduled)) {
    2519                 :            :                         process_scheduled_works(rescuer);
    2520                 :            : 
    2521                 :            :                         /*
    2522                 :            :                          * The above execution of rescued work items could
    2523                 :            :                          * have created more to rescue through
    2524                 :            :                          * pwq_activate_first_delayed() or chained
    2525                 :            :                          * queueing.  Let's put @pwq back on mayday list so
    2526                 :            :                          * that such back-to-back work items, which may be
    2527                 :            :                          * being used to relieve memory pressure, don't
    2528                 :            :                          * incur MAYDAY_INTERVAL delay in between.
    2529                 :            :                          */
    2530         [ #  # ]:          0 :                         if (need_to_create_worker(pool)) {
    2531                 :          0 :                                 spin_lock(&wq_mayday_lock);
    2532                 :            :                                 /*
    2533                 :            :                                  * Queue iff we aren't racing destruction
    2534                 :            :                                  * and somebody else hasn't queued it already.
    2535                 :            :                                  */
    2536   [ #  #  #  # ]:          0 :                                 if (wq->rescuer && list_empty(&pwq->mayday_node)) {
    2537         [ #  # ]:          0 :                                         get_pwq(pwq);
    2538                 :          0 :                                         list_add_tail(&pwq->mayday_node, &wq->maydays);
    2539                 :            :                                 }
    2540                 :          0 :                                 spin_unlock(&wq_mayday_lock);
    2541                 :            :                         }
    2542                 :            :                 }
    2543                 :            : 
    2544                 :            :                 /*
    2545                 :            :                  * Put the reference grabbed by send_mayday().  @pool won't
    2546                 :            :                  * go away while we're still attached to it.
    2547                 :            :                  */
    2548                 :          0 :                 put_pwq(pwq);
    2549                 :            : 
    2550                 :            :                 /*
    2551                 :            :                  * Leave this pool.  If need_more_worker() is %true, notify a
    2552                 :            :                  * regular worker; otherwise, we end up with 0 concurrency
    2553                 :            :                  * and stall execution.
    2554                 :            :                  */
    2555   [ #  #  #  # ]:          0 :                 if (need_more_worker(pool))
    2556         [ #  # ]:          0 :                         wake_up_worker(pool);
    2557                 :            : 
    2558                 :          0 :                 spin_unlock_irq(&pool->lock);
    2559                 :            : 
    2560                 :          0 :                 worker_detach_from_pool(rescuer);
    2561                 :            : 
    2562                 :          0 :                 spin_lock_irq(&wq_mayday_lock);
    2563                 :            :         }
    2564                 :            : 
    2565                 :        242 :         spin_unlock_irq(&wq_mayday_lock);
    2566                 :            : 
    2567         [ +  + ]:        242 :         if (should_stop) {
    2568                 :         11 :                 __set_current_state(TASK_RUNNING);
    2569                 :         11 :                 set_pf_worker(false);
    2570                 :         11 :                 return 0;
    2571                 :            :         }
    2572                 :            : 
    2573                 :            :         /* rescuers should never participate in concurrency management */
    2574         [ -  + ]:        231 :         WARN_ON_ONCE(!(rescuer->flags & WORKER_NOT_RUNNING));
    2575                 :        231 :         schedule();
    2576                 :         11 :         goto repeat;
    2577                 :            : }
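/*
 * Illustrative sketch (not part of workqueue.c): a workqueue only gets a
 * rescuer_thread() like the one above when it is created with
 * WQ_MEM_RECLAIM, which workqueues used on the memory-reclaim path must
 * set.  Assumes a module context; the name is made up.
 */
#include <linux/workqueue.h>
#include <linux/errno.h>

static struct workqueue_struct *reclaim_wq;

static int example_reclaim_wq_init(void)
{
        /* WQ_MEM_RECLAIM reserves a rescuer so forward progress is possible
         * even when new kworkers cannot be created under memory pressure */
        reclaim_wq = alloc_workqueue("example_reclaim", WQ_MEM_RECLAIM, 0);
        return reclaim_wq ? 0 : -ENOMEM;
}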
    2578                 :            : 
    2579                 :            : /**
    2580                 :            :  * check_flush_dependency - check for flush dependency sanity
    2581                 :            :  * @target_wq: workqueue being flushed
    2582                 :            :  * @target_work: work item being flushed (NULL for workqueue flushes)
    2583                 :            :  *
    2584                 :            :  * %current is trying to flush the whole @target_wq or @target_work on it.
    2585                 :            :  * If @target_wq doesn't have %WQ_MEM_RECLAIM, verify that %current is not
    2586                 :            :  * reclaiming memory or running on a workqueue which doesn't have
    2587                 :            :  * %WQ_MEM_RECLAIM as that can break the forward-progress guarantee, leading to
    2588                 :            :  * a deadlock.
    2589                 :            :  */
    2590                 :         38 : static void check_flush_dependency(struct workqueue_struct *target_wq,
    2591                 :            :                                    struct work_struct *target_work)
    2592                 :            : {
    2593         [ +  - ]:         38 :         work_func_t target_func = target_work ? target_work->func : NULL;
    2594                 :         38 :         struct worker *worker;
    2595                 :            : 
    2596         [ +  + ]:         38 :         if (target_wq->flags & WQ_MEM_RECLAIM)
    2597                 :            :                 return;
    2598                 :            : 
    2599                 :         34 :         worker = current_wq_worker();
    2600                 :            : 
    2601   [ -  +  -  - ]:         34 :         WARN_ONCE(current->flags & PF_MEMALLOC,
    2602                 :            :                   "workqueue: PF_MEMALLOC task %d(%s) is flushing !WQ_MEM_RECLAIM %s:%ps",
    2603                 :            :                   current->pid, current->comm, target_wq->name, target_func);
    2604   [ -  +  -  -  :         68 :         WARN_ONCE(worker && ((worker->current_pwq->wq->flags &
             -  +  -  - ]
    2605                 :            :                               (WQ_MEM_RECLAIM | __WQ_LEGACY)) == WQ_MEM_RECLAIM),
    2606                 :            :                   "workqueue: WQ_MEM_RECLAIM %s:%ps is flushing !WQ_MEM_RECLAIM %s:%ps",
    2607                 :            :                   worker->current_pwq->wq->name, worker->current_func,
    2608                 :            :                   target_wq->name, target_func);
    2609                 :            : }
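/*
 * Illustrative sketch (not part of workqueue.c): the dependency that
 * check_flush_dependency() warns about - a work item running on a
 * WQ_MEM_RECLAIM workqueue flushing a workqueue without WQ_MEM_RECLAIM,
 * which can stall the reclaim path behind a queue with no forward-progress
 * guarantee.  All names below are made up.
 */
#include <linux/workqueue.h>

static struct workqueue_struct *plain_wq;       /* created without WQ_MEM_RECLAIM */

/* assume this handler is queued on a workqueue created with WQ_MEM_RECLAIM */
static void reclaim_side_fn(struct work_struct *work)
{
        /* flushing a !WQ_MEM_RECLAIM wq from here trips the WARN_ONCE() above */
        flush_workqueue(plain_wq);
}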
    2610                 :            : 
    2611                 :            : struct wq_barrier {
    2612                 :            :         struct work_struct      work;
    2613                 :            :         struct completion       done;
    2614                 :            :         struct task_struct      *task;  /* purely informational */
    2615                 :            : };
    2616                 :            : 
    2617                 :         38 : static void wq_barrier_func(struct work_struct *work)
    2618                 :            : {
    2619                 :         38 :         struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
    2620                 :         38 :         complete(&barr->done);
    2621                 :         38 : }
    2622                 :            : 
    2623                 :            : /**
    2624                 :            :  * insert_wq_barrier - insert a barrier work
    2625                 :            :  * @pwq: pwq to insert barrier into
    2626                 :            :  * @barr: wq_barrier to insert
    2627                 :            :  * @target: target work to attach @barr to
    2628                 :            :  * @worker: worker currently executing @target, NULL if @target is not executing
    2629                 :            :  *
    2630                 :            :  * @barr is linked to @target such that @barr is completed only after
    2631                 :            :  * @target finishes execution.  Please note that the ordering
    2632                 :            :  * guarantee is observed only with respect to @target and on the local
    2633                 :            :  * cpu.
    2634                 :            :  *
    2635                 :            :  * Currently, a queued barrier can't be canceled.  This is because
    2636                 :            :  * try_to_grab_pending() can't determine whether the work to be
    2637                 :            :  * grabbed is at the head of the queue and thus can't clear LINKED
    2638                 :            :  * flag of the previous work while there must be a valid next work
    2639                 :            :  * after a work with LINKED flag set.
    2640                 :            :  *
    2641                 :            :  * Note that when @worker is non-NULL, @target may be modified
    2642                 :            :  * underneath us, so we can't reliably determine pwq from @target.
    2643                 :            :  *
    2644                 :            :  * CONTEXT:
    2645                 :            :  * spin_lock_irq(pool->lock).
    2646                 :            :  */
    2647                 :         38 : static void insert_wq_barrier(struct pool_workqueue *pwq,
    2648                 :            :                               struct wq_barrier *barr,
    2649                 :            :                               struct work_struct *target, struct worker *worker)
    2650                 :            : {
    2651                 :         38 :         struct list_head *head;
    2652                 :         38 :         unsigned int linked = 0;
    2653                 :            : 
    2654                 :            :         /*
    2655                 :            :          * debugobject calls are safe here even with pool->lock locked
    2656                 :            :          * as we know for sure that this will not trigger any of the
    2657                 :            :          * checks and call back into the fixup functions where we
    2658                 :            :          * might deadlock.
    2659                 :            :          */
    2660                 :         38 :         INIT_WORK_ONSTACK(&barr->work, wq_barrier_func);
    2661                 :         38 :         __set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work));
    2662                 :            : 
    2663                 :         38 :         init_completion_map(&barr->done, &target->lockdep_map);
    2664                 :            : 
    2665         [ +  + ]:         38 :         barr->task = current;
    2666                 :            : 
    2667                 :            :         /*
    2668                 :            :          * If @target is currently being executed, schedule the
    2669                 :            :          * barrier to the worker; otherwise, put it after @target.
    2670                 :            :          */
    2671         [ +  + ]:         38 :         if (worker)
    2672                 :         13 :                 head = worker->scheduled.next;
    2673                 :            :         else {
    2674                 :         25 :                 unsigned long *bits = work_data_bits(target);
    2675                 :            : 
    2676                 :         25 :                 head = target->entry.next;
    2677                 :            :                 /* there can already be other linked works, inherit and set */
    2678                 :         25 :                 linked = *bits & WORK_STRUCT_LINKED;
    2679                 :         25 :                 __set_bit(WORK_STRUCT_LINKED_BIT, bits);
    2680                 :            :         }
    2681                 :            : 
    2682                 :         38 :         debug_work_activate(&barr->work);
    2683                 :         38 :         insert_work(pwq, &barr->work, head,
    2684                 :            :                     work_color_to_flags(WORK_NO_COLOR) | linked);
    2685                 :         38 : }
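/*
 * Illustrative sketch (not part of workqueue.c): a caller-level analogue of
 * the barrier above - queue a work item whose only job is to signal a
 * completion, then wait for it.  insert_wq_barrier() does roughly this, but
 * splices the barrier directly behind the target work instead of at the
 * tail of the queue.  Names below are made up.
 */
#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/completion.h>

struct demo_barrier {
        struct work_struct      work;
        struct completion       done;
};

static void demo_barrier_fn(struct work_struct *work)
{
        struct demo_barrier *b = container_of(work, struct demo_barrier, work);

        complete(&b->done);
}

static void wait_for_queued_work(struct workqueue_struct *wq)
{
        struct demo_barrier b;

        INIT_WORK_ONSTACK(&b.work, demo_barrier_fn);
        init_completion(&b.done);
        queue_work(wq, &b.work);
        wait_for_completion(&b.done);
        destroy_work_on_stack(&b.work);
}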
    2686                 :            : 
    2687                 :            : /**
    2688                 :            :  * flush_workqueue_prep_pwqs - prepare pwqs for workqueue flushing
    2689                 :            :  * @wq: workqueue being flushed
    2690                 :            :  * @flush_color: new flush color, < 0 for no-op
    2691                 :            :  * @work_color: new work color, < 0 for no-op
    2692                 :            :  *
    2693                 :            :  * Prepare pwqs for workqueue flushing.
    2694                 :            :  *
    2695                 :            :  * If @flush_color is non-negative, flush_color on all pwqs should be
    2696                 :            :  * -1.  If no pwq has in-flight commands at the specified color, all
    2697                 :            :  * pwq->flush_color's stay at -1 and %false is returned.  If any pwq
    2698                 :            :  * has in flight commands, its pwq->flush_color is set to
    2699                 :            :  * @flush_color, @wq->nr_pwqs_to_flush is updated accordingly, pwq
    2700                 :            :  * wakeup logic is armed and %true is returned.
    2701                 :            :  *
    2702                 :            :  * The caller should have initialized @wq->first_flusher prior to
    2703                 :            :  * calling this function with non-negative @flush_color.  If
    2704                 :            :  * @flush_color is negative, no flush color update is done and %false
    2705                 :            :  * is returned.
    2706                 :            :  *
    2707                 :            :  * If @work_color is non-negative, all pwqs should have the same
    2708                 :            :  * work_color which is previous to @work_color and all will be
    2709                 :            :  * advanced to @work_color.
    2710                 :            :  *
    2711                 :            :  * CONTEXT:
    2712                 :            :  * mutex_lock(wq->mutex).
    2713                 :            :  *
    2714                 :            :  * Return:
    2715                 :            :  * %true if @flush_color >= 0 and there's something to flush.  %false
    2716                 :            :  * otherwise.
    2717                 :            :  */
    2718                 :       1892 : static bool flush_workqueue_prep_pwqs(struct workqueue_struct *wq,
    2719                 :            :                                       int flush_color, int work_color)
    2720                 :            : {
    2721                 :       1892 :         bool wait = false;
    2722                 :       1892 :         struct pool_workqueue *pwq;
    2723                 :            : 
    2724         [ +  - ]:       1892 :         if (flush_color >= 0) {
    2725         [ -  + ]:       1892 :                 WARN_ON_ONCE(atomic_read(&wq->nr_pwqs_to_flush));
    2726                 :       1892 :                 atomic_set(&wq->nr_pwqs_to_flush, 1);
    2727                 :            :         }
    2728                 :            : 
    2729         [ +  + ]:       3784 :         for_each_pwq(pwq, wq) {
    2730                 :       1892 :                 struct worker_pool *pool = pwq->pool;
    2731                 :            : 
    2732                 :       1892 :                 spin_lock_irq(&pool->lock);
    2733                 :            : 
    2734         [ +  - ]:       1892 :                 if (flush_color >= 0) {
    2735         [ -  + ]:       1892 :                         WARN_ON_ONCE(pwq->flush_color != -1);
    2736                 :            : 
    2737         [ -  + ]:       1892 :                         if (pwq->nr_in_flight[flush_color]) {
    2738                 :          0 :                                 pwq->flush_color = flush_color;
    2739                 :          0 :                                 atomic_inc(&wq->nr_pwqs_to_flush);
    2740                 :          0 :                                 wait = true;
    2741                 :            :                         }
    2742                 :            :                 }
    2743                 :            : 
    2744         [ +  - ]:       1892 :                 if (work_color >= 0) {
    2745         [ -  + ]:       1892 :                         WARN_ON_ONCE(work_color != work_next_color(pwq->work_color));
    2746                 :       1892 :                         pwq->work_color = work_color;
    2747                 :            :                 }
    2748                 :            : 
    2749                 :       1892 :                 spin_unlock_irq(&pool->lock);
    2750                 :            :         }
    2751                 :            : 
    2752   [ +  -  +  - ]:       1892 :         if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_pwqs_to_flush))
    2753                 :       1892 :                 complete(&wq->first_flusher->done);
    2754                 :            : 
    2755                 :       1892 :         return wait;
    2756                 :            : }
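/*
 * Illustrative sketch (not part of workqueue.c): nr_pwqs_to_flush above uses
 * the common "bias the counter by one" idiom - start at 1, add one per busy
 * pwq, then drop the bias - so the completion can fire only after every
 * in-flight pwq (or the bias itself, if none were busy) has dropped its
 * count.  A stand-alone rendering of the same idiom; names are made up.
 */
#include <linux/atomic.h>
#include <linux/completion.h>

static void demo_arm(atomic_t *pending, struct completion *done, int busy_items)
{
        int i;

        atomic_set(pending, 1);                 /* the bias */
        for (i = 0; i < busy_items; i++)
                atomic_inc(pending);            /* one per in-flight item */

        /* each finishing item would also do atomic_dec_and_test()/complete() */
        if (atomic_dec_and_test(pending))       /* drop the bias */
                complete(done);                 /* nothing was in flight */
}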
    2757                 :            : 
    2758                 :            : /**
    2759                 :            :  * flush_workqueue - ensure that any scheduled work has run to completion.
    2760                 :            :  * @wq: workqueue to flush
    2761                 :            :  *
    2762                 :            :  * This function sleeps until all work items which were queued on entry
    2763                 :            :  * have finished execution, but it is not livelocked by new incoming ones.
    2764                 :            :  */
    2765                 :       1892 : void flush_workqueue(struct workqueue_struct *wq)
    2766                 :            : {
    2767                 :       3784 :         struct wq_flusher this_flusher = {
    2768                 :            :                 .list = LIST_HEAD_INIT(this_flusher.list),
    2769                 :            :                 .flush_color = -1,
    2770                 :       1892 :                 .done = COMPLETION_INITIALIZER_ONSTACK_MAP(this_flusher.done, wq->lockdep_map),
    2771                 :            :         };
    2772                 :       1892 :         int next_color;
    2773                 :            : 
    2774   [ -  +  +  - ]:       1892 :         if (WARN_ON(!wq_online))
    2775                 :          0 :                 return;
    2776                 :            : 
    2777                 :       1892 :         lock_map_acquire(&wq->lockdep_map);
    2778                 :       1892 :         lock_map_release(&wq->lockdep_map);
    2779                 :            : 
    2780                 :       1892 :         mutex_lock(&wq->mutex);
    2781                 :            : 
    2782                 :            :         /*
    2783                 :            :          * Start-to-wait phase
    2784                 :            :          */
    2785                 :       1892 :         next_color = work_next_color(wq->work_color);
    2786                 :            : 
    2787         [ +  - ]:       1892 :         if (next_color != wq->flush_color) {
    2788                 :            :                 /*
    2789                 :            :                  * Color space is not full.  The current work_color
    2790                 :            :                  * becomes our flush_color and work_color is advanced
    2791                 :            :                  * by one.
    2792                 :            :                  */
    2793         [ -  + ]:       1892 :                 WARN_ON_ONCE(!list_empty(&wq->flusher_overflow));
    2794                 :       1892 :                 this_flusher.flush_color = wq->work_color;
    2795                 :       1892 :                 wq->work_color = next_color;
    2796                 :            : 
    2797         [ +  - ]:       1892 :                 if (!wq->first_flusher) {
    2798                 :            :                         /* no flush in progress, become the first flusher */
    2799         [ -  + ]:       1892 :                         WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color);
    2800                 :            : 
    2801                 :       1892 :                         wq->first_flusher = &this_flusher;
    2802                 :            : 
    2803         [ +  - ]:       1892 :                         if (!flush_workqueue_prep_pwqs(wq, wq->flush_color,
    2804                 :            :                                                        wq->work_color)) {
    2805                 :            :                                 /* nothing to flush, done */
    2806                 :       1892 :                                 wq->flush_color = next_color;
    2807                 :       1892 :                                 wq->first_flusher = NULL;
    2808                 :       1892 :                                 goto out_unlock;
    2809                 :            :                         }
    2810                 :            :                 } else {
    2811                 :            :                         /* wait in queue */
    2812         [ #  # ]:          0 :                         WARN_ON_ONCE(wq->flush_color == this_flusher.flush_color);
    2813                 :          0 :                         list_add_tail(&this_flusher.list, &wq->flusher_queue);
    2814                 :          0 :                         flush_workqueue_prep_pwqs(wq, -1, wq->work_color);
    2815                 :            :                 }
    2816                 :            :         } else {
    2817                 :            :                 /*
    2818                 :            :                  * Oops, color space is full, wait on overflow queue.
    2819                 :            :                  * The next flush completion will assign us
    2820                 :            :                  * flush_color and transfer to flusher_queue.
    2821                 :            :                  */
    2822                 :          0 :                 list_add_tail(&this_flusher.list, &wq->flusher_overflow);
    2823                 :            :         }
    2824                 :            : 
    2825                 :          0 :         check_flush_dependency(wq, NULL);
    2826                 :            : 
    2827                 :          0 :         mutex_unlock(&wq->mutex);
    2828                 :            : 
    2829                 :          0 :         wait_for_completion(&this_flusher.done);
    2830                 :            : 
    2831                 :            :         /*
    2832                 :            :          * Wake-up-and-cascade phase
    2833                 :            :          *
    2834                 :            :          * First flushers are responsible for cascading flushes and
    2835                 :            :          * handling overflow.  Non-first flushers can simply return.
    2836                 :            :          */
    2837         [ #  # ]:          0 :         if (wq->first_flusher != &this_flusher)
    2838                 :            :                 return;
    2839                 :            : 
    2840                 :          0 :         mutex_lock(&wq->mutex);
    2841                 :            : 
    2842                 :            :         /* we might have raced, check again with mutex held */
    2843         [ #  # ]:          0 :         if (wq->first_flusher != &this_flusher)
    2844                 :          0 :                 goto out_unlock;
    2845                 :            : 
    2846                 :          0 :         wq->first_flusher = NULL;
    2847                 :            : 
    2848         [ #  # ]:          0 :         WARN_ON_ONCE(!list_empty(&this_flusher.list));
    2849         [ #  # ]:          0 :         WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color);
    2850                 :            : 
    2851                 :          0 :         while (true) {
    2852                 :          0 :                 struct wq_flusher *next, *tmp;
    2853                 :            : 
    2854                 :            :                 /* complete all the flushers sharing the current flush color */
    2855         [ #  # ]:          0 :                 list_for_each_entry_safe(next, tmp, &wq->flusher_queue, list) {
    2856         [ #  # ]:          0 :                         if (next->flush_color != wq->flush_color)
    2857                 :            :                                 break;
    2858                 :          0 :                         list_del_init(&next->list);
    2859                 :          0 :                         complete(&next->done);
    2860                 :            :                 }
    2861                 :            : 
    2862   [ #  #  #  #  :          0 :                 WARN_ON_ONCE(!list_empty(&wq->flusher_overflow) &&
                   #  # ]
    2863                 :            :                              wq->flush_color != work_next_color(wq->work_color));
    2864                 :            : 
    2865                 :            :                 /* this flush_color is finished, advance by one */
    2866                 :          0 :                 wq->flush_color = work_next_color(wq->flush_color);
    2867                 :            : 
    2868                 :            :                 /* one color has been freed, handle overflow queue */
    2869         [ #  # ]:          0 :                 if (!list_empty(&wq->flusher_overflow)) {
    2870                 :            :                         /*
    2871                 :            :                          * Assign the same color to all overflowed
    2872                 :            :                          * flushers, advance work_color and append to
    2873                 :            :                          * flusher_queue.  This is the start-to-wait
    2874                 :            :                          * phase for these overflowed flushers.
    2875                 :            :                          */
    2876         [ #  # ]:          0 :                         list_for_each_entry(tmp, &wq->flusher_overflow, list)
    2877                 :          0 :                                 tmp->flush_color = wq->work_color;
    2878                 :            : 
    2879                 :          0 :                         wq->work_color = work_next_color(wq->work_color);
    2880                 :            : 
    2881         [ #  # ]:          0 :                         list_splice_tail_init(&wq->flusher_overflow,
    2882                 :            :                                               &wq->flusher_queue);
    2883                 :          0 :                         flush_workqueue_prep_pwqs(wq, -1, wq->work_color);
    2884                 :            :                 }
    2885                 :            : 
    2886         [ #  # ]:          0 :                 if (list_empty(&wq->flusher_queue)) {
    2887         [ #  # ]:          0 :                         WARN_ON_ONCE(wq->flush_color != wq->work_color);
    2888                 :            :                         break;
    2889                 :            :                 }
    2890                 :            : 
    2891                 :            :                 /*
    2892                 :            :                  * Need to flush more colors.  Make the next flusher
    2893                 :            :                  * the new first flusher and arm pwqs.
    2894                 :            :                  */
    2895         [ #  # ]:          0 :                 WARN_ON_ONCE(wq->flush_color == wq->work_color);
    2896         [ #  # ]:          0 :                 WARN_ON_ONCE(wq->flush_color != next->flush_color);
    2897                 :            : 
    2898                 :          0 :                 list_del_init(&next->list);
    2899                 :          0 :                 wq->first_flusher = next;
    2900                 :            : 
    2901         [ #  # ]:          0 :                 if (flush_workqueue_prep_pwqs(wq, wq->flush_color, -1))
    2902                 :            :                         break;
    2903                 :            : 
    2904                 :            :                 /*
    2905                 :            :                  * Meh... this color is already done, clear first
    2906                 :            :                  * flusher and repeat cascading.
    2907                 :            :                  */
    2908                 :          0 :                 wq->first_flusher = NULL;
    2909                 :            :         }
    2910                 :            : 
    2911                 :          0 : out_unlock:
    2912                 :       1892 :         mutex_unlock(&wq->mutex);
    2913                 :            : }
    2914                 :            : EXPORT_SYMBOL(flush_workqueue);
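/*
 * Illustrative sketch (not part of workqueue.c): typical flush_workqueue()
 * use - wait for everything already queued on a private workqueue before
 * tearing it down.  Assumes a module context; names are made up.
 */
#include <linux/workqueue.h>

static struct workqueue_struct *example_wq;

static void example_teardown(void)
{
        /* wait for all work items queued so far to finish executing */
        flush_workqueue(example_wq);
        destroy_workqueue(example_wq);
}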
    2915                 :            : 
    2916                 :            : /**
    2917                 :            :  * drain_workqueue - drain a workqueue
    2918                 :            :  * @wq: workqueue to drain
    2919                 :            :  *
    2920                 :            :  * Wait until the workqueue becomes empty.  While draining is in progress,
    2921                 :            :  * only chain queueing is allowed.  IOW, only currently pending or running
    2922                 :            :  * work items on @wq can queue further work items on it.  @wq is flushed
    2923                 :            :  * repeatedly until it becomes empty.  The number of flushes is determined
    2924                 :            :  * by the depth of chaining and should be relatively short.  Whine if it
    2925                 :            :  * takes too long.
    2926                 :            :  */
    2927                 :         11 : void drain_workqueue(struct workqueue_struct *wq)
    2928                 :            : {
    2929                 :         11 :         unsigned int flush_cnt = 0;
    2930                 :         11 :         struct pool_workqueue *pwq;
    2931                 :            : 
    2932                 :            :         /*
    2933                 :            :          * __queue_work() needs to test whether there are drainers; it is much
    2934                 :            :          * hotter than drain_workqueue() and already looks at @wq->flags.  Use
    2935                 :            :          * __WQ_DRAINING so that the queueing path needn't check nr_drainers.
    2936                 :            :          */
    2937                 :         11 :         mutex_lock(&wq->mutex);
    2938         [ +  - ]:         11 :         if (!wq->nr_drainers++)
    2939                 :         11 :                 wq->flags |= __WQ_DRAINING;
    2940                 :         11 :         mutex_unlock(&wq->mutex);
    2941                 :         11 : reflush:
    2942                 :         11 :         flush_workqueue(wq);
    2943                 :            : 
    2944                 :         11 :         mutex_lock(&wq->mutex);
    2945                 :            : 
    2946         [ +  + ]:         22 :         for_each_pwq(pwq, wq) {
    2947                 :         11 :                 bool drained;
    2948                 :            : 
    2949                 :         11 :                 spin_lock_irq(&pwq->pool->lock);
    2950   [ +  -  -  + ]:         11 :                 drained = !pwq->nr_active && list_empty(&pwq->delayed_works);
    2951                 :         11 :                 spin_unlock_irq(&pwq->pool->lock);
    2952                 :            : 
    2953         [ +  - ]:         11 :                 if (drained)
    2954                 :         11 :                         continue;
    2955                 :            : 
    2956         [ #  # ]:          0 :                 if (++flush_cnt == 10 ||
    2957   [ #  #  #  # ]:          0 :                     (flush_cnt % 100 == 0 && flush_cnt <= 1000))
    2958                 :          0 :                         pr_warn("workqueue %s: drain_workqueue() isn't complete after %u tries\n",
    2959                 :            :                                 wq->name, flush_cnt);
    2960                 :            : 
    2961                 :          0 :                 mutex_unlock(&wq->mutex);
    2962                 :          0 :                 goto reflush;
    2963                 :            :         }
    2964                 :            : 
    2965         [ +  - ]:         11 :         if (!--wq->nr_drainers)
    2966                 :         11 :                 wq->flags &= ~__WQ_DRAINING;
    2967                 :         11 :         mutex_unlock(&wq->mutex);
    2968                 :         11 : }
    2969                 :            : EXPORT_SYMBOL_GPL(drain_workqueue);
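/*
 * Illustrative sketch (not part of workqueue.c): drain_workqueue() is the
 * right tool when work items may chain-queue further work on the same
 * workqueue and the queue must end up truly empty; a single flush only
 * covers items queued before it started.  Names below are made up.
 */
#include <linux/workqueue.h>

static struct workqueue_struct *chain_wq;

static void chain_wq_quiesce(void)
{
        /* flushes repeatedly until even chain-queued items are gone */
        drain_workqueue(chain_wq);
}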
    2970                 :            : 
    2971                 :       3751 : static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
    2972                 :            :                              bool from_cancel)
    2973                 :            : {
    2974                 :       3751 :         struct worker *worker = NULL;
    2975                 :       3751 :         struct worker_pool *pool;
    2976                 :       3751 :         struct pool_workqueue *pwq;
    2977                 :            : 
    2978                 :       3751 :         might_sleep();
    2979                 :            : 
    2980                 :       3751 :         rcu_read_lock();
    2981                 :       3751 :         pool = get_work_pool(work);
    2982         [ +  + ]:       3751 :         if (!pool) {
    2983                 :       3322 :                 rcu_read_unlock();
    2984                 :       3322 :                 return false;
    2985                 :            :         }
    2986                 :            : 
    2987                 :        429 :         spin_lock_irq(&pool->lock);
    2988                 :            :         /* see the comment in try_to_grab_pending() with the same code */
    2989                 :        429 :         pwq = get_work_pwq(work);
    2990         [ +  - ]:         25 :         if (pwq) {
    2991         [ -  + ]:         25 :                 if (unlikely(pwq->pool != pool))
    2992                 :          0 :                         goto already_gone;
    2993                 :            :         } else {
    2994         [ +  + ]:        404 :                 worker = find_worker_executing_work(pool, work);
    2995         [ +  + ]:        404 :                 if (!worker)
    2996                 :        391 :                         goto already_gone;
    2997                 :         13 :                 pwq = worker->current_pwq;
    2998                 :            :         }
    2999                 :            : 
    3000                 :         38 :         check_flush_dependency(pwq->wq, work);
    3001                 :            : 
    3002                 :         38 :         insert_wq_barrier(pwq, barr, work, worker);
    3003                 :         38 :         spin_unlock_irq(&pool->lock);
    3004                 :            : 
    3005                 :            :         /*
    3006                 :            :          * Force a lock recursion deadlock when using flush_work() inside a
    3007                 :            :          * single-threaded or rescuer-equipped workqueue.
    3008                 :            :          *
    3009                 :            :          * For single-threaded workqueues the deadlock happens when the flushed
    3010                 :            :          * work is queued after the work issuing the flush_work().  For
    3011                 :            :          * rescuer-equipped workqueues the deadlock happens when the rescuer
    3012                 :            :          * stalls, blocking forward progress.
    3013                 :            :          */
    3014                 :         38 :         if (!from_cancel &&
    3015                 :            :             (pwq->wq->saved_max_active == 1 || pwq->wq->rescuer)) {
    3016                 :         38 :                 lock_map_acquire(&pwq->wq->lockdep_map);
    3017                 :         38 :                 lock_map_release(&pwq->wq->lockdep_map);
    3018                 :            :         }
    3019                 :         38 :         rcu_read_unlock();
    3020                 :         38 :         return true;
    3021                 :        391 : already_gone:
    3022                 :        391 :         spin_unlock_irq(&pool->lock);
    3023                 :        391 :         rcu_read_unlock();
    3024                 :        391 :         return false;
    3025                 :            : }
    3026                 :            : 
    3027                 :       3751 : static bool __flush_work(struct work_struct *work, bool from_cancel)
    3028                 :            : {
    3029                 :       3751 :         struct wq_barrier barr;
    3030                 :            : 
    3031   [ -  +  +  - ]:       3751 :         if (WARN_ON(!wq_online))
    3032                 :            :                 return false;
    3033                 :            : 
    3034   [ -  +  +  - ]:       3751 :         if (WARN_ON(!work->func))
    3035                 :            :                 return false;
    3036                 :            : 
    3037                 :       3751 :         if (!from_cancel) {
    3038                 :       3751 :                 lock_map_acquire(&work->lockdep_map);
    3039                 :       3751 :                 lock_map_release(&work->lockdep_map);
    3040                 :            :         }
    3041                 :            : 
    3042         [ +  + ]:       3751 :         if (start_flush_work(work, &barr, from_cancel)) {
    3043                 :         38 :                 wait_for_completion(&barr.done);
    3044                 :         38 :                 destroy_work_on_stack(&barr.work);
    3045                 :         38 :                 return true;
    3046                 :            :         } else {
    3047                 :            :                 return false;
    3048                 :            :         }
    3049                 :            : }
    3050                 :            : 
    3051                 :            : /**
    3052                 :            :  * flush_work - wait for a work to finish executing the last queueing instance
    3053                 :            :  * @work: the work to flush
    3054                 :            :  *
    3055                 :            :  * Wait until @work has finished execution.  @work is guaranteed to be idle
    3056                 :            :  * on return if it hasn't been requeued since flush started.
    3057                 :            :  *
    3058                 :            :  * Return:
    3059                 :            :  * %true if flush_work() waited for the work to finish execution,
    3060                 :            :  * %false if it was already idle.
    3061                 :            :  */
    3062                 :       2046 : bool flush_work(struct work_struct *work)
    3063                 :            : {
    3064                 :       2024 :         return __flush_work(work, false);
    3065                 :            : }
    3066                 :            : EXPORT_SYMBOL_GPL(flush_work);
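Editor's illustration, not part of workqueue.c: a minimal sketch of the flush_work() contract documented above. The my_dev structure, my_stats_fn() and my_dev_sync_stats() are invented names.

#include <linux/kernel.h>
#include <linux/printk.h>
#include <linux/workqueue.h>

/* Hypothetical driver state; only ->stats_work matters for this sketch. */
struct my_dev {
	struct work_struct stats_work;
};

static void my_stats_fn(struct work_struct *work)
{
	struct my_dev *dev = container_of(work, struct my_dev, stats_work);

	pr_debug("refreshing stats for %p\n", dev);
}

/*
 * Before reading what my_stats_fn() produces, wait for the last queued
 * instance to finish.  flush_work() returns false if the work was
 * already idle.
 */
static void my_dev_sync_stats(struct my_dev *dev)
{
	if (flush_work(&dev->stats_work))
		pr_debug("had to wait for an in-flight update\n");
}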
    3067                 :            : 
    3068                 :            : struct cwt_wait {
    3069                 :            :         wait_queue_entry_t              wait;
    3070                 :            :         struct work_struct      *work;
    3071                 :            : };
    3072                 :            : 
    3073                 :          0 : static int cwt_wakefn(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
    3074                 :            : {
    3075                 :          0 :         struct cwt_wait *cwait = container_of(wait, struct cwt_wait, wait);
    3076                 :            : 
    3077         [ #  # ]:          0 :         if (cwait->work != key)
    3078                 :            :                 return 0;
    3079                 :          0 :         return autoremove_wake_function(wait, mode, sync, key);
    3080                 :            : }
    3081                 :            : 
    3082                 :       1705 : static bool __cancel_work_timer(struct work_struct *work, bool is_dwork)
    3083                 :            : {
    3084                 :       1705 :         static DECLARE_WAIT_QUEUE_HEAD(cancel_waitq);
    3085                 :       1705 :         unsigned long flags;
    3086                 :       1705 :         int ret;
    3087                 :            : 
    3088                 :       1705 :         do {
    3089                 :       1705 :                 ret = try_to_grab_pending(work, is_dwork, &flags);
    3090                 :            :                 /*
    3091                 :            :                  * If someone else is already canceling, wait for it to
    3092                 :            :                  * finish.  flush_work() doesn't work for PREEMPT_NONE
    3093                 :            :                  * because we may get scheduled between @work's completion
    3094                 :            :                  * and the other canceling task resuming and clearing
    3095                 :            :                  * CANCELING - flush_work() will return false immediately
    3096                 :            :                  * as @work is no longer busy, try_to_grab_pending() will
    3097                 :            :                  * return -ENOENT as @work is still being canceled and the
    3098                 :            :                  * other canceling task won't be able to clear CANCELING as
    3099                 :            :                  * we're hogging the CPU.
    3100                 :            :                  *
    3101                 :            :                  * Let's wait for completion using a waitqueue.  As this
    3102                 :            :                  * may lead to the thundering herd problem, use a custom
    3103                 :            :                  * wake function which matches @work along with exclusive
    3104                 :            :                  * wait and wakeup.
    3105                 :            :                  */
    3106         [ -  + ]:       1705 :                 if (unlikely(ret == -ENOENT)) {
    3107                 :          0 :                         struct cwt_wait cwait;
    3108                 :            : 
    3109                 :          0 :                         init_wait(&cwait.wait);
    3110                 :          0 :                         cwait.wait.func = cwt_wakefn;
    3111                 :          0 :                         cwait.work = work;
    3112                 :            : 
    3113                 :          0 :                         prepare_to_wait_exclusive(&cancel_waitq, &cwait.wait,
    3114                 :            :                                                   TASK_UNINTERRUPTIBLE);
    3115         [ #  # ]:          0 :                         if (work_is_canceling(work))
    3116                 :          0 :                                 schedule();
    3117                 :          0 :                         finish_wait(&cancel_waitq, &cwait.wait);
    3118                 :            :                 }
    3119         [ -  + ]:       1705 :         } while (unlikely(ret < 0));
    3120                 :            : 
    3121                 :            :         /* tell other tasks trying to grab @work to back off */
    3122                 :       1705 :         mark_work_canceling(work);
    3123                 :       1705 :         local_irq_restore(flags);
    3124                 :            : 
    3125                 :            :         /*
    3126                 :            :          * This allows canceling during early boot.  We know that @work
    3127                 :            :          * isn't executing.
    3128                 :            :          */
    3129         [ +  - ]:       1705 :         if (wq_online)
    3130                 :       1705 :                 __flush_work(work, true);
    3131                 :            : 
    3132                 :       1705 :         clear_work_data(work);
    3133                 :            : 
    3134                 :            :         /*
    3135                 :            :          * Paired with prepare_to_wait() above so that either
    3136                 :            :          * waitqueue_active() is visible here or !work_is_canceling() is
    3137                 :            :          * visible there.
    3138                 :            :          */
    3139                 :       1705 :         smp_mb();
    3140         [ -  + ]:       1705 :         if (waitqueue_active(&cancel_waitq))
    3141                 :          0 :                 __wake_up(&cancel_waitq, TASK_NORMAL, 1, work);
    3142                 :            : 
    3143                 :       1705 :         return ret;
    3144                 :            : }
    3145                 :            : 
    3146                 :            : /**
    3147                 :            :  * cancel_work_sync - cancel a work and wait for it to finish
    3148                 :            :  * @work: the work to cancel
    3149                 :            :  *
    3150                 :            :  * Cancel @work and wait for its execution to finish.  This function
    3151                 :            :  * can be used even if the work re-queues itself or migrates to
    3152                 :            :  * another workqueue.  On return from this function, @work is
    3153                 :            :  * guaranteed to be not pending or executing on any CPU.
    3154                 :            :  *
    3155                 :            :  * cancel_work_sync(&delayed_work->work) must not be used for
    3156                 :            :  * delayed work items.  Use cancel_delayed_work_sync() instead.
    3157                 :            :  *
    3158                 :            :  * The caller must ensure that the workqueue on which @work was last
    3159                 :            :  * queued can't be destroyed before this function returns.
    3160                 :            :  *
    3161                 :            :  * Return:
    3162                 :            :  * %true if @work was pending, %false otherwise.
    3163                 :            :  */
    3164                 :       1309 : bool cancel_work_sync(struct work_struct *work)
    3165                 :            : {
    3166                 :       1309 :         return __cancel_work_timer(work, false);
    3167                 :            : }
    3168                 :            : EXPORT_SYMBOL_GPL(cancel_work_sync);
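Editor's illustration with invented names: the teardown pattern the comment above makes safe, since after cancel_work_sync() returns the handler is neither pending nor running anywhere.

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

/* Hypothetical context touched by its own work item. */
struct my_ctx {
	struct work_struct refresh_work;
	int value;
};

static void my_refresh_fn(struct work_struct *work)
{
	struct my_ctx *ctx = container_of(work, struct my_ctx, refresh_work);

	ctx->value++;				/* may also re-queue itself */
}

static void my_ctx_destroy(struct my_ctx *ctx)
{
	cancel_work_sync(&ctx->refresh_work);	/* no longer pending or running */
	kfree(ctx);				/* now safe to free */
}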
    3169                 :            : 
    3170                 :            : /**
    3171                 :            :  * flush_delayed_work - wait for a dwork to finish executing the last queueing instance
    3172                 :            :  * @dwork: the delayed work to flush
    3173                 :            :  *
    3174                 :            :  * Delayed timer is cancelled and the pending work is queued for
    3175                 :            :  * immediate execution.  Like flush_work(), this function only
    3176                 :            :  * considers the last queueing instance of @dwork.
    3177                 :            :  *
    3178                 :            :  * Return:
    3179                 :            :  * %true if flush_work() waited for the work to finish execution,
    3180                 :            :  * %false if it was already idle.
    3181                 :            :  */
    3182                 :         22 : bool flush_delayed_work(struct delayed_work *dwork)
    3183                 :            : {
    3184                 :         22 :         local_irq_disable();
    3185         [ +  - ]:         22 :         if (del_timer_sync(&dwork->timer))
    3186                 :         22 :                 __queue_work(dwork->cpu, dwork->wq, &dwork->work);
    3187                 :         22 :         local_irq_enable();
    3188                 :         22 :         return flush_work(&dwork->work);
    3189                 :            : }
    3190                 :            : EXPORT_SYMBOL(flush_delayed_work);
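Editor's illustration with invented names: forcing one immediate run of a delayed work item, which is exactly the timer-cancel-plus-queue sequence implemented above.

#include <linux/jiffies.h>
#include <linux/workqueue.h>

static void my_poll_fn(struct work_struct *work);
static DECLARE_DELAYED_WORK(my_poll_work, my_poll_fn);

static void my_poll_fn(struct work_struct *work)
{
	/* ... poll the hardware ... */
}

static void my_poll_now(void)
{
	/* normally runs once a second ... */
	schedule_delayed_work(&my_poll_work, msecs_to_jiffies(1000));

	/* ... but run it right now and wait for it to finish */
	flush_delayed_work(&my_poll_work);
}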
    3191                 :            : 
    3192                 :            : /**
    3193                 :            :  * flush_rcu_work - wait for a rwork to finish executing the last queueing instance
    3194                 :            :  * @rwork: the rcu work to flush
    3195                 :            :  *
    3196                 :            :  * Return:
    3197                 :            :  * %true if flush_rcu_work() waited for the work to finish execution,
    3198                 :            :  * %false if it was already idle.
    3199                 :            :  */
    3200                 :          0 : bool flush_rcu_work(struct rcu_work *rwork)
    3201                 :            : {
    3202         [ #  # ]:          0 :         if (test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&rwork->work))) {
    3203                 :          0 :                 rcu_barrier();
    3204                 :          0 :                 flush_work(&rwork->work);
    3205                 :          0 :                 return true;
    3206                 :            :         } else {
    3207                 :          0 :                 return flush_work(&rwork->work);
    3208                 :            :         }
    3209                 :            : }
    3210                 :            : EXPORT_SYMBOL(flush_rcu_work);
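Editor's illustration with hypothetical names: queueing work behind an RCU grace period via queue_rcu_work() and later synchronizing on it with flush_rcu_work().

#include <linux/workqueue.h>

static struct rcu_work my_retire_rwork;

static void my_retire_fn(struct work_struct *work)
{
	/* a full RCU grace period has elapsed before this runs */
}

static void my_retire_old_table(void)
{
	INIT_RCU_WORK(&my_retire_rwork, my_retire_fn);
	queue_rcu_work(system_wq, &my_retire_rwork);
}

static void my_retire_sync(void)
{
	/* waits for the grace period and my_retire_fn() if one was queued */
	flush_rcu_work(&my_retire_rwork);
}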
    3211                 :            : 
    3212                 :         44 : static bool __cancel_work(struct work_struct *work, bool is_dwork)
    3213                 :            : {
    3214                 :         44 :         unsigned long flags;
    3215                 :         44 :         int ret;
    3216                 :            : 
    3217                 :         44 :         do {
    3218                 :         44 :                 ret = try_to_grab_pending(work, is_dwork, &flags);
    3219         [ -  + ]:         44 :         } while (unlikely(ret == -EAGAIN));
    3220                 :            : 
    3221         [ +  - ]:         44 :         if (unlikely(ret < 0))
    3222                 :            :                 return false;
    3223                 :            : 
    3224                 :         44 :         set_work_pool_and_clear_pending(work, get_work_pool_id(work));
    3225                 :         44 :         local_irq_restore(flags);
    3226                 :         44 :         return ret;
    3227                 :            : }
    3228                 :            : 
    3229                 :            : /**
    3230                 :            :  * cancel_delayed_work - cancel a delayed work
    3231                 :            :  * @dwork: delayed_work to cancel
    3232                 :            :  *
    3233                 :            :  * Kill off a pending delayed_work.
    3234                 :            :  *
    3235                 :            :  * Return: %true if @dwork was pending and canceled; %false if it wasn't
    3236                 :            :  * pending.
    3237                 :            :  *
    3238                 :            :  * Note:
    3239                 :            :  * The work callback function may still be running on return, unless
    3240                 :            :  * it returns %true and the work doesn't re-arm itself.  Explicitly flush or
    3241                 :            :  * use cancel_delayed_work_sync() to wait on it.
    3242                 :            :  *
    3243                 :            :  * This function is safe to call from any context including IRQ handler.
    3244                 :            :  */
    3245                 :         44 : bool cancel_delayed_work(struct delayed_work *dwork)
    3246                 :            : {
    3247                 :         44 :         return __cancel_work(&dwork->work, true);
    3248                 :            : }
    3249                 :            : EXPORT_SYMBOL(cancel_delayed_work);
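Editor's illustration with invented names: the IRQ-safe, non-waiting cancel described above, for example dropping a timeout when the awaited completion arrives.

#include <linux/workqueue.h>

static void my_timeout_fn(struct work_struct *work);
static DECLARE_DELAYED_WORK(my_timeout_work, my_timeout_fn);

static void my_timeout_fn(struct work_struct *work)
{
	/* ... the operation took too long ... */
}

/* Called from the completion interrupt: drop the pending timeout. */
static void my_completion_irq(void)
{
	/*
	 * Safe in IRQ context.  The callback may still be running;
	 * use cancel_delayed_work_sync() from process context when
	 * that matters.
	 */
	cancel_delayed_work(&my_timeout_work);
}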
    3250                 :            : 
    3251                 :            : /**
    3252                 :            :  * cancel_delayed_work_sync - cancel a delayed work and wait for it to finish
    3253                 :            :  * @dwork: the delayed work to cancel
    3254                 :            :  *
    3255                 :            :  * This is cancel_work_sync() for delayed works.
    3256                 :            :  *
    3257                 :            :  * Return:
    3258                 :            :  * %true if @dwork was pending, %false otherwise.
    3259                 :            :  */
    3260                 :        396 : bool cancel_delayed_work_sync(struct delayed_work *dwork)
    3261                 :            : {
    3262                 :        396 :         return __cancel_work_timer(&dwork->work, true);
    3263                 :            : }
    3264                 :            : EXPORT_SYMBOL(cancel_delayed_work_sync);
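Editor's illustration with invented names: stopping a self-re-arming delayed work for good, which the description above says is safe even though the handler re-queues itself.

#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

/* Hypothetical software watchdog that keeps re-arming itself. */
struct my_wdt {
	struct delayed_work ping_dwork;
};

static void my_wdt_ping(struct work_struct *work)
{
	struct my_wdt *wdt = container_of(to_delayed_work(work),
					  struct my_wdt, ping_dwork);

	/* ... ping the hardware ... */
	schedule_delayed_work(&wdt->ping_dwork, HZ);
}

static void my_wdt_stop(struct my_wdt *wdt)
{
	cancel_delayed_work_sync(&wdt->ping_dwork);	/* loop is stopped */
	kfree(wdt);
}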
    3265                 :            : 
    3266                 :            : /**
    3267                 :            :  * schedule_on_each_cpu - execute a function synchronously on each online CPU
    3268                 :            :  * @func: the function to call
    3269                 :            :  *
    3270                 :            :  * schedule_on_each_cpu() executes @func on each online CPU using the
    3271                 :            :  * system workqueue and blocks until all CPUs have completed.
    3272                 :            :  * schedule_on_each_cpu() is very slow.
    3273                 :            :  *
    3274                 :            :  * Return:
    3275                 :            :  * 0 on success, -errno on failure.
    3276                 :            :  */
    3277                 :          0 : int schedule_on_each_cpu(work_func_t func)
    3278                 :            : {
    3279                 :          0 :         int cpu;
    3280                 :          0 :         struct work_struct __percpu *works;
    3281                 :            : 
    3282                 :          0 :         works = alloc_percpu(struct work_struct);
    3283         [ #  # ]:          0 :         if (!works)
    3284                 :            :                 return -ENOMEM;
    3285                 :            : 
    3286                 :          0 :         get_online_cpus();
    3287                 :            : 
    3288         [ #  # ]:          0 :         for_each_online_cpu(cpu) {
    3289                 :          0 :                 struct work_struct *work = per_cpu_ptr(works, cpu);
    3290                 :            : 
    3291                 :          0 :                 INIT_WORK(work, func);
    3292                 :          0 :                 schedule_work_on(cpu, work);
    3293                 :            :         }
    3294                 :            : 
    3295         [ #  # ]:          0 :         for_each_online_cpu(cpu)
    3296                 :          0 :                 flush_work(per_cpu_ptr(works, cpu));
    3297                 :            : 
    3298                 :          0 :         put_online_cpus();
    3299                 :          0 :         free_percpu(works);
    3300                 :          0 :         return 0;
    3301                 :            : }
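Editor's illustration with a hypothetical per-CPU counter: the synchronous run-on-every-online-CPU pattern this function provides.

#include <linux/percpu.h>
#include <linux/workqueue.h>

static DEFINE_PER_CPU(unsigned long, my_stat);

static void my_reset_stat(struct work_struct *work)
{
	/* runs on the CPU it was queued for */
	this_cpu_write(my_stat, 0);
}

static int my_reset_all_stats(void)
{
	/* blocks until every online CPU has run my_reset_stat() */
	return schedule_on_each_cpu(my_reset_stat);
}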
    3302                 :            : 
    3303                 :            : /**
    3304                 :            :  * execute_in_process_context - reliably execute the routine with user context
    3305                 :            :  * @fn:         the function to execute
    3306                 :            :  * @ew:         guaranteed storage for the execute work structure (must
    3307                 :            :  *              be available when the work executes)
    3308                 :            :  *
    3309                 :            :  * Executes the function immediately if process context is available,
    3310                 :            :  * otherwise schedules the function for delayed execution.
    3311                 :            :  *
    3312                 :            :  * Return:      0 - function was executed
    3313                 :            :  *              1 - function was scheduled for execution
    3314                 :            :  */
    3315                 :          0 : int execute_in_process_context(work_func_t fn, struct execute_work *ew)
    3316                 :            : {
    3317         [ #  # ]:          0 :         if (!in_interrupt()) {
    3318                 :          0 :                 fn(&ew->work);
    3319                 :          0 :                 return 0;
    3320                 :            :         }
    3321                 :            : 
    3322                 :          0 :         INIT_WORK(&ew->work, fn);
    3323                 :          0 :         schedule_work(&ew->work);
    3324                 :            : 
    3325                 :          0 :         return 1;
    3326                 :            : }
    3327                 :            : EXPORT_SYMBOL_GPL(execute_in_process_context);
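Editor's illustration with invented names: a cleanup helper callable from either process or interrupt context, deferring to the system workqueue only when it must, per the semantics above. The object must stay valid until the callback has run.

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

/* Hypothetical session object embedding the required execute_work. */
struct my_session {
	struct execute_work cleanup_ew;
};

static void my_session_cleanup(struct work_struct *work)
{
	struct my_session *s = container_of(work, struct my_session,
					    cleanup_ew.work);

	/* ... may sleep; runs in process context either way ... */
	kfree(s);
}

static void my_session_put(struct my_session *s)
{
	/* runs now if possible, otherwise schedules on the system wq */
	execute_in_process_context(my_session_cleanup, &s->cleanup_ew);
}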
    3328                 :            : 
    3329                 :            : /**
    3330                 :            :  * free_workqueue_attrs - free a workqueue_attrs
    3331                 :            :  * @attrs: workqueue_attrs to free
    3332                 :            :  *
    3333                 :            :  * Undo alloc_workqueue_attrs().
    3334                 :            :  */
    3335                 :        429 : void free_workqueue_attrs(struct workqueue_attrs *attrs)
    3336                 :            : {
    3337         [ #  # ]:          0 :         if (attrs) {
    3338                 :        220 :                 free_cpumask_var(attrs->cpumask);
    3339                 :        220 :                 kfree(attrs);
    3340                 :            :         }
    3341                 :          0 : }
    3342                 :            : 
    3343                 :            : /**
    3344                 :            :  * alloc_workqueue_attrs - allocate a workqueue_attrs
    3345                 :            :  *
    3346                 :            :  * Allocate a new workqueue_attrs, initialize with default settings and
    3347                 :            :  * return it.
    3348                 :            :  *
    3349                 :            :  * Return: The allocated new workqueue_attrs on success. %NULL on failure.
    3350                 :            :  */
    3351                 :        715 : struct workqueue_attrs *alloc_workqueue_attrs(void)
    3352                 :            : {
    3353                 :        715 :         struct workqueue_attrs *attrs;
    3354                 :            : 
    3355                 :        715 :         attrs = kzalloc(sizeof(*attrs), GFP_KERNEL);
    3356         [ -  + ]:        715 :         if (!attrs)
    3357                 :          0 :                 goto fail;
    3358                 :        715 :         if (!alloc_cpumask_var(&attrs->cpumask, GFP_KERNEL))
    3359                 :            :                 goto fail;
    3360                 :            : 
    3361                 :        715 :         cpumask_copy(attrs->cpumask, cpu_possible_mask);
    3362                 :        715 :         return attrs;
    3363                 :            : fail:
    3364                 :          0 :         free_workqueue_attrs(attrs);
    3365                 :          0 :         return NULL;
    3366                 :            : }
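Editor's sketch of the allocate/configure/free lifetime of a workqueue_attrs object; whether these helpers are visible outside workqueue.c depends on the kernel version, so treat this strictly as an illustration with invented names.

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/workqueue.h>

static int my_make_and_drop_attrs(void)
{
	struct workqueue_attrs *attrs;

	attrs = alloc_workqueue_attrs();
	if (!attrs)
		return -ENOMEM;

	attrs->nice = -10;				/* higher priority workers */
	cpumask_copy(attrs->cpumask, cpumask_of(0));	/* CPU 0 only */

	/* ... would normally be handed to apply_workqueue_attrs() ... */

	free_workqueue_attrs(attrs);
	return 0;
}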
    3367                 :            : 
    3368                 :        858 : static void copy_workqueue_attrs(struct workqueue_attrs *to,
    3369                 :            :                                  const struct workqueue_attrs *from)
    3370                 :            : {
    3371                 :        858 :         to->nice = from->nice;
    3372                 :        858 :         cpumask_copy(to->cpumask, from->cpumask);
    3373                 :            :         /*
    3374                 :            :          * Unlike hash and equality test, this function doesn't ignore
    3375                 :            :          * ->no_numa as it is used for both pool and wq attrs.  Instead,
    3376                 :            :          * get_unbound_pool() explicitly clears ->no_numa after copying.
    3377                 :            :          */
    3378                 :        858 :         to->no_numa = from->no_numa;
    3379                 :            : }
    3380                 :            : 
    3381                 :            : /* hash value of the content of @attrs */
    3382                 :        209 : static u32 wqattrs_hash(const struct workqueue_attrs *attrs)
    3383                 :            : {
    3384                 :        209 :         u32 hash = 0;
    3385                 :            : 
    3386                 :        209 :         hash = jhash_1word(attrs->nice, hash);
    3387                 :        209 :         hash = jhash(cpumask_bits(attrs->cpumask),
    3388                 :            :                      BITS_TO_LONGS(nr_cpumask_bits) * sizeof(long), hash);
    3389                 :        209 :         return hash;
    3390                 :            : }
    3391                 :            : 
    3392                 :            : /* content equality test */
    3393                 :        187 : static bool wqattrs_equal(const struct workqueue_attrs *a,
    3394                 :            :                           const struct workqueue_attrs *b)
    3395                 :            : {
    3396                 :        187 :         if (a->nice != b->nice)
    3397                 :            :                 return false;
    3398         [ +  - ]:        187 :         if (!cpumask_equal(a->cpumask, b->cpumask))
    3399                 :            :                 return false;
    3400                 :            :         return true;
    3401                 :            : }
    3402                 :            : 
    3403                 :            : /**
    3404                 :            :  * init_worker_pool - initialize a newly zalloc'd worker_pool
    3405                 :            :  * @pool: worker_pool to initialize
    3406                 :            :  *
    3407                 :            :  * Initialize a newly zalloc'd @pool.  It also allocates @pool->attrs.
    3408                 :            :  *
    3409                 :            :  * Return: 0 on success, -errno on failure.  Even on failure, all fields
    3410                 :            :  * inside @pool proper are initialized and put_unbound_pool() can be called
    3411                 :            :  * on @pool safely to release it.
    3412                 :            :  */
    3413                 :         44 : static int init_worker_pool(struct worker_pool *pool)
    3414                 :            : {
    3415                 :         44 :         spin_lock_init(&pool->lock);
    3416                 :         44 :         pool->id = -1;
    3417                 :         44 :         pool->cpu = -1;
    3418                 :         44 :         pool->node = NUMA_NO_NODE;
    3419                 :         44 :         pool->flags |= POOL_DISASSOCIATED;
    3420                 :         44 :         pool->watchdog_ts = jiffies;
    3421                 :         44 :         INIT_LIST_HEAD(&pool->worklist);
    3422                 :         44 :         INIT_LIST_HEAD(&pool->idle_list);
    3423                 :         44 :         hash_init(pool->busy_hash);
    3424                 :            : 
    3425                 :         44 :         timer_setup(&pool->idle_timer, idle_worker_timeout, TIMER_DEFERRABLE);
    3426                 :            : 
    3427                 :         44 :         timer_setup(&pool->mayday_timer, pool_mayday_timeout, 0);
    3428                 :            : 
    3429                 :         44 :         INIT_LIST_HEAD(&pool->workers);
    3430                 :            : 
    3431                 :         44 :         ida_init(&pool->worker_ida);
    3432                 :         44 :         INIT_HLIST_NODE(&pool->hash_node);
    3433                 :         44 :         pool->refcnt = 1;
    3434                 :            : 
    3435                 :            :         /* shouldn't fail above this point */
    3436                 :         44 :         pool->attrs = alloc_workqueue_attrs();
    3437         [ -  + ]:         44 :         if (!pool->attrs)
    3438                 :          0 :                 return -ENOMEM;
    3439                 :            :         return 0;
    3440                 :            : }
    3441                 :            : 
    3442                 :            : #ifdef CONFIG_LOCKDEP
    3443                 :            : static void wq_init_lockdep(struct workqueue_struct *wq)
    3444                 :            : {
    3445                 :            :         char *lock_name;
    3446                 :            : 
    3447                 :            :         lockdep_register_key(&wq->key);
    3448                 :            :         lock_name = kasprintf(GFP_KERNEL, "%s%s", "(wq_completion)", wq->name);
    3449                 :            :         if (!lock_name)
    3450                 :            :                 lock_name = wq->name;
    3451                 :            : 
    3452                 :            :         wq->lock_name = lock_name;
    3453                 :            :         lockdep_init_map(&wq->lockdep_map, lock_name, &wq->key, 0);
    3454                 :            : }
    3455                 :            : 
    3456                 :            : static void wq_unregister_lockdep(struct workqueue_struct *wq)
    3457                 :            : {
    3458                 :            :         lockdep_unregister_key(&wq->key);
    3459                 :            : }
    3460                 :            : 
    3461                 :            : static void wq_free_lockdep(struct workqueue_struct *wq)
    3462                 :            : {
    3463                 :            :         if (wq->lock_name != wq->name)
    3464                 :            :                 kfree(wq->lock_name);
    3465                 :            : }
    3466                 :            : #else
    3467                 :        484 : static void wq_init_lockdep(struct workqueue_struct *wq)
    3468                 :            : {
    3469                 :        484 : }
    3470                 :            : 
    3471                 :         11 : static void wq_unregister_lockdep(struct workqueue_struct *wq)
    3472                 :            : {
    3473                 :         11 : }
    3474                 :            : 
    3475                 :         11 : static void wq_free_lockdep(struct workqueue_struct *wq)
    3476                 :            : {
    3477                 :         11 : }
    3478                 :            : #endif
    3479                 :            : 
    3480                 :         11 : static void rcu_free_wq(struct rcu_head *rcu)
    3481                 :            : {
    3482                 :         11 :         struct workqueue_struct *wq =
    3483                 :         11 :                 container_of(rcu, struct workqueue_struct, rcu);
    3484                 :            : 
    3485                 :         11 :         wq_free_lockdep(wq);
    3486                 :            : 
    3487         [ -  + ]:         11 :         if (!(wq->flags & WQ_UNBOUND))
    3488                 :          0 :                 free_percpu(wq->cpu_pwqs);
    3489                 :            :         else
    3490         [ +  - ]:         11 :                 free_workqueue_attrs(wq->unbound_attrs);
    3491                 :            : 
    3492                 :         11 :         kfree(wq->rescuer);
    3493                 :         11 :         kfree(wq);
    3494                 :         11 : }
    3495                 :            : 
    3496                 :          0 : static void rcu_free_pool(struct rcu_head *rcu)
    3497                 :            : {
    3498                 :          0 :         struct worker_pool *pool = container_of(rcu, struct worker_pool, rcu);
    3499                 :            : 
    3500                 :          0 :         ida_destroy(&pool->worker_ida);
    3501         [ #  # ]:          0 :         free_workqueue_attrs(pool->attrs);
    3502                 :          0 :         kfree(pool);
    3503                 :          0 : }
    3504                 :            : 
    3505                 :            : /**
    3506                 :            :  * put_unbound_pool - put a worker_pool
    3507                 :            :  * @pool: worker_pool to put
    3508                 :            :  *
    3509                 :            :  * Put @pool.  If its refcnt reaches zero, it gets destroyed in an RCU
    3510                 :            :  * safe manner.  get_unbound_pool() calls this function on its failure path
    3511                 :            :  * and this function should be able to release pools which went through,
    3512                 :            :  * successfully or not, init_worker_pool().
    3513                 :            :  *
    3514                 :            :  * Should be called with wq_pool_mutex held.
    3515                 :            :  */
    3516                 :         11 : static void put_unbound_pool(struct worker_pool *pool)
    3517                 :            : {
    3518                 :         11 :         DECLARE_COMPLETION_ONSTACK(detach_completion);
    3519                 :         11 :         struct worker *worker;
    3520                 :            : 
    3521                 :         11 :         lockdep_assert_held(&wq_pool_mutex);
    3522                 :            : 
    3523         [ -  + ]:         11 :         if (--pool->refcnt)
    3524                 :         11 :                 return;
    3525                 :            : 
    3526                 :            :         /* sanity checks */
    3527   [ #  #  #  # ]:          0 :         if (WARN_ON(!(pool->cpu < 0)) ||
    3528   [ #  #  #  # ]:          0 :             WARN_ON(!list_empty(&pool->worklist)))
    3529                 :            :                 return;
    3530                 :            : 
    3531                 :            :         /* release id and unhash */
    3532         [ #  # ]:          0 :         if (pool->id >= 0)
    3533                 :          0 :                 idr_remove(&worker_pool_idr, pool->id);
    3534         [ #  # ]:          0 :         hash_del(&pool->hash_node);
    3535                 :            : 
    3536                 :            :         /*
    3537                 :            :          * Become the manager and destroy all workers.  This prevents
    3538                 :            :          * @pool's workers from blocking on attach_mutex.  We're the last
    3539                 :            :          * manager and @pool gets freed with the flag set.
    3540                 :            :          */
    3541                 :          0 :         spin_lock_irq(&pool->lock);
    3542   [ #  #  #  # ]:          0 :         wait_event_lock_irq(wq_manager_wait,
    3543                 :            :                             !(pool->flags & POOL_MANAGER_ACTIVE), pool->lock);
    3544                 :          0 :         pool->flags |= POOL_MANAGER_ACTIVE;
    3545                 :            : 
    3546   [ #  #  #  # ]:          0 :         while ((worker = first_idle_worker(pool)))
    3547                 :          0 :                 destroy_worker(worker);
    3548         [ #  # ]:          0 :         WARN_ON(pool->nr_workers || pool->nr_idle);
    3549                 :          0 :         spin_unlock_irq(&pool->lock);
    3550                 :            : 
    3551                 :          0 :         mutex_lock(&wq_pool_attach_mutex);
    3552         [ #  # ]:          0 :         if (!list_empty(&pool->workers))
    3553                 :          0 :                 pool->detach_completion = &detach_completion;
    3554                 :          0 :         mutex_unlock(&wq_pool_attach_mutex);
    3555                 :            : 
    3556         [ #  # ]:          0 :         if (pool->detach_completion)
    3557                 :          0 :                 wait_for_completion(pool->detach_completion);
    3558                 :            : 
    3559                 :            :         /* shut down the timers */
    3560                 :          0 :         del_timer_sync(&pool->idle_timer);
    3561                 :          0 :         del_timer_sync(&pool->mayday_timer);
    3562                 :            : 
    3563                 :            :         /* RCU protected to allow dereferences from get_work_pool() */
    3564                 :          0 :         call_rcu(&pool->rcu, rcu_free_pool);
    3565                 :            : }
    3566                 :            : 
    3567                 :            : /**
    3568                 :            :  * get_unbound_pool - get a worker_pool with the specified attributes
    3569                 :            :  * @attrs: the attributes of the worker_pool to get
    3570                 :            :  *
    3571                 :            :  * Obtain a worker_pool which has the same attributes as @attrs, bump the
    3572                 :            :  * reference count and return it.  If there already is a matching
    3573                 :            :  * worker_pool, it will be used; otherwise, this function attempts to
    3574                 :            :  * create a new one.
    3575                 :            :  *
    3576                 :            :  * Should be called with wq_pool_mutex held.
    3577                 :            :  *
    3578                 :            :  * Return: On success, a worker_pool with the same attributes as @attrs.
    3579                 :            :  * On failure, %NULL.
    3580                 :            :  */
    3581                 :        209 : static struct worker_pool *get_unbound_pool(const struct workqueue_attrs *attrs)
    3582                 :            : {
    3583                 :        209 :         u32 hash = wqattrs_hash(attrs);
    3584                 :        209 :         struct worker_pool *pool;
    3585                 :        209 :         int node;
    3586                 :        209 :         int target_node = NUMA_NO_NODE;
    3587                 :            : 
    3588                 :        209 :         lockdep_assert_held(&wq_pool_mutex);
    3589                 :            : 
    3590                 :            :         /* do we already have a matching pool? */
    3591   [ +  +  -  -  :        418 :         hash_for_each_possible(unbound_pool_hash, pool, hash_node, hash) {
                   +  + ]
    3592         [ +  - ]:        187 :                 if (wqattrs_equal(pool->attrs, attrs)) {
    3593                 :        187 :                         pool->refcnt++;
    3594                 :        187 :                         return pool;
    3595                 :            :                 }
    3596                 :            :         }
    3597                 :            : 
    3598                 :            :         /* if cpumask is contained inside a NUMA node, we belong to that node */
    3599         [ -  + ]:         22 :         if (wq_numa_enabled) {
    3600         [ #  # ]:          0 :                 for_each_node(node) {
    3601                 :          0 :                         if (cpumask_subset(attrs->cpumask,
    3602         [ #  # ]:          0 :                                            wq_numa_possible_cpumask[node])) {
    3603                 :            :                                 target_node = node;
    3604                 :            :                                 break;
    3605                 :            :                         }
    3606                 :            :                 }
    3607                 :            :         }
    3608                 :            : 
    3609                 :            :         /* nope, create a new one */
    3610                 :         22 :         pool = kzalloc_node(sizeof(*pool), GFP_KERNEL, target_node);
    3611   [ +  -  -  + ]:         22 :         if (!pool || init_worker_pool(pool) < 0)
    3612                 :          0 :                 goto fail;
    3613                 :            : 
    3614                 :         22 :         lockdep_set_subclass(&pool->lock, 1);    /* see put_pwq() */
    3615                 :         22 :         copy_workqueue_attrs(pool->attrs, attrs);
    3616                 :         22 :         pool->node = target_node;
    3617                 :            : 
    3618                 :            :         /*
    3619                 :            :          * no_numa isn't a worker_pool attribute, always clear it.  See
    3620                 :            :          * 'struct workqueue_attrs' comments for detail.
    3621                 :            :          */
    3622                 :         22 :         pool->attrs->no_numa = false;
    3623                 :            : 
    3624                 :         22 :         if (worker_pool_assign_id(pool) < 0)
    3625                 :          0 :                 goto fail;
    3626                 :            : 
    3627                 :            :         /* create and start the initial worker */
    3628   [ +  +  -  + ]:         22 :         if (wq_online && !create_worker(pool))
    3629                 :          0 :                 goto fail;
    3630                 :            : 
    3631                 :            :         /* install */
    3632         [ -  + ]:         22 :         hash_add(unbound_pool_hash, &pool->hash_node, hash);
    3633                 :            : 
    3634                 :         22 :         return pool;
    3635                 :          0 : fail:
    3636         [ #  # ]:          0 :         if (pool)
    3637                 :          0 :                 put_unbound_pool(pool);
    3638                 :            :         return NULL;
    3639                 :            : }
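Editor's note: the user-visible effect of the attrs-hashed pool cache above is that unbound workqueues created with identical attributes end up sharing backing worker pools. A hedged sketch with invented names; the sharing happens inside alloc_workqueue().

#include <linux/errno.h>
#include <linux/workqueue.h>

static struct workqueue_struct *my_wq_a, *my_wq_b;

static int my_create_unbound_wqs(void)
{
	/* same (default) unbound attrs -> same hash -> same worker_pool */
	my_wq_a = alloc_workqueue("my_wq_a", WQ_UNBOUND, 0);
	my_wq_b = alloc_workqueue("my_wq_b", WQ_UNBOUND, 0);
	if (!my_wq_a || !my_wq_b) {
		if (my_wq_a)
			destroy_workqueue(my_wq_a);
		if (my_wq_b)
			destroy_workqueue(my_wq_b);
		return -ENOMEM;
	}
	return 0;
}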
    3640                 :            : 
    3641                 :         11 : static void rcu_free_pwq(struct rcu_head *rcu)
    3642                 :            : {
    3643                 :         11 :         kmem_cache_free(pwq_cache,
    3644                 :         11 :                         container_of(rcu, struct pool_workqueue, rcu));
    3645                 :         11 : }
    3646                 :            : 
    3647                 :            : /*
    3648                 :            :  * Scheduled on system_wq by put_pwq() when an unbound pwq hits zero refcnt
    3649                 :            :  * and needs to be destroyed.
    3650                 :            :  */
    3651                 :         11 : static void pwq_unbound_release_workfn(struct work_struct *work)
    3652                 :            : {
    3653                 :         11 :         struct pool_workqueue *pwq = container_of(work, struct pool_workqueue,
    3654                 :            :                                                   unbound_release_work);
    3655                 :         11 :         struct workqueue_struct *wq = pwq->wq;
    3656                 :         11 :         struct worker_pool *pool = pwq->pool;
    3657                 :         11 :         bool is_last;
    3658                 :            : 
    3659   [ -  +  +  - ]:         11 :         if (WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND)))
    3660                 :            :                 return;
    3661                 :            : 
    3662                 :         11 :         mutex_lock(&wq->mutex);
    3663                 :         11 :         list_del_rcu(&pwq->pwqs_node);
    3664                 :         11 :         is_last = list_empty(&wq->pwqs);
    3665                 :         11 :         mutex_unlock(&wq->mutex);
    3666                 :            : 
    3667                 :         11 :         mutex_lock(&wq_pool_mutex);
    3668                 :         11 :         put_unbound_pool(pool);
    3669                 :         11 :         mutex_unlock(&wq_pool_mutex);
    3670                 :            : 
    3671                 :         11 :         call_rcu(&pwq->rcu, rcu_free_pwq);
    3672                 :            : 
    3673                 :            :         /*
    3674                 :            :          * If we're the last pwq going away, @wq is already dead and no one
    3675                 :            :  * is going to access it anymore.  Schedule RCU free.
    3676                 :            :          */
    3677         [ +  - ]:         11 :         if (is_last) {
    3678                 :         11 :                 wq_unregister_lockdep(wq);
    3679                 :         11 :                 call_rcu(&wq->rcu, rcu_free_wq);
    3680                 :            :         }
    3681                 :            : }
    3682                 :            : 
    3683                 :            : /**
    3684                 :            :  * pwq_adjust_max_active - update a pwq's max_active to the current setting
    3685                 :            :  * @pwq: target pool_workqueue
    3686                 :            :  *
    3687                 :            :  * If @pwq isn't freezing, set @pwq->max_active to the associated
    3688                 :            :  * workqueue's saved_max_active and activate delayed work items
    3689                 :            :  * accordingly.  If @pwq is freezing, clear @pwq->max_active to zero.
    3690                 :            :  */
    3691                 :        968 : static void pwq_adjust_max_active(struct pool_workqueue *pwq)
    3692                 :            : {
    3693                 :        968 :         struct workqueue_struct *wq = pwq->wq;
    3694                 :        968 :         bool freezable = wq->flags & WQ_FREEZABLE;
    3695                 :        968 :         unsigned long flags;
    3696                 :            : 
    3697                 :            :         /* for @wq->saved_max_active */
    3698                 :        968 :         lockdep_assert_held(&wq->mutex);
    3699                 :            : 
    3700                 :            :         /* fast exit for non-freezable wqs */
    3701   [ +  +  +  + ]:        968 :         if (!freezable && pwq->max_active == wq->saved_max_active)
    3702                 :            :                 return;
    3703                 :            : 
    3704                 :            :         /* this function can be called during early boot w/ irq disabled */
    3705                 :        528 :         spin_lock_irqsave(&pwq->pool->lock, flags);
    3706                 :            : 
    3707                 :            :         /*
    3708                 :            :          * During [un]freezing, the caller is responsible for ensuring that
    3709                 :            :          * this function is called at least once after @workqueue_freezing
    3710                 :            :          * is updated and visible.
    3711                 :            :          */
    3712   [ +  +  +  - ]:        528 :         if (!freezable || !workqueue_freezing) {
    3713                 :        528 :                 pwq->max_active = wq->saved_max_active;
    3714                 :            : 
    3715         [ -  + ]:        528 :                 while (!list_empty(&pwq->delayed_works) &&
    3716         [ #  # ]:          0 :                        pwq->nr_active < pwq->max_active)
    3717                 :          0 :                         pwq_activate_first_delayed(pwq);
    3718                 :            : 
    3719                 :            :                 /*
    3720                 :            :                  * Need to kick a worker after thawed or an unbound wq's
    3721                 :            :                  * max_active is bumped.  It's a slow path.  Do it always.
    3722                 :            :                  */
    3723         [ +  + ]:        528 :                 wake_up_worker(pwq->pool);
    3724                 :            :         } else {
    3725                 :          0 :                 pwq->max_active = 0;
    3726                 :            :         }
    3727                 :            : 
    3728                 :        528 :         spin_unlock_irqrestore(&pwq->pool->lock, flags);
    3729                 :            : }
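Editor's illustration with invented names: the caller-side knobs that feed saved_max_active, which pwq_adjust_max_active() then propagates to each pool_workqueue.

#include <linux/errno.h>
#include <linux/workqueue.h>

static struct workqueue_struct *my_wq;

static int my_setup_wq(void)
{
	/* at most one item in flight to start with */
	my_wq = alloc_workqueue("my_wq", WQ_UNBOUND, 1);
	if (!my_wq)
		return -ENOMEM;
	return 0;
}

static void my_widen_wq(void)
{
	/* updates saved_max_active and adjusts the live pwqs under wq->mutex */
	workqueue_set_max_active(my_wq, 4);
}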
    3730                 :            : 
    3731                 :            : /* initialize newly alloced @pwq which is associated with @wq and @pool */
    3732                 :        484 : static void init_pwq(struct pool_workqueue *pwq, struct workqueue_struct *wq,
    3733                 :            :                      struct worker_pool *pool)
    3734                 :            : {
    3735         [ -  + ]:        484 :         BUG_ON((unsigned long)pwq & WORK_STRUCT_FLAG_MASK);
    3736                 :            : 
    3737                 :        484 :         memset(pwq, 0, sizeof(*pwq));
    3738                 :            : 
    3739                 :        484 :         pwq->pool = pool;
    3740                 :        484 :         pwq->wq = wq;
    3741                 :        484 :         pwq->flush_color = -1;
    3742                 :        484 :         pwq->refcnt = 1;
    3743                 :        484 :         INIT_LIST_HEAD(&pwq->delayed_works);
    3744                 :        484 :         INIT_LIST_HEAD(&pwq->pwqs_node);
    3745                 :        484 :         INIT_LIST_HEAD(&pwq->mayday_node);
    3746                 :        484 :         INIT_WORK(&pwq->unbound_release_work, pwq_unbound_release_workfn);
    3747                 :        484 : }
    3748                 :            : 
    3749                 :            : /* sync @pwq with the current state of its associated wq and link it */
    3750                 :        693 : static void link_pwq(struct pool_workqueue *pwq)
    3751                 :            : {
    3752                 :        693 :         struct workqueue_struct *wq = pwq->wq;
    3753                 :            : 
    3754                 :        693 :         lockdep_assert_held(&wq->mutex);
    3755                 :            : 
    3756                 :            :         /* may be called multiple times, ignore if already linked */
    3757         [ +  + ]:        693 :         if (!list_empty(&pwq->pwqs_node))
    3758                 :            :                 return;
    3759                 :            : 
    3760                 :            :         /* set the matching work_color */
    3761                 :        484 :         pwq->work_color = wq->work_color;
    3762                 :            : 
    3763                 :            :         /* sync max_active to the current setting */
    3764                 :        484 :         pwq_adjust_max_active(pwq);
    3765                 :            : 
    3766                 :            :         /* link in @pwq */
    3767                 :        484 :         list_add_rcu(&pwq->pwqs_node, &wq->pwqs);
    3768                 :            : }
    3769                 :            : 
    3770                 :            : /* obtain a pool matching @attrs and create a pwq associating the pool and @wq */
    3771                 :        209 : static struct pool_workqueue *alloc_unbound_pwq(struct workqueue_struct *wq,
    3772                 :            :                                         const struct workqueue_attrs *attrs)
    3773                 :            : {
    3774                 :        209 :         struct worker_pool *pool;
    3775                 :        209 :         struct pool_workqueue *pwq;
    3776                 :            : 
    3777                 :        209 :         lockdep_assert_held(&wq_pool_mutex);
    3778                 :            : 
    3779                 :        209 :         pool = get_unbound_pool(attrs);
    3780         [ +  - ]:        209 :         if (!pool)
    3781                 :            :                 return NULL;
    3782                 :            : 
    3783                 :        209 :         pwq = kmem_cache_alloc_node(pwq_cache, GFP_KERNEL, pool->node);
    3784         [ -  + ]:        209 :         if (!pwq) {
    3785                 :          0 :                 put_unbound_pool(pool);
    3786                 :          0 :                 return NULL;
    3787                 :            :         }
    3788                 :            : 
    3789                 :        209 :         init_pwq(pwq, wq, pool);
    3790                 :        209 :         return pwq;
    3791                 :            : }
    3792                 :            : 
    3793                 :            : /**
    3794                 :            :  * wq_calc_node_cpumask - calculate a wq_attrs' cpumask for the specified node
    3795                 :            :  * @attrs: the wq_attrs of the default pwq of the target workqueue
    3796                 :            :  * @node: the target NUMA node
    3797                 :            :  * @cpu_going_down: if >= 0, the CPU to consider as offline
    3798                 :            :  * @cpumask: outarg, the resulting cpumask
    3799                 :            :  *
    3800                 :            :  * Calculate the cpumask a workqueue with @attrs should use on @node.  If
    3801                 :            :  * @cpu_going_down is >= 0, that cpu is considered offline during
    3802                 :            :  * calculation.  The result is stored in @cpumask.
    3803                 :            :  *
    3804                 :            :  * If NUMA affinity is not enabled, @attrs->cpumask is always used.  If
    3805                 :            :  * enabled and @node has online CPUs requested by @attrs, the returned
    3806                 :            :  * cpumask is the intersection of the possible CPUs of @node and
    3807                 :            :  * @attrs->cpumask.
    3808                 :            :  *
    3809                 :            :  * The caller is responsible for ensuring that the cpumask of @node stays
    3810                 :            :  * stable.
    3811                 :            :  *
    3812                 :            :  * Return: %true if the resulting @cpumask is different from @attrs->cpumask,
    3813                 :            :  * %false if equal.
    3814                 :            :  */
    3815                 :        209 : static bool wq_calc_node_cpumask(const struct workqueue_attrs *attrs, int node,
    3816                 :            :                                  int cpu_going_down, cpumask_t *cpumask)
    3817                 :            : {
    3818   [ -  +  -  - ]:        209 :         if (!wq_numa_enabled || attrs->no_numa)
    3819                 :        209 :                 goto use_dfl;
    3820                 :            : 
    3821                 :            :         /* does @node have any online CPUs @attrs wants? */
    3822         [ #  # ]:          0 :         cpumask_and(cpumask, cpumask_of_node(node), attrs->cpumask);
    3823         [ #  # ]:          0 :         if (cpu_going_down >= 0)
    3824                 :          0 :                 cpumask_clear_cpu(cpu_going_down, cpumask);
    3825                 :            : 
    3826         [ #  # ]:          0 :         if (cpumask_empty(cpumask))
    3827                 :          0 :                 goto use_dfl;
    3828                 :            : 
     3829                 :            :         /* yes, return possible CPUs in @node that @attrs wants */
    3830         [ #  # ]:          0 :         cpumask_and(cpumask, attrs->cpumask, wq_numa_possible_cpumask[node]);
    3831                 :            : 
    3832         [ #  # ]:          0 :         if (cpumask_empty(cpumask)) {
    3833         [ #  # ]:          0 :                 pr_warn_once("WARNING: workqueue cpumask: online intersect > "
    3834                 :            :                                 "possible intersect\n");
    3835                 :          0 :                 return false;
    3836                 :            :         }
    3837                 :            : 
    3838                 :          0 :         return !cpumask_equal(cpumask, attrs->cpumask);
    3839                 :            : 
    3840                 :        209 : use_dfl:
    3841                 :        209 :         cpumask_copy(cpumask, attrs->cpumask);
    3842                 :        209 :         return false;
    3843                 :            : }
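/*
 * Editor's note -- an illustrative worked example of the calculation above,
 * not part of workqueue.c.  Suppose NUMA affinity is enabled, node 1 has
 * online CPUs 4-5 and possible CPUs 4-7, @attrs->cpumask is 0-5 and no CPU
 * is going down:
 *
 *   online(4-5)  & attrs(0-5)    -> 4-5   (non-empty, so don't use the default)
 *   attrs(0-5)   & possible(4-7) -> 4-5   (stored in @cpumask)
 *
 * 4-5 differs from @attrs->cpumask, so the function returns %true and the
 * caller allocates a node-specific pwq.  Had @attrs->cpumask been 0-3, the
 * online intersection would be empty, @cpumask would become a plain copy of
 * @attrs->cpumask and the function would return %false.
 */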
    3844                 :            : 
    3845                 :            : /* install @pwq into @wq's numa_pwq_tbl[] for @node and return the old pwq */
    3846                 :        209 : static struct pool_workqueue *numa_pwq_tbl_install(struct workqueue_struct *wq,
    3847                 :            :                                                    int node,
    3848                 :            :                                                    struct pool_workqueue *pwq)
    3849                 :            : {
    3850                 :        209 :         struct pool_workqueue *old_pwq;
    3851                 :            : 
    3852                 :        209 :         lockdep_assert_held(&wq_pool_mutex);
    3853                 :        209 :         lockdep_assert_held(&wq->mutex);
    3854                 :            : 
    3855                 :            :         /* link_pwq() can handle duplicate calls */
    3856                 :        209 :         link_pwq(pwq);
    3857                 :            : 
    3858                 :        209 :         old_pwq = rcu_access_pointer(wq->numa_pwq_tbl[node]);
    3859                 :        209 :         rcu_assign_pointer(wq->numa_pwq_tbl[node], pwq);
    3860                 :          0 :         return old_pwq;
    3861                 :            : }
    3862                 :            : 
    3863                 :            : /* context to store the prepared attrs & pwqs before applying */
    3864                 :            : struct apply_wqattrs_ctx {
    3865                 :            :         struct workqueue_struct *wq;            /* target workqueue */
    3866                 :            :         struct workqueue_attrs  *attrs;         /* attrs to apply */
    3867                 :            :         struct list_head        list;           /* queued for batching commit */
    3868                 :            :         struct pool_workqueue   *dfl_pwq;
    3869                 :            :         struct pool_workqueue   *pwq_tbl[];
    3870                 :            : };
    3871                 :            : 
    3872                 :            : /* free the resources after success or abort */
    3873                 :        209 : static void apply_wqattrs_cleanup(struct apply_wqattrs_ctx *ctx)
    3874                 :            : {
    3875         [ +  - ]:        209 :         if (ctx) {
    3876                 :        209 :                 int node;
    3877                 :            : 
    3878         [ +  + ]:        836 :                 for_each_node(node)
    3879                 :        209 :                         put_pwq_unlocked(ctx->pwq_tbl[node]);
    3880                 :        209 :                 put_pwq_unlocked(ctx->dfl_pwq);
    3881                 :            : 
    3882         [ +  - ]:        209 :                 free_workqueue_attrs(ctx->attrs);
    3883                 :            : 
    3884                 :        209 :                 kfree(ctx);
    3885                 :            :         }
    3886                 :        209 : }
    3887                 :            : 
    3888                 :            : /* allocate the attrs and pwqs for later installation */
    3889                 :            : static struct apply_wqattrs_ctx *
    3890                 :        209 : apply_wqattrs_prepare(struct workqueue_struct *wq,
    3891                 :            :                       const struct workqueue_attrs *attrs)
    3892                 :            : {
    3893                 :        209 :         struct apply_wqattrs_ctx *ctx;
    3894                 :        209 :         struct workqueue_attrs *new_attrs, *tmp_attrs;
    3895                 :        209 :         int node;
    3896                 :            : 
    3897                 :        209 :         lockdep_assert_held(&wq_pool_mutex);
    3898                 :            : 
    3899         [ -  + ]:        209 :         ctx = kzalloc(struct_size(ctx, pwq_tbl, nr_node_ids), GFP_KERNEL);
    3900                 :            : 
    3901                 :        209 :         new_attrs = alloc_workqueue_attrs();
    3902                 :        209 :         tmp_attrs = alloc_workqueue_attrs();
    3903   [ +  -  -  + ]:        209 :         if (!ctx || !new_attrs || !tmp_attrs)
    3904                 :          0 :                 goto out_free;
    3905                 :            : 
    3906                 :            :         /*
    3907                 :            :          * Calculate the attrs of the default pwq.
     3908                 :            :          * If the user-configured cpumask doesn't overlap with the
     3909                 :            :          * wq_unbound_cpumask, we fall back to the wq_unbound_cpumask.
    3910                 :            :          */
    3911         [ -  + ]:        209 :         copy_workqueue_attrs(new_attrs, attrs);
    3912         [ -  + ]:        209 :         cpumask_and(new_attrs->cpumask, new_attrs->cpumask, wq_unbound_cpumask);
    3913         [ -  + ]:        209 :         if (unlikely(cpumask_empty(new_attrs->cpumask)))
    3914                 :          0 :                 cpumask_copy(new_attrs->cpumask, wq_unbound_cpumask);
    3915                 :            : 
    3916                 :            :         /*
    3917                 :            :          * We may create multiple pwqs with differing cpumasks.  Make a
    3918                 :            :          * copy of @new_attrs which will be modified and used to obtain
    3919                 :            :          * pools.
    3920                 :            :          */
    3921                 :        209 :         copy_workqueue_attrs(tmp_attrs, new_attrs);
    3922                 :            : 
    3923                 :            :         /*
    3924                 :            :          * If something goes wrong during CPU up/down, we'll fall back to
    3925                 :            :          * the default pwq covering whole @attrs->cpumask.  Always create
    3926                 :            :          * it even if we don't use it immediately.
    3927                 :            :          */
    3928                 :        209 :         ctx->dfl_pwq = alloc_unbound_pwq(wq, new_attrs);
    3929         [ -  + ]:        209 :         if (!ctx->dfl_pwq)
    3930                 :          0 :                 goto out_free;
    3931                 :            : 
    3932         [ +  + ]:        836 :         for_each_node(node) {
    3933         [ -  + ]:        209 :                 if (wq_calc_node_cpumask(new_attrs, node, -1, tmp_attrs->cpumask)) {
    3934                 :          0 :                         ctx->pwq_tbl[node] = alloc_unbound_pwq(wq, tmp_attrs);
    3935         [ #  # ]:          0 :                         if (!ctx->pwq_tbl[node])
    3936                 :          0 :                                 goto out_free;
    3937                 :            :                 } else {
    3938                 :        209 :                         ctx->dfl_pwq->refcnt++;
    3939                 :        209 :                         ctx->pwq_tbl[node] = ctx->dfl_pwq;
    3940                 :            :                 }
    3941                 :            :         }
    3942                 :            : 
     3943                 :            :         /* save the user-configured attrs and sanitize them. */
    3944                 :        209 :         copy_workqueue_attrs(new_attrs, attrs);
    3945                 :        209 :         cpumask_and(new_attrs->cpumask, new_attrs->cpumask, cpu_possible_mask);
    3946                 :        209 :         ctx->attrs = new_attrs;
    3947                 :            : 
    3948                 :        209 :         ctx->wq = wq;
    3949                 :        209 :         free_workqueue_attrs(tmp_attrs);
    3950                 :        209 :         return ctx;
    3951                 :            : 
    3952                 :          0 : out_free:
    3953         [ #  # ]:          0 :         free_workqueue_attrs(tmp_attrs);
    3954         [ #  # ]:          0 :         free_workqueue_attrs(new_attrs);
    3955                 :          0 :         apply_wqattrs_cleanup(ctx);
    3956                 :          0 :         return NULL;
    3957                 :            : }
    3958                 :            : 
    3959                 :            : /* set attrs and install prepared pwqs, @ctx points to old pwqs on return */
    3960                 :        209 : static void apply_wqattrs_commit(struct apply_wqattrs_ctx *ctx)
    3961                 :            : {
    3962                 :        209 :         int node;
    3963                 :            : 
    3964                 :            :         /* all pwqs have been created successfully, let's install'em */
    3965                 :        209 :         mutex_lock(&ctx->wq->mutex);
    3966                 :            : 
    3967                 :        209 :         copy_workqueue_attrs(ctx->wq->unbound_attrs, ctx->attrs);
    3968                 :            : 
    3969                 :            :         /* save the previous pwq and install the new one */
    3970         [ +  + ]:        627 :         for_each_node(node)
    3971                 :        418 :                 ctx->pwq_tbl[node] = numa_pwq_tbl_install(ctx->wq, node,
    3972                 :            :                                                           ctx->pwq_tbl[node]);
    3973                 :            : 
    3974                 :            :         /* @dfl_pwq might not have been used, ensure it's linked */
    3975                 :        209 :         link_pwq(ctx->dfl_pwq);
    3976                 :        209 :         swap(ctx->wq->dfl_pwq, ctx->dfl_pwq);
    3977                 :            : 
    3978                 :        209 :         mutex_unlock(&ctx->wq->mutex);
    3979                 :        209 : }
    3980                 :            : 
    3981                 :          0 : static void apply_wqattrs_lock(void)
    3982                 :            : {
    3983                 :            :         /* CPUs should stay stable across pwq creations and installations */
    3984                 :          0 :         get_online_cpus();
    3985                 :          0 :         mutex_lock(&wq_pool_mutex);
    3986                 :            : }
    3987                 :            : 
    3988                 :          0 : static void apply_wqattrs_unlock(void)
    3989                 :            : {
    3990                 :          0 :         mutex_unlock(&wq_pool_mutex);
    3991                 :          0 :         put_online_cpus();
    3992                 :          0 : }
    3993                 :            : 
    3994                 :        209 : static int apply_workqueue_attrs_locked(struct workqueue_struct *wq,
    3995                 :            :                                         const struct workqueue_attrs *attrs)
    3996                 :            : {
    3997                 :        209 :         struct apply_wqattrs_ctx *ctx;
    3998                 :            : 
    3999                 :            :         /* only unbound workqueues can change attributes */
    4000   [ -  +  +  - ]:        209 :         if (WARN_ON(!(wq->flags & WQ_UNBOUND)))
    4001                 :            :                 return -EINVAL;
    4002                 :            : 
    4003                 :            :         /* creating multiple pwqs breaks ordering guarantee */
    4004         [ -  + ]:        209 :         if (!list_empty(&wq->pwqs)) {
    4005   [ #  #  #  # ]:          0 :                 if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
    4006                 :            :                         return -EINVAL;
    4007                 :            : 
    4008                 :          0 :                 wq->flags &= ~__WQ_ORDERED;
    4009                 :            :         }
    4010                 :            : 
    4011                 :        209 :         ctx = apply_wqattrs_prepare(wq, attrs);
    4012         [ +  - ]:        209 :         if (!ctx)
    4013                 :            :                 return -ENOMEM;
    4014                 :            : 
    4015                 :            :         /* the ctx has been prepared successfully, let's commit it */
    4016                 :        209 :         apply_wqattrs_commit(ctx);
    4017                 :        209 :         apply_wqattrs_cleanup(ctx);
    4018                 :            : 
    4019                 :        209 :         return 0;
    4020                 :            : }
    4021                 :            : 
    4022                 :            : /**
    4023                 :            :  * apply_workqueue_attrs - apply new workqueue_attrs to an unbound workqueue
    4024                 :            :  * @wq: the target workqueue
    4025                 :            :  * @attrs: the workqueue_attrs to apply, allocated with alloc_workqueue_attrs()
    4026                 :            :  *
    4027                 :            :  * Apply @attrs to an unbound workqueue @wq.  Unless disabled, on NUMA
    4028                 :            :  * machines, this function maps a separate pwq to each NUMA node with
     4029                 :            :  * possible CPUs in @attrs->cpumask so that work items are affine to the
     4030                 :            :  * NUMA node they were issued on.  Older pwqs are released as in-flight work
    4031                 :            :  * items finish.  Note that a work item which repeatedly requeues itself
    4032                 :            :  * back-to-back will stay on its current pwq.
    4033                 :            :  *
    4034                 :            :  * Performs GFP_KERNEL allocations.
    4035                 :            :  *
    4036                 :            :  * Assumes caller has CPU hotplug read exclusion, i.e. get_online_cpus().
    4037                 :            :  *
    4038                 :            :  * Return: 0 on success and -errno on failure.
    4039                 :            :  */
    4040                 :        209 : int apply_workqueue_attrs(struct workqueue_struct *wq,
    4041                 :            :                           const struct workqueue_attrs *attrs)
    4042                 :            : {
    4043                 :        209 :         int ret;
    4044                 :            : 
    4045                 :        209 :         lockdep_assert_cpus_held();
    4046                 :            : 
    4047                 :        209 :         mutex_lock(&wq_pool_mutex);
    4048                 :        209 :         ret = apply_workqueue_attrs_locked(wq, attrs);
    4049                 :        209 :         mutex_unlock(&wq_pool_mutex);
    4050                 :            : 
    4051                 :        209 :         return ret;
    4052                 :            : }
    4053                 :            : 
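/*
 * Editor's note -- a minimal usage sketch, not part of workqueue.c.  It
 * assumes an unbound workqueue created elsewhere (here just "wq") plus
 * <linux/workqueue.h>, <linux/cpu.h> and <linux/cpumask.h>; the chosen nice
 * level and cpumask are illustrative only.
 */
static int example_restrict_wq(struct workqueue_struct *wq)
{
        struct workqueue_attrs *attrs;
        int ret;

        attrs = alloc_workqueue_attrs();
        if (!attrs)
                return -ENOMEM;

        attrs->nice = -5;                            /* mildly boosted workers */
        cpumask_copy(attrs->cpumask, cpumask_of(0)); /* run on CPU0 only */

        get_online_cpus();                           /* hotplug read exclusion */
        ret = apply_workqueue_attrs(wq, attrs);
        put_online_cpus();

        free_workqueue_attrs(attrs);
        return ret;
}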
    4054                 :            : /**
    4055                 :            :  * wq_update_unbound_numa - update NUMA affinity of a wq for CPU hot[un]plug
    4056                 :            :  * @wq: the target workqueue
    4057                 :            :  * @cpu: the CPU coming up or going down
    4058                 :            :  * @online: whether @cpu is coming up or going down
    4059                 :            :  *
    4060                 :            :  * This function is to be called from %CPU_DOWN_PREPARE, %CPU_ONLINE and
    4061                 :            :  * %CPU_DOWN_FAILED.  @cpu is being hot[un]plugged, update NUMA affinity of
    4062                 :            :  * @wq accordingly.
    4063                 :            :  *
    4064                 :            :  * If NUMA affinity can't be adjusted due to memory allocation failure, it
    4065                 :            :  * falls back to @wq->dfl_pwq which may not be optimal but is always
    4066                 :            :  * correct.
    4067                 :            :  *
    4068                 :            :  * Note that when the last allowed CPU of a NUMA node goes offline for a
    4069                 :            :  * workqueue with a cpumask spanning multiple nodes, the workers which were
    4070                 :            :  * already executing the work items for the workqueue will lose their CPU
    4071                 :            :  * affinity and may execute on any CPU.  This is similar to how per-cpu
    4072                 :            :  * workqueues behave on CPU_DOWN.  If a workqueue user wants strict
    4073                 :            :  * affinity, it's the user's responsibility to flush the work item from
    4074                 :            :  * CPU_DOWN_PREPARE.
    4075                 :            :  */
    4076                 :         99 : static void wq_update_unbound_numa(struct workqueue_struct *wq, int cpu,
    4077                 :            :                                    bool online)
    4078                 :            : {
    4079         [ -  + ]:         99 :         int node = cpu_to_node(cpu);
    4080         [ -  + ]:         99 :         int cpu_off = online ? -1 : cpu;
    4081                 :         99 :         struct pool_workqueue *old_pwq = NULL, *pwq;
    4082                 :         99 :         struct workqueue_attrs *target_attrs;
    4083                 :         99 :         cpumask_t *cpumask;
    4084                 :            : 
    4085                 :         99 :         lockdep_assert_held(&wq_pool_mutex);
    4086                 :            : 
    4087   [ -  +  -  - ]:         99 :         if (!wq_numa_enabled || !(wq->flags & WQ_UNBOUND) ||
    4088         [ #  # ]:          0 :             wq->unbound_attrs->no_numa)
    4089                 :            :                 return;
    4090                 :            : 
    4091                 :            :         /*
     4092                 :            :          * We don't want to alloc/free wq_attrs for each wq for each CPU.
    4093                 :            :          * Let's use a preallocated one.  The following buf is protected by
    4094                 :            :          * CPU hotplug exclusion.
    4095                 :            :          */
    4096                 :          0 :         target_attrs = wq_update_unbound_numa_attrs_buf;
    4097                 :          0 :         cpumask = target_attrs->cpumask;
    4098                 :            : 
    4099         [ #  # ]:          0 :         copy_workqueue_attrs(target_attrs, wq->unbound_attrs);
    4100         [ #  # ]:          0 :         pwq = unbound_pwq_by_node(wq, node);
    4101                 :            : 
    4102                 :            :         /*
    4103                 :            :          * Let's determine what needs to be done.  If the target cpumask is
    4104                 :            :          * different from the default pwq's, we need to compare it to @pwq's
    4105                 :            :          * and create a new one if they don't match.  If the target cpumask
    4106                 :            :          * equals the default pwq's, the default pwq should be used.
    4107                 :            :          */
    4108         [ #  # ]:          0 :         if (wq_calc_node_cpumask(wq->dfl_pwq->pool->attrs, node, cpu_off, cpumask)) {
    4109         [ #  # ]:          0 :                 if (cpumask_equal(cpumask, pwq->pool->attrs->cpumask))
    4110                 :            :                         return;
    4111                 :            :         } else {
    4112                 :          0 :                 goto use_dfl_pwq;
    4113                 :            :         }
    4114                 :            : 
    4115                 :            :         /* create a new pwq */
    4116                 :          0 :         pwq = alloc_unbound_pwq(wq, target_attrs);
    4117         [ #  # ]:          0 :         if (!pwq) {
    4118                 :          0 :                 pr_warn("workqueue: allocation failed while updating NUMA affinity of \"%s\"\n",
    4119                 :            :                         wq->name);
    4120                 :          0 :                 goto use_dfl_pwq;
    4121                 :            :         }
    4122                 :            : 
    4123                 :            :         /* Install the new pwq. */
    4124                 :          0 :         mutex_lock(&wq->mutex);
    4125                 :          0 :         old_pwq = numa_pwq_tbl_install(wq, node, pwq);
    4126                 :          0 :         goto out_unlock;
    4127                 :            : 
    4128                 :          0 : use_dfl_pwq:
    4129                 :          0 :         mutex_lock(&wq->mutex);
    4130                 :          0 :         spin_lock_irq(&wq->dfl_pwq->pool->lock);
    4131         [ #  # ]:          0 :         get_pwq(wq->dfl_pwq);
    4132                 :          0 :         spin_unlock_irq(&wq->dfl_pwq->pool->lock);
    4133                 :          0 :         old_pwq = numa_pwq_tbl_install(wq, node, wq->dfl_pwq);
    4134                 :          0 : out_unlock:
    4135                 :          0 :         mutex_unlock(&wq->mutex);
    4136                 :          0 :         put_pwq_unlocked(old_pwq);
    4137                 :            : }
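/*
 * Editor's note -- a context sketch, not part of this function.  In this
 * kernel version the CPU hotplug callbacks (workqueue_online_cpu() /
 * workqueue_offline_cpu(), defined later in this file) are the expected
 * callers; roughly:
 *
 *      mutex_lock(&wq_pool_mutex);
 *      list_for_each_entry(wq, &workqueues, list)
 *              wq_update_unbound_numa(wq, cpu, online);
 *      mutex_unlock(&wq_pool_mutex);
 *
 * which is what satisfies the lockdep assertion at the top of the function.
 */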
    4138                 :            : 
    4139                 :        484 : static int alloc_and_link_pwqs(struct workqueue_struct *wq)
    4140                 :            : {
    4141                 :        484 :         bool highpri = wq->flags & WQ_HIGHPRI;
    4142                 :        484 :         int cpu, ret;
    4143                 :            : 
    4144         [ +  + ]:        484 :         if (!(wq->flags & WQ_UNBOUND)) {
    4145                 :        275 :                 wq->cpu_pwqs = alloc_percpu(struct pool_workqueue);
    4146         [ +  - ]:        275 :                 if (!wq->cpu_pwqs)
    4147                 :            :                         return -ENOMEM;
    4148                 :            : 
    4149         [ +  + ]:        550 :                 for_each_possible_cpu(cpu) {
    4150                 :        275 :                         struct pool_workqueue *pwq =
    4151                 :        275 :                                 per_cpu_ptr(wq->cpu_pwqs, cpu);
    4152                 :        275 :                         struct worker_pool *cpu_pools =
    4153                 :        275 :                                 per_cpu(cpu_worker_pools, cpu);
    4154                 :            : 
    4155                 :        275 :                         init_pwq(pwq, wq, &cpu_pools[highpri]);
    4156                 :            : 
    4157                 :        275 :                         mutex_lock(&wq->mutex);
    4158                 :        275 :                         link_pwq(pwq);
    4159                 :        275 :                         mutex_unlock(&wq->mutex);
    4160                 :            :                 }
    4161                 :            :                 return 0;
    4162                 :            :         }
    4163                 :            : 
    4164                 :        209 :         get_online_cpus();
    4165         [ +  + ]:        209 :         if (wq->flags & __WQ_ORDERED) {
    4166                 :        165 :                 ret = apply_workqueue_attrs(wq, ordered_wq_attrs[highpri]);
     4167                 :            :                 /* there should only be a single pwq for the ordering guarantee */
    4168   [ +  -  +  -  :        330 :                 WARN(!ret && (wq->pwqs.next != &wq->dfl_pwq->pwqs_node ||
             +  -  -  + ]
    4169                 :            :                               wq->pwqs.prev != &wq->dfl_pwq->pwqs_node),
    4170                 :            :                      "ordering guarantee broken for workqueue %s\n", wq->name);
    4171                 :            :         } else {
    4172                 :         44 :                 ret = apply_workqueue_attrs(wq, unbound_std_wq_attrs[highpri]);
    4173                 :            :         }
    4174                 :        209 :         put_online_cpus();
    4175                 :            : 
    4176                 :        209 :         return ret;
    4177                 :            : }
    4178                 :            : 
    4179                 :        484 : static int wq_clamp_max_active(int max_active, unsigned int flags,
    4180                 :            :                                const char *name)
    4181                 :            : {
    4182         [ +  + ]:        484 :         int lim = flags & WQ_UNBOUND ? WQ_UNBOUND_MAX_ACTIVE : WQ_MAX_ACTIVE;
    4183                 :            : 
    4184         [ -  + ]:        484 :         if (max_active < 1 || max_active > lim)
    4185                 :          0 :                 pr_warn("workqueue: max_active %d requested for %s is out of range, clamping between %d and %d\n",
    4186                 :            :                         max_active, name, 1, lim);
    4187                 :            : 
    4188                 :        484 :         return clamp_val(max_active, 1, lim);
    4189                 :            : }
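/*
 * Editor's note -- an illustrative worked example, not part of workqueue.c.
 * With the stock header values at the time of writing (WQ_MAX_ACTIVE == 512),
 * a request of max_active == 10000 for a bound workqueue triggers the warning
 * above and is clamped to 512.  A request of 0 never reaches this function:
 * alloc_workqueue() substitutes WQ_DFL_ACTIVE first.
 */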
    4190                 :            : 
    4191                 :            : /*
    4192                 :            :  * Workqueues which may be used during memory reclaim should have a rescuer
    4193                 :            :  * to guarantee forward progress.
    4194                 :            :  */
    4195                 :        484 : static int init_rescuer(struct workqueue_struct *wq)
    4196                 :            : {
    4197                 :        484 :         struct worker *rescuer;
    4198                 :        484 :         int ret;
    4199                 :            : 
    4200         [ +  + ]:        484 :         if (!(wq->flags & WQ_MEM_RECLAIM))
    4201                 :            :                 return 0;
    4202                 :            : 
    4203                 :        231 :         rescuer = alloc_worker(NUMA_NO_NODE);
    4204         [ +  - ]:        231 :         if (!rescuer)
    4205                 :            :                 return -ENOMEM;
    4206                 :            : 
    4207                 :        231 :         rescuer->rescue_wq = wq;
    4208                 :        231 :         rescuer->task = kthread_create(rescuer_thread, rescuer, "%s", wq->name);
    4209         [ -  + ]:        231 :         ret = PTR_ERR_OR_ZERO(rescuer->task);
    4210         [ #  # ]:          0 :         if (ret) {
    4211                 :          0 :                 kfree(rescuer);
    4212                 :          0 :                 return ret;
    4213                 :            :         }
    4214                 :            : 
    4215                 :        231 :         wq->rescuer = rescuer;
    4216                 :        231 :         kthread_bind_mask(rescuer->task, cpu_possible_mask);
    4217                 :        231 :         wake_up_process(rescuer->task);
    4218                 :            : 
    4219                 :        231 :         return 0;
    4220                 :            : }
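/*
 * Editor's note -- a minimal sketch, not part of workqueue.c, of creating a
 * reclaim-safe workqueue so that the rescuer set up above exists.  The
 * workqueue name is illustrative.
 */
static struct workqueue_struct *example_create_reclaim_wq(void)
{
        /* WQ_MEM_RECLAIM guarantees a rescuer thread, so work queued here
         * can still make forward progress under memory pressure. */
        return alloc_workqueue("example_reclaim", WQ_MEM_RECLAIM, 0);
}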
    4221                 :            : 
    4222                 :            : __printf(1, 4)
    4223                 :        484 : struct workqueue_struct *alloc_workqueue(const char *fmt,
    4224                 :            :                                          unsigned int flags,
    4225                 :            :                                          int max_active, ...)
    4226                 :            : {
    4227                 :        484 :         size_t tbl_size = 0;
    4228                 :        484 :         va_list args;
    4229                 :        484 :         struct workqueue_struct *wq;
    4230                 :        484 :         struct pool_workqueue *pwq;
    4231                 :            : 
    4232                 :            :         /*
    4233                 :            :          * Unbound && max_active == 1 used to imply ordered, which is no
    4234                 :            :          * longer the case on NUMA machines due to per-node pools.  While
    4235                 :            :          * alloc_ordered_workqueue() is the right way to create an ordered
    4236                 :            :          * workqueue, keep the previous behavior to avoid subtle breakages
    4237                 :            :          * on NUMA.
    4238                 :            :          */
    4239   [ +  +  +  + ]:        484 :         if ((flags & WQ_UNBOUND) && max_active == 1)
    4240                 :        165 :                 flags |= __WQ_ORDERED;
    4241                 :            : 
    4242                 :            :         /* see the comment above the definition of WQ_POWER_EFFICIENT */
    4243   [ +  +  -  + ]:        484 :         if ((flags & WQ_POWER_EFFICIENT) && wq_power_efficient)
    4244                 :          0 :                 flags |= WQ_UNBOUND;
    4245                 :            : 
    4246                 :            :         /* allocate wq and format name */
    4247         [ +  + ]:        484 :         if (flags & WQ_UNBOUND)
    4248                 :        209 :                 tbl_size = nr_node_ids * sizeof(wq->numa_pwq_tbl[0]);
    4249                 :            : 
    4250                 :        484 :         wq = kzalloc(sizeof(*wq) + tbl_size, GFP_KERNEL);
    4251         [ +  - ]:        484 :         if (!wq)
    4252                 :            :                 return NULL;
    4253                 :            : 
    4254         [ +  + ]:        484 :         if (flags & WQ_UNBOUND) {
    4255                 :        209 :                 wq->unbound_attrs = alloc_workqueue_attrs();
    4256         [ -  + ]:        209 :                 if (!wq->unbound_attrs)
    4257                 :          0 :                         goto err_free_wq;
    4258                 :            :         }
    4259                 :            : 
    4260                 :        484 :         va_start(args, max_active);
    4261                 :        484 :         vsnprintf(wq->name, sizeof(wq->name), fmt, args);
    4262                 :        484 :         va_end(args);
    4263                 :            : 
    4264         [ +  + ]:        484 :         max_active = max_active ?: WQ_DFL_ACTIVE;
    4265                 :        484 :         max_active = wq_clamp_max_active(max_active, flags, wq->name);
    4266                 :            : 
    4267                 :            :         /* init wq */
    4268                 :        484 :         wq->flags = flags;
    4269                 :        484 :         wq->saved_max_active = max_active;
    4270                 :        484 :         mutex_init(&wq->mutex);
    4271                 :        484 :         atomic_set(&wq->nr_pwqs_to_flush, 0);
    4272                 :        484 :         INIT_LIST_HEAD(&wq->pwqs);
    4273                 :        484 :         INIT_LIST_HEAD(&wq->flusher_queue);
    4274                 :        484 :         INIT_LIST_HEAD(&wq->flusher_overflow);
    4275                 :        484 :         INIT_LIST_HEAD(&wq->maydays);
    4276                 :            : 
    4277                 :        484 :         wq_init_lockdep(wq);
    4278                 :        484 :         INIT_LIST_HEAD(&wq->list);
    4279                 :            : 
    4280         [ -  + ]:        484 :         if (alloc_and_link_pwqs(wq) < 0)
    4281                 :          0 :                 goto err_unreg_lockdep;
    4282                 :            : 
    4283   [ +  +  -  + ]:        484 :         if (wq_online && init_rescuer(wq) < 0)
    4284                 :          0 :                 goto err_destroy;
    4285                 :            : 
    4286   [ +  +  -  + ]:        484 :         if ((wq->flags & WQ_SYSFS) && workqueue_sysfs_register(wq))
    4287                 :          0 :                 goto err_destroy;
    4288                 :            : 
    4289                 :            :         /*
    4290                 :            :          * wq_pool_mutex protects global freeze state and workqueues list.
    4291                 :            :          * Grab it, adjust max_active and add the new @wq to workqueues
    4292                 :            :          * list.
    4293                 :            :          */
    4294                 :        484 :         mutex_lock(&wq_pool_mutex);
    4295                 :            : 
    4296                 :        484 :         mutex_lock(&wq->mutex);
    4297         [ +  + ]:        968 :         for_each_pwq(pwq, wq)
    4298                 :        484 :                 pwq_adjust_max_active(pwq);
    4299                 :        484 :         mutex_unlock(&wq->mutex);
    4300                 :            : 
    4301                 :        484 :         list_add_tail_rcu(&wq->list, &workqueues);
    4302                 :            : 
    4303                 :        484 :         mutex_unlock(&wq_pool_mutex);
    4304                 :            : 
    4305                 :        484 :         return wq;
    4306                 :            : 
    4307                 :            : err_unreg_lockdep:
    4308                 :          0 :         wq_unregister_lockdep(wq);
    4309                 :            :         wq_free_lockdep(wq);
    4310                 :          0 : err_free_wq:
    4311         [ #  # ]:          0 :         free_workqueue_attrs(wq->unbound_attrs);
    4312                 :          0 :         kfree(wq);
    4313                 :          0 :         return NULL;
    4314                 :          0 : err_destroy:
    4315                 :          0 :         destroy_workqueue(wq);
    4316                 :          0 :         return NULL;
    4317                 :            : }
    4318                 :            : EXPORT_SYMBOL_GPL(alloc_workqueue);
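/*
 * Editor's note -- a minimal end-to-end usage sketch, not part of
 * workqueue.c.  The names example_wq/example_work/example_fn and the flag
 * choices are illustrative; 0 for max_active means WQ_DFL_ACTIVE.
 */
static struct workqueue_struct *example_wq;

static void example_fn(struct work_struct *work)
{
        /* runs in process context on a shared worker */
}
static DECLARE_WORK(example_work, example_fn);

static int example_init(void)
{
        example_wq = alloc_workqueue("example_wq",
                                     WQ_UNBOUND | WQ_FREEZABLE, 0);
        if (!example_wq)
                return -ENOMEM;

        queue_work(example_wq, &example_work);
        return 0;
}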
    4319                 :            : 
    4320                 :            : static bool pwq_busy(struct pool_workqueue *pwq)
    4321                 :            : {
    4322                 :            :         int i;
    4323                 :            : 
    4324         [ +  + ]:        176 :         for (i = 0; i < WORK_NR_COLORS; i++)
    4325         [ +  - ]:        165 :                 if (pwq->nr_in_flight[i])
    4326                 :            :                         return true;
    4327                 :            : 
    4328   [ -  +  -  - ]:         11 :         if ((pwq != pwq->wq->dfl_pwq) && (pwq->refcnt > 1))
    4329                 :            :                 return true;
    4330   [ +  -  -  + ]:         11 :         if (pwq->nr_active || !list_empty(&pwq->delayed_works))
    4331                 :          0 :                 return true;
    4332                 :            : 
    4333                 :            :         return false;
    4334                 :            : }
    4335                 :            : 
    4336                 :            : /**
    4337                 :            :  * destroy_workqueue - safely terminate a workqueue
    4338                 :            :  * @wq: target workqueue
    4339                 :            :  *
    4340                 :            :  * Safely destroy a workqueue. All work currently pending will be done first.
    4341                 :            :  */
    4342                 :         11 : void destroy_workqueue(struct workqueue_struct *wq)
    4343                 :            : {
    4344                 :         11 :         struct pool_workqueue *pwq;
    4345                 :         11 :         int node;
    4346                 :            : 
    4347                 :            :         /*
    4348                 :            :          * Remove it from sysfs first so that sanity check failure doesn't
    4349                 :            :          * lead to sysfs name conflicts.
    4350                 :            :          */
    4351                 :         11 :         workqueue_sysfs_unregister(wq);
    4352                 :            : 
    4353                 :            :         /* drain it before proceeding with destruction */
    4354                 :         11 :         drain_workqueue(wq);
    4355                 :            : 
     4356                 :            :         /* kill the rescuer; if sanity checks fail, leave the wq without a rescuer */
    4357         [ +  - ]:         11 :         if (wq->rescuer) {
    4358                 :         11 :                 struct worker *rescuer = wq->rescuer;
    4359                 :            : 
    4360                 :            :                 /* this prevents new queueing */
    4361                 :         11 :                 spin_lock_irq(&wq_mayday_lock);
    4362                 :         11 :                 wq->rescuer = NULL;
    4363                 :         11 :                 spin_unlock_irq(&wq_mayday_lock);
    4364                 :            : 
    4365                 :            :                 /* rescuer will empty maydays list before exiting */
    4366                 :         11 :                 kthread_stop(rescuer->task);
    4367                 :         11 :                 kfree(rescuer);
    4368                 :            :         }
    4369                 :            : 
    4370                 :            :         /*
    4371                 :            :          * Sanity checks - grab all the locks so that we wait for all
    4372                 :            :          * in-flight operations which may do put_pwq().
    4373                 :            :          */
    4374                 :         11 :         mutex_lock(&wq_pool_mutex);
    4375                 :         11 :         mutex_lock(&wq->mutex);
    4376         [ +  + ]:         22 :         for_each_pwq(pwq, wq) {
    4377                 :         11 :                 spin_lock_irq(&pwq->pool->lock);
    4378   [ -  +  -  + ]:         11 :                 if (WARN_ON(pwq_busy(pwq))) {
    4379                 :          0 :                         pr_warn("%s: %s has the following busy pwq\n",
    4380                 :            :                                 __func__, wq->name);
    4381                 :          0 :                         show_pwq(pwq);
    4382                 :          0 :                         spin_unlock_irq(&pwq->pool->lock);
    4383                 :          0 :                         mutex_unlock(&wq->mutex);
    4384                 :          0 :                         mutex_unlock(&wq_pool_mutex);
    4385                 :          0 :                         show_workqueue_state();
    4386                 :          0 :                         return;
    4387                 :            :                 }
    4388                 :         11 :                 spin_unlock_irq(&pwq->pool->lock);
    4389                 :            :         }
    4390                 :         11 :         mutex_unlock(&wq->mutex);
    4391                 :         11 :         mutex_unlock(&wq_pool_mutex);
    4392                 :            : 
    4393                 :            :         /*
    4394                 :            :          * wq list is used to freeze wq, remove from list after
    4395                 :            :          * flushing is complete in case freeze races us.
    4396                 :            :          */
    4397                 :         11 :         mutex_lock(&wq_pool_mutex);
    4398                 :         11 :         list_del_rcu(&wq->list);
    4399                 :         11 :         mutex_unlock(&wq_pool_mutex);
    4400                 :            : 
    4401         [ -  + ]:         11 :         if (!(wq->flags & WQ_UNBOUND)) {
    4402                 :          0 :                 wq_unregister_lockdep(wq);
    4403                 :            :                 /*
    4404                 :            :                  * The base ref is never dropped on per-cpu pwqs.  Directly
    4405                 :            :                  * schedule RCU free.
    4406                 :            :                  */
    4407                 :          0 :                 call_rcu(&wq->rcu, rcu_free_wq);
    4408                 :            :         } else {
    4409                 :            :                 /*
    4410                 :            :                  * We're the sole accessor of @wq at this point.  Directly
    4411                 :            :                  * access numa_pwq_tbl[] and dfl_pwq to put the base refs.
    4412                 :            :                  * @wq will be freed when the last pwq is released.
    4413                 :            :                  */
    4414         [ +  + ]:         44 :                 for_each_node(node) {
    4415                 :         11 :                         pwq = rcu_access_pointer(wq->numa_pwq_tbl[node]);
    4416                 :         11 :                         RCU_INIT_POINTER(wq->numa_pwq_tbl[node], NULL);
    4417                 :         11 :                         put_pwq_unlocked(pwq);
    4418                 :            :                 }
    4419                 :            : 
    4420                 :            :                 /*
    4421                 :            :                  * Put dfl_pwq.  @wq may be freed any time after dfl_pwq is
    4422                 :            :                  * put.  Don't access it afterwards.
    4423                 :            :                  */
    4424                 :         11 :                 pwq = wq->dfl_pwq;
    4425                 :         11 :                 wq->dfl_pwq = NULL;
    4426                 :         11 :                 put_pwq_unlocked(pwq);
    4427                 :            :         }
    4428                 :            : }
    4429                 :            : EXPORT_SYMBOL_GPL(destroy_workqueue);
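/*
 * Editor's note -- a teardown sketch matching the hypothetical allocation
 * example after alloc_workqueue() above; not part of workqueue.c.
 */
static void example_exit(void)
{
        cancel_work_sync(&example_work);   /* stop our own requeueing first */
        destroy_workqueue(example_wq);     /* drains whatever is still queued */
        example_wq = NULL;
}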
    4430                 :            : 
    4431                 :            : /**
    4432                 :            :  * workqueue_set_max_active - adjust max_active of a workqueue
    4433                 :            :  * @wq: target workqueue
    4434                 :            :  * @max_active: new max_active value.
    4435                 :            :  *
    4436                 :            :  * Set max_active of @wq to @max_active.
    4437                 :            :  *
    4438                 :            :  * CONTEXT:
    4439                 :            :  * Don't call from IRQ context.
    4440                 :            :  */
    4441                 :          0 : void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
    4442                 :            : {
    4443                 :          0 :         struct pool_workqueue *pwq;
    4444                 :            : 
    4445                 :            :         /* disallow meddling with max_active for ordered workqueues */
    4446   [ #  #  #  # ]:          0 :         if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
    4447                 :            :                 return;
    4448                 :            : 
    4449                 :          0 :         max_active = wq_clamp_max_active(max_active, wq->flags, wq->name);
    4450                 :            : 
    4451                 :          0 :         mutex_lock(&wq->mutex);
    4452                 :            : 
    4453                 :          0 :         wq->flags &= ~__WQ_ORDERED;
    4454                 :          0 :         wq->saved_max_active = max_active;
    4455                 :            : 
    4456         [ #  # ]:          0 :         for_each_pwq(pwq, wq)
    4457                 :          0 :                 pwq_adjust_max_active(pwq);
    4458                 :            : 
    4459                 :          0 :         mutex_unlock(&wq->mutex);
    4460                 :            : }
    4461                 :            : EXPORT_SYMBOL_GPL(workqueue_set_max_active);
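/*
 * Editor's note -- a usage sketch, not part of workqueue.c.  Raising
 * max_active lets more work items of the (hypothetical) workqueue run
 * concurrently per pool_workqueue; as the WARN above shows, explicitly
 * ordered workqueues must not do this.
 */
static void example_boost_concurrency(struct workqueue_struct *wq)
{
        workqueue_set_max_active(wq, 16);  /* value is clamped to [1, limit] */
}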
    4462                 :            : 
    4463                 :            : /**
    4464                 :            :  * current_work - retrieve %current task's work struct
    4465                 :            :  *
    4466                 :            :  * Determine if %current task is a workqueue worker and what it's working on.
    4467                 :            :  * Useful to find out the context that the %current task is running in.
    4468                 :            :  *
    4469                 :            :  * Return: work struct if %current task is a workqueue worker, %NULL otherwise.
    4470                 :            :  */
    4471                 :          0 : struct work_struct *current_work(void)
    4472                 :            : {
    4473                 :          0 :         struct worker *worker = current_wq_worker();
    4474                 :            : 
    4475         [ #  # ]:          0 :         return worker ? worker->current_work : NULL;
    4476                 :            : }
    4477                 :            : EXPORT_SYMBOL(current_work);
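/*
 * Editor's note -- a usage sketch, not part of workqueue.c, showing how a
 * caller can check whether it is currently executing a particular work item;
 * the helper name is illustrative.
 */
static bool example_running_from(struct work_struct *work)
{
        return current_work() == work;
}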
    4478                 :            : 
    4479                 :            : /**
    4480                 :            :  * current_is_workqueue_rescuer - is %current workqueue rescuer?
    4481                 :            :  *
    4482                 :            :  * Determine whether %current is a workqueue rescuer.  Can be used from
    4483                 :            :  * work functions to determine whether it's being run off the rescuer task.
    4484                 :            :  *
    4485                 :            :  * Return: %true if %current is a workqueue rescuer. %false otherwise.
    4486                 :            :  */
    4487                 :          0 : bool current_is_workqueue_rescuer(void)
    4488                 :            : {
    4489                 :          0 :         struct worker *worker = current_wq_worker();
    4490                 :            : 
    4491   [ #  #  #  # ]:          0 :         return worker && worker->rescue_wq;
    4492                 :            : }
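/*
 * Editor's note -- a usage sketch, not part of workqueue.c.  A work function
 * might skip optional batching when it has been picked up by the rescuer,
 * i.e. while the system is under memory pressure; the helper name is
 * illustrative.
 */
static bool example_should_skip_batching(void)
{
        return current_is_workqueue_rescuer();
}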
    4493                 :            : 
    4494                 :            : /**
    4495                 :            :  * workqueue_congested - test whether a workqueue is congested
    4496                 :            :  * @cpu: CPU in question
    4497                 :            :  * @wq: target workqueue
    4498                 :            :  *
    4499                 :            :  * Test whether @wq's cpu workqueue for @cpu is congested.  There is
    4500                 :            :  * no synchronization around this function and the test result is
    4501                 :            :  * unreliable and only useful as advisory hints or for debugging.
    4502                 :            :  *
    4503                 :            :  * If @cpu is WORK_CPU_UNBOUND, the test is performed on the local CPU.
    4504                 :            :  * Note that both per-cpu and unbound workqueues may be associated with
    4505                 :            :  * multiple pool_workqueues which have separate congested states.  A
    4506                 :            :  * workqueue being congested on one CPU doesn't mean the workqueue is also
     4507                 :            :  * congested on other CPUs / NUMA nodes.
    4508                 :            :  *
    4509                 :            :  * Return:
    4510                 :            :  * %true if congested, %false otherwise.
    4511                 :            :  */
    4512                 :          0 : bool workqueue_congested(int cpu, struct workqueue_struct *wq)
    4513                 :            : {
    4514                 :          0 :         struct pool_workqueue *pwq;
    4515                 :          0 :         bool ret;
    4516                 :            : 
    4517                 :          0 :         rcu_read_lock();
    4518                 :          0 :         preempt_disable();
    4519                 :            : 
    4520         [ #  # ]:          0 :         if (cpu == WORK_CPU_UNBOUND)
    4521                 :          0 :                 cpu = smp_processor_id();
    4522                 :            : 
    4523         [ #  # ]:          0 :         if (!(wq->flags & WQ_UNBOUND))
    4524                 :          0 :                 pwq = per_cpu_ptr(wq->cpu_pwqs, cpu);
    4525                 :            :         else
    4526         [ #  # ]:          0 :                 pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu));
    4527                 :            : 
    4528                 :          0 :         ret = !list_empty(&pwq->delayed_works);
    4529                 :          0 :         preempt_enable();
    4530                 :          0 :         rcu_read_unlock();
    4531                 :            : 
    4532                 :          0 :         return ret;
    4533                 :            : }
    4534                 :            : EXPORT_SYMBOL_GPL(workqueue_congested);
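/*
 * Editor's note -- an advisory-use sketch, not part of workqueue.c.  As the
 * comment above stresses, the result is only a hint; the hypothetical caller
 * merely uses it to shed optional work on the local CPU.
 */
static bool example_should_defer(struct workqueue_struct *wq)
{
        return workqueue_congested(WORK_CPU_UNBOUND, wq);
}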
    4535                 :            : 
    4536                 :            : /**
    4537                 :            :  * work_busy - test whether a work is currently pending or running
    4538                 :            :  * @work: the work to be tested
    4539                 :            :  *
    4540                 :            :  * Test whether @work is currently pending or running.  There is no
    4541                 :            :  * synchronization around this function and the test result is
    4542                 :            :  * unreliable and only useful as advisory hints or for debugging.
    4543                 :            :  *
    4544                 :            :  * Return:
    4545                 :            :  * OR'd bitmask of WORK_BUSY_* bits.
    4546                 :            :  */
    4547                 :          0 : unsigned int work_busy(struct work_struct *work)
    4548                 :            : {
    4549                 :          0 :         struct worker_pool *pool;
    4550                 :          0 :         unsigned long flags;
    4551                 :          0 :         unsigned int ret = 0;
    4552                 :            : 
    4553         [ #  # ]:          0 :         if (work_pending(work))
    4554                 :          0 :                 ret |= WORK_BUSY_PENDING;
    4555                 :            : 
    4556                 :          0 :         rcu_read_lock();
    4557                 :          0 :         pool = get_work_pool(work);
    4558         [ #  # ]:          0 :         if (pool) {
    4559                 :          0 :                 spin_lock_irqsave(&pool->lock, flags);
    4560   [ #  #  #  # ]:          0 :                 if (find_worker_executing_work(pool, work))
    4561                 :          0 :                         ret |= WORK_BUSY_RUNNING;
    4562                 :          0 :                 spin_unlock_irqrestore(&pool->lock, flags);
    4563                 :            :         }
    4564                 :          0 :         rcu_read_unlock();
    4565                 :            : 
    4566                 :          0 :         return ret;
    4567                 :            : }
    4568                 :            : EXPORT_SYMBOL_GPL(work_busy);
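/*
 * Editor's note -- a debugging-style sketch, not part of workqueue.c; the
 * helper name and message are illustrative.
 */
static void example_report_work(struct work_struct *work)
{
        unsigned int busy = work_busy(work);

        pr_info("work %p:%s%s\n", work,
                (busy & WORK_BUSY_PENDING) ? " pending" : "",
                (busy & WORK_BUSY_RUNNING) ? " running" : "");
}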
    4569                 :            : 
    4570                 :            : /**
    4571                 :            :  * set_worker_desc - set description for the current work item
    4572                 :            :  * @fmt: printf-style format string
    4573                 :            :  * @...: arguments for the format string
    4574                 :            :  *
    4575                 :            :  * This function can be called by a running work function to describe what
    4576                 :            :  * the work item is about.  If the worker task gets dumped, this
    4577                 :            :  * information will be printed out together to help debugging.  The
    4578                 :            :  * description can be at most WORKER_DESC_LEN including the trailing '\0'.
    4579                 :            :  */
    4580                 :          0 : void set_worker_desc(const char *fmt, ...)
    4581                 :            : {
    4582                 :          0 :         struct worker *worker = current_wq_worker();
    4583                 :          0 :         va_list args;
    4584                 :            : 
    4585         [ #  # ]:          0 :         if (worker) {
    4586                 :          0 :                 va_start(args, fmt);
    4587                 :          0 :                 vsnprintf(worker->desc, sizeof(worker->desc), fmt, args);
    4588                 :          0 :                 va_end(args);
    4589                 :            :         }
    4590                 :          0 : }
    4591                 :            : EXPORT_SYMBOL_GPL(set_worker_desc);
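/*
 * Editor's note -- a usage sketch, not part of workqueue.c.  A work function
 * can tag itself so that a later dump via print_worker_info() (below) shows
 * what it was doing; the function name and inode number are illustrative.
 */
static void example_writeback_fn(struct work_struct *work)
{
        set_worker_desc("flushing inode %lu", 12345UL);
        /* ... do the actual writeback ... */
}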
    4592                 :            : 
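/*
 * A hedged sketch of a caller: a work function can label itself so that a
 * later task dump identifies the object being processed.  "struct my_dev"
 * and "my_dev_work" are assumptions for the example, not part of this file.
 */
#include <linux/kernel.h>
#include <linux/workqueue.h>

struct my_dev {
        struct work_struct      work;
        const char              *name;
};

static void my_dev_work(struct work_struct *work)
{
        struct my_dev *dev = container_of(work, struct my_dev, work);

        /* recorded in worker->desc; shown by print_worker_info() on dumps */
        set_worker_desc("my_dev %s", dev->name);
        /* ... actual processing ... */
}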
    4593                 :            : /**
    4594                 :            :  * print_worker_info - print out worker information and description
    4595                 :            :  * @log_lvl: the log level to use when printing
    4596                 :            :  * @task: target task
    4597                 :            :  *
    4598                 :            :  * If @task is a worker and currently executing a work item, print out the
    4599                 :            :  * name of the workqueue being serviced and worker description set with
    4600                 :            :  * set_worker_desc() by the currently executing work item.
    4601                 :            :  *
    4602                 :            :  * This function can be safely called on any task as long as the
    4603                 :            :  * task_struct itself is accessible.  While safe, this function isn't
     4604                 :            :  * synchronized and may print out mixed-up or garbage data of limited length.
    4605                 :            :  */
    4606                 :          0 : void print_worker_info(const char *log_lvl, struct task_struct *task)
    4607                 :            : {
    4608                 :          0 :         work_func_t *fn = NULL;
    4609                 :          0 :         char name[WQ_NAME_LEN] = { };
    4610                 :          0 :         char desc[WORKER_DESC_LEN] = { };
    4611                 :          0 :         struct pool_workqueue *pwq = NULL;
    4612                 :          0 :         struct workqueue_struct *wq = NULL;
    4613                 :          0 :         struct worker *worker;
    4614                 :            : 
    4615         [ #  # ]:          0 :         if (!(task->flags & PF_WQ_WORKER))
    4616                 :          0 :                 return;
    4617                 :            : 
    4618                 :            :         /*
    4619                 :            :          * This function is called without any synchronization and @task
    4620                 :            :          * could be in any state.  Be careful with dereferences.
    4621                 :            :          */
    4622                 :          0 :         worker = kthread_probe_data(task);
    4623                 :            : 
    4624                 :            :         /*
    4625                 :            :          * Carefully copy the associated workqueue's workfn, name and desc.
    4626                 :            :          * Keep the original last '\0' in case the original is garbage.
    4627                 :            :          */
    4628                 :          0 :         probe_kernel_read(&fn, &worker->current_func, sizeof(fn));
    4629                 :          0 :         probe_kernel_read(&pwq, &worker->current_pwq, sizeof(pwq));
    4630                 :          0 :         probe_kernel_read(&wq, &pwq->wq, sizeof(wq));
    4631                 :          0 :         probe_kernel_read(name, wq->name, sizeof(name) - 1);
    4632                 :          0 :         probe_kernel_read(desc, worker->desc, sizeof(desc) - 1);
    4633                 :            : 
    4634   [ #  #  #  #  :          0 :         if (fn || name[0] || desc[0]) {
                   #  # ]
    4635                 :          0 :                 printk("%sWorkqueue: %s %ps", log_lvl, name, fn);
    4636         [ #  # ]:          0 :                 if (strcmp(name, desc))
    4637                 :          0 :                         pr_cont(" (%s)", desc);
    4638                 :          0 :                 pr_cont("\n");
    4639                 :            :         }
    4640                 :            : }
    4641                 :            : 
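/*
 * A hedged sketch of a dump path, assuming a hypothetical stall reporter:
 * printing the workqueue name and worker description first gives context
 * before the stack trace.  "my_report_stuck_task" is an assumed name.
 */
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/workqueue.h>

static void my_report_stuck_task(struct task_struct *task)
{
        pr_warn("task %s:%d appears stuck\n", task->comm, task_pid_nr(task));
        print_worker_info(KERN_WARNING, task);  /* silent for non-workers */
        sched_show_task(task);                  /* dump the task's stack */
}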
    4642                 :          0 : static void pr_cont_pool_info(struct worker_pool *pool)
    4643                 :            : {
    4644                 :          0 :         pr_cont(" cpus=%*pbl", nr_cpumask_bits, pool->attrs->cpumask);
    4645         [ #  # ]:          0 :         if (pool->node != NUMA_NO_NODE)
    4646                 :          0 :                 pr_cont(" node=%d", pool->node);
    4647                 :          0 :         pr_cont(" flags=0x%x nice=%d", pool->flags, pool->attrs->nice);
    4648                 :          0 : }
    4649                 :            : 
    4650                 :          0 : static void pr_cont_work(bool comma, struct work_struct *work)
    4651                 :            : {
    4652         [ #  # ]:          0 :         if (work->func == wq_barrier_func) {
    4653                 :          0 :                 struct wq_barrier *barr;
    4654                 :            : 
    4655                 :          0 :                 barr = container_of(work, struct wq_barrier, work);
    4656                 :            : 
    4657         [ #  # ]:          0 :                 pr_cont("%s BAR(%d)", comma ? "," : "",
    4658                 :            :                         task_pid_nr(barr->task));
    4659                 :            :         } else {
    4660         [ #  # ]:          0 :                 pr_cont("%s %ps", comma ? "," : "", work->func);
    4661                 :            :         }
    4662                 :          0 : }
    4663                 :            : 
    4664                 :          0 : static void show_pwq(struct pool_workqueue *pwq)
    4665                 :            : {
    4666                 :          0 :         struct worker_pool *pool = pwq->pool;
    4667                 :          0 :         struct work_struct *work;
    4668                 :          0 :         struct worker *worker;
    4669                 :          0 :         bool has_in_flight = false, has_pending = false;
    4670                 :          0 :         int bkt;
    4671                 :            : 
    4672                 :          0 :         pr_info("  pwq %d:", pool->id);
    4673                 :          0 :         pr_cont_pool_info(pool);
    4674                 :            : 
    4675         [ #  # ]:          0 :         pr_cont(" active=%d/%d refcnt=%d%s\n",
    4676                 :            :                 pwq->nr_active, pwq->max_active, pwq->refcnt,
    4677                 :            :                 !list_empty(&pwq->mayday_node) ? " MAYDAY" : "");
    4678                 :            : 
    4679   [ #  #  #  #  :          0 :         hash_for_each(pool->busy_hash, bkt, worker, hentry) {
             #  #  #  # ]
    4680         [ #  # ]:          0 :                 if (worker->current_pwq == pwq) {
    4681                 :            :                         has_in_flight = true;
    4682                 :            :                         break;
    4683                 :            :                 }
    4684                 :            :         }
    4685         [ #  # ]:          0 :         if (has_in_flight) {
    4686                 :          0 :                 bool comma = false;
    4687                 :            : 
    4688                 :          0 :                 pr_info("    in-flight:");
    4689   [ #  #  #  #  :          0 :                 hash_for_each(pool->busy_hash, bkt, worker, hentry) {
             #  #  #  # ]
    4690         [ #  # ]:          0 :                         if (worker->current_pwq != pwq)
    4691                 :          0 :                                 continue;
    4692                 :            : 
    4693   [ #  #  #  # ]:          0 :                         pr_cont("%s %d%s:%ps", comma ? "," : "",
    4694                 :            :                                 task_pid_nr(worker->task),
    4695                 :            :                                 worker->rescue_wq ? "(RESCUER)" : "",
    4696                 :            :                                 worker->current_func);
    4697         [ #  # ]:          0 :                         list_for_each_entry(work, &worker->scheduled, entry)
    4698                 :          0 :                                 pr_cont_work(false, work);
    4699                 :            :                         comma = true;
    4700                 :            :                 }
    4701                 :          0 :                 pr_cont("\n");
    4702                 :            :         }
    4703                 :            : 
    4704         [ #  # ]:          0 :         list_for_each_entry(work, &pool->worklist, entry) {
    4705         [ #  # ]:          0 :                 if (get_work_pwq(work) == pwq) {
    4706                 :            :                         has_pending = true;
    4707                 :            :                         break;
    4708                 :            :                 }
    4709                 :            :         }
    4710         [ #  # ]:          0 :         if (has_pending) {
    4711                 :          0 :                 bool comma = false;
    4712                 :            : 
    4713                 :          0 :                 pr_info("    pending:");
    4714         [ #  # ]:          0 :                 list_for_each_entry(work, &pool->worklist, entry) {
    4715         [ #  # ]:          0 :                         if (get_work_pwq(work) != pwq)
    4716                 :          0 :                                 continue;
    4717                 :            : 
    4718                 :          0 :                         pr_cont_work(comma, work);
    4719                 :          0 :                         comma = !(*work_data_bits(work) & WORK_STRUCT_LINKED);
    4720                 :            :                 }
    4721                 :          0 :                 pr_cont("\n");
    4722                 :            :         }
    4723                 :            : 
    4724         [ #  # ]:          0 :         if (!list_empty(&pwq->delayed_works)) {
    4725                 :          0 :                 bool comma = false;
    4726                 :            : 
    4727                 :          0 :                 pr_info("    delayed:");
    4728         [ #  # ]:          0 :                 list_for_each_entry(work, &pwq->delayed_works, entry) {
    4729                 :          0 :                         pr_cont_work(comma, work);
    4730                 :          0 :                         comma = !(*work_data_bits(work) & WORK_STRUCT_LINKED);
    4731                 :            :                 }
    4732                 :          0 :                 pr_cont("\n");
    4733                 :            :         }
    4734                 :          0 : }
    4735                 :            : 
    4736                 :            : /**
    4737                 :            :  * show_workqueue_state - dump workqueue state
    4738                 :            :  *
    4739                 :            :  * Called from a sysrq handler or try_to_freeze_tasks() and prints out
    4740                 :            :  * all busy workqueues and pools.
    4741                 :            :  */
    4742                 :          0 : void show_workqueue_state(void)
    4743                 :            : {
    4744                 :          0 :         struct workqueue_struct *wq;
    4745                 :          0 :         struct worker_pool *pool;
    4746                 :          0 :         unsigned long flags;
    4747                 :          0 :         int pi;
    4748                 :            : 
    4749                 :          0 :         rcu_read_lock();
    4750                 :            : 
    4751                 :          0 :         pr_info("Showing busy workqueues and worker pools:\n");
    4752                 :            : 
    4753         [ #  # ]:          0 :         list_for_each_entry_rcu(wq, &workqueues, list) {
    4754                 :          0 :                 struct pool_workqueue *pwq;
    4755                 :          0 :                 bool idle = true;
    4756                 :            : 
    4757         [ #  # ]:          0 :                 for_each_pwq(pwq, wq) {
    4758   [ #  #  #  # ]:          0 :                         if (pwq->nr_active || !list_empty(&pwq->delayed_works)) {
    4759                 :          0 :                                 idle = false;
    4760                 :          0 :                                 break;
    4761                 :            :                         }
    4762                 :            :                 }
    4763                 :          0 :                 if (idle)
    4764                 :          0 :                         continue;
    4765                 :            : 
    4766                 :          0 :                 pr_info("workqueue %s: flags=0x%x\n", wq->name, wq->flags);
    4767                 :            : 
    4768         [ #  # ]:          0 :                 for_each_pwq(pwq, wq) {
    4769                 :          0 :                         spin_lock_irqsave(&pwq->pool->lock, flags);
    4770   [ #  #  #  # ]:          0 :                         if (pwq->nr_active || !list_empty(&pwq->delayed_works))
    4771                 :          0 :                                 show_pwq(pwq);
    4772                 :          0 :                         spin_unlock_irqrestore(&pwq->pool->lock, flags);
    4773                 :            :                         /*
    4774                 :            :                          * We could be printing a lot from atomic context, e.g.
    4775                 :            :                          * sysrq-t -> show_workqueue_state(). Avoid triggering
    4776                 :            :                          * hard lockup.
    4777                 :            :                          */
    4778                 :          0 :                         touch_nmi_watchdog();
    4779                 :            :                 }
    4780                 :            :         }
    4781                 :            : 
    4782         [ #  # ]:          0 :         for_each_pool(pool, pi) {
    4783                 :          0 :                 struct worker *worker;
    4784                 :          0 :                 bool first = true;
    4785                 :            : 
    4786                 :          0 :                 spin_lock_irqsave(&pool->lock, flags);
    4787         [ #  # ]:          0 :                 if (pool->nr_workers == pool->nr_idle)
    4788                 :          0 :                         goto next_pool;
    4789                 :            : 
    4790                 :          0 :                 pr_info("pool %d:", pool->id);
    4791                 :          0 :                 pr_cont_pool_info(pool);
    4792                 :          0 :                 pr_cont(" hung=%us workers=%d",
    4793                 :            :                         jiffies_to_msecs(jiffies - pool->watchdog_ts) / 1000,
    4794                 :            :                         pool->nr_workers);
    4795         [ #  # ]:          0 :                 if (pool->manager)
    4796                 :          0 :                         pr_cont(" manager: %d",
    4797                 :            :                                 task_pid_nr(pool->manager->task));
    4798         [ #  # ]:          0 :                 list_for_each_entry(worker, &pool->idle_list, entry) {
    4799         [ #  # ]:          0 :                         pr_cont(" %s%d", first ? "idle: " : "",
    4800                 :            :                                 task_pid_nr(worker->task));
    4801                 :          0 :                         first = false;
    4802                 :            :                 }
    4803                 :          0 :                 pr_cont("\n");
    4804                 :          0 :         next_pool:
    4805                 :          0 :                 spin_unlock_irqrestore(&pool->lock, flags);
    4806                 :            :                 /*
    4807                 :            :                  * We could be printing a lot from atomic context, e.g.
    4808                 :            :                  * sysrq-t -> show_workqueue_state(). Avoid triggering
    4809                 :            :                  * hard lockup.
    4810                 :            :                  */
    4811                 :            :                 touch_nmi_watchdog();
    4812                 :            :         }
    4813                 :            : 
    4814                 :          0 :         rcu_read_unlock();
    4815                 :          0 : }
    4816                 :            : 
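/*
 * A minimal sketch, assuming a hypothetical watchdog path: when forward
 * progress stalls, dumping all busy workqueues and pools often points at
 * the offending work item.  "my_report_stall" is an assumed helper.
 */
#include <linux/printk.h>
#include <linux/workqueue.h>

static void my_report_stall(const char *reason)
{
        pr_warn("stall detected: %s\n", reason);
        show_workqueue_state();         /* prints busy pwqs and pools */
}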
    4817                 :            : /* used to show worker information through /proc/PID/{comm,stat,status} */
    4818                 :        330 : void wq_worker_comm(char *buf, size_t size, struct task_struct *task)
    4819                 :            : {
    4820                 :        330 :         int off;
    4821                 :            : 
    4822                 :            :         /* always show the actual comm */
    4823                 :        330 :         off = strscpy(buf, task->comm, size);
    4824         [ +  - ]:        330 :         if (off < 0)
    4825                 :            :                 return;
    4826                 :            : 
    4827                 :            :         /* stabilize PF_WQ_WORKER and worker pool association */
    4828                 :        330 :         mutex_lock(&wq_pool_attach_mutex);
    4829                 :            : 
    4830         [ +  - ]:        330 :         if (task->flags & PF_WQ_WORKER) {
    4831                 :        330 :                 struct worker *worker = kthread_data(task);
    4832                 :        330 :                 struct worker_pool *pool = worker->pool;
    4833                 :            : 
    4834         [ +  + ]:        330 :                 if (pool) {
    4835                 :        132 :                         spin_lock_irq(&pool->lock);
    4836                 :            :                         /*
    4837                 :            :                          * ->desc tracks information (wq name or
    4838                 :            :                          * set_worker_desc()) for the latest execution.  If
    4839                 :            :                          * current, prepend '+', otherwise '-'.
    4840                 :            :                          */
    4841         [ +  + ]:        132 :                         if (worker->desc[0] != '\0') {
    4842         [ -  + ]:         88 :                                 if (worker->current_work)
    4843                 :          0 :                                         scnprintf(buf + off, size - off, "+%s",
    4844                 :          0 :                                                   worker->desc);
    4845                 :            :                                 else
    4846                 :         88 :                                         scnprintf(buf + off, size - off, "-%s",
    4847                 :         88 :                                                   worker->desc);
    4848                 :            :                         }
    4849                 :        132 :                         spin_unlock_irq(&pool->lock);
    4850                 :            :                 }
    4851                 :            :         }
    4852                 :            : 
    4853                 :        330 :         mutex_unlock(&wq_pool_attach_mutex);
    4854                 :            : }
    4855                 :            : 
    4856                 :            : #ifdef CONFIG_SMP
    4857                 :            : 
    4858                 :            : /*
    4859                 :            :  * CPU hotplug.
    4860                 :            :  *
    4861                 :            :  * There are two challenges in supporting CPU hotplug.  Firstly, there
    4862                 :            :  * are a lot of assumptions on strong associations among work, pwq and
    4863                 :            :  * pool which make migrating pending and scheduled works very
    4864                 :            :  * difficult to implement without impacting hot paths.  Secondly,
    4865                 :            :  * worker pools serve mix of short, long and very long running works making
    4866                 :            :  * blocked draining impractical.
    4867                 :            :  *
    4868                 :            :  * This is solved by allowing the pools to be disassociated from the CPU
    4869                 :            :  * running as an unbound one and allowing it to be reattached later if the
    4870                 :            :  * cpu comes back online.
    4871                 :            :  */
    4872                 :            : 
    4873                 :          0 : static void unbind_workers(int cpu)
    4874                 :            : {
    4875                 :          0 :         struct worker_pool *pool;
    4876                 :          0 :         struct worker *worker;
    4877                 :            : 
    4878         [ #  # ]:          0 :         for_each_cpu_worker_pool(pool, cpu) {
    4879                 :          0 :                 mutex_lock(&wq_pool_attach_mutex);
    4880                 :          0 :                 spin_lock_irq(&pool->lock);
    4881                 :            : 
    4882                 :            :                 /*
    4883                 :            :                  * We've blocked all attach/detach operations. Make all workers
    4884                 :            :                  * unbound and set DISASSOCIATED.  Before this, all workers
    4885                 :            :                  * except for the ones which are still executing works from
    4886                 :            :                  * before the last CPU down must be on the cpu.  After
    4887                 :            :                  * this, they may become diasporas.
    4888                 :            :                  */
    4889         [ #  # ]:          0 :                 for_each_pool_worker(worker, pool)
    4890                 :          0 :                         worker->flags |= WORKER_UNBOUND;
    4891                 :            : 
    4892                 :          0 :                 pool->flags |= POOL_DISASSOCIATED;
    4893                 :            : 
    4894                 :          0 :                 spin_unlock_irq(&pool->lock);
    4895                 :          0 :                 mutex_unlock(&wq_pool_attach_mutex);
    4896                 :            : 
    4897                 :            :                 /*
    4898                 :            :                  * Call schedule() so that we cross rq->lock and thus can
    4899                 :            :                  * guarantee sched callbacks see the %WORKER_UNBOUND flag.
    4900                 :            :                  * This is necessary as scheduler callbacks may be invoked
    4901                 :            :                  * from other cpus.
    4902                 :            :                  */
    4903                 :          0 :                 schedule();
    4904                 :            : 
    4905                 :            :                 /*
    4906                 :            :                  * Sched callbacks are disabled now.  Zap nr_running.
    4907                 :            :                  * After this, nr_running stays zero and need_more_worker()
    4908                 :            :                  * and keep_working() are always true as long as the
    4909                 :            :                  * worklist is not empty.  This pool now behaves as an
    4910                 :            :                  * unbound (in terms of concurrency management) pool which
    4911                 :            :                  * are served by workers tied to the pool.
    4912                 :            :                  */
    4913                 :          0 :                 atomic_set(&pool->nr_running, 0);
    4914                 :            : 
    4915                 :            :                 /*
    4916                 :            :                  * With concurrency management just turned off, a busy
    4917                 :            :                  * worker blocking could lead to lengthy stalls.  Kick off
    4918                 :            :                  * unbound chain execution of currently pending work items.
    4919                 :            :                  */
    4920                 :          0 :                 spin_lock_irq(&pool->lock);
    4921         [ #  # ]:          0 :                 wake_up_worker(pool);
    4922                 :          0 :                 spin_unlock_irq(&pool->lock);
    4923                 :            :         }
    4924                 :          0 : }
    4925                 :            : 
    4926                 :            : /**
    4927                 :            :  * rebind_workers - rebind all workers of a pool to the associated CPU
    4928                 :            :  * @pool: pool of interest
    4929                 :            :  *
    4930                 :            :  * @pool->cpu is coming online.  Rebind all workers to the CPU.
    4931                 :            :  */
    4932                 :          0 : static void rebind_workers(struct worker_pool *pool)
    4933                 :            : {
    4934                 :          0 :         struct worker *worker;
    4935                 :            : 
    4936                 :          0 :         lockdep_assert_held(&wq_pool_attach_mutex);
    4937                 :            : 
    4938                 :            :         /*
    4939                 :            :          * Restore CPU affinity of all workers.  As all idle workers should
    4940                 :            :          * be on the run-queue of the associated CPU before any local
    4941                 :            :          * wake-ups for concurrency management happen, restore CPU affinity
    4942                 :            :          * of all workers first and then clear UNBOUND.  As we're called
    4943                 :            :          * from CPU_ONLINE, the following shouldn't fail.
    4944                 :            :          */
    4945         [ #  # ]:          0 :         for_each_pool_worker(worker, pool)
    4946         [ #  # ]:          0 :                 WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task,
    4947                 :            :                                                   pool->attrs->cpumask) < 0);
    4948                 :            : 
    4949                 :          0 :         spin_lock_irq(&pool->lock);
    4950                 :            : 
    4951                 :          0 :         pool->flags &= ~POOL_DISASSOCIATED;
    4952                 :            : 
    4953         [ #  # ]:          0 :         for_each_pool_worker(worker, pool) {
    4954                 :          0 :                 unsigned int worker_flags = worker->flags;
    4955                 :            : 
    4956                 :            :                 /*
    4957                 :            :                  * A bound idle worker should actually be on the runqueue
    4958                 :            :                  * of the associated CPU for local wake-ups targeting it to
    4959                 :            :                  * work.  Kick all idle workers so that they migrate to the
    4960                 :            :                  * associated CPU.  Doing this in the same loop as
    4961                 :            :                  * replacing UNBOUND with REBOUND is safe as no worker will
    4962                 :            :                  * be bound before @pool->lock is released.
    4963                 :            :                  */
    4964         [ #  # ]:          0 :                 if (worker_flags & WORKER_IDLE)
    4965                 :          0 :                         wake_up_process(worker->task);
    4966                 :            : 
    4967                 :            :                 /*
    4968                 :            :                  * We want to clear UNBOUND but can't directly call
    4969                 :            :                  * worker_clr_flags() or adjust nr_running.  Atomically
    4970                 :            :                  * replace UNBOUND with another NOT_RUNNING flag REBOUND.
    4971                 :            :                  * @worker will clear REBOUND using worker_clr_flags() when
    4972                 :            :                  * it initiates the next execution cycle thus restoring
    4973                 :            :                  * concurrency management.  Note that when or whether
    4974                 :            :                  * @worker clears REBOUND doesn't affect correctness.
    4975                 :            :                  *
    4976                 :            :                  * WRITE_ONCE() is necessary because @worker->flags may be
    4977                 :            :                  * tested without holding any lock in
    4978                 :            :                  * wq_worker_running().  Without it, NOT_RUNNING test may
    4979                 :            :                  * fail incorrectly leading to premature concurrency
    4980                 :            :                  * management operations.
    4981                 :            :                  */
    4982         [ #  # ]:          0 :                 WARN_ON_ONCE(!(worker_flags & WORKER_UNBOUND));
    4983                 :          0 :                 worker_flags |= WORKER_REBOUND;
    4984                 :          0 :                 worker_flags &= ~WORKER_UNBOUND;
    4985                 :          0 :                 WRITE_ONCE(worker->flags, worker_flags);
    4986                 :            :         }
    4987                 :            : 
    4988                 :          0 :         spin_unlock_irq(&pool->lock);
    4989                 :          0 : }
    4990                 :            : 
    4991                 :            : /**
    4992                 :            :  * restore_unbound_workers_cpumask - restore cpumask of unbound workers
    4993                 :            :  * @pool: unbound pool of interest
    4994                 :            :  * @cpu: the CPU which is coming up
    4995                 :            :  *
    4996                 :            :  * An unbound pool may end up with a cpumask which doesn't have any online
     4997                 :            :  * CPUs.  When a worker of such a pool gets scheduled, the scheduler resets
    4998                 :            :  * its cpus_allowed.  If @cpu is in @pool's cpumask which didn't have any
    4999                 :            :  * online CPU before, cpus_allowed of all its workers should be restored.
    5000                 :            :  */
    5001                 :          0 : static void restore_unbound_workers_cpumask(struct worker_pool *pool, int cpu)
    5002                 :            : {
    5003                 :          0 :         static cpumask_t cpumask;
    5004                 :          0 :         struct worker *worker;
    5005                 :            : 
    5006                 :          0 :         lockdep_assert_held(&wq_pool_attach_mutex);
    5007                 :            : 
    5008                 :            :         /* is @cpu allowed for @pool? */
    5009         [ #  # ]:          0 :         if (!cpumask_test_cpu(cpu, pool->attrs->cpumask))
    5010                 :            :                 return;
    5011                 :            : 
    5012                 :          0 :         cpumask_and(&cpumask, pool->attrs->cpumask, cpu_online_mask);
    5013                 :            : 
    5014                 :            :         /* as we're called from CPU_ONLINE, the following shouldn't fail */
    5015         [ #  # ]:          0 :         for_each_pool_worker(worker, pool)
    5016         [ #  # ]:          0 :                 WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, &cpumask) < 0);
    5017                 :            : }
    5018                 :            : 
    5019                 :          0 : int workqueue_prepare_cpu(unsigned int cpu)
    5020                 :            : {
    5021                 :          0 :         struct worker_pool *pool;
    5022                 :            : 
    5023         [ #  # ]:          0 :         for_each_cpu_worker_pool(pool, cpu) {
    5024         [ #  # ]:          0 :                 if (pool->nr_workers)
    5025                 :          0 :                         continue;
    5026         [ #  # ]:          0 :                 if (!create_worker(pool))
    5027                 :            :                         return -ENOMEM;
    5028                 :            :         }
    5029                 :            :         return 0;
    5030                 :            : }
    5031                 :            : 
    5032                 :          0 : int workqueue_online_cpu(unsigned int cpu)
    5033                 :            : {
    5034                 :          0 :         struct worker_pool *pool;
    5035                 :          0 :         struct workqueue_struct *wq;
    5036                 :          0 :         int pi;
    5037                 :            : 
    5038                 :          0 :         mutex_lock(&wq_pool_mutex);
    5039                 :            : 
    5040         [ #  # ]:          0 :         for_each_pool(pool, pi) {
    5041                 :          0 :                 mutex_lock(&wq_pool_attach_mutex);
    5042                 :            : 
    5043         [ #  # ]:          0 :                 if (pool->cpu == cpu)
    5044                 :          0 :                         rebind_workers(pool);
    5045         [ #  # ]:          0 :                 else if (pool->cpu < 0)
    5046                 :          0 :                         restore_unbound_workers_cpumask(pool, cpu);
    5047                 :            : 
    5048                 :          0 :                 mutex_unlock(&wq_pool_attach_mutex);
    5049                 :            :         }
    5050                 :            : 
    5051                 :            :         /* update NUMA affinity of unbound workqueues */
    5052         [ #  # ]:          0 :         list_for_each_entry(wq, &workqueues, list)
    5053                 :          0 :                 wq_update_unbound_numa(wq, cpu, true);
    5054                 :            : 
    5055                 :          0 :         mutex_unlock(&wq_pool_mutex);
    5056                 :          0 :         return 0;
    5057                 :            : }
    5058                 :            : 
    5059                 :          0 : int workqueue_offline_cpu(unsigned int cpu)
    5060                 :            : {
    5061                 :          0 :         struct workqueue_struct *wq;
    5062                 :            : 
    5063                 :            :         /* unbinding per-cpu workers should happen on the local CPU */
    5064   [ #  #  #  # ]:          0 :         if (WARN_ON(cpu != smp_processor_id()))
    5065                 :            :                 return -1;
    5066                 :            : 
    5067                 :          0 :         unbind_workers(cpu);
    5068                 :            : 
    5069                 :            :         /* update NUMA affinity of unbound workqueues */
    5070                 :          0 :         mutex_lock(&wq_pool_mutex);
    5071         [ #  # ]:          0 :         list_for_each_entry(wq, &workqueues, list)
    5072                 :          0 :                 wq_update_unbound_numa(wq, cpu, false);
    5073                 :          0 :         mutex_unlock(&wq_pool_mutex);
    5074                 :            : 
    5075                 :          0 :         return 0;
    5076                 :            : }
    5077                 :            : 
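/*
 * A hedged sketch of the same online/offline callback pattern from a
 * driver's point of view, using a dynamically allocated hotplug state.
 * The workqueue callbacks above are wired into fixed hotplug states by the
 * CPU hotplug core; "my_cpu_online"/"my_cpu_offline" are assumed names.
 */
#include <linux/cpuhotplug.h>
#include <linux/init.h>

static int my_cpu_online(unsigned int cpu)
{
        /* rebind/allocate per-CPU resources, like workqueue_online_cpu() */
        return 0;
}

static int my_cpu_offline(unsigned int cpu)
{
        /* unbind work from @cpu, like workqueue_offline_cpu() */
        return 0;
}

static int __init my_hotplug_init(void)
{
        int ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "mydrv:online",
                                    my_cpu_online, my_cpu_offline);

        return ret < 0 ? ret : 0;       /* dynamic states return the state number */
}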
    5078                 :            : struct work_for_cpu {
    5079                 :            :         struct work_struct work;
    5080                 :            :         long (*fn)(void *);
    5081                 :            :         void *arg;
    5082                 :            :         long ret;
    5083                 :            : };
    5084                 :            : 
    5085                 :          0 : static void work_for_cpu_fn(struct work_struct *work)
    5086                 :            : {
    5087                 :          0 :         struct work_for_cpu *wfc = container_of(work, struct work_for_cpu, work);
    5088                 :            : 
    5089                 :          0 :         wfc->ret = wfc->fn(wfc->arg);
    5090                 :          0 : }
    5091                 :            : 
    5092                 :            : /**
    5093                 :            :  * work_on_cpu - run a function in thread context on a particular cpu
    5094                 :            :  * @cpu: the cpu to run on
    5095                 :            :  * @fn: the function to run
    5096                 :            :  * @arg: the function arg
    5097                 :            :  *
    5098                 :            :  * It is up to the caller to ensure that the cpu doesn't go offline.
    5099                 :            :  * The caller must not hold any locks which would prevent @fn from completing.
    5100                 :            :  *
    5101                 :            :  * Return: The value @fn returns.
    5102                 :            :  */
    5103                 :          0 : long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
    5104                 :            : {
    5105                 :          0 :         struct work_for_cpu wfc = { .fn = fn, .arg = arg };
    5106                 :            : 
    5107                 :          0 :         INIT_WORK_ONSTACK(&wfc.work, work_for_cpu_fn);
    5108                 :          0 :         schedule_work_on(cpu, &wfc.work);
    5109                 :          0 :         flush_work(&wfc.work);
    5110                 :          0 :         destroy_work_on_stack(&wfc.work);
    5111                 :          0 :         return wfc.ret;
    5112                 :            : }
    5113                 :            : EXPORT_SYMBOL_GPL(work_on_cpu);
    5114                 :            : 
    5115                 :            : /**
    5116                 :            :  * work_on_cpu_safe - run a function in thread context on a particular cpu
    5117                 :            :  * @cpu: the cpu to run on
    5118                 :            :  * @fn:  the function to run
    5119                 :            :  * @arg: the function argument
    5120                 :            :  *
    5121                 :            :  * Disables CPU hotplug and calls work_on_cpu(). The caller must not hold
    5122                 :            :  * any locks which would prevent @fn from completing.
    5123                 :            :  *
    5124                 :            :  * Return: The value @fn returns.
    5125                 :            :  */
    5126                 :          0 : long work_on_cpu_safe(int cpu, long (*fn)(void *), void *arg)
    5127                 :            : {
    5128                 :          0 :         long ret = -ENODEV;
    5129                 :            : 
    5130                 :          0 :         get_online_cpus();
    5131         [ #  # ]:          0 :         if (cpu_online(cpu))
    5132                 :          0 :                 ret = work_on_cpu(cpu, fn, arg);
    5133                 :          0 :         put_online_cpus();
    5134                 :          0 :         return ret;
    5135                 :            : }
    5136                 :            : EXPORT_SYMBOL_GPL(work_on_cpu_safe);
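/*
 * A minimal usage sketch, assuming a hypothetical per-CPU query:
 * work_on_cpu_safe() pins the call to @cpu and blocks CPU hotplug for its
 * duration.  "my_read_cpu_id" and "my_query_cpu" are assumed names.
 */
#include <linux/smp.h>
#include <linux/workqueue.h>

static long my_read_cpu_id(void *arg)
{
        /* runs in process context on the requested CPU */
        return raw_smp_processor_id();
}

static long my_query_cpu(int cpu)
{
        return work_on_cpu_safe(cpu, my_read_cpu_id, NULL);
}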
    5137                 :            : #endif /* CONFIG_SMP */
    5138                 :            : 
    5139                 :            : #ifdef CONFIG_FREEZER
    5140                 :            : 
    5141                 :            : /**
    5142                 :            :  * freeze_workqueues_begin - begin freezing workqueues
    5143                 :            :  *
    5144                 :            :  * Start freezing workqueues.  After this function returns, all freezable
    5145                 :            :  * workqueues will queue new works to their delayed_works list instead of
    5146                 :            :  * pool->worklist.
    5147                 :            :  *
    5148                 :            :  * CONTEXT:
    5149                 :            :  * Grabs and releases wq_pool_mutex, wq->mutex and pool->lock's.
    5150                 :            :  */
    5151                 :          0 : void freeze_workqueues_begin(void)
    5152                 :            : {
    5153                 :          0 :         struct workqueue_struct *wq;
    5154                 :          0 :         struct pool_workqueue *pwq;
    5155                 :            : 
    5156                 :          0 :         mutex_lock(&wq_pool_mutex);
    5157                 :            : 
    5158         [ #  # ]:          0 :         WARN_ON_ONCE(workqueue_freezing);
    5159                 :          0 :         workqueue_freezing = true;
    5160                 :            : 
    5161         [ #  # ]:          0 :         list_for_each_entry(wq, &workqueues, list) {
    5162                 :          0 :                 mutex_lock(&wq->mutex);
    5163         [ #  # ]:          0 :                 for_each_pwq(pwq, wq)
    5164                 :          0 :                         pwq_adjust_max_active(pwq);
    5165                 :          0 :                 mutex_unlock(&wq->mutex);
    5166                 :            :         }
    5167                 :            : 
    5168                 :          0 :         mutex_unlock(&wq_pool_mutex);
    5169                 :          0 : }
    5170                 :            : 
    5171                 :            : /**
    5172                 :            :  * freeze_workqueues_busy - are freezable workqueues still busy?
    5173                 :            :  *
    5174                 :            :  * Check whether freezing is complete.  This function must be called
    5175                 :            :  * between freeze_workqueues_begin() and thaw_workqueues().
    5176                 :            :  *
    5177                 :            :  * CONTEXT:
    5178                 :            :  * Grabs and releases wq_pool_mutex.
    5179                 :            :  *
    5180                 :            :  * Return:
    5181                 :            :  * %true if some freezable workqueues are still busy.  %false if freezing
    5182                 :            :  * is complete.
    5183                 :            :  */
    5184                 :          0 : bool freeze_workqueues_busy(void)
    5185                 :            : {
    5186                 :          0 :         bool busy = false;
    5187                 :          0 :         struct workqueue_struct *wq;
    5188                 :          0 :         struct pool_workqueue *pwq;
    5189                 :            : 
    5190                 :          0 :         mutex_lock(&wq_pool_mutex);
    5191                 :            : 
    5192         [ #  # ]:          0 :         WARN_ON_ONCE(!workqueue_freezing);
    5193                 :            : 
    5194         [ #  # ]:          0 :         list_for_each_entry(wq, &workqueues, list) {
    5195         [ #  # ]:          0 :                 if (!(wq->flags & WQ_FREEZABLE))
    5196                 :          0 :                         continue;
    5197                 :            :                 /*
    5198                 :            :                  * nr_active is monotonically decreasing.  It's safe
    5199                 :            :                  * to peek without lock.
    5200                 :            :                  */
    5201                 :          0 :                 rcu_read_lock();
    5202         [ #  # ]:          0 :                 for_each_pwq(pwq, wq) {
    5203         [ #  # ]:          0 :                         WARN_ON_ONCE(pwq->nr_active < 0);
    5204         [ #  # ]:          0 :                         if (pwq->nr_active) {
    5205                 :          0 :                                 busy = true;
    5206                 :          0 :                                 rcu_read_unlock();
    5207                 :          0 :                                 goto out_unlock;
    5208                 :            :                         }
    5209                 :            :                 }
    5210                 :          0 :                 rcu_read_unlock();
    5211                 :            :         }
    5212                 :          0 : out_unlock:
    5213                 :          0 :         mutex_unlock(&wq_pool_mutex);
    5214                 :          0 :         return busy;
    5215                 :            : }
    5216                 :            : 
    5217                 :            : /**
    5218                 :            :  * thaw_workqueues - thaw workqueues
    5219                 :            :  *
    5220                 :            :  * Thaw workqueues.  Normal queueing is restored and all collected
    5221                 :            :  * frozen works are transferred to their respective pool worklists.
    5222                 :            :  *
    5223                 :            :  * CONTEXT:
    5224                 :            :  * Grabs and releases wq_pool_mutex, wq->mutex and pool->lock's.
    5225                 :            :  */
    5226                 :          0 : void thaw_workqueues(void)
    5227                 :            : {
    5228                 :          0 :         struct workqueue_struct *wq;
    5229                 :          0 :         struct pool_workqueue *pwq;
    5230                 :            : 
    5231                 :          0 :         mutex_lock(&wq_pool_mutex);
    5232                 :            : 
    5233         [ #  # ]:          0 :         if (!workqueue_freezing)
    5234                 :          0 :                 goto out_unlock;
    5235                 :            : 
    5236                 :          0 :         workqueue_freezing = false;
    5237                 :            : 
    5238                 :            :         /* restore max_active and repopulate worklist */
    5239         [ #  # ]:          0 :         list_for_each_entry(wq, &workqueues, list) {
    5240                 :          0 :                 mutex_lock(&wq->mutex);
    5241         [ #  # ]:          0 :                 for_each_pwq(pwq, wq)
    5242                 :          0 :                         pwq_adjust_max_active(pwq);
    5243                 :          0 :                 mutex_unlock(&wq->mutex);
    5244                 :            :         }
    5245                 :            : 
    5246                 :          0 : out_unlock:
    5247                 :          0 :         mutex_unlock(&wq_pool_mutex);
    5248                 :          0 : }
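/*
 * A hedged sketch of the intended calling sequence (the real caller is the
 * PM freezer core); "my_freeze_workqueues" is an assumed wrapper.
 */
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/workqueue.h>

static int my_freeze_workqueues(void)
{
        int tries = 100;

        freeze_workqueues_begin();
        while (freeze_workqueues_busy()) {
                if (!--tries) {
                        thaw_workqueues();      /* give up and undo freezing */
                        return -EBUSY;
                }
                msleep(10);
        }
        return 0;                               /* all freezable wqs quiesced */
}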
    5249                 :            : #endif /* CONFIG_FREEZER */
    5250                 :            : 
    5251                 :          0 : static int workqueue_apply_unbound_cpumask(void)
    5252                 :            : {
    5253                 :          0 :         LIST_HEAD(ctxs);
    5254                 :          0 :         int ret = 0;
    5255                 :          0 :         struct workqueue_struct *wq;
    5256                 :          0 :         struct apply_wqattrs_ctx *ctx, *n;
    5257                 :            : 
    5258                 :          0 :         lockdep_assert_held(&wq_pool_mutex);
    5259                 :            : 
    5260         [ #  # ]:          0 :         list_for_each_entry(wq, &workqueues, list) {
    5261         [ #  # ]:          0 :                 if (!(wq->flags & WQ_UNBOUND))
    5262                 :          0 :                         continue;
    5263                 :            :                 /* creating multiple pwqs breaks ordering guarantee */
    5264         [ #  # ]:          0 :                 if (wq->flags & __WQ_ORDERED)
    5265                 :          0 :                         continue;
    5266                 :            : 
    5267                 :          0 :                 ctx = apply_wqattrs_prepare(wq, wq->unbound_attrs);
    5268         [ #  # ]:          0 :                 if (!ctx) {
    5269                 :            :                         ret = -ENOMEM;
    5270                 :            :                         break;
    5271                 :            :                 }
    5272                 :            : 
    5273                 :          0 :                 list_add_tail(&ctx->list, &ctxs);
    5274                 :            :         }
    5275                 :            : 
    5276         [ #  # ]:          0 :         list_for_each_entry_safe(ctx, n, &ctxs, list) {
    5277         [ #  # ]:          0 :                 if (!ret)
    5278                 :          0 :                         apply_wqattrs_commit(ctx);
    5279                 :          0 :                 apply_wqattrs_cleanup(ctx);
    5280                 :            :         }
    5281                 :            : 
    5282                 :          0 :         return ret;
    5283                 :            : }
    5284                 :            : 
    5285                 :            : /**
    5286                 :            :  *  workqueue_set_unbound_cpumask - Set the low-level unbound cpumask
    5287                 :            :  *  @cpumask: the cpumask to set
    5288                 :            :  *
    5289                 :            :  *  The low-level workqueues cpumask is a global cpumask that limits
     5290                 :            :  *  the affinity of all unbound workqueues.  This function checks @cpumask,
     5291                 :            :  *  applies it to all unbound workqueues and updates all of their pwqs.
    5292                 :            :  *
     5293                 :            :  *  Return:     0       - Success
    5294                 :            :  *              -EINVAL - Invalid @cpumask
    5295                 :            :  *              -ENOMEM - Failed to allocate memory for attrs or pwqs.
    5296                 :            :  */
    5297                 :          0 : int workqueue_set_unbound_cpumask(cpumask_var_t cpumask)
    5298                 :            : {
    5299                 :          0 :         int ret = -EINVAL;
    5300                 :          0 :         cpumask_var_t saved_cpumask;
    5301                 :            : 
    5302         [ #  # ]:          0 :         if (!zalloc_cpumask_var(&saved_cpumask, GFP_KERNEL))
    5303                 :            :                 return -ENOMEM;
    5304                 :            : 
    5305                 :            :         /*
    5306                 :            :          * Not excluding isolated cpus on purpose.
    5307                 :            :          * If the user wishes to include them, we allow that.
    5308                 :            :          */
    5309         [ #  # ]:          0 :         cpumask_and(cpumask, cpumask, cpu_possible_mask);
    5310         [ #  # ]:          0 :         if (!cpumask_empty(cpumask)) {
    5311                 :          0 :                 apply_wqattrs_lock();
    5312                 :            : 
    5313                 :            :                 /* save the old wq_unbound_cpumask. */
    5314                 :          0 :                 cpumask_copy(saved_cpumask, wq_unbound_cpumask);
    5315                 :            : 
    5316                 :            :                 /* update wq_unbound_cpumask at first and apply it to wqs. */
    5317                 :          0 :                 cpumask_copy(wq_unbound_cpumask, cpumask);
    5318                 :          0 :                 ret = workqueue_apply_unbound_cpumask();
    5319                 :            : 
    5320                 :            :                 /* restore the wq_unbound_cpumask when failed. */
    5321         [ #  # ]:          0 :                 if (ret < 0)
    5322                 :          0 :                         cpumask_copy(wq_unbound_cpumask, saved_cpumask);
    5323                 :            : 
    5324                 :          0 :                 apply_wqattrs_unlock();
    5325                 :            :         }
    5326                 :            : 
    5327                 :          0 :         free_cpumask_var(saved_cpumask);
    5328                 :          0 :         return ret;
    5329                 :            : }
    5330                 :            : 
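/*
 * A minimal sketch, assuming built-in code (the function is not exported in
 * this file): restrict all unbound workqueues to CPU 0.
 * "my_restrict_unbound_cpus" is an assumed helper.
 */
#include <linux/cpumask.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

static int my_restrict_unbound_cpus(void)
{
        cpumask_var_t mask;
        int ret;

        if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
                return -ENOMEM;

        cpumask_set_cpu(0, mask);
        ret = workqueue_set_unbound_cpumask(mask);      /* 0, -EINVAL or -ENOMEM */
        free_cpumask_var(mask);
        return ret;
}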
    5331                 :            : #ifdef CONFIG_SYSFS
    5332                 :            : /*
     5333                 :            :  * Workqueues with the WQ_SYSFS flag set are visible to userland via
    5334                 :            :  * /sys/bus/workqueue/devices/WQ_NAME.  All visible workqueues have the
    5335                 :            :  * following attributes.
    5336                 :            :  *
    5337                 :            :  *  per_cpu     RO bool : whether the workqueue is per-cpu or unbound
    5338                 :            :  *  max_active  RW int  : maximum number of in-flight work items
    5339                 :            :  *
    5340                 :            :  * Unbound workqueues have the following extra attributes.
    5341                 :            :  *
    5342                 :            :  *  pool_ids    RO int  : the associated pool IDs for each node
    5343                 :            :  *  nice        RW int  : nice value of the workers
    5344                 :            :  *  cpumask     RW mask : bitmask of allowed CPUs for the workers
     5345                 :            :  *  numa        RW bool : whether to enable NUMA affinity
    5346                 :            :  */
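/*
 * A hedged sketch of creating a workqueue that exposes these attributes
 * under /sys/bus/workqueue/devices/.  "my_wq" is an assumed name.
 */
#include <linux/errno.h>
#include <linux/workqueue.h>

static struct workqueue_struct *my_wq;

static int my_create_sysfs_wq(void)
{
        /* WQ_SYSFS adds per_cpu/max_active; WQ_UNBOUND adds the rest */
        my_wq = alloc_workqueue("my_wq", WQ_UNBOUND | WQ_SYSFS, 0);
        return my_wq ? 0 : -ENOMEM;
}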
    5347                 :            : struct wq_device {
    5348                 :            :         struct workqueue_struct         *wq;
    5349                 :            :         struct device                   dev;
    5350                 :            : };
    5351                 :            : 
    5352                 :          0 : static struct workqueue_struct *dev_to_wq(struct device *dev)
    5353                 :            : {
    5354                 :          0 :         struct wq_device *wq_dev = container_of(dev, struct wq_device, dev);
    5355                 :            : 
    5356                 :          0 :         return wq_dev->wq;
    5357                 :            : }
    5358                 :            : 
    5359                 :          0 : static ssize_t per_cpu_show(struct device *dev, struct device_attribute *attr,
    5360                 :            :                             char *buf)
    5361                 :            : {
    5362                 :          0 :         struct workqueue_struct *wq = dev_to_wq(dev);
    5363                 :            : 
    5364                 :          0 :         return scnprintf(buf, PAGE_SIZE, "%d\n", (bool)!(wq->flags & WQ_UNBOUND));
    5365                 :            : }
    5366                 :            : static DEVICE_ATTR_RO(per_cpu);
    5367                 :            : 
    5368                 :          0 : static ssize_t max_active_show(struct device *dev,
    5369                 :            :                                struct device_attribute *attr, char *buf)
    5370                 :            : {
    5371                 :          0 :         struct workqueue_struct *wq = dev_to_wq(dev);
    5372                 :            : 
    5373                 :          0 :         return scnprintf(buf, PAGE_SIZE, "%d\n", wq->saved_max_active);
    5374                 :            : }
    5375                 :            : 
    5376                 :          0 : static ssize_t max_active_store(struct device *dev,
    5377                 :            :                                 struct device_attribute *attr, const char *buf,
    5378                 :            :                                 size_t count)
    5379                 :            : {
    5380                 :          0 :         struct workqueue_struct *wq = dev_to_wq(dev);
    5381                 :          0 :         int val;
    5382                 :            : 
    5383   [ #  #  #  # ]:          0 :         if (sscanf(buf, "%d", &val) != 1 || val <= 0)
    5384                 :            :                 return -EINVAL;
    5385                 :            : 
    5386                 :          0 :         workqueue_set_max_active(wq, val);
    5387                 :          0 :         return count;
    5388                 :            : }
    5389                 :            : static DEVICE_ATTR_RW(max_active);
    5390                 :            : 
    5391                 :            : static struct attribute *wq_sysfs_attrs[] = {
    5392                 :            :         &dev_attr_per_cpu.attr,
    5393                 :            :         &dev_attr_max_active.attr,
    5394                 :            :         NULL,
    5395                 :            : };
    5396                 :            : ATTRIBUTE_GROUPS(wq_sysfs);
    5397                 :            : 
    5398                 :          0 : static ssize_t wq_pool_ids_show(struct device *dev,
    5399                 :            :                                 struct device_attribute *attr, char *buf)
    5400                 :            : {
    5401                 :          0 :         struct workqueue_struct *wq = dev_to_wq(dev);
    5402                 :          0 :         const char *delim = "";
    5403                 :          0 :         int node, written = 0;
    5404                 :            : 
    5405                 :          0 :         get_online_cpus();
    5406                 :          0 :         rcu_read_lock();
    5407         [ #  # ]:          0 :         for_each_node(node) {
    5408         [ #  # ]:          0 :                 written += scnprintf(buf + written, PAGE_SIZE - written,
    5409                 :            :                                      "%s%d:%d", delim, node,
    5410                 :          0 :                                      unbound_pwq_by_node(wq, node)->pool->id);
    5411                 :          0 :                 delim = " ";
    5412                 :            :         }
    5413                 :          0 :         written += scnprintf(buf + written, PAGE_SIZE - written, "\n");
    5414                 :          0 :         rcu_read_unlock();
    5415                 :          0 :         put_online_cpus();
    5416                 :            : 
    5417                 :          0 :         return written;
    5418                 :            : }
    5419                 :            : 
    5420                 :          0 : static ssize_t wq_nice_show(struct device *dev, struct device_attribute *attr,
    5421                 :            :                             char *buf)
    5422                 :            : {
    5423                 :          0 :         struct workqueue_struct *wq = dev_to_wq(dev);
    5424                 :          0 :         int written;
    5425                 :            : 
    5426                 :          0 :         mutex_lock(&wq->mutex);
    5427                 :          0 :         written = scnprintf(buf, PAGE_SIZE, "%d\n", wq->unbound_attrs->nice);
    5428                 :          0 :         mutex_unlock(&wq->mutex);
    5429                 :            : 
    5430                 :          0 :         return written;
    5431                 :            : }
    5432                 :            : 
    5433                 :            : /* prepare workqueue_attrs for sysfs store operations */
    5434                 :          0 : static struct workqueue_attrs *wq_sysfs_prep_attrs(struct workqueue_struct *wq)
    5435                 :            : {
    5436                 :          0 :         struct workqueue_attrs *attrs;
    5437                 :            : 
    5438                 :          0 :         lockdep_assert_held(&wq_pool_mutex);
    5439                 :            : 
    5440                 :          0 :         attrs = alloc_workqueue_attrs();
    5441         [ #  # ]:          0 :         if (!attrs)
    5442                 :            :                 return NULL;
    5443                 :            : 
    5444                 :          0 :         copy_workqueue_attrs(attrs, wq->unbound_attrs);
    5445                 :          0 :         return attrs;
    5446                 :            : }
    5447                 :            : 
    5448                 :          0 : static ssize_t wq_nice_store(struct device *dev, struct device_attribute *attr,
    5449                 :            :                              const char *buf, size_t count)
    5450                 :            : {
    5451                 :          0 :         struct workqueue_struct *wq = dev_to_wq(dev);
    5452                 :          0 :         struct workqueue_attrs *attrs;
    5453                 :          0 :         int ret = -ENOMEM;
    5454                 :            : 
    5455                 :          0 :         apply_wqattrs_lock();
    5456                 :            : 
    5457                 :          0 :         attrs = wq_sysfs_prep_attrs(wq);
    5458         [ #  # ]:          0 :         if (!attrs)
    5459                 :          0 :                 goto out_unlock;
    5460                 :            : 
    5461         [ #  # ]:          0 :         if (sscanf(buf, "%d", &attrs->nice) == 1 &&
    5462   [ #  #  #  # ]:          0 :             attrs->nice >= MIN_NICE && attrs->nice <= MAX_NICE)
    5463                 :          0 :                 ret = apply_workqueue_attrs_locked(wq, attrs);
    5464                 :            :         else
    5465                 :            :                 ret = -EINVAL;
    5466                 :            : 
    5467                 :          0 : out_unlock:
    5468                 :          0 :         apply_wqattrs_unlock();
    5469         [ #  # ]:          0 :         free_workqueue_attrs(attrs);
    5470         [ #  # ]:          0 :         return ret ?: count;
    5471                 :            : }
    5472                 :            : 
    5473                 :          0 : static ssize_t wq_cpumask_show(struct device *dev,
    5474                 :            :                                struct device_attribute *attr, char *buf)
    5475                 :            : {
    5476                 :          0 :         struct workqueue_struct *wq = dev_to_wq(dev);
    5477                 :          0 :         int written;
    5478                 :            : 
    5479                 :          0 :         mutex_lock(&wq->mutex);
    5480                 :          0 :         written = scnprintf(buf, PAGE_SIZE, "%*pb\n",
    5481                 :          0 :                             cpumask_pr_args(wq->unbound_attrs->cpumask));
    5482                 :          0 :         mutex_unlock(&wq->mutex);
    5483                 :          0 :         return written;
    5484                 :            : }
    5485                 :            : 
    5486                 :          0 : static ssize_t wq_cpumask_store(struct device *dev,
    5487                 :            :                                 struct device_attribute *attr,
    5488                 :            :                                 const char *buf, size_t count)
    5489                 :            : {
    5490                 :          0 :         struct workqueue_struct *wq = dev_to_wq(dev);
    5491                 :          0 :         struct workqueue_attrs *attrs;
    5492                 :          0 :         int ret = -ENOMEM;
    5493                 :            : 
    5494                 :          0 :         apply_wqattrs_lock();
    5495                 :            : 
    5496                 :          0 :         attrs = wq_sysfs_prep_attrs(wq);
    5497         [ #  # ]:          0 :         if (!attrs)
    5498                 :          0 :                 goto out_unlock;
    5499                 :            : 
    5500                 :          0 :         ret = cpumask_parse(buf, attrs->cpumask);
    5501         [ #  # ]:          0 :         if (!ret)
    5502                 :          0 :                 ret = apply_workqueue_attrs_locked(wq, attrs);
    5503                 :            : 
    5504                 :          0 : out_unlock:
    5505                 :          0 :         apply_wqattrs_unlock();
    5506         [ #  # ]:          0 :         free_workqueue_attrs(attrs);
    5507         [ #  # ]:          0 :         return ret ?: count;
    5508                 :            : }
    5509                 :            : 
    5510                 :          0 : static ssize_t wq_numa_show(struct device *dev, struct device_attribute *attr,
    5511                 :            :                             char *buf)
    5512                 :            : {
    5513                 :          0 :         struct workqueue_struct *wq = dev_to_wq(dev);
    5514                 :          0 :         int written;
    5515                 :            : 
    5516                 :          0 :         mutex_lock(&wq->mutex);
    5517                 :          0 :         written = scnprintf(buf, PAGE_SIZE, "%d\n",
    5518                 :          0 :                             !wq->unbound_attrs->no_numa);
    5519                 :          0 :         mutex_unlock(&wq->mutex);
    5520                 :            : 
    5521                 :          0 :         return written;
    5522                 :            : }
    5523                 :            : 
    5524                 :          0 : static ssize_t wq_numa_store(struct device *dev, struct device_attribute *attr,
    5525                 :            :                              const char *buf, size_t count)
    5526                 :            : {
    5527                 :          0 :         struct workqueue_struct *wq = dev_to_wq(dev);
    5528                 :          0 :         struct workqueue_attrs *attrs;
    5529                 :          0 :         int v, ret = -ENOMEM;
    5530                 :            : 
    5531                 :          0 :         apply_wqattrs_lock();
    5532                 :            : 
    5533                 :          0 :         attrs = wq_sysfs_prep_attrs(wq);
    5534         [ #  # ]:          0 :         if (!attrs)
    5535                 :          0 :                 goto out_unlock;
    5536                 :            : 
    5537                 :          0 :         ret = -EINVAL;
    5538         [ #  # ]:          0 :         if (sscanf(buf, "%d", &v) == 1) {
    5539                 :          0 :                 attrs->no_numa = !v;
    5540                 :          0 :                 ret = apply_workqueue_attrs_locked(wq, attrs);
    5541                 :            :         }
    5542                 :            : 
    5543                 :          0 : out_unlock:
    5544                 :          0 :         apply_wqattrs_unlock();
    5545         [ #  # ]:          0 :         free_workqueue_attrs(attrs);
    5546         [ #  # ]:          0 :         return ret ?: count;
    5547                 :            : }
    5548                 :            : 
    5549                 :            : static struct device_attribute wq_sysfs_unbound_attrs[] = {
    5550                 :            :         __ATTR(pool_ids, 0444, wq_pool_ids_show, NULL),
    5551                 :            :         __ATTR(nice, 0644, wq_nice_show, wq_nice_store),
    5552                 :            :         __ATTR(cpumask, 0644, wq_cpumask_show, wq_cpumask_store),
    5553                 :            :         __ATTR(numa, 0644, wq_numa_show, wq_numa_store),
    5554                 :            :         __ATTR_NULL,
    5555                 :            : };
    5556                 :            : 
    5557                 :            : static struct bus_type wq_subsys = {
    5558                 :            :         .name                           = "workqueue",
    5559                 :            :         .dev_groups                     = wq_sysfs_groups,
    5560                 :            : };
    5561                 :            : 
    5562                 :          0 : static ssize_t wq_unbound_cpumask_show(struct device *dev,
    5563                 :            :                 struct device_attribute *attr, char *buf)
    5564                 :            : {
    5565                 :          0 :         int written;
    5566                 :            : 
    5567                 :          0 :         mutex_lock(&wq_pool_mutex);
    5568                 :          0 :         written = scnprintf(buf, PAGE_SIZE, "%*pb\n",
    5569                 :            :                             cpumask_pr_args(wq_unbound_cpumask));
    5570                 :          0 :         mutex_unlock(&wq_pool_mutex);
    5571                 :            : 
    5572                 :          0 :         return written;
    5573                 :            : }
    5574                 :            : 
    5575                 :          0 : static ssize_t wq_unbound_cpumask_store(struct device *dev,
    5576                 :            :                 struct device_attribute *attr, const char *buf, size_t count)
    5577                 :            : {
    5578                 :          0 :         cpumask_var_t cpumask;
    5579                 :          0 :         int ret;
    5580                 :            : 
    5581                 :          0 :         if (!zalloc_cpumask_var(&cpumask, GFP_KERNEL))
    5582                 :            :                 return -ENOMEM;
    5583                 :            : 
    5584                 :          0 :         ret = cpumask_parse(buf, cpumask);
    5585         [ #  # ]:          0 :         if (!ret)
    5586                 :          0 :                 ret = workqueue_set_unbound_cpumask(cpumask);
    5587                 :            : 
    5588         [ #  # ]:          0 :         free_cpumask_var(cpumask);
    5589         [ #  # ]:          0 :         return ret ? ret : count;
    5590                 :            : }
    5591                 :            : 
    5592                 :            : static struct device_attribute wq_sysfs_cpumask_attr =
    5593                 :            :         __ATTR(cpumask, 0644, wq_unbound_cpumask_show,
    5594                 :            :                wq_unbound_cpumask_store);
    5595                 :            : 
    5596                 :         11 : static int __init wq_sysfs_init(void)
    5597                 :            : {
    5598                 :         11 :         int err;
    5599                 :            : 
    5600                 :         11 :         err = subsys_virtual_register(&wq_subsys, NULL);
    5601         [ +  - ]:         11 :         if (err)
    5602                 :            :                 return err;
    5603                 :            : 
    5604                 :         11 :         return device_create_file(wq_subsys.dev_root, &wq_sysfs_cpumask_attr);
    5605                 :            : }
    5606                 :            : core_initcall(wq_sysfs_init);
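
Besides the per-workqueue attributes, wq_sysfs_init() above also creates a single bus-level cpumask file whose store handler feeds workqueue_set_unbound_cpumask(). A small userspace sketch of retuning it follows; the /sys/devices/virtual/workqueue/cpumask path is the conventional location of that file on the virtual workqueue subsystem, and the "f" mask (CPUs 0-3) is just an example value.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/sys/devices/virtual/workqueue/cpumask";
	const char *mask = "f\n";	/* hex cpumask: CPUs 0-3 */
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open cpumask");
		return 1;
	}
	/* the kernel parses this with cpumask_parse() and applies it to
	 * every unbound workqueue via workqueue_set_unbound_cpumask() */
	if (write(fd, mask, strlen(mask)) < 0)
		perror("write cpumask");
	close(fd);
	return 0;
}
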
    5607                 :            : 
    5608                 :          0 : static void wq_device_release(struct device *dev)
    5609                 :            : {
    5610                 :          0 :         struct wq_device *wq_dev = container_of(dev, struct wq_device, dev);
    5611                 :            : 
    5612                 :          0 :         kfree(wq_dev);
    5613                 :          0 : }
    5614                 :            : 
    5615                 :            : /**
    5616                 :            :  * workqueue_sysfs_register - make a workqueue visible in sysfs
    5617                 :            :  * @wq: the workqueue to register
    5618                 :            :  *
    5619                 :            :  * Expose @wq in sysfs under /sys/bus/workqueue/devices.
    5620                 :            :  * alloc_workqueue*() automatically calls this function if WQ_SYSFS is set,
    5621                 :            :  * which is the preferred method.
    5622                 :            :  *
    5623                 :            :  * A workqueue user should use this function directly only if it wants to apply
    5624                 :            :  * workqueue_attrs before making the workqueue visible in sysfs; otherwise,
    5625                 :            :  * apply_workqueue_attrs() may race against userland updating the
    5626                 :            :  * attributes.
    5627                 :            :  *
    5628                 :            :  * Return: 0 on success, -errno on failure.
    5629                 :            :  */
    5630                 :         11 : int workqueue_sysfs_register(struct workqueue_struct *wq)
    5631                 :            : {
    5632                 :         11 :         struct wq_device *wq_dev;
    5633                 :         11 :         int ret;
    5634                 :            : 
    5635                 :            :         /*
    5636                 :            :          * Adjusting max_active or creating new pwqs by applying
    5637                 :            :          * attributes breaks the ordering guarantee.  Disallow exposing ordered
    5638                 :            :          * workqueues.
    5639                 :            :          */
    5640   [ -  +  +  - ]:         11 :         if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
    5641                 :            :                 return -EINVAL;
    5642                 :            : 
    5643                 :         11 :         wq->wq_dev = wq_dev = kzalloc(sizeof(*wq_dev), GFP_KERNEL);
    5644         [ +  - ]:         11 :         if (!wq_dev)
    5645                 :            :                 return -ENOMEM;
    5646                 :            : 
    5647                 :         11 :         wq_dev->wq = wq;
    5648                 :         11 :         wq_dev->dev.bus = &wq_subsys;
    5649                 :         11 :         wq_dev->dev.release = wq_device_release;
    5650                 :         11 :         dev_set_name(&wq_dev->dev, "%s", wq->name);
    5651                 :            : 
    5652                 :            :         /*
    5653                 :            :          * unbound_attrs are created separately.  Suppress uevent until
    5654                 :            :          * everything is ready.
    5655                 :            :          */
    5656                 :         11 :         dev_set_uevent_suppress(&wq_dev->dev, true);
    5657                 :            : 
    5658                 :         11 :         ret = device_register(&wq_dev->dev);
    5659         [ -  + ]:         11 :         if (ret) {
    5660                 :          0 :                 put_device(&wq_dev->dev);
    5661                 :          0 :                 wq->wq_dev = NULL;
    5662                 :          0 :                 return ret;
    5663                 :            :         }
    5664                 :            : 
    5665         [ +  - ]:         11 :         if (wq->flags & WQ_UNBOUND) {
    5666                 :            :                 struct device_attribute *attr;
    5667                 :            : 
    5668         [ +  + ]:         55 :                 for (attr = wq_sysfs_unbound_attrs; attr->attr.name; attr++) {
    5669                 :         44 :                         ret = device_create_file(&wq_dev->dev, attr);
    5670         [ -  + ]:         44 :                         if (ret) {
    5671                 :          0 :                                 device_unregister(&wq_dev->dev);
    5672                 :          0 :                                 wq->wq_dev = NULL;
    5673                 :          0 :                                 return ret;
    5674                 :            :                         }
    5675                 :            :                 }
    5676                 :            :         }
    5677                 :            : 
    5678                 :         11 :         dev_set_uevent_suppress(&wq_dev->dev, false);
    5679                 :         11 :         kobject_uevent(&wq_dev->dev.kobj, KOBJ_ADD);
    5680                 :         11 :         return 0;
    5681                 :            : }
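
As the kernel-doc above notes, direct registration is only for callers that want to apply attributes before the workqueue becomes visible. A sketch of that pattern, with a hypothetical workqueue name and nice value, and assuming the attrs helpers declared in linux/workqueue.h are available to the (built-in) caller:

#include <linux/init.h>
#include <linux/workqueue.h>

static struct workqueue_struct *tuned_wq;

static int __init tuned_wq_setup(void)
{
	struct workqueue_attrs *attrs;
	int ret;

	/* WQ_SYSFS is deliberately not passed; register manually below */
	tuned_wq = alloc_workqueue("example_tuned", WQ_UNBOUND, 0);
	if (!tuned_wq)
		return -ENOMEM;

	attrs = alloc_workqueue_attrs();
	if (!attrs) {
		ret = -ENOMEM;
		goto err;
	}
	attrs->nice = -5;	/* hypothetical tuning */
	ret = apply_workqueue_attrs(tuned_wq, attrs);
	free_workqueue_attrs(attrs);
	if (ret)
		goto err;

	/* only now expose it; userland cannot have raced with the tuning */
	ret = workqueue_sysfs_register(tuned_wq);
	if (ret)
		goto err;
	return 0;

err:
	destroy_workqueue(tuned_wq);
	return ret;
}
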
    5682                 :            : 
    5683                 :            : /**
    5684                 :            :  * workqueue_sysfs_unregister - undo workqueue_sysfs_register()
    5685                 :            :  * @wq: the workqueue to unregister
    5686                 :            :  *
    5687                 :            :  * If @wq is registered to sysfs by workqueue_sysfs_register(), unregister it.
    5688                 :            :  */
    5689                 :         11 : static void workqueue_sysfs_unregister(struct workqueue_struct *wq)
    5690                 :            : {
    5691                 :         11 :         struct wq_device *wq_dev = wq->wq_dev;
    5692                 :            : 
    5693         [ -  + ]:         11 :         if (!wq->wq_dev)
    5694                 :            :                 return;
    5695                 :            : 
    5696                 :          0 :         wq->wq_dev = NULL;
    5697                 :          0 :         device_unregister(&wq_dev->dev);
    5698                 :            : }
    5699                 :            : #else   /* CONFIG_SYSFS */
    5700                 :            : static void workqueue_sysfs_unregister(struct workqueue_struct *wq)     { }
    5701                 :            : #endif  /* CONFIG_SYSFS */
    5702                 :            : 
    5703                 :            : /*
    5704                 :            :  * Workqueue watchdog.
    5705                 :            :  *
    5706                 :            :  * Stalls may be caused by various bugs - a missing WQ_MEM_RECLAIM, an illegal
    5707                 :            :  * flush dependency, a concurrency-managed work item which stays RUNNING
    5708                 :            :  * indefinitely.  Workqueue stalls can be very difficult to debug as the
    5709                 :            :  * usual warning mechanisms don't trigger and internal workqueue state is
    5710                 :            :  * largely opaque.
    5711                 :            :  *
    5712                 :            :  * The workqueue watchdog monitors all worker pools periodically and dumps
    5713                 :            :  * their state if some pools fail to make forward progress for a while, where
    5714                 :            :  * forward progress is defined as the first item on ->worklist changing.
    5715                 :            :  *
    5716                 :            :  * This mechanism is controlled through the kernel parameter
    5717                 :            :  * "workqueue.watchdog_thresh" which can be updated at runtime through the
    5718                 :            :  * corresponding sysfs parameter file.
    5719                 :            :  */
    5720                 :            : #ifdef CONFIG_WQ_WATCHDOG
    5721                 :            : 
    5722                 :            : static unsigned long wq_watchdog_thresh = 30;
    5723                 :            : static struct timer_list wq_watchdog_timer;
    5724                 :            : 
    5725                 :            : static unsigned long wq_watchdog_touched = INITIAL_JIFFIES;
    5726                 :            : static DEFINE_PER_CPU(unsigned long, wq_watchdog_touched_cpu) = INITIAL_JIFFIES;
    5727                 :            : 
    5728                 :            : static void wq_watchdog_reset_touched(void)
    5729                 :            : {
    5730                 :            :         int cpu;
    5731                 :            : 
    5732                 :            :         wq_watchdog_touched = jiffies;
    5733                 :            :         for_each_possible_cpu(cpu)
    5734                 :            :                 per_cpu(wq_watchdog_touched_cpu, cpu) = jiffies;
    5735                 :            : }
    5736                 :            : 
    5737                 :            : static void wq_watchdog_timer_fn(struct timer_list *unused)
    5738                 :            : {
    5739                 :            :         unsigned long thresh = READ_ONCE(wq_watchdog_thresh) * HZ;
    5740                 :            :         bool lockup_detected = false;
    5741                 :            :         struct worker_pool *pool;
    5742                 :            :         int pi;
    5743                 :            : 
    5744                 :            :         if (!thresh)
    5745                 :            :                 return;
    5746                 :            : 
    5747                 :            :         rcu_read_lock();
    5748                 :            : 
    5749                 :            :         for_each_pool(pool, pi) {
    5750                 :            :                 unsigned long pool_ts, touched, ts;
    5751                 :            : 
    5752                 :            :                 if (list_empty(&pool->worklist))
    5753                 :            :                         continue;
    5754                 :            : 
    5755                 :            :                 /* get the latest of pool and touched timestamps */
    5756                 :            :                 pool_ts = READ_ONCE(pool->watchdog_ts);
    5757                 :            :                 touched = READ_ONCE(wq_watchdog_touched);
    5758                 :            : 
    5759                 :            :                 if (time_after(pool_ts, touched))
    5760                 :            :                         ts = pool_ts;
    5761                 :            :                 else
    5762                 :            :                         ts = touched;
    5763                 :            : 
    5764                 :            :                 if (pool->cpu >= 0) {
    5765                 :            :                         unsigned long cpu_touched =
    5766                 :            :                                 READ_ONCE(per_cpu(wq_watchdog_touched_cpu,
    5767                 :            :                                                   pool->cpu));
    5768                 :            :                         if (time_after(cpu_touched, ts))
    5769                 :            :                                 ts = cpu_touched;
    5770                 :            :                 }
    5771                 :            : 
    5772                 :            :                 /* did we stall? */
    5773                 :            :                 if (time_after(jiffies, ts + thresh)) {
    5774                 :            :                         lockup_detected = true;
    5775                 :            :                         pr_emerg("BUG: workqueue lockup - pool");
    5776                 :            :                         pr_cont_pool_info(pool);
    5777                 :            :                         pr_cont(" stuck for %us!\n",
    5778                 :            :                                 jiffies_to_msecs(jiffies - pool_ts) / 1000);
    5779                 :            :                 }
    5780                 :            :         }
    5781                 :            : 
    5782                 :            :         rcu_read_unlock();
    5783                 :            : 
    5784                 :            :         if (lockup_detected)
    5785                 :            :                 show_workqueue_state();
    5786                 :            : 
    5787                 :            :         wq_watchdog_reset_touched();
    5788                 :            :         mod_timer(&wq_watchdog_timer, jiffies + thresh);
    5789                 :            : }
    5790                 :            : 
    5791                 :            : notrace void wq_watchdog_touch(int cpu)
    5792                 :            : {
    5793                 :            :         if (cpu >= 0)
    5794                 :            :                 per_cpu(wq_watchdog_touched_cpu, cpu) = jiffies;
    5795                 :            :         else
    5796                 :            :                 wq_watchdog_touched = jiffies;
    5797                 :            : }
    5798                 :            : 
    5799                 :            : static void wq_watchdog_set_thresh(unsigned long thresh)
    5800                 :            : {
    5801                 :            :         wq_watchdog_thresh = 0;
    5802                 :            :         del_timer_sync(&wq_watchdog_timer);
    5803                 :            : 
    5804                 :            :         if (thresh) {
    5805                 :            :                 wq_watchdog_thresh = thresh;
    5806                 :            :                 wq_watchdog_reset_touched();
    5807                 :            :                 mod_timer(&wq_watchdog_timer, jiffies + thresh * HZ);
    5808                 :            :         }
    5809                 :            : }
    5810                 :            : 
    5811                 :            : static int wq_watchdog_param_set_thresh(const char *val,
    5812                 :            :                                         const struct kernel_param *kp)
    5813                 :            : {
    5814                 :            :         unsigned long thresh;
    5815                 :            :         int ret;
    5816                 :            : 
    5817                 :            :         ret = kstrtoul(val, 0, &thresh);
    5818                 :            :         if (ret)
    5819                 :            :                 return ret;
    5820                 :            : 
    5821                 :            :         if (system_wq)
    5822                 :            :                 wq_watchdog_set_thresh(thresh);
    5823                 :            :         else
    5824                 :            :                 wq_watchdog_thresh = thresh;
    5825                 :            : 
    5826                 :            :         return 0;
    5827                 :            : }
    5828                 :            : 
    5829                 :            : static const struct kernel_param_ops wq_watchdog_thresh_ops = {
    5830                 :            :         .set    = wq_watchdog_param_set_thresh,
    5831                 :            :         .get    = param_get_ulong,
    5832                 :            : };
    5833                 :            : 
    5834                 :            : module_param_cb(watchdog_thresh, &wq_watchdog_thresh_ops, &wq_watchdog_thresh,
    5835                 :            :                 0644);
    5836                 :            : 
    5837                 :            : static void wq_watchdog_init(void)
    5838                 :            : {
    5839                 :            :         timer_setup(&wq_watchdog_timer, wq_watchdog_timer_fn, TIMER_DEFERRABLE);
    5840                 :            :         wq_watchdog_set_thresh(wq_watchdog_thresh);
    5841                 :            : }
    5842                 :            : 
    5843                 :            : #else   /* CONFIG_WQ_WATCHDOG */
    5844                 :            : 
    5845                 :         11 : static inline void wq_watchdog_init(void) { }
    5846                 :            : 
    5847                 :            : #endif  /* CONFIG_WQ_WATCHDOG */
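
For callers on the other side of this mechanism: a work item that is legitimately busy for a long time can avoid being reported as a stall by touching the watchdogs periodically. A sketch with a hypothetical work function and loop bound; touch_softlockup_watchdog() also forwards to wq_watchdog_touch(), and the threshold itself can be changed at runtime through the watchdog_thresh module parameter registered above (when CONFIG_WQ_WATCHDOG is enabled).

#include <linux/nmi.h>
#include <linux/sched.h>
#include <linux/workqueue.h>

static void example_long_work_fn(struct work_struct *work)
{
	int i;

	for (i = 0; i < 1000000; i++) {
		/* ... genuinely long-running processing per iteration ... */
		if ((i % 10000) == 0) {
			cond_resched();			/* let other tasks run */
			touch_softlockup_watchdog();	/* also touches the wq watchdog */
		}
	}
}
static DECLARE_WORK(example_long_work, example_long_work_fn);
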
    5848                 :            : 
    5849                 :         11 : static void __init wq_numa_init(void)
    5850                 :            : {
    5851                 :         11 :         cpumask_var_t *tbl;
    5852                 :         11 :         int node, cpu;
    5853                 :            : 
    5854         [ -  + ]:         11 :         if (num_possible_nodes() <= 1)
    5855                 :            :                 return;
    5856                 :            : 
    5857         [ #  # ]:          0 :         if (wq_disable_numa) {
    5858                 :          0 :                 pr_info("workqueue: NUMA affinity support disabled\n");
    5859                 :          0 :                 return;
    5860                 :            :         }
    5861                 :            : 
    5862                 :          0 :         wq_update_unbound_numa_attrs_buf = alloc_workqueue_attrs();
    5863         [ #  # ]:          0 :         BUG_ON(!wq_update_unbound_numa_attrs_buf);
    5864                 :            : 
    5865                 :            :         /*
    5866                 :            :          * We want per-node masks of possible CPUs, which aren't readily
    5867                 :            :          * available.  Build them from cpu_to_node(), which should have been
    5868                 :            :          * fully initialized by now.
    5869                 :            :          */
    5870                 :          0 :         tbl = kcalloc(nr_node_ids, sizeof(tbl[0]), GFP_KERNEL);
    5871         [ #  # ]:          0 :         BUG_ON(!tbl);
    5872                 :            : 
    5873         [ #  # ]:          0 :         for_each_node(node)
    5874                 :          0 :                 BUG_ON(!zalloc_cpumask_var_node(&tbl[node], GFP_KERNEL,
    5875                 :            :                                 node_online(node) ? node : NUMA_NO_NODE));
    5876                 :            : 
    5877         [ #  # ]:          0 :         for_each_possible_cpu(cpu) {
    5878         [ #  # ]:          0 :                 node = cpu_to_node(cpu);
    5879   [ #  #  #  # ]:          0 :                 if (WARN_ON(node == NUMA_NO_NODE)) {
    5880                 :          0 :                         pr_warn("workqueue: NUMA node mapping not available for cpu%d, disabling NUMA support\n", cpu);
    5881                 :            :                         /* happens iff arch is bonkers, let's just proceed */
    5882                 :          0 :                         return;
    5883                 :            :                 }
    5884                 :          0 :                 cpumask_set_cpu(cpu, tbl[node]);
    5885                 :            :         }
    5886                 :            : 
    5887                 :          0 :         wq_numa_possible_cpumask = tbl;
    5888                 :          0 :         wq_numa_enabled = true;
    5889                 :            : }
    5890                 :            : 
    5891                 :            : /**
    5892                 :            :  * workqueue_init_early - early init for workqueue subsystem
    5893                 :            :  *
    5894                 :            :  * This is the first half of the two-stage workqueue subsystem initialization
    5895                 :            :  * and is invoked as soon as the bare basics - memory allocation, cpumasks and
    5896                 :            :  * idr - are up.  It sets up all the data structures and system workqueues
    5897                 :            :  * and allows early boot code to create workqueues and queue/cancel work
    5898                 :            :  * items.  Actual work item execution starts only after kthreads can be
    5899                 :            :  * created and scheduled right before early initcalls.
    5900                 :            :  */
    5901                 :         11 : int __init workqueue_init_early(void)
    5902                 :            : {
    5903                 :         11 :         int std_nice[NR_STD_WORKER_POOLS] = { 0, HIGHPRI_NICE_LEVEL };
    5904                 :         11 :         int hk_flags = HK_FLAG_DOMAIN | HK_FLAG_WQ;
    5905                 :         11 :         int i, cpu;
    5906                 :            : 
    5907                 :         11 :         WARN_ON(__alignof__(struct pool_workqueue) < __alignof__(long long));
    5908                 :            : 
    5909                 :         11 :         BUG_ON(!alloc_cpumask_var(&wq_unbound_cpumask, GFP_KERNEL));
    5910                 :         11 :         cpumask_copy(wq_unbound_cpumask, housekeeping_cpumask(hk_flags));
    5911                 :            : 
    5912                 :         11 :         pwq_cache = KMEM_CACHE(pool_workqueue, SLAB_PANIC);
    5913                 :            : 
    5914                 :            :         /* initialize CPU pools */
    5915         [ +  + ]:         22 :         for_each_possible_cpu(cpu) {
    5916                 :         11 :                 struct worker_pool *pool;
    5917                 :            : 
    5918                 :         11 :                 i = 0;
    5919         [ +  + ]:         33 :                 for_each_cpu_worker_pool(pool, cpu) {
    5920         [ -  + ]:         22 :                         BUG_ON(init_worker_pool(pool));
    5921                 :         22 :                         pool->cpu = cpu;
    5922                 :         22 :                         cpumask_copy(pool->attrs->cpumask, cpumask_of(cpu));
    5923                 :         22 :                         pool->attrs->nice = std_nice[i++];
    5924                 :         22 :                         pool->node = cpu_to_node(cpu);
    5925                 :            : 
    5926                 :            :                         /* alloc pool ID */
    5927                 :         22 :                         mutex_lock(&wq_pool_mutex);
    5928         [ -  + ]:         44 :                         BUG_ON(worker_pool_assign_id(pool));
    5929                 :         22 :                         mutex_unlock(&wq_pool_mutex);
    5930                 :            :                 }
    5931                 :            :         }
    5932                 :            : 
    5933                 :            :         /* create default unbound and ordered wq attrs */
    5934         [ +  + ]:         33 :         for (i = 0; i < NR_STD_WORKER_POOLS; i++) {
    5935                 :         22 :                 struct workqueue_attrs *attrs;
    5936                 :            : 
    5937         [ -  + ]:         22 :                 BUG_ON(!(attrs = alloc_workqueue_attrs()));
    5938                 :         22 :                 attrs->nice = std_nice[i];
    5939                 :         22 :                 unbound_std_wq_attrs[i] = attrs;
    5940                 :            : 
    5941                 :            :                 /*
    5942                 :            :                  * An ordered wq should have only one pwq as ordering is
    5943                 :            :                  * guaranteed by max_active which is enforced by pwqs.
    5944                 :            :                  * Turn off NUMA so that dfl_pwq is used for all nodes.
    5945                 :            :                  */
    5946         [ -  + ]:         22 :                 BUG_ON(!(attrs = alloc_workqueue_attrs()));
    5947                 :         22 :                 attrs->nice = std_nice[i];
    5948                 :         22 :                 attrs->no_numa = true;
    5949                 :         22 :                 ordered_wq_attrs[i] = attrs;
    5950                 :            :         }
    5951                 :            : 
    5952                 :         11 :         system_wq = alloc_workqueue("events", 0, 0);
    5953                 :         11 :         system_highpri_wq = alloc_workqueue("events_highpri", WQ_HIGHPRI, 0);
    5954                 :         11 :         system_long_wq = alloc_workqueue("events_long", 0, 0);
    5955                 :         22 :         system_unbound_wq = alloc_workqueue("events_unbound", WQ_UNBOUND,
    5956                 :         11 :                                             WQ_UNBOUND_MAX_ACTIVE);
    5957                 :         11 :         system_freezable_wq = alloc_workqueue("events_freezable",
    5958                 :            :                                               WQ_FREEZABLE, 0);
    5959                 :         11 :         system_power_efficient_wq = alloc_workqueue("events_power_efficient",
    5960                 :            :                                               WQ_POWER_EFFICIENT, 0);
    5961                 :         11 :         system_freezable_power_efficient_wq = alloc_workqueue("events_freezable_power_efficient",
    5962                 :            :                                               WQ_FREEZABLE | WQ_POWER_EFFICIENT,
    5963                 :            :                                               0);
    5964   [ +  -  +  -  :         11 :         BUG_ON(!system_wq || !system_highpri_wq || !system_long_wq ||
          +  -  +  -  +  
             -  +  -  -  
                      + ]
    5965                 :            :                !system_unbound_wq || !system_freezable_wq ||
    5966                 :            :                !system_power_efficient_wq ||
    5967                 :            :                !system_freezable_power_efficient_wq);
    5968                 :            : 
    5969                 :         11 :         return 0;
    5970                 :            : }
    5971                 :            : 
    5972                 :            : /**
    5973                 :            :  * workqueue_init - bring workqueue subsystem fully online
    5974                 :            :  *
    5975                 :            :  * This is the latter half of the two-stage workqueue subsystem initialization
    5976                 :            :  * and is invoked as soon as kthreads can be created and scheduled.
    5977                 :            :  * Workqueues have been created and work items queued on them, but there
    5978                 :            :  * are no kworkers executing the work items yet.  Populate the worker pools
    5979                 :            :  * with the initial workers and enable future kworker creations.
    5980                 :            :  */
    5981                 :         11 : int __init workqueue_init(void)
    5982                 :            : {
    5983                 :         11 :         struct workqueue_struct *wq;
    5984                 :         11 :         struct worker_pool *pool;
    5985                 :         11 :         int cpu, bkt;
    5986                 :            : 
    5987                 :            :         /*
    5988                 :            :          * It'd be simpler to initialize NUMA in workqueue_init_early(), but
    5989                 :            :          * the CPU-to-node mapping may not be available that early on some
    5990                 :            :          * archs such as power and arm64.  Because the per-cpu pools created
    5991                 :            :          * earlier could be missing their node hint and unbound pools their
    5992                 :            :          * NUMA affinity, fix them up here.
    5993                 :            :          *
    5994                 :            :          * Also, while iterating workqueues, create rescuers if requested.
    5995                 :            :          */
    5996                 :         11 :         wq_numa_init();
    5997                 :            : 
    5998                 :         11 :         mutex_lock(&wq_pool_mutex);
    5999                 :            : 
    6000         [ +  + ]:         33 :         for_each_possible_cpu(cpu) {
    6001         [ +  + ]:         33 :                 for_each_cpu_worker_pool(pool, cpu) {
    6002                 :         22 :                         pool->node = cpu_to_node(cpu);
    6003                 :            :                 }
    6004                 :            :         }
    6005                 :            : 
    6006         [ +  + ]:        110 :         list_for_each_entry(wq, &workqueues, list) {
    6007                 :         99 :                 wq_update_unbound_numa(wq, smp_processor_id(), true);
    6008         [ -  + ]:         99 :                 WARN(init_rescuer(wq),
    6009                 :            :                      "workqueue: failed to create early rescuer for %s",
    6010                 :            :                      wq->name);
    6011                 :            :         }
    6012                 :            : 
    6013                 :         11 :         mutex_unlock(&wq_pool_mutex);
    6014                 :            : 
    6015                 :            :         /* create the initial workers */
    6016         [ +  + ]:         33 :         for_each_online_cpu(cpu) {
    6017         [ +  + ]:         33 :                 for_each_cpu_worker_pool(pool, cpu) {
    6018                 :         22 :                         pool->flags &= ~POOL_DISASSOCIATED;
    6019         [ -  + ]:         22 :                         BUG_ON(!create_worker(pool));
    6020                 :            :                 }
    6021                 :            :         }
    6022                 :            : 
    6023   [ +  +  +  +  :        726 :         hash_for_each(unbound_pool_hash, bkt, pool, hash_node)
                   +  + ]
    6024   [ -  +  -  + ]:         11 :                 BUG_ON(!create_worker(pool));
    6025                 :            : 
    6026                 :         11 :         wq_online = true;
    6027                 :         11 :         wq_watchdog_init();
    6028                 :            : 
    6029                 :         11 :         return 0;
    6030                 :            : }
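
The two-stage bring-up documented above means work can be queued remarkably early. A sketch of what that allows, with a hypothetical work function and setup hook: code running after workqueue_init_early() but before workqueue_init() may already queue items; they simply sit on the pools' worklists until the initial kworkers created here start executing them.

#include <linux/init.h>
#include <linux/printk.h>
#include <linux/workqueue.h>

static void example_early_work_fn(struct work_struct *work)
{
	pr_info("executed only after workqueue_init() created kworkers\n");
}
static DECLARE_WORK(example_early_work, example_early_work_fn);

static void __init example_early_setup(void)
{
	/* called between workqueue_init_early() and workqueue_init():
	 * queueing succeeds now, execution is deferred until kworkers exist */
	schedule_work(&example_early_work);
}
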

Generated by: LCOV version 1.14