LCOV - code coverage report
Current view: top level - kernel - kthread.c (source / functions)
Test: combined.info
Date: 2022-03-28 13:20:08

              Hit    Total    Coverage
Lines:        155      416      37.3 %
Functions:     18       42      42.9 %
Branches:      33      172      19.2 %

           Branch data     Line data    Source code
       1                 :            : // SPDX-License-Identifier: GPL-2.0-only
       2                 :            : /* Kernel thread helper functions.
       3                 :            :  *   Copyright (C) 2004 IBM Corporation, Rusty Russell.
       4                 :            :  *
       5                 :            :  * Creation is done via kthreadd, so that we get a clean environment
       6                 :            :  * even if we're invoked from userspace (think modprobe, hotplug cpu,
       7                 :            :  * etc.).
       8                 :            :  */
       9                 :            : #include <uapi/linux/sched/types.h>
      10                 :            : #include <linux/sched.h>
      11                 :            : #include <linux/sched/task.h>
      12                 :            : #include <linux/kthread.h>
      13                 :            : #include <linux/completion.h>
      14                 :            : #include <linux/err.h>
      15                 :            : #include <linux/cgroup.h>
      16                 :            : #include <linux/cpuset.h>
      17                 :            : #include <linux/unistd.h>
      18                 :            : #include <linux/file.h>
      19                 :            : #include <linux/export.h>
      20                 :            : #include <linux/mutex.h>
      21                 :            : #include <linux/slab.h>
      22                 :            : #include <linux/freezer.h>
      23                 :            : #include <linux/ptrace.h>
      24                 :            : #include <linux/uaccess.h>
      25                 :            : #include <linux/numa.h>
      26                 :            : #include <trace/events/sched.h>
      27                 :            : 
      28                 :            : static DEFINE_SPINLOCK(kthread_create_lock);
      29                 :            : static LIST_HEAD(kthread_create_list);
      30                 :            : struct task_struct *kthreadd_task;
      31                 :            : 
      32                 :            : struct kthread_create_info
      33                 :            : {
      34                 :            :         /* Information passed to kthread() from kthreadd. */
      35                 :            :         int (*threadfn)(void *data);
      36                 :            :         void *data;
      37                 :            :         int node;
      38                 :            : 
      39                 :            :         /* Result passed back to kthread_create() from kthreadd. */
      40                 :            :         struct task_struct *result;
      41                 :            :         struct completion *done;
      42                 :            : 
      43                 :            :         struct list_head list;
      44                 :            : };
      45                 :            : 
      46                 :            : struct kthread {
      47                 :            :         unsigned long flags;
      48                 :            :         unsigned int cpu;
      49                 :            :         void *data;
      50                 :            :         struct completion parked;
      51                 :            :         struct completion exited;
      52                 :            : #ifdef CONFIG_BLK_CGROUP
      53                 :            :         struct cgroup_subsys_state *blkcg_css;
      54                 :            : #endif
      55                 :            : };
      56                 :            : 
      57                 :            : enum KTHREAD_BITS {
      58                 :            :         KTHREAD_IS_PER_CPU = 0,
      59                 :            :         KTHREAD_SHOULD_STOP,
      60                 :            :         KTHREAD_SHOULD_PARK,
      61                 :            : };
      62                 :            : 
      63                 :       2067 : static inline void set_kthread_struct(void *kthread)
      64                 :            : {
      65                 :            :         /*
      66                 :            :          * We abuse ->set_child_tid to avoid the new member and because it
       67                 :            :          * can't be wrongly copied by copy_process(). We also rely on the
       68                 :            :          * fact that the caller can't exec, so PF_KTHREAD can't be cleared.
      69                 :            :          */
      70                 :       2067 :         current->set_child_tid = (__force void __user *)kthread;
      71                 :            : }
      72                 :            : 
      73                 :     196593 : static inline struct kthread *to_kthread(struct task_struct *k)
      74                 :            : {
      75   [ -  -  -  + ]:      68831 :         WARN_ON(!(k->flags & PF_KTHREAD));
      76                 :     196593 :         return (__force void *)k->set_child_tid;
      77                 :            : }
      78                 :            : 
      79                 :       1530 : void free_kthread_struct(struct task_struct *k)
      80                 :            : {
      81                 :       1530 :         struct kthread *kthread;
      82                 :            : 
      83                 :            :         /*
      84                 :            :          * Can be NULL if this kthread was created by kernel_thread()
      85                 :            :          * or if kmalloc() in kthread() failed.
      86                 :            :          */
      87         [ -  + ]:       1530 :         kthread = to_kthread(k);
      88                 :            : #ifdef CONFIG_BLK_CGROUP
      89                 :            :         WARN_ON_ONCE(kthread && kthread->blkcg_css);
      90                 :            : #endif
      91                 :       1530 :         kfree(kthread);
      92                 :       1530 : }
      93                 :            : 
      94                 :            : /**
      95                 :            :  * kthread_should_stop - should this kthread return now?
      96                 :            :  *
      97                 :            :  * When someone calls kthread_stop() on your kthread, it will be woken
      98                 :            :  * and this will return true.  You should then return, and your return
      99                 :            :  * value will be passed through to kthread_stop().
     100                 :            :  */
     101                 :      68831 : bool kthread_should_stop(void)
     102                 :            : {
     103         [ -  + ]:      68831 :         return test_bit(KTHREAD_SHOULD_STOP, &to_kthread(current)->flags);
     104                 :            : }
     105                 :            : EXPORT_SYMBOL(kthread_should_stop);
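
A minimal usage sketch (not part of the measured file; the function and its
name are hypothetical): the canonical loop shape that lets kthread_stop()
terminate a thread cleanly.

    #include <linux/kthread.h>
    #include <linux/delay.h>

    /* Hypothetical threadfn: run until someone calls kthread_stop(). */
    static int example_thread_fn(void *data)
    {
            while (!kthread_should_stop()) {
                    /* ... do one unit of work ... */
                    msleep_interruptible(100);
            }
            return 0;       /* becomes kthread_stop()'s return value */
    }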
     106                 :            : 
     107                 :      71898 : bool __kthread_should_park(struct task_struct *k)
     108                 :            : {
     109         [ -  + ]:      71898 :         return test_bit(KTHREAD_SHOULD_PARK, &to_kthread(k)->flags);
     110                 :            : }
     111                 :            : EXPORT_SYMBOL_GPL(__kthread_should_park);
     112                 :            : 
     113                 :            : /**
     114                 :            :  * kthread_should_park - should this kthread park now?
     115                 :            :  *
     116                 :            :  * When someone calls kthread_park() on your kthread, it will be woken
     117                 :            :  * and this will return true.  You should then do the necessary
      118                 :            :  * cleanup and call kthread_parkme().
     119                 :            :  *
     120                 :            :  * Similar to kthread_should_stop(), but this keeps the thread alive
     121                 :            :  * and in a park position. kthread_unpark() "restarts" the thread and
     122                 :            :  * calls the thread function again.
     123                 :            :  */
     124                 :      67301 : bool kthread_should_park(void)
     125                 :            : {
     126                 :      67301 :         return __kthread_should_park(current);
     127                 :            : }
     128                 :            : EXPORT_SYMBOL_GPL(kthread_should_park);
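
A sketch of the park handshake from the thread's side (hypothetical name),
combining kthread_should_park() with kthread_parkme() as described above:

    /* Hypothetical threadfn that honours both stop and park requests. */
    static int parkable_thread_fn(void *data)
    {
            while (!kthread_should_stop()) {
                    if (kthread_should_park())
                            kthread_parkme(); /* sleep in TASK_PARKED until unparked */
                    /* ... per-iteration work ... */
            }
            return 0;
    }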
     129                 :            : 
     130                 :            : /**
     131                 :            :  * kthread_freezable_should_stop - should this freezable kthread return now?
     132                 :            :  * @was_frozen: optional out parameter, indicates whether %current was frozen
     133                 :            :  *
      134                 :            :  * kthread_should_stop() for freezable kthreads, which will enter the
      135                 :            :  * refrigerator if necessary.  This function is safe from kthread_stop() /
     136                 :            :  * freezer deadlock and freezable kthreads should use this function instead
     137                 :            :  * of calling try_to_freeze() directly.
     138                 :            :  */
     139                 :          0 : bool kthread_freezable_should_stop(bool *was_frozen)
     140                 :            : {
     141                 :          0 :         bool frozen = false;
     142                 :            : 
     143                 :          0 :         might_sleep();
     144                 :            : 
     145         [ #  # ]:          0 :         if (unlikely(freezing(current)))
     146                 :          0 :                 frozen = __refrigerator(true);
     147                 :            : 
     148         [ #  # ]:          0 :         if (was_frozen)
     149                 :          0 :                 *was_frozen = frozen;
     150                 :            : 
     151                 :          0 :         return kthread_should_stop();
     152                 :            : }
     153                 :            : EXPORT_SYMBOL_GPL(kthread_freezable_should_stop);
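
A sketch for a freezable thread (hypothetical name); set_freezable() opts the
kthread in to the freezer, and the refrigerator is entered inside the call:

    #include <linux/freezer.h>
    #include <linux/kthread.h>

    static int freezable_thread_fn(void *data)
    {
            bool was_frozen;

            set_freezable();        /* kthreads inherit PF_NOFREEZE by default */
            while (!kthread_freezable_should_stop(&was_frozen)) {
                    if (was_frozen)
                            pr_debug("thawed, resuming\n");
                    /* ... work ... */
            }
            return 0;
    }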
     154                 :            : 
     155                 :            : /**
     156                 :            :  * kthread_data - return data value specified on kthread creation
     157                 :            :  * @task: kthread task in question
     158                 :            :  *
     159                 :            :  * Return the data value specified when kthread @task was created.
     160                 :            :  * The caller is responsible for ensuring the validity of @task when
     161                 :            :  * calling this function.
     162                 :            :  */
     163                 :      53926 : void *kthread_data(struct task_struct *task)
     164                 :            : {
     165         [ -  + ]:      53926 :         return to_kthread(task)->data;
     166                 :            : }
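
A sketch of how @data round-trips (struct and names hypothetical; reuses
example_thread_fn from the earlier sketch):

    struct example_ctx { int id; };     /* hypothetical per-thread context */

    static void example_create(void)
    {
            static struct example_ctx ctx = { .id = 1 };
            struct task_struct *t;

            t = kthread_run(example_thread_fn, &ctx, "example/%d", ctx.id);
            if (IS_ERR(t))
                    return;
            /* any caller holding a valid reference may later do: */
            WARN_ON(kthread_data(t) != &ctx);
    }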
     167                 :            : 
     168                 :            : /**
     169                 :            :  * kthread_probe_data - speculative version of kthread_data()
     170                 :            :  * @task: possible kthread task in question
     171                 :            :  *
     172                 :            :  * @task could be a kthread task.  Return the data value specified when it
     173                 :            :  * was created if accessible.  If @task isn't a kthread task or its data is
     174                 :            :  * inaccessible for any reason, %NULL is returned.  This function requires
     175                 :            :  * that @task itself is safe to dereference.
     176                 :            :  */
     177                 :         48 : void *kthread_probe_data(struct task_struct *task)
     178                 :            : {
     179         [ -  + ]:         48 :         struct kthread *kthread = to_kthread(task);
     180                 :         48 :         void *data = NULL;
     181                 :            : 
     182                 :         48 :         probe_kernel_read(&data, &kthread->data, sizeof(data));
     183                 :         48 :         return data;
     184                 :            : }
     185                 :            : 
     186                 :       2067 : static void __kthread_parkme(struct kthread *self)
     187                 :            : {
     188                 :       2247 :         for (;;) {
     189                 :            :                 /*
     190                 :            :                  * TASK_PARKED is a special state; we must serialize against
     191                 :            :                  * possible pending wakeups to avoid store-store collisions on
     192                 :            :                  * task->state.
     193                 :            :                  *
     194                 :            :                  * Such a collision might possibly result in the task state
      195                 :            :          * changing from TASK_PARKED and us failing the
     196                 :            :                  * wait_task_inactive() in kthread_park().
     197                 :            :                  */
     198                 :       2157 :                 set_special_state(TASK_PARKED);
     199         [ +  + ]:       2157 :                 if (!test_bit(KTHREAD_SHOULD_PARK, &self->flags))
     200                 :            :                         break;
     201                 :            : 
     202                 :         90 :                 complete(&self->parked);
     203                 :         90 :                 schedule();
     204                 :            :         }
     205                 :       2067 :         __set_current_state(TASK_RUNNING);
     206                 :       2067 : }
     207                 :            : 
     208                 :          0 : void kthread_parkme(void)
     209                 :            : {
     210         [ #  # ]:          0 :         __kthread_parkme(to_kthread(current));
     211                 :          0 : }
     212                 :            : EXPORT_SYMBOL_GPL(kthread_parkme);
     213                 :            : 
     214                 :       2067 : static int kthread(void *_create)
     215                 :            : {
     216                 :            :         /* Copy data: it's on kthread's stack */
     217                 :       2067 :         struct kthread_create_info *create = _create;
     218                 :       2067 :         int (*threadfn)(void *data) = create->threadfn;
     219                 :       2067 :         void *data = create->data;
     220                 :       2067 :         struct completion *done;
     221                 :       2067 :         struct kthread *self;
     222                 :       2067 :         int ret;
     223                 :            : 
     224                 :       2067 :         self = kzalloc(sizeof(*self), GFP_KERNEL);
     225                 :       2067 :         set_kthread_struct(self);
     226                 :            : 
     227                 :            :         /* If user was SIGKILLed, I release the structure. */
     228                 :       2067 :         done = xchg(&create->done, NULL);
     229         [ -  + ]:       2067 :         if (!done) {
     230                 :          0 :                 kfree(create);
     231                 :          0 :                 do_exit(-EINTR);
     232                 :            :         }
     233                 :            : 
     234         [ -  + ]:       2067 :         if (!self) {
     235                 :          0 :                 create->result = ERR_PTR(-ENOMEM);
     236                 :          0 :                 complete(done);
     237                 :          0 :                 do_exit(-ENOMEM);
     238                 :            :         }
     239                 :            : 
     240                 :       2067 :         self->data = data;
     241                 :       2067 :         init_completion(&self->exited);
     242                 :       2067 :         init_completion(&self->parked);
     243                 :       2067 :         current->vfork_done = &self->exited;
     244                 :            : 
     245                 :            :         /* OK, tell user we're spawned, wait for stop or wakeup */
     246                 :       2067 :         __set_current_state(TASK_UNINTERRUPTIBLE);
     247                 :       2067 :         create->result = current;
     248                 :       2067 :         complete(done);
     249                 :       2067 :         schedule();
     250                 :            : 
     251                 :       2067 :         ret = -EINTR;
     252         [ +  - ]:       2067 :         if (!test_bit(KTHREAD_SHOULD_STOP, &self->flags)) {
     253                 :       2067 :                 cgroup_kthread_ready();
     254                 :       2067 :                 __kthread_parkme(self);
     255                 :       2067 :                 ret = threadfn(data);
     256                 :            :         }
     257                 :          0 :         do_exit(ret);
     258                 :            : }
     259                 :            : 
      260                 :            : /* called from do_fork() to get node information for the task about to be created */
     261                 :      15273 : int tsk_fork_get_node(struct task_struct *tsk)
     262                 :            : {
     263                 :            : #ifdef CONFIG_NUMA
     264         [ +  + ]:      15273 :         if (tsk == kthreadd_task)
     265                 :       2067 :                 return tsk->pref_node_fork;
     266                 :            : #endif
     267                 :            :         return NUMA_NO_NODE;
     268                 :            : }
     269                 :            : 
     270                 :       2067 : static void create_kthread(struct kthread_create_info *create)
     271                 :            : {
     272                 :       2067 :         int pid;
     273                 :            : 
     274                 :            : #ifdef CONFIG_NUMA
     275                 :       2067 :         current->pref_node_fork = create->node;
     276                 :            : #endif
     277                 :            :         /* We want our own signal handler (we take no signals by default). */
     278                 :       2067 :         pid = kernel_thread(kthread, create, CLONE_FS | CLONE_FILES | SIGCHLD);
     279         [ -  + ]:       2067 :         if (pid < 0) {
     280                 :            :                 /* If user was SIGKILLed, I release the structure. */
     281                 :          0 :                 struct completion *done = xchg(&create->done, NULL);
     282                 :            : 
     283         [ #  # ]:          0 :                 if (!done) {
     284                 :          0 :                         kfree(create);
     285                 :          0 :                         return;
     286                 :            :                 }
     287                 :          0 :                 create->result = ERR_PTR(pid);
     288                 :          0 :                 complete(done);
     289                 :            :         }
     290                 :            : }
     291                 :            : 
     292                 :            : static __printf(4, 0)
     293                 :       2067 : struct task_struct *__kthread_create_on_node(int (*threadfn)(void *data),
     294                 :            :                                                     void *data, int node,
     295                 :            :                                                     const char namefmt[],
     296                 :            :                                                     va_list args)
     297                 :            : {
     298                 :       2067 :         DECLARE_COMPLETION_ONSTACK(done);
     299                 :       2067 :         struct task_struct *task;
     300                 :       2067 :         struct kthread_create_info *create = kmalloc(sizeof(*create),
     301                 :            :                                                      GFP_KERNEL);
     302                 :            : 
     303         [ +  - ]:       2067 :         if (!create)
     304                 :            :                 return ERR_PTR(-ENOMEM);
     305                 :       2067 :         create->threadfn = threadfn;
     306                 :       2067 :         create->data = data;
     307                 :       2067 :         create->node = node;
     308                 :       2067 :         create->done = &done;
     309                 :            : 
     310                 :       2067 :         spin_lock(&kthread_create_lock);
     311                 :       2067 :         list_add_tail(&create->list, &kthread_create_list);
     312                 :       2067 :         spin_unlock(&kthread_create_lock);
     313                 :            : 
     314                 :       2067 :         wake_up_process(kthreadd_task);
     315                 :            :         /*
     316                 :            :          * Wait for completion in killable state, for I might be chosen by
      317                 :            :          * the new kernel thread.
     318                 :            :          * new kernel thread.
     319                 :            :          */
     320         [ -  + ]:       2067 :         if (unlikely(wait_for_completion_killable(&done))) {
     321                 :            :                 /*
     322                 :            :                  * If I was SIGKILLed before kthreadd (or new kernel thread)
     323                 :            :                  * calls complete(), leave the cleanup of this structure to
     324                 :            :                  * that thread.
     325                 :            :                  */
     326         [ #  # ]:          0 :                 if (xchg(&create->done, NULL))
     327                 :            :                         return ERR_PTR(-EINTR);
     328                 :            :                 /*
     329                 :            :                  * kthreadd (or new kernel thread) will call complete()
     330                 :            :                  * shortly.
     331                 :            :                  */
     332                 :          0 :                 wait_for_completion(&done);
     333                 :            :         }
     334                 :       2067 :         task = create->result;
     335         [ +  - ]:       2067 :         if (!IS_ERR(task)) {
     336                 :       2067 :                 static const struct sched_param param = { .sched_priority = 0 };
     337                 :       2067 :                 char name[TASK_COMM_LEN];
     338                 :            : 
     339                 :            :                 /*
     340                 :            :                  * task is already visible to other tasks, so updating
     341                 :            :                  * COMM must be protected.
     342                 :            :                  */
     343                 :       2067 :                 vsnprintf(name, sizeof(name), namefmt, args);
     344                 :       2067 :                 set_task_comm(task, name);
     345                 :            :                 /*
     346                 :            :                  * root may have changed our (kthreadd's) priority or CPU mask.
     347                 :            :                  * The kernel thread should not inherit these properties.
     348                 :            :                  */
     349                 :       2067 :                 sched_setscheduler_nocheck(task, SCHED_NORMAL, &param);
     350                 :       2067 :                 set_cpus_allowed_ptr(task, cpu_all_mask);
     351                 :            :         }
     352                 :       2067 :         kfree(create);
     353                 :       2067 :         return task;
     354                 :            : }
     355                 :            : 
     356                 :            : /**
     357                 :            :  * kthread_create_on_node - create a kthread.
     358                 :            :  * @threadfn: the function to run until signal_pending(current).
     359                 :            :  * @data: data ptr for @threadfn.
     360                 :            :  * @node: task and thread structures for the thread are allocated on this node
     361                 :            :  * @namefmt: printf-style name for the thread.
     362                 :            :  *
     363                 :            :  * Description: This helper function creates and names a kernel
     364                 :            :  * thread.  The thread will be stopped: use wake_up_process() to start
     365                 :            :  * it.  See also kthread_run().  The new thread has SCHED_NORMAL policy and
     366                 :            :  * is affine to all CPUs.
     367                 :            :  *
      368                 :            :  * If the thread is going to be bound to a particular cpu, give its node
      369                 :            :  * in @node to get NUMA affinity for the kthread stack; else give NUMA_NO_NODE.
     370                 :            :  * When woken, the thread will run @threadfn() with @data as its
     371                 :            :  * argument. @threadfn() can either call do_exit() directly if it is a
     372                 :            :  * standalone thread for which no one will call kthread_stop(), or
     373                 :            :  * return when 'kthread_should_stop()' is true (which means
     374                 :            :  * kthread_stop() has been called).  The return value should be zero
     375                 :            :  * or a negative error number; it will be passed to kthread_stop().
     376                 :            :  *
     377                 :            :  * Returns a task_struct or ERR_PTR(-ENOMEM) or ERR_PTR(-EINTR).
     378                 :            :  */
     379                 :       2067 : struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
     380                 :            :                                            void *data, int node,
     381                 :            :                                            const char namefmt[],
     382                 :            :                                            ...)
     383                 :            : {
     384                 :       2067 :         struct task_struct *task;
     385                 :       2067 :         va_list args;
     386                 :            : 
     387                 :       2067 :         va_start(args, namefmt);
     388                 :       2067 :         task = __kthread_create_on_node(threadfn, data, node, namefmt, args);
     389                 :       2067 :         va_end(args);
     390                 :            : 
     391                 :       2067 :         return task;
     392                 :            : }
     393                 :            : EXPORT_SYMBOL(kthread_create_on_node);
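
A create-then-wake sketch following the description above (hypothetical
wrapper, minimal error handling):

    static struct task_struct *example_spawn(void)
    {
            struct task_struct *t;

            t = kthread_create_on_node(example_thread_fn, NULL, NUMA_NO_NODE,
                                       "example/%d", 0);
            if (!IS_ERR(t))
                    wake_up_process(t);  /* the new thread starts out stopped */
            return t;
    }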
     394                 :            : 
     395                 :       1107 : static void __kthread_bind_mask(struct task_struct *p, const struct cpumask *mask, long state)
     396                 :            : {
     397                 :       1107 :         unsigned long flags;
     398                 :            : 
     399         [ -  + ]:       1107 :         if (!wait_task_inactive(p, state)) {
     400                 :          0 :                 WARN_ON(1);
     401                 :          0 :                 return;
     402                 :            :         }
     403                 :            : 
     404                 :            :         /* It's safe because the task is inactive. */
     405                 :       1107 :         raw_spin_lock_irqsave(&p->pi_lock, flags);
     406                 :       1107 :         do_set_cpus_allowed(p, mask);
     407                 :       1107 :         p->flags |= PF_NO_SETAFFINITY;
     408                 :       1107 :         raw_spin_unlock_irqrestore(&p->pi_lock, flags);
     409                 :            : }
     410                 :            : 
     411                 :        180 : static void __kthread_bind(struct task_struct *p, unsigned int cpu, long state)
     412                 :            : {
     413                 :        180 :         __kthread_bind_mask(p, cpumask_of(cpu), state);
     414                 :         90 : }
     415                 :            : 
     416                 :        927 : void kthread_bind_mask(struct task_struct *p, const struct cpumask *mask)
     417                 :            : {
     418                 :        927 :         __kthread_bind_mask(p, mask, TASK_UNINTERRUPTIBLE);
     419                 :        927 : }
     420                 :            : 
     421                 :            : /**
     422                 :            :  * kthread_bind - bind a just-created kthread to a cpu.
     423                 :            :  * @p: thread created by kthread_create().
     424                 :            :  * @cpu: cpu (might not be online, must be possible) for @k to run on.
     425                 :            :  *
     426                 :            :  * Description: This function is equivalent to set_cpus_allowed(),
     427                 :            :  * except that @cpu doesn't need to be online, and the thread must be
     428                 :            :  * stopped (i.e., just returned from kthread_create()).
     429                 :            :  */
     430                 :         90 : void kthread_bind(struct task_struct *p, unsigned int cpu)
     431                 :            : {
     432                 :          0 :         __kthread_bind(p, cpu, TASK_UNINTERRUPTIBLE);
     433                 :          0 : }
     434                 :            : EXPORT_SYMBOL(kthread_bind);
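
A sketch of the required ordering (hypothetical wrapper): the bind must happen
after kthread_create() and before the first wakeup, while the thread is still
inactive:

    static void example_spawn_bound(unsigned int cpu)
    {
            struct task_struct *t;

            t = kthread_create(example_thread_fn, NULL, "bound/%u", cpu);
            if (IS_ERR(t))
                    return;
            kthread_bind(t, cpu);       /* must precede the first wakeup */
            wake_up_process(t);
    }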
     435                 :            : 
     436                 :            : /**
      437                 :            :  * kthread_create_on_cpu - Create a cpu-bound kthread
     438                 :            :  * @threadfn: the function to run until signal_pending(current).
     439                 :            :  * @data: data ptr for @threadfn.
      440                 :            :  * @cpu: The cpu to which the thread should be bound.
     441                 :            :  * @namefmt: printf-style name for the thread. Format is restricted
     442                 :            :  *           to "name.*%u". Code fills in cpu number.
     443                 :            :  *
      444                 :            :  * Description: This helper function creates and names a kernel thread.
      445                 :            :  * The thread will be woken and put into park mode.
     446                 :            :  */
     447                 :         90 : struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
     448                 :            :                                           void *data, unsigned int cpu,
     449                 :            :                                           const char *namefmt)
     450                 :            : {
     451                 :         90 :         struct task_struct *p;
     452                 :            : 
     453                 :         90 :         p = kthread_create_on_node(threadfn, data, cpu_to_node(cpu), namefmt,
     454                 :            :                                    cpu);
     455         [ +  - ]:         90 :         if (IS_ERR(p))
     456                 :            :                 return p;
     457                 :         90 :         kthread_bind(p, cpu);
     458                 :            :         /* CPU hotplug need to bind once again when unparking the thread. */
     459         [ -  + ]:         90 :         set_bit(KTHREAD_IS_PER_CPU, &to_kthread(p)->flags);
     460         [ -  + ]:         90 :         to_kthread(p)->cpu = cpu;
     461                 :         90 :         return p;
     462                 :            : }
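
A sketch of the smpboot-style lifecycle this helper serves (hypothetical
wrapper; reuses parkable_thread_fn from the earlier sketch):

    static struct task_struct *example_percpu_thread(unsigned int cpu)
    {
            struct task_struct *t;

            t = kthread_create_on_cpu(parkable_thread_fn, NULL, cpu,
                                      "example/%u");
            if (IS_ERR(t))
                    return t;
            kthread_park(t);    /* keep it parked until @cpu is online */
            return t;           /* kthread_unpark(t) once the CPU comes up */
    }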
     463                 :            : 
     464                 :            : /**
     465                 :            :  * kthread_unpark - unpark a thread created by kthread_create().
     466                 :            :  * @k:          thread created by kthread_create().
     467                 :            :  *
     468                 :            :  * Sets kthread_should_park() for @k to return false, wakes it, and
      469                 :            :  * waits for it to return. If the thread is marked percpu then it's
     470                 :            :  * bound to the cpu again.
     471                 :            :  */
     472                 :         90 : void kthread_unpark(struct task_struct *k)
     473                 :            : {
     474         [ -  + ]:         90 :         struct kthread *kthread = to_kthread(k);
     475                 :            : 
     476                 :            :         /*
     477                 :            :          * Newly created kthread was parked when the CPU was offline.
     478                 :            :          * The binding was lost and we need to set it again.
     479                 :            :          */
     480         [ +  - ]:         90 :         if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
     481                 :         90 :                 __kthread_bind(k, kthread->cpu, TASK_PARKED);
     482                 :            : 
     483                 :         90 :         clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
     484                 :            :         /*
     485                 :            :          * __kthread_parkme() will either see !SHOULD_PARK or get the wakeup.
     486                 :            :          */
     487                 :         90 :         wake_up_state(k, TASK_PARKED);
     488                 :         90 : }
     489                 :            : EXPORT_SYMBOL_GPL(kthread_unpark);
     490                 :            : 
     491                 :            : /**
     492                 :            :  * kthread_park - park a thread created by kthread_create().
     493                 :            :  * @k: thread created by kthread_create().
     494                 :            :  *
     495                 :            :  * Sets kthread_should_park() for @k to return true, wakes it, and
     496                 :            :  * waits for it to return. This can also be called after kthread_create()
     497                 :            :  * instead of calling wake_up_process(): the thread will park without
     498                 :            :  * calling threadfn().
     499                 :            :  *
     500                 :            :  * Returns 0 if the thread is parked, -ENOSYS if the thread exited.
     501                 :            :  * If called by the kthread itself just the park bit is set.
     502                 :            :  */
     503                 :         90 : int kthread_park(struct task_struct *k)
     504                 :            : {
     505         [ -  + ]:         90 :         struct kthread *kthread = to_kthread(k);
     506                 :            : 
     507   [ -  +  +  - ]:         90 :         if (WARN_ON(k->flags & PF_EXITING))
     508                 :            :                 return -ENOSYS;
     509                 :            : 
     510   [ -  +  +  - ]:         90 :         if (WARN_ON_ONCE(test_bit(KTHREAD_SHOULD_PARK, &kthread->flags)))
     511                 :            :                 return -EBUSY;
     512                 :            : 
     513                 :         90 :         set_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
     514         [ +  - ]:         90 :         if (k != current) {
     515                 :         90 :                 wake_up_process(k);
     516                 :            :                 /*
     517                 :            :                  * Wait for __kthread_parkme() to complete(), this means we
     518                 :            :                  * _will_ have TASK_PARKED and are about to call schedule().
     519                 :            :                  */
     520                 :         90 :                 wait_for_completion(&kthread->parked);
     521                 :            :                 /*
     522                 :            :                  * Now wait for that schedule() to complete and the task to
     523                 :            :                  * get scheduled out.
     524                 :            :                  */
     525         [ -  + ]:         90 :                 WARN_ON_ONCE(!wait_task_inactive(k, TASK_PARKED));
     526                 :            :         }
     527                 :            : 
     528                 :            :         return 0;
     529                 :            : }
     530                 :            : EXPORT_SYMBOL_GPL(kthread_park);
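
A sketch of quiescing a running thread around a reconfiguration (hypothetical
helper; @t created as in the sketches above):

    static void example_quiesce(struct task_struct *t)
    {
            if (!kthread_park(t)) {
                    /* t is in TASK_PARKED and executes no threadfn code */
                    /* ... update state the thread reads ... */
                    kthread_unpark(t);
            }
    }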
     531                 :            : 
     532                 :            : /**
     533                 :            :  * kthread_stop - stop a thread created by kthread_create().
     534                 :            :  * @k: thread created by kthread_create().
     535                 :            :  *
     536                 :            :  * Sets kthread_should_stop() for @k to return true, wakes it, and
     537                 :            :  * waits for it to exit. This can also be called after kthread_create()
     538                 :            :  * instead of calling wake_up_process(): the thread will exit without
     539                 :            :  * calling threadfn().
     540                 :            :  *
     541                 :            :  * If threadfn() may call do_exit() itself, the caller must ensure
     542                 :            :  * task_struct can't go away.
     543                 :            :  *
     544                 :            :  * Returns the result of threadfn(), or %-EINTR if wake_up_process()
     545                 :            :  * was never called.
     546                 :            :  */
     547                 :          0 : int kthread_stop(struct task_struct *k)
     548                 :            : {
     549                 :          0 :         struct kthread *kthread;
     550                 :          0 :         int ret;
     551                 :            : 
     552                 :          0 :         trace_sched_kthread_stop(k);
     553                 :            : 
     554                 :          0 :         get_task_struct(k);
     555         [ #  # ]:          0 :         kthread = to_kthread(k);
     556                 :          0 :         set_bit(KTHREAD_SHOULD_STOP, &kthread->flags);
     557                 :          0 :         kthread_unpark(k);
     558                 :          0 :         wake_up_process(k);
     559                 :          0 :         wait_for_completion(&kthread->exited);
     560                 :          0 :         ret = k->exit_code;
     561                 :          0 :         put_task_struct(k);
     562                 :            : 
     563                 :          0 :         trace_sched_kthread_stop_ret(ret);
     564                 :          0 :         return ret;
     565                 :            : }
     566                 :            : EXPORT_SYMBOL(kthread_stop);
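
A sketch of the full lifecycle (hypothetical helper); the value returned by
kthread_stop() is whatever example_thread_fn returned:

    static int example_lifecycle(void)
    {
            struct task_struct *t;

            t = kthread_run(example_thread_fn, NULL, "example");
            if (IS_ERR(t))
                    return PTR_ERR(t);
            /* ... let it run for a while ... */
            return kthread_stop(t);  /* wakes t and waits for it to exit */
    }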
     567                 :            : 
     568                 :         30 : int kthreadd(void *unused)
     569                 :            : {
     570                 :         30 :         struct task_struct *tsk = current;
     571                 :            : 
     572                 :            :         /* Setup a clean context for our children to inherit. */
     573                 :         30 :         set_task_comm(tsk, "kthreadd");
     574                 :         30 :         ignore_signals(tsk);
     575                 :         30 :         set_cpus_allowed_ptr(tsk, cpu_all_mask);
     576                 :         30 :         set_mems_allowed(node_states[N_MEMORY]);
     577                 :            : 
     578                 :         30 :         current->flags |= PF_NOFREEZE;
     579                 :         30 :         cgroup_init_kthreadd();
     580                 :            : 
     581                 :       2087 :         for (;;) {
     582                 :       2087 :                 set_current_state(TASK_INTERRUPTIBLE);
     583         [ +  - ]:       2087 :                 if (list_empty(&kthread_create_list))
     584                 :       2087 :                         schedule();
     585                 :       2057 :                 __set_current_state(TASK_RUNNING);
     586                 :            : 
     587                 :       2057 :                 spin_lock(&kthread_create_lock);
     588         [ +  + ]:       4124 :                 while (!list_empty(&kthread_create_list)) {
     589                 :       2067 :                         struct kthread_create_info *create;
     590                 :            : 
     591                 :       2067 :                         create = list_entry(kthread_create_list.next,
     592                 :            :                                             struct kthread_create_info, list);
     593                 :       2067 :                         list_del_init(&create->list);
     594                 :       2067 :                         spin_unlock(&kthread_create_lock);
     595                 :            : 
     596                 :       2067 :                         create_kthread(create);
     597                 :            : 
     598                 :       2067 :                         spin_lock(&kthread_create_lock);
     599                 :            :                 }
     600                 :       2057 :                 spin_unlock(&kthread_create_lock);
     601                 :            :         }
     602                 :            : 
     603                 :            :         return 0;
     604                 :            : }
     605                 :            : 
     606                 :          0 : void __kthread_init_worker(struct kthread_worker *worker,
     607                 :            :                                 const char *name,
     608                 :            :                                 struct lock_class_key *key)
     609                 :            : {
     610                 :          0 :         memset(worker, 0, sizeof(struct kthread_worker));
     611                 :          0 :         raw_spin_lock_init(&worker->lock);
     612                 :          0 :         lockdep_set_class_and_name(&worker->lock, key, name);
     613                 :          0 :         INIT_LIST_HEAD(&worker->work_list);
     614                 :          0 :         INIT_LIST_HEAD(&worker->delayed_work_list);
     615                 :          0 : }
     616                 :            : EXPORT_SYMBOL_GPL(__kthread_init_worker);
     617                 :            : 
     618                 :            : /**
     619                 :            :  * kthread_worker_fn - kthread function to process kthread_worker
     620                 :            :  * @worker_ptr: pointer to initialized kthread_worker
     621                 :            :  *
     622                 :            :  * This function implements the main cycle of kthread worker. It processes
     623                 :            :  * work_list until it is stopped with kthread_stop(). It sleeps when the queue
     624                 :            :  * is empty.
     625                 :            :  *
      626                 :            :  * Work items must not hold any locks or leave preemption or interrupts
      627                 :            :  * disabled when they finish. A safe point for freezing is reached after
      628                 :            :  * one work item finishes and before the next one is started.
      629                 :            :  *
      630                 :            :  * Also, a work item must not be handled by more than one worker at the
      631                 :            :  * same time; see also kthread_queue_work().
     632                 :            :  */
     633                 :          0 : int kthread_worker_fn(void *worker_ptr)
     634                 :            : {
     635                 :          0 :         struct kthread_worker *worker = worker_ptr;
     636                 :          0 :         struct kthread_work *work;
     637                 :            : 
     638                 :            :         /*
     639                 :            :          * FIXME: Update the check and remove the assignment when all kthread
     640                 :            :          * worker users are created using kthread_create_worker*() functions.
     641                 :            :          */
     642   [ #  #  #  #  :          0 :         WARN_ON(worker->task && worker->task != current);
                   #  # ]
     643         [ #  # ]:          0 :         worker->task = current;
     644                 :            : 
     645         [ #  # ]:          0 :         if (worker->flags & KTW_FREEZABLE)
     646                 :          0 :                 set_freezable();
     647                 :            : 
     648                 :          0 : repeat:
     649                 :          0 :         set_current_state(TASK_INTERRUPTIBLE);  /* mb paired w/ kthread_stop */
     650                 :            : 
     651         [ #  # ]:          0 :         if (kthread_should_stop()) {
     652                 :          0 :                 __set_current_state(TASK_RUNNING);
     653                 :          0 :                 raw_spin_lock_irq(&worker->lock);
     654                 :          0 :                 worker->task = NULL;
     655                 :          0 :                 raw_spin_unlock_irq(&worker->lock);
     656                 :          0 :                 return 0;
     657                 :            :         }
     658                 :            : 
     659                 :          0 :         work = NULL;
     660                 :          0 :         raw_spin_lock_irq(&worker->lock);
     661         [ #  # ]:          0 :         if (!list_empty(&worker->work_list)) {
     662                 :          0 :                 work = list_first_entry(&worker->work_list,
     663                 :            :                                         struct kthread_work, node);
     664                 :          0 :                 list_del_init(&work->node);
     665                 :            :         }
     666                 :          0 :         worker->current_work = work;
     667                 :          0 :         raw_spin_unlock_irq(&worker->lock);
     668                 :            : 
     669         [ #  # ]:          0 :         if (work) {
     670                 :          0 :                 __set_current_state(TASK_RUNNING);
     671                 :          0 :                 work->func(work);
     672         [ #  # ]:          0 :         } else if (!freezing(current))
     673                 :          0 :                 schedule();
     674                 :            : 
     675                 :          0 :         try_to_freeze();
     676                 :          0 :         cond_resched();
     677                 :          0 :         goto repeat;
     678                 :            : }
     679                 :            : EXPORT_SYMBOL_GPL(kthread_worker_fn);
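
The FIXME above concerns legacy users that run this function directly rather
than via kthread_create_worker(); a sketch of that older pattern (names
hypothetical):

    DEFINE_KTHREAD_WORKER(example_worker);  /* statically initialized worker */

    static struct task_struct *example_worker_task;

    static int example_start_legacy_worker(void)
    {
            example_worker_task = kthread_run(kthread_worker_fn,
                                              &example_worker, "example-worker");
            return PTR_ERR_OR_ZERO(example_worker_task);
    }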
     680                 :            : 
     681                 :            : static __printf(3, 0) struct kthread_worker *
     682                 :          0 : __kthread_create_worker(int cpu, unsigned int flags,
     683                 :            :                         const char namefmt[], va_list args)
     684                 :            : {
     685                 :          0 :         struct kthread_worker *worker;
     686                 :          0 :         struct task_struct *task;
     687                 :          0 :         int node = NUMA_NO_NODE;
     688                 :            : 
     689                 :          0 :         worker = kzalloc(sizeof(*worker), GFP_KERNEL);
     690         [ #  # ]:          0 :         if (!worker)
     691                 :            :                 return ERR_PTR(-ENOMEM);
     692                 :            : 
     693         [ #  # ]:          0 :         kthread_init_worker(worker);
     694                 :            : 
     695         [ #  # ]:          0 :         if (cpu >= 0)
     696                 :          0 :                 node = cpu_to_node(cpu);
     697                 :            : 
     698                 :          0 :         task = __kthread_create_on_node(kthread_worker_fn, worker,
     699                 :            :                                                 node, namefmt, args);
     700         [ #  # ]:          0 :         if (IS_ERR(task))
     701                 :          0 :                 goto fail_task;
     702                 :            : 
     703         [ #  # ]:          0 :         if (cpu >= 0)
     704                 :          0 :                 kthread_bind(task, cpu);
     705                 :            : 
     706                 :          0 :         worker->flags = flags;
     707                 :          0 :         worker->task = task;
     708                 :          0 :         wake_up_process(task);
     709                 :          0 :         return worker;
     710                 :            : 
     711                 :            : fail_task:
     712                 :          0 :         kfree(worker);
     713                 :          0 :         return ERR_CAST(task);
     714                 :            : }
     715                 :            : 
     716                 :            : /**
     717                 :            :  * kthread_create_worker - create a kthread worker
     718                 :            :  * @flags: flags modifying the default behavior of the worker
     719                 :            :  * @namefmt: printf-style name for the kthread worker (task).
     720                 :            :  *
     721                 :            :  * Returns a pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
      722                 :            :  * when the needed structures could not be allocated, and ERR_PTR(-EINTR)
     723                 :            :  * when the worker was SIGKILLed.
     724                 :            :  */
     725                 :            : struct kthread_worker *
     726                 :          0 : kthread_create_worker(unsigned int flags, const char namefmt[], ...)
     727                 :            : {
     728                 :          0 :         struct kthread_worker *worker;
     729                 :          0 :         va_list args;
     730                 :            : 
     731                 :          0 :         va_start(args, namefmt);
     732                 :          0 :         worker = __kthread_create_worker(-1, flags, namefmt, args);
     733                 :          0 :         va_end(args);
     734                 :            : 
     735                 :          0 :         return worker;
     736                 :            : }
     737                 :            : EXPORT_SYMBOL(kthread_create_worker);
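
A sketch of the preferred creation path together with queueing one work item
(work function and names hypothetical):

    static void example_work_fn(struct kthread_work *work)
    {
            /* runs in the worker thread's context */
    }

    static int example_use_worker(void)
    {
            struct kthread_worker *w;
            struct kthread_work work;

            w = kthread_create_worker(0, "example-worker");
            if (IS_ERR(w))
                    return PTR_ERR(w);
            kthread_init_work(&work, example_work_fn);
            kthread_queue_work(w, &work);
            kthread_flush_work(&work);      /* wait for the work to finish */
            kthread_destroy_worker(w);      /* flush and stop the worker */
            return 0;
    }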
     738                 :            : 
     739                 :            : /**
      740                 :            :  * kthread_create_worker_on_cpu - create a kthread worker and bind it
      741                 :            :  *      to a given CPU and the associated NUMA node.
     742                 :            :  * @cpu: CPU number
     743                 :            :  * @flags: flags modifying the default behavior of the worker
     744                 :            :  * @namefmt: printf-style name for the kthread worker (task).
     745                 :            :  *
     746                 :            :  * Use a valid CPU number if you want to bind the kthread worker
     747                 :            :  * to the given CPU and the associated NUMA node.
     748                 :            :  *
     749                 :            :  * A good practice is to add the cpu number also into the worker name.
     750                 :            :  * For example, use kthread_create_worker_on_cpu(cpu, "helper/%d", cpu).
     751                 :            :  *
     752                 :            :  * Returns a pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
      753                 :            :  * when the needed structures could not be allocated, and ERR_PTR(-EINTR)
     754                 :            :  * when the worker was SIGKILLed.
     755                 :            :  */
     756                 :            : struct kthread_worker *
     757                 :          0 : kthread_create_worker_on_cpu(int cpu, unsigned int flags,
     758                 :            :                              const char namefmt[], ...)
     759                 :            : {
     760                 :          0 :         struct kthread_worker *worker;
     761                 :          0 :         va_list args;
     762                 :            : 
     763                 :          0 :         va_start(args, namefmt);
     764                 :          0 :         worker = __kthread_create_worker(cpu, flags, namefmt, args);
     765                 :          0 :         va_end(args);
     766                 :            : 
     767                 :          0 :         return worker;
     768                 :            : }
     769                 :            : EXPORT_SYMBOL(kthread_create_worker_on_cpu);
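
Following the naming advice above, a minimal per-CPU variant of the previous
sketch (hypothetical wrapper):

    static struct kthread_worker *example_cpu_worker(int cpu)
    {
            return kthread_create_worker_on_cpu(cpu, 0, "helper/%d", cpu);
    }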
     770                 :            : 
     771                 :            : /*
     772                 :            :  * Returns true when the work could not be queued at the moment.
     773                 :            :  * It happens when it is already pending in a worker list
     774                 :            :  * or when it is being cancelled.
     775                 :            :  */
     776                 :          0 : static inline bool queuing_blocked(struct kthread_worker *worker,
     777                 :            :                                    struct kthread_work *work)
     778                 :            : {
     779                 :          0 :         lockdep_assert_held(&worker->lock);
     780                 :            : 
     781   [ #  #  #  # ]:          0 :         return !list_empty(&work->node) || work->canceling;
     782                 :            : }
     783                 :            : 
     784                 :          0 : static void kthread_insert_work_sanity_check(struct kthread_worker *worker,
     785                 :            :                                              struct kthread_work *work)
     786                 :            : {
     787                 :          0 :         lockdep_assert_held(&worker->lock);
     788         [ #  # ]:          0 :         WARN_ON_ONCE(!list_empty(&work->node));
     789                 :            :         /* Do not use a work with >1 worker, see kthread_queue_work() */
     790   [ #  #  #  #  :          0 :         WARN_ON_ONCE(work->worker && work->worker != worker);
                   #  # ]
     791                 :          0 : }
     792                 :            : 
     793                 :            : /* insert @work before @pos in @worker */
     794                 :          0 : static void kthread_insert_work(struct kthread_worker *worker,
     795                 :            :                                 struct kthread_work *work,
     796                 :            :                                 struct list_head *pos)
     797                 :            : {
     798                 :          0 :         kthread_insert_work_sanity_check(worker, work);
     799                 :            : 
     800         [ #  # ]:          0 :         list_add_tail(&work->node, pos);
     801                 :          0 :         work->worker = worker;
     802   [ #  #  #  # ]:          0 :         if (!worker->current_work && likely(worker->task))
     803                 :          0 :                 wake_up_process(worker->task);
     804                 :          0 : }
     805                 :            : 
     806                 :            : /**
     807                 :            :  * kthread_queue_work - queue a kthread_work
     808                 :            :  * @worker: target kthread_worker
     809                 :            :  * @work: kthread_work to queue
     810                 :            :  *
      811                 :            :  * Queue @work on @worker for async execution.  The worker must have
      812                 :            :  * been created with kthread_create_worker().  Returns %true
      813                 :            :  * if @work was successfully queued, %false if it was already pending.
     814                 :            :  *
     815                 :            :  * Reinitialize the work if it needs to be used by another worker.
     816                 :            :  * For example, when the worker was stopped and started again.
     817                 :            :  */
     818                 :          0 : bool kthread_queue_work(struct kthread_worker *worker,
     819                 :            :                         struct kthread_work *work)
     820                 :            : {
     821                 :          0 :         bool ret = false;
     822                 :          0 :         unsigned long flags;
     823                 :            : 
     824                 :          0 :         raw_spin_lock_irqsave(&worker->lock, flags);
     825   [ #  #  #  # ]:          0 :         if (!queuing_blocked(worker, work)) {
     826                 :          0 :                 kthread_insert_work(worker, work, &worker->work_list);
     827                 :          0 :                 ret = true;
     828                 :            :         }
     829                 :          0 :         raw_spin_unlock_irqrestore(&worker->lock, flags);
     830                 :          0 :         return ret;
     831                 :            : }
     832                 :            : EXPORT_SYMBOL_GPL(kthread_queue_work);
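
A hedged sketch of the call pattern described above: embed a kthread_work in a private context, initialize it with its callback, and queue it; kthread_queue_work() returns false exactly when queuing_blocked() holds. All names here are illustrative:

    #include <linux/kthread.h>
    #include <linux/printk.h>

    struct frob_ctx {
            struct kthread_work work;
            int payload;
    };

    /* Runs in the worker kthread once the work is dequeued. */
    static void frob_work_fn(struct kthread_work *work)
    {
            struct frob_ctx *ctx = container_of(work, struct frob_ctx, work);

            pr_info("frobbing payload %d\n", ctx->payload);
    }

    static void frob_submit(struct kthread_worker *worker, struct frob_ctx *ctx)
    {
            kthread_init_work(&ctx->work, frob_work_fn);

            /* false means the work was already pending or being canceled. */
            if (!kthread_queue_work(worker, &ctx->work))
                    pr_debug("frob work already queued\n");
    }
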
     833                 :            : 
     834                 :            : /**
     835                 :            :  * kthread_delayed_work_timer_fn - callback that queues the associated kthread
     836                 :            :  *      delayed work when the timer expires.
     837                 :            :  * @t: pointer to the expired timer
     838                 :            :  *
     839                 :            :  * The format of the function is defined by struct timer_list.
     840                 :            :  * It should have been called from irqsafe timer with irq already off.
     841                 :            :  */
     842                 :          0 : void kthread_delayed_work_timer_fn(struct timer_list *t)
     843                 :            : {
     844                 :          0 :         struct kthread_delayed_work *dwork = from_timer(dwork, t, timer);
     845                 :          0 :         struct kthread_work *work = &dwork->work;
     846                 :          0 :         struct kthread_worker *worker = work->worker;
     847                 :          0 :         unsigned long flags;
     848                 :            : 
     849                 :            :         /*
     850                 :            :          * This might happen when a pending work is reinitialized.
     851                 :            :          * It means that it is used a wrong way.
      852                 :            :  * It means that the work is being used incorrectly.
     853   [ #  #  #  # ]:          0 :         if (WARN_ON_ONCE(!worker))
     854                 :            :                 return;
     855                 :            : 
     856                 :          0 :         raw_spin_lock_irqsave(&worker->lock, flags);
     857                 :            :         /* Work must not be used with >1 worker, see kthread_queue_work(). */
     858         [ #  # ]:          0 :         WARN_ON_ONCE(work->worker != worker);
     859                 :            : 
     860                 :            :         /* Move the work from worker->delayed_work_list. */
     861         [ #  # ]:          0 :         WARN_ON_ONCE(list_empty(&work->node));
     862                 :          0 :         list_del_init(&work->node);
     863                 :          0 :         kthread_insert_work(worker, work, &worker->work_list);
     864                 :            : 
     865                 :          0 :         raw_spin_unlock_irqrestore(&worker->lock, flags);
     866                 :            : }
     867                 :            : EXPORT_SYMBOL(kthread_delayed_work_timer_fn);
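
Callers do not invoke this timer callback directly; the delayed-work initializer wires it into the embedded timer, which is what the WARN_ON_ONCE() in __kthread_queue_delayed_work() below checks for. A minimal sketch with illustrative names:

    #include <linux/kthread.h>

    /* Executed by the worker after the timer has fired. */
    static void poll_fn(struct kthread_work *work)
    {
    }

    static struct kthread_delayed_work poll_dwork;

    static void poll_setup(void)
    {
            /* Sets poll_dwork.timer.function = kthread_delayed_work_timer_fn. */
            kthread_init_delayed_work(&poll_dwork, poll_fn);
    }
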
     868                 :            : 
     869                 :          0 : static void __kthread_queue_delayed_work(struct kthread_worker *worker,
     870                 :            :                                          struct kthread_delayed_work *dwork,
     871                 :            :                                          unsigned long delay)
     872                 :            : {
     873                 :          0 :         struct timer_list *timer = &dwork->timer;
     874                 :          0 :         struct kthread_work *work = &dwork->work;
     875                 :            : 
     876         [ #  # ]:          0 :         WARN_ON_ONCE(timer->function != kthread_delayed_work_timer_fn);
     877                 :            : 
     878                 :            :         /*
     879                 :            :          * If @delay is 0, queue @dwork->work immediately.  This is for
     880                 :            :          * both optimization and correctness.  The earliest @timer can
      881                 :            :  * expire is on the closest next tick, and delayed_work users depend
      882                 :            :  * on there being no such delay when @delay is 0.
     883                 :            :          */
     884         [ #  # ]:          0 :         if (!delay) {
     885                 :          0 :                 kthread_insert_work(worker, work, &worker->work_list);
     886                 :          0 :                 return;
     887                 :            :         }
     888                 :            : 
     889                 :            :         /* Be paranoid and try to detect possible races already now. */
     890                 :          0 :         kthread_insert_work_sanity_check(worker, work);
     891                 :            : 
     892                 :          0 :         list_add(&work->node, &worker->delayed_work_list);
     893                 :          0 :         work->worker = worker;
     894                 :          0 :         timer->expires = jiffies + delay;
     895                 :          0 :         add_timer(timer);
     896                 :            : }
     897                 :            : 
     898                 :            : /**
     899                 :            :  * kthread_queue_delayed_work - queue the associated kthread work
     900                 :            :  *      after a delay.
     901                 :            :  * @worker: target kthread_worker
     902                 :            :  * @dwork: kthread_delayed_work to queue
     903                 :            :  * @delay: number of jiffies to wait before queuing
     904                 :            :  *
      905                 :            :  * If the work is not already pending, this starts a timer that queues
      906                 :            :  * the work after the given @delay. If @delay is zero, the work is
      907                 :            :  * queued immediately.
      908                 :            :  *
      909                 :            :  * Return: %false if @work was already pending, that is, either its
      910                 :            :  * timer was running or the work was already queued. Returns %true
      911                 :            :  * otherwise.
     912                 :            :  */
     913                 :          0 : bool kthread_queue_delayed_work(struct kthread_worker *worker,
     914                 :            :                                 struct kthread_delayed_work *dwork,
     915                 :            :                                 unsigned long delay)
     916                 :            : {
     917                 :          0 :         struct kthread_work *work = &dwork->work;
     918                 :          0 :         unsigned long flags;
     919                 :          0 :         bool ret = false;
     920                 :            : 
     921                 :          0 :         raw_spin_lock_irqsave(&worker->lock, flags);
     922                 :            : 
     923   [ #  #  #  # ]:          0 :         if (!queuing_blocked(worker, work)) {
     924                 :          0 :                 __kthread_queue_delayed_work(worker, dwork, delay);
     925                 :          0 :                 ret = true;
     926                 :            :         }
     927                 :            : 
     928                 :          0 :         raw_spin_unlock_irqrestore(&worker->lock, flags);
     929                 :          0 :         return ret;
     930                 :            : }
     931                 :            : EXPORT_SYMBOL_GPL(kthread_queue_delayed_work);
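
A sketch of arming the delayed work initialized in the previous sketch; msecs_to_jiffies() converts a relative timeout into the jiffies-based @delay this API takes, and a @delay of zero would queue the work immediately instead of starting the timer (illustrative names):

    #include <linux/jiffies.h>
    #include <linux/kthread.h>

    /* Queue @dwork on @worker roughly 500 ms from now; returns false
     * if the work was already pending. */
    static bool poll_arm(struct kthread_worker *worker,
                         struct kthread_delayed_work *dwork)
    {
            return kthread_queue_delayed_work(worker, dwork,
                                              msecs_to_jiffies(500));
    }
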
     932                 :            : 
     933                 :            : struct kthread_flush_work {
     934                 :            :         struct kthread_work     work;
     935                 :            :         struct completion       done;
     936                 :            : };
     937                 :            : 
     938                 :          0 : static void kthread_flush_work_fn(struct kthread_work *work)
     939                 :            : {
     940                 :          0 :         struct kthread_flush_work *fwork =
     941                 :          0 :                 container_of(work, struct kthread_flush_work, work);
     942                 :          0 :         complete(&fwork->done);
     943                 :          0 : }
     944                 :            : 
     945                 :            : /**
     946                 :            :  * kthread_flush_work - flush a kthread_work
     947                 :            :  * @work: work to flush
     948                 :            :  *
     949                 :            :  * If @work is queued or executing, wait for it to finish execution.
     950                 :            :  */
     951                 :          0 : void kthread_flush_work(struct kthread_work *work)
     952                 :            : {
     953                 :          0 :         struct kthread_flush_work fwork = {
     954                 :            :                 KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
     955                 :          0 :                 COMPLETION_INITIALIZER_ONSTACK(fwork.done),
     956                 :            :         };
     957                 :          0 :         struct kthread_worker *worker;
     958                 :          0 :         bool noop = false;
     959                 :            : 
     960                 :          0 :         worker = work->worker;
     961         [ #  # ]:          0 :         if (!worker)
     962                 :          0 :                 return;
     963                 :            : 
     964                 :          0 :         raw_spin_lock_irq(&worker->lock);
     965                 :            :         /* Work must not be used with >1 worker, see kthread_queue_work(). */
     966         [ #  # ]:          0 :         WARN_ON_ONCE(work->worker != worker);
     967                 :            : 
     968         [ #  # ]:          0 :         if (!list_empty(&work->node))
     969                 :          0 :                 kthread_insert_work(worker, &fwork.work, work->node.next);
     970         [ #  # ]:          0 :         else if (worker->current_work == work)
     971                 :          0 :                 kthread_insert_work(worker, &fwork.work,
     972                 :            :                                     worker->work_list.next);
     973                 :            :         else
     974                 :            :                 noop = true;
     975                 :            : 
     976                 :          0 :         raw_spin_unlock_irq(&worker->lock);
     977                 :            : 
     978         [ #  # ]:          0 :         if (!noop)
     979                 :          0 :                 wait_for_completion(&fwork.done);
     980                 :            : }
     981                 :            : EXPORT_SYMBOL_GPL(kthread_flush_work);
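
A common pattern this enables, sketched with the hypothetical frob_ctx from the earlier sketch: flush the work before freeing the object that embeds it, so the callback can no longer touch freed memory:

    #include <linux/kthread.h>
    #include <linux/slab.h>

    static void frob_release(struct frob_ctx *ctx)
    {
            /* Wait until any queued or running instance has finished. */
            kthread_flush_work(&ctx->work);
            /* Only now is it safe to free the embedding object. */
            kfree(ctx);
    }
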
     982                 :            : 
     983                 :            : /*
     984                 :            :  * This function removes the work from the worker queue. Also it makes sure
     985                 :            :  * that it won't get queued later via the delayed work's timer.
     986                 :            :  *
     987                 :            :  * The work might still be in use when this function finishes. See the
      988                 :            :  * current_work processed by the worker.
     989                 :            :  *
     990                 :            :  * Return: %true if @work was pending and successfully canceled,
     991                 :            :  *      %false if @work was not pending
     992                 :            :  */
     993                 :          0 : static bool __kthread_cancel_work(struct kthread_work *work, bool is_dwork,
     994                 :            :                                   unsigned long *flags)
     995                 :            : {
     996                 :            :         /* Try to cancel the timer if exists. */
     997         [ #  # ]:          0 :         if (is_dwork) {
     998                 :          0 :                 struct kthread_delayed_work *dwork =
     999                 :          0 :                         container_of(work, struct kthread_delayed_work, work);
    1000                 :          0 :                 struct kthread_worker *worker = work->worker;
    1001                 :            : 
    1002                 :            :                 /*
    1003                 :            :                  * del_timer_sync() must be called to make sure that the timer
     1004                 :            :  * callback is not running. The lock must be temporarily released
    1005                 :            :                  * to avoid a deadlock with the callback. In the meantime,
    1006                 :            :                  * any queuing is blocked by setting the canceling counter.
    1007                 :            :                  */
    1008                 :          0 :                 work->canceling++;
    1009                 :          0 :                 raw_spin_unlock_irqrestore(&worker->lock, *flags);
    1010                 :          0 :                 del_timer_sync(&dwork->timer);
    1011                 :          0 :                 raw_spin_lock_irqsave(&worker->lock, *flags);
    1012                 :          0 :                 work->canceling--;
    1013                 :            :         }
    1014                 :            : 
    1015                 :            :         /*
    1016                 :            :          * Try to remove the work from a worker list. It might either
    1017                 :            :          * be from worker->work_list or from worker->delayed_work_list.
    1018                 :            :          */
    1019         [ #  # ]:          0 :         if (!list_empty(&work->node)) {
    1020                 :          0 :                 list_del_init(&work->node);
    1021                 :          0 :                 return true;
    1022                 :            :         }
    1023                 :            : 
    1024                 :            :         return false;
    1025                 :            : }
    1026                 :            : 
    1027                 :            : /**
    1028                 :            :  * kthread_mod_delayed_work - modify delay of or queue a kthread delayed work
    1029                 :            :  * @worker: kthread worker to use
    1030                 :            :  * @dwork: kthread delayed work to queue
    1031                 :            :  * @delay: number of jiffies to wait before queuing
    1032                 :            :  *
    1033                 :            :  * If @dwork is idle, equivalent to kthread_queue_delayed_work(). Otherwise,
    1034                 :            :  * modify @dwork's timer so that it expires after @delay. If @delay is zero,
    1035                 :            :  * @work is guaranteed to be queued immediately.
    1036                 :            :  *
    1037                 :            :  * Return: %true if @dwork was pending and its timer was modified,
    1038                 :            :  * %false otherwise.
    1039                 :            :  *
    1040                 :            :  * A special case is when the work is being canceled in parallel.
    1041                 :            :  * It might be caused either by the real kthread_cancel_delayed_work_sync()
    1042                 :            :  * or yet another kthread_mod_delayed_work() call. We let the other command
    1043                 :            :  * win and return %false here. The caller is supposed to synchronize these
     1044                 :            :  * operations in a reasonable way.
    1045                 :            :  *
    1046                 :            :  * This function is safe to call from any context including IRQ handler.
    1047                 :            :  * See __kthread_cancel_work() and kthread_delayed_work_timer_fn()
    1048                 :            :  * for details.
    1049                 :            :  */
    1050                 :          0 : bool kthread_mod_delayed_work(struct kthread_worker *worker,
    1051                 :            :                               struct kthread_delayed_work *dwork,
    1052                 :            :                               unsigned long delay)
    1053                 :            : {
    1054                 :          0 :         struct kthread_work *work = &dwork->work;
    1055                 :          0 :         unsigned long flags;
    1056                 :          0 :         int ret = false;
    1057                 :            : 
    1058                 :          0 :         raw_spin_lock_irqsave(&worker->lock, flags);
    1059                 :            : 
    1060                 :            :         /* Do not bother with canceling when never queued. */
    1061         [ #  # ]:          0 :         if (!work->worker)
    1062                 :          0 :                 goto fast_queue;
    1063                 :            : 
    1064                 :            :         /* Work must not be used with >1 worker, see kthread_queue_work() */
    1065         [ #  # ]:          0 :         WARN_ON_ONCE(work->worker != worker);
    1066                 :            : 
    1067                 :            :         /* Do not fight with another command that is canceling this work. */
    1068         [ #  # ]:          0 :         if (work->canceling)
    1069                 :          0 :                 goto out;
    1070                 :            : 
    1071                 :          0 :         ret = __kthread_cancel_work(work, true, &flags);
    1072                 :          0 : fast_queue:
    1073                 :          0 :         __kthread_queue_delayed_work(worker, dwork, delay);
    1074                 :          0 : out:
    1075                 :          0 :         raw_spin_unlock_irqrestore(&worker->lock, flags);
    1076                 :          0 :         return ret;
    1077                 :            : }
    1078                 :            : EXPORT_SYMBOL_GPL(kthread_mod_delayed_work);
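
A hedged sketch of the typical use: each call pushes the expiry further into the future regardless of whether the previous timer is still pending, which makes this a natural building block for a software watchdog (illustrative names):

    #include <linux/jiffies.h>
    #include <linux/kthread.h>

    /* Pet the watchdog: (re)arm @dwork so its callback fires only if
     * no further activity happens within the next second. */
    static void watchdog_pet(struct kthread_worker *worker,
                             struct kthread_delayed_work *dwork)
    {
            kthread_mod_delayed_work(worker, dwork, msecs_to_jiffies(1000));
    }
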
    1079                 :            : 
    1080                 :          0 : static bool __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork)
    1081                 :            : {
    1082                 :          0 :         struct kthread_worker *worker = work->worker;
    1083                 :          0 :         unsigned long flags;
    1084                 :          0 :         int ret = false;
    1085                 :            : 
    1086         [ #  # ]:          0 :         if (!worker)
    1087                 :          0 :                 goto out;
    1088                 :            : 
    1089                 :          0 :         raw_spin_lock_irqsave(&worker->lock, flags);
    1090                 :            :         /* Work must not be used with >1 worker, see kthread_queue_work(). */
    1091         [ #  # ]:          0 :         WARN_ON_ONCE(work->worker != worker);
    1092                 :            : 
    1093                 :          0 :         ret = __kthread_cancel_work(work, is_dwork, &flags);
    1094                 :            : 
    1095         [ #  # ]:          0 :         if (worker->current_work != work)
    1096                 :          0 :                 goto out_fast;
    1097                 :            : 
    1098                 :            :         /*
    1099                 :            :          * The work is in progress and we need to wait with the lock released.
    1100                 :            :          * In the meantime, block any queuing by setting the canceling counter.
    1101                 :            :          */
    1102                 :          0 :         work->canceling++;
    1103                 :          0 :         raw_spin_unlock_irqrestore(&worker->lock, flags);
    1104                 :          0 :         kthread_flush_work(work);
    1105                 :          0 :         raw_spin_lock_irqsave(&worker->lock, flags);
    1106                 :          0 :         work->canceling--;
    1107                 :            : 
    1108                 :          0 : out_fast:
    1109                 :          0 :         raw_spin_unlock_irqrestore(&worker->lock, flags);
    1110                 :          0 : out:
    1111                 :          0 :         return ret;
    1112                 :            : }
    1113                 :            : 
    1114                 :            : /**
    1115                 :            :  * kthread_cancel_work_sync - cancel a kthread work and wait for it to finish
    1116                 :            :  * @work: the kthread work to cancel
    1117                 :            :  *
    1118                 :            :  * Cancel @work and wait for its execution to finish.  This function
    1119                 :            :  * can be used even if the work re-queues itself. On return from this
    1120                 :            :  * function, @work is guaranteed to be not pending or executing on any CPU.
    1121                 :            :  *
    1122                 :            :  * kthread_cancel_work_sync(&delayed_work->work) must not be used for
     1123                 :            :  * delayed works. Use kthread_cancel_delayed_work_sync() instead.
    1124                 :            :  *
    1125                 :            :  * The caller must ensure that the worker on which @work was last
    1126                 :            :  * queued can't be destroyed before this function returns.
    1127                 :            :  *
    1128                 :            :  * Return: %true if @work was pending, %false otherwise.
    1129                 :            :  */
    1130                 :          0 : bool kthread_cancel_work_sync(struct kthread_work *work)
    1131                 :            : {
    1132                 :          0 :         return __kthread_cancel_work_sync(work, false);
    1133                 :            : }
    1134                 :            : EXPORT_SYMBOL_GPL(kthread_cancel_work_sync);
    1135                 :            : 
    1136                 :            : /**
    1137                 :            :  * kthread_cancel_delayed_work_sync - cancel a kthread delayed work and
    1138                 :            :  *      wait for it to finish.
    1139                 :            :  * @dwork: the kthread delayed work to cancel
    1140                 :            :  *
    1141                 :            :  * This is kthread_cancel_work_sync() for delayed works.
    1142                 :            :  *
    1143                 :            :  * Return: %true if @dwork was pending, %false otherwise.
    1144                 :            :  */
    1145                 :          0 : bool kthread_cancel_delayed_work_sync(struct kthread_delayed_work *dwork)
    1146                 :            : {
    1147                 :          0 :         return __kthread_cancel_work_sync(&dwork->work, true);
    1148                 :            : }
    1149                 :            : EXPORT_SYMBOL_GPL(kthread_cancel_delayed_work_sync);
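
Teardown typically pairs each sync cancel with the matching work type, since only the delayed variant also shoots down a pending timer, per the note above. A sketch with assumed fields:

    #include <linux/kthread.h>

    struct dev_state {
            struct kthread_work         io_work;
            struct kthread_delayed_work poll_dwork;
    };

    static void dev_teardown(struct dev_state *st)
    {
            /* Plain work: remove it from the queue or wait for it. */
            kthread_cancel_work_sync(&st->io_work);
            /* Delayed work: additionally kills a pending timer. */
            kthread_cancel_delayed_work_sync(&st->poll_dwork);
    }
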
    1150                 :            : 
    1151                 :            : /**
    1152                 :            :  * kthread_flush_worker - flush all current works on a kthread_worker
    1153                 :            :  * @worker: worker to flush
    1154                 :            :  *
    1155                 :            :  * Wait until all currently executing or pending works on @worker are
    1156                 :            :  * finished.
    1157                 :            :  */
    1158                 :          0 : void kthread_flush_worker(struct kthread_worker *worker)
    1159                 :            : {
    1160                 :          0 :         struct kthread_flush_work fwork = {
    1161                 :            :                 KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
    1162                 :          0 :                 COMPLETION_INITIALIZER_ONSTACK(fwork.done),
    1163                 :            :         };
    1164                 :            : 
    1165                 :          0 :         kthread_queue_work(worker, &fwork.work);
    1166                 :          0 :         wait_for_completion(&fwork.done);
    1167                 :          0 : }
    1168                 :            : EXPORT_SYMBOL_GPL(kthread_flush_worker);
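
The flush work is queued at the tail of the list, so the completion fires only after every work queued before the call has run; that makes the function usable as a barrier, for example when quiescing a device (illustrative sketch):

    #include <linux/kthread.h>

    static void dev_quiesce(struct kthread_worker *worker)
    {
            /* Returns only after all previously queued works finished. */
            kthread_flush_worker(worker);
            /* Shared state may now be changed without racing with works. */
    }
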
    1169                 :            : 
    1170                 :            : /**
    1171                 :            :  * kthread_destroy_worker - destroy a kthread worker
    1172                 :            :  * @worker: worker to be destroyed
    1173                 :            :  *
    1174                 :            :  * Flush and destroy @worker.  The simple flush is enough because the kthread
    1175                 :            :  * worker API is used only in trivial scenarios.  There are no multi-step state
    1176                 :            :  * machines needed.
    1177                 :            :  */
    1178                 :          0 : void kthread_destroy_worker(struct kthread_worker *worker)
    1179                 :            : {
    1180                 :          0 :         struct task_struct *task;
    1181                 :            : 
    1182                 :          0 :         task = worker->task;
    1183   [ #  #  #  # ]:          0 :         if (WARN_ON(!task))
    1184                 :            :                 return;
    1185                 :            : 
    1186                 :          0 :         kthread_flush_worker(worker);
    1187                 :          0 :         kthread_stop(task);
    1188         [ #  # ]:          0 :         WARN_ON(!list_empty(&worker->work_list));
    1189                 :          0 :         kfree(worker);
    1190                 :            : }
    1191                 :            : EXPORT_SYMBOL(kthread_destroy_worker);
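
Putting the pieces together, a hedged end-to-end sketch of the worker lifecycle, reusing the hypothetical frob_work_fn from the earlier sketch:

    #include <linux/err.h>
    #include <linux/kthread.h>

    static int demo_run(void)
    {
            struct kthread_worker *worker;
            struct kthread_work work;

            worker = kthread_create_worker(0, "demo_worker");
            if (IS_ERR(worker))
                    return PTR_ERR(worker);

            kthread_init_work(&work, frob_work_fn);
            kthread_queue_work(worker, &work);

            /* Flushes pending works, stops the kthread, frees @worker. */
            kthread_destroy_worker(worker);
            return 0;
    }
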
    1192                 :            : 
    1193                 :            : #ifdef CONFIG_BLK_CGROUP
    1194                 :            : /**
    1195                 :            :  * kthread_associate_blkcg - associate blkcg to current kthread
    1196                 :            :  * @css: the cgroup info
    1197                 :            :  *
     1198                 :            :  * The current thread must be a kthread. The thread runs jobs on behalf
     1199                 :            :  * of other threads. In some cases, we expect the jobs to use the cgroup
     1200                 :            :  * info of the original threads instead of that of the current thread.
     1201                 :            :  * This function stores the original thread's cgroup info in the current
     1202                 :            :  * kthread's context for later retrieval.
    1203                 :            :  */
    1204                 :            : void kthread_associate_blkcg(struct cgroup_subsys_state *css)
    1205                 :            : {
    1206                 :            :         struct kthread *kthread;
    1207                 :            : 
    1208                 :            :         if (!(current->flags & PF_KTHREAD))
    1209                 :            :                 return;
    1210                 :            :         kthread = to_kthread(current);
    1211                 :            :         if (!kthread)
    1212                 :            :                 return;
    1213                 :            : 
    1214                 :            :         if (kthread->blkcg_css) {
    1215                 :            :                 css_put(kthread->blkcg_css);
    1216                 :            :                 kthread->blkcg_css = NULL;
    1217                 :            :         }
    1218                 :            :         if (css) {
    1219                 :            :                 css_get(css);
    1220                 :            :                 kthread->blkcg_css = css;
    1221                 :            :         }
    1222                 :            : }
    1223                 :            : EXPORT_SYMBOL(kthread_associate_blkcg);
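
A hedged sketch of the intended bracketing: the kthread adopts the originating thread's blkcg before issuing I/O on its behalf and drops the association afterwards. Where the css comes from is an assumption of this example:

    #include <linux/cgroup.h>
    #include <linux/kthread.h>

    /* @css: blkcg css of the thread we do I/O for (obtained elsewhere). */
    static void do_io_on_behalf(struct cgroup_subsys_state *css)
    {
            kthread_associate_blkcg(css);   /* charge subsequent I/O to @css */

            /* ... submit bios; the block layer can pick the css up via
             * kthread_blkcg() ... */

            kthread_associate_blkcg(NULL);  /* drop the association */
    }
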
    1224                 :            : 
    1225                 :            : /**
    1226                 :            :  * kthread_blkcg - get associated blkcg css of current kthread
    1227                 :            :  *
    1228                 :            :  * Current thread must be a kthread.
    1229                 :            :  */
    1230                 :            : struct cgroup_subsys_state *kthread_blkcg(void)
    1231                 :            : {
    1232                 :            :         struct kthread *kthread;
    1233                 :            : 
    1234                 :            :         if (current->flags & PF_KTHREAD) {
    1235                 :            :                 kthread = to_kthread(current);
    1236                 :            :                 if (kthread)
    1237                 :            :                         return kthread->blkcg_css;
    1238                 :            :         }
    1239                 :            :         return NULL;
    1240                 :            : }
    1241                 :            : EXPORT_SYMBOL(kthread_blkcg);
    1242                 :            : #endif

Generated by: LCOV version 1.14