
TOMOYO Linux Cross Reference
Linux/kernel/workqueue.c

Diff markup

Differences between /kernel/workqueue.c (Version linux-6.11.5) and /kernel/workqueue.c (Version linux-6.0.19)


  1 // SPDX-License-Identifier: GPL-2.0-only            1 // SPDX-License-Identifier: GPL-2.0-only
  2 /*                                                  2 /*
  3  * kernel/workqueue.c - generic async executio      3  * kernel/workqueue.c - generic async execution with shared worker pool
  4  *                                                  4  *
  5  * Copyright (C) 2002           Ingo Molnar         5  * Copyright (C) 2002           Ingo Molnar
  6  *                                                  6  *
  7  *   Derived from the taskqueue/keventd code b      7  *   Derived from the taskqueue/keventd code by:
  8  *     David Woodhouse <dwmw2@infradead.org>        8  *     David Woodhouse <dwmw2@infradead.org>
  9  *     Andrew Morton                                9  *     Andrew Morton
 10  *     Kai Petzke <wpp@marie.physik.tu-berlin.     10  *     Kai Petzke <wpp@marie.physik.tu-berlin.de>
 11  *     Theodore Ts'o <tytso@mit.edu>               11  *     Theodore Ts'o <tytso@mit.edu>
 12  *                                                 12  *
 13  * Made to use alloc_percpu by Christoph Lamet     13  * Made to use alloc_percpu by Christoph Lameter.
 14  *                                                 14  *
 15  * Copyright (C) 2010           SUSE Linux Pro     15  * Copyright (C) 2010           SUSE Linux Products GmbH
 16  * Copyright (C) 2010           Tejun Heo <tj@     16  * Copyright (C) 2010           Tejun Heo <tj@kernel.org>
 17  *                                                 17  *
 18  * This is the generic async execution mechani     18  * This is the generic async execution mechanism.  Work items as are
 19  * executed in process context.  The worker po     19  * executed in process context.  The worker pool is shared and
 20  * automatically managed.  There are two worke     20  * automatically managed.  There are two worker pools for each CPU (one for
 21  * normal work items and the other for high pr     21  * normal work items and the other for high priority ones) and some extra
 22  * pools for workqueues which are not bound to     22  * pools for workqueues which are not bound to any specific CPU - the
 23  * number of these backing pools is dynamic.       23  * number of these backing pools is dynamic.
 24  *                                                 24  *
 25  * Please read Documentation/core-api/workqueu     25  * Please read Documentation/core-api/workqueue.rst for details.
 26  */                                                26  */
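As a quick illustration (not part of workqueue.c or this diff): the header comment above describes the contract this file implements — work items are queued from almost any context and later run in process context on a shared, automatically managed worker pool. Below is a minimal sketch of that API from a module's point of view; the module, function, and work-item names are illustrative only.

#include <linux/module.h>
#include <linux/workqueue.h>

/* runs later in process context on one of the shared worker pools */
static void example_work_fn(struct work_struct *work)
{
        pr_info("example work item executed\n");
}

static DECLARE_WORK(example_work, example_work_fn);

static int __init example_init(void)
{
        /* hand the item to the system workqueue and return immediately */
        schedule_work(&example_work);
        return 0;
}

static void __exit example_exit(void)
{
        /* ensure the item has finished (or is cancelled) before unload */
        cancel_work_sync(&example_work);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");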
 27                                                    27 
 28 #include <linux/export.h>                          28 #include <linux/export.h>
 29 #include <linux/kernel.h>                          29 #include <linux/kernel.h>
 30 #include <linux/sched.h>                           30 #include <linux/sched.h>
 31 #include <linux/init.h>                            31 #include <linux/init.h>
 32 #include <linux/interrupt.h>                   << 
 33 #include <linux/signal.h>                          32 #include <linux/signal.h>
 34 #include <linux/completion.h>                      33 #include <linux/completion.h>
 35 #include <linux/workqueue.h>                       34 #include <linux/workqueue.h>
 36 #include <linux/slab.h>                            35 #include <linux/slab.h>
 37 #include <linux/cpu.h>                             36 #include <linux/cpu.h>
 38 #include <linux/notifier.h>                        37 #include <linux/notifier.h>
 39 #include <linux/kthread.h>                         38 #include <linux/kthread.h>
 40 #include <linux/hardirq.h>                         39 #include <linux/hardirq.h>
 41 #include <linux/mempolicy.h>                       40 #include <linux/mempolicy.h>
 42 #include <linux/freezer.h>                         41 #include <linux/freezer.h>
 43 #include <linux/debug_locks.h>                     42 #include <linux/debug_locks.h>
 44 #include <linux/lockdep.h>                         43 #include <linux/lockdep.h>
 45 #include <linux/idr.h>                             44 #include <linux/idr.h>
 46 #include <linux/jhash.h>                           45 #include <linux/jhash.h>
 47 #include <linux/hashtable.h>                       46 #include <linux/hashtable.h>
 48 #include <linux/rculist.h>                         47 #include <linux/rculist.h>
 49 #include <linux/nodemask.h>                        48 #include <linux/nodemask.h>
 50 #include <linux/moduleparam.h>                     49 #include <linux/moduleparam.h>
 51 #include <linux/uaccess.h>                         50 #include <linux/uaccess.h>
 52 #include <linux/sched/isolation.h>                 51 #include <linux/sched/isolation.h>
 53 #include <linux/sched/debug.h>                 << 
 54 #include <linux/nmi.h>                             52 #include <linux/nmi.h>
 55 #include <linux/kvm_para.h>                        53 #include <linux/kvm_para.h>
 56 #include <linux/delay.h>                       << 
 57 #include <linux/irq_work.h>                    << 
 58                                                    54 
 59 #include "workqueue_internal.h"                    55 #include "workqueue_internal.h"
 60                                                    56 
 61 enum worker_pool_flags {                       !!  57 enum {
 62         /*                                         58         /*
 63          * worker_pool flags                       59          * worker_pool flags
 64          *                                         60          *
 65          * A bound pool is either associated o     61          * A bound pool is either associated or disassociated with its CPU.
 66          * While associated (!DISASSOCIATED),      62          * While associated (!DISASSOCIATED), all workers are bound to the
 67          * CPU and none has %WORKER_UNBOUND se     63          * CPU and none has %WORKER_UNBOUND set and concurrency management
 68          * is in effect.                           64          * is in effect.
 69          *                                         65          *
 70          * While DISASSOCIATED, the cpu may be     66          * While DISASSOCIATED, the cpu may be offline and all workers have
 71          * %WORKER_UNBOUND set and concurrency     67          * %WORKER_UNBOUND set and concurrency management disabled, and may
 72          * be executing on any CPU.  The pool      68          * be executing on any CPU.  The pool behaves as an unbound one.
 73          *                                         69          *
 74          * Note that DISASSOCIATED should be f     70          * Note that DISASSOCIATED should be flipped only while holding
 75          * wq_pool_attach_mutex to avoid chang     71          * wq_pool_attach_mutex to avoid changing binding state while
 76          * worker_attach_to_pool() is in progr     72          * worker_attach_to_pool() is in progress.
 77          *                                     << 
 78          * As there can only be one concurrent << 
 79          * BH pool is per-CPU and always DISAS << 
 80          */                                        73          */
 81         POOL_BH                 = 1 << 0,      !!  74         POOL_MANAGER_ACTIVE     = 1 << 0,       /* being managed */
 82         POOL_MANAGER_ACTIVE     = 1 << 1,      << 
 83         POOL_DISASSOCIATED      = 1 << 2,          75         POOL_DISASSOCIATED      = 1 << 2,       /* cpu can't serve workers */
 84         POOL_BH_DRAINING        = 1 << 3,      << 
 85 };                                             << 
 86                                                    76 
 87 enum worker_flags {                            << 
 88         /* worker flags */                         77         /* worker flags */
 89         WORKER_DIE              = 1 << 1,          78         WORKER_DIE              = 1 << 1,       /* die die die */
 90         WORKER_IDLE             = 1 << 2,          79         WORKER_IDLE             = 1 << 2,       /* is idle */
 91         WORKER_PREP             = 1 << 3,          80         WORKER_PREP             = 1 << 3,       /* preparing to run works */
 92         WORKER_CPU_INTENSIVE    = 1 << 6,          81         WORKER_CPU_INTENSIVE    = 1 << 6,       /* cpu intensive */
 93         WORKER_UNBOUND          = 1 << 7,          82         WORKER_UNBOUND          = 1 << 7,       /* worker is unbound */
 94         WORKER_REBOUND          = 1 << 8,          83         WORKER_REBOUND          = 1 << 8,       /* worker was rebound */
 95                                                    84 
 96         WORKER_NOT_RUNNING      = WORKER_PREP      85         WORKER_NOT_RUNNING      = WORKER_PREP | WORKER_CPU_INTENSIVE |
 97                                   WORKER_UNBOU     86                                   WORKER_UNBOUND | WORKER_REBOUND,
 98 };                                             << 
 99                                                << 
100 enum work_cancel_flags {                       << 
101         WORK_CANCEL_DELAYED     = 1 << 0,      << 
102         WORK_CANCEL_DISABLE     = 1 << 1,      << 
103 };                                             << 
104                                                    87 
105 enum wq_internal_consts {                      << 
106         NR_STD_WORKER_POOLS     = 2,               88         NR_STD_WORKER_POOLS     = 2,            /* # standard pools per cpu */
107                                                    89 
108         UNBOUND_POOL_HASH_ORDER = 6,               90         UNBOUND_POOL_HASH_ORDER = 6,            /* hashed by pool->attrs */
109         BUSY_WORKER_HASH_ORDER  = 6,               91         BUSY_WORKER_HASH_ORDER  = 6,            /* 64 pointers */
110                                                    92 
111         MAX_IDLE_WORKERS_RATIO  = 4,               93         MAX_IDLE_WORKERS_RATIO  = 4,            /* 1/4 of busy can be idle */
112         IDLE_WORKER_TIMEOUT     = 300 * HZ,        94         IDLE_WORKER_TIMEOUT     = 300 * HZ,     /* keep idle ones for 5 mins */
113                                                    95 
114         MAYDAY_INITIAL_TIMEOUT  = HZ / 100 >=      96         MAYDAY_INITIAL_TIMEOUT  = HZ / 100 >= 2 ? HZ / 100 : 2,
115                                                    97                                                 /* call for help after 10ms
116                                                    98                                                    (min two ticks) */
117         MAYDAY_INTERVAL         = HZ / 10,         99         MAYDAY_INTERVAL         = HZ / 10,      /* and then every 100ms */
118         CREATE_COOLDOWN         = HZ,             100         CREATE_COOLDOWN         = HZ,           /* time to breath after fail */
119                                                   101 
120         /*                                        102         /*
121          * Rescue workers are used only on eme    103          * Rescue workers are used only on emergencies and shared by
122          * all cpus.  Give MIN_NICE.              104          * all cpus.  Give MIN_NICE.
123          */                                       105          */
124         RESCUER_NICE_LEVEL      = MIN_NICE,       106         RESCUER_NICE_LEVEL      = MIN_NICE,
125         HIGHPRI_NICE_LEVEL      = MIN_NICE,       107         HIGHPRI_NICE_LEVEL      = MIN_NICE,
126                                                   108 
127         WQ_NAME_LEN             = 32,          !! 109         WQ_NAME_LEN             = 24,
128         WORKER_ID_LEN           = 10 + WQ_NAME << 
129 };                                                110 };
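As a worked example of the MAYDAY_INITIAL_TIMEOUT expression above (not part of the diff): with HZ=1000, HZ/100 is 10 ticks, i.e. the intended 10ms, and passes the >= 2 test; with HZ=100, HZ/100 is only 1 tick, so the expression clamps to the 2-tick minimum, which is 20ms at that HZ.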
130                                                   111 
131 /*                                                112 /*
132  * We don't want to trap softirq for too long. << 
133  * MAX_SOFTIRQ_RESTART in kernel/softirq.c. Th << 
134  * msecs_to_jiffies() can't be an initializer. << 
135  */                                            << 
136 #define BH_WORKER_JIFFIES       msecs_to_jiffi << 
137 #define BH_WORKER_RESTARTS      10             << 
138                                                << 
139 /*                                             << 
140  * Structure fields follow one of the followin    113  * Structure fields follow one of the following exclusion rules.
141  *                                                114  *
142  * I: Modifiable by initialization/destruction    115  * I: Modifiable by initialization/destruction paths and read-only for
143  *    everyone else.                              116  *    everyone else.
144  *                                                117  *
145  * P: Preemption protected.  Disabling preempt    118  * P: Preemption protected.  Disabling preemption is enough and should
146  *    only be modified and accessed from the l    119  *    only be modified and accessed from the local cpu.
147  *                                                120  *
148  * L: pool->lock protected.  Access with pool-    121  * L: pool->lock protected.  Access with pool->lock held.
149  *                                                122  *
150  * LN: pool->lock and wq_node_nr_active->lock  !! 123  * X: During normal operation, modification requires pool->lock and should
151  *     reads.                                  !! 124  *    be done only from local cpu.  Either disabling preemption on local
152  *                                             !! 125  *    cpu or grabbing pool->lock is enough for read access.  If
153  * K: Only modified by worker while holding po !! 126  *    POOL_DISASSOCIATED is set, it's identical to L.
154  *    self, while holding pool->lock or from I << 
155  *    kworker.                                 << 
156  *                                             << 
157  * S: Only modified by worker self.            << 
158  *                                                127  *
159  * A: wq_pool_attach_mutex protected.             128  * A: wq_pool_attach_mutex protected.
160  *                                                129  *
161  * PL: wq_pool_mutex protected.                   130  * PL: wq_pool_mutex protected.
162  *                                                131  *
163  * PR: wq_pool_mutex protected for writes.  RC    132  * PR: wq_pool_mutex protected for writes.  RCU protected for reads.
164  *                                                133  *
165  * PW: wq_pool_mutex and wq->mutex protected f    134  * PW: wq_pool_mutex and wq->mutex protected for writes.  Either for reads.
166  *                                                135  *
167  * PWR: wq_pool_mutex and wq->mutex protected     136  * PWR: wq_pool_mutex and wq->mutex protected for writes.  Either or
168  *      RCU for reads.                            137  *      RCU for reads.
169  *                                                138  *
170  * WQ: wq->mutex protected.                       139  * WQ: wq->mutex protected.
171  *                                                140  *
172  * WR: wq->mutex protected for writes.  RCU pr    141  * WR: wq->mutex protected for writes.  RCU protected for reads.
173  *                                                142  *
174  * WO: wq->mutex protected for writes. Updated << 
175  *     with READ_ONCE() without locking.       << 
176  *                                             << 
177  * MD: wq_mayday_lock protected.                  143  * MD: wq_mayday_lock protected.
178  *                                             << 
179  * WD: Used internally by the watchdog.        << 
180  */                                               144  */
181                                                   145 
182 /* struct worker is defined in workqueue_inter    146 /* struct worker is defined in workqueue_internal.h */
183                                                   147 
184 struct worker_pool {                              148 struct worker_pool {
185         raw_spinlock_t          lock;             149         raw_spinlock_t          lock;           /* the pool lock */
186         int                     cpu;              150         int                     cpu;            /* I: the associated cpu */
187         int                     node;             151         int                     node;           /* I: the associated node ID */
188         int                     id;               152         int                     id;             /* I: pool ID */
189         unsigned int            flags;         !! 153         unsigned int            flags;          /* X: flags */
190                                                   154 
191         unsigned long           watchdog_ts;      155         unsigned long           watchdog_ts;    /* L: watchdog timestamp */
192         bool                    cpu_stall;     << 
193                                                   156 
194         /*                                        157         /*
195          * The counter is incremented in a pro    158          * The counter is incremented in a process context on the associated CPU
196          * w/ preemption disabled, and decreme    159          * w/ preemption disabled, and decremented or reset in the same context
197          * but w/ pool->lock held. The readers    160          * but w/ pool->lock held. The readers grab pool->lock and are
198          * guaranteed to see if the counter re    161          * guaranteed to see if the counter reached zero.
199          */                                       162          */
200         int                     nr_running;       163         int                     nr_running;
201                                                   164 
202         struct list_head        worklist;         165         struct list_head        worklist;       /* L: list of pending works */
203                                                   166 
204         int                     nr_workers;       167         int                     nr_workers;     /* L: total number of workers */
205         int                     nr_idle;          168         int                     nr_idle;        /* L: currently idle workers */
206                                                   169 
207         struct list_head        idle_list;        170         struct list_head        idle_list;      /* L: list of idle workers */
208         struct timer_list       idle_timer;       171         struct timer_list       idle_timer;     /* L: worker idle timeout */
209         struct work_struct      idle_cull_work !! 172         struct timer_list       mayday_timer;   /* L: SOS timer for workers */
210                                                << 
211         struct timer_list       mayday_timer;  << 
212                                                   173 
213         /* a workers is either on busy_hash or    174         /* a workers is either on busy_hash or idle_list, or the manager */
214         DECLARE_HASHTABLE(busy_hash, BUSY_WORK    175         DECLARE_HASHTABLE(busy_hash, BUSY_WORKER_HASH_ORDER);
215                                                   176                                                 /* L: hash of busy workers */
216                                                   177 
217         struct worker           *manager;         178         struct worker           *manager;       /* L: purely informational */
218         struct list_head        workers;          179         struct list_head        workers;        /* A: attached workers */
                                                   >> 180         struct completion       *detach_completion; /* all workers detached */
219                                                   181 
220         struct ida              worker_ida;       182         struct ida              worker_ida;     /* worker IDs for task name */
221                                                   183 
222         struct workqueue_attrs  *attrs;           184         struct workqueue_attrs  *attrs;         /* I: worker attributes */
223         struct hlist_node       hash_node;        185         struct hlist_node       hash_node;      /* PL: unbound_pool_hash node */
224         int                     refcnt;           186         int                     refcnt;         /* PL: refcnt for unbound pools */
225                                                   187 
226         /*                                        188         /*
227          * Destruction of pool is RCU protecte    189          * Destruction of pool is RCU protected to allow dereferences
228          * from get_work_pool().                  190          * from get_work_pool().
229          */                                       191          */
230         struct rcu_head         rcu;              192         struct rcu_head         rcu;
231 };                                                193 };
232                                                   194 
233 /*                                                195 /*
234  * Per-pool_workqueue statistics. These can be !! 196  * The per-pool workqueue.  While queued, the lower WORK_STRUCT_FLAG_BITS
235  * tools/workqueue/wq_monitor.py.              << 
236  */                                            << 
237 enum pool_workqueue_stats {                    << 
238         PWQ_STAT_STARTED,       /* work items  << 
239         PWQ_STAT_COMPLETED,     /* work items  << 
240         PWQ_STAT_CPU_TIME,      /* total CPU t << 
241         PWQ_STAT_CPU_INTENSIVE, /* wq_cpu_inte << 
242         PWQ_STAT_CM_WAKEUP,     /* concurrency << 
243         PWQ_STAT_REPATRIATED,   /* unbound wor << 
244         PWQ_STAT_MAYDAY,        /* maydays to  << 
245         PWQ_STAT_RESCUED,       /* linked work << 
246                                                << 
247         PWQ_NR_STATS,                          << 
248 };                                             << 
249                                                << 
250 /*                                             << 
251  * The per-pool workqueue.  While queued, bits << 
252  * of work_struct->data are used for flags and    197  * of work_struct->data are used for flags and the remaining high bits
253  * point to the pwq; thus, pwqs need to be ali    198  * point to the pwq; thus, pwqs need to be aligned at two's power of the
254  * number of flag bits.                           199  * number of flag bits.
255  */                                               200  */
256 struct pool_workqueue {                           201 struct pool_workqueue {
257         struct worker_pool      *pool;            202         struct worker_pool      *pool;          /* I: the associated pool */
258         struct workqueue_struct *wq;              203         struct workqueue_struct *wq;            /* I: the owning workqueue */
259         int                     work_color;       204         int                     work_color;     /* L: current color */
260         int                     flush_color;      205         int                     flush_color;    /* L: flushing color */
261         int                     refcnt;           206         int                     refcnt;         /* L: reference count */
262         int                     nr_in_flight[W    207         int                     nr_in_flight[WORK_NR_COLORS];
263                                                   208                                                 /* L: nr of in_flight works */
264         bool                    plugged;       << 
265                                                   209 
266         /*                                        210         /*
267          * nr_active management and WORK_STRUC    211          * nr_active management and WORK_STRUCT_INACTIVE:
268          *                                        212          *
269          * When pwq->nr_active >= max_active,     213          * When pwq->nr_active >= max_active, new work item is queued to
270          * pwq->inactive_works instead of pool    214          * pwq->inactive_works instead of pool->worklist and marked with
271          * WORK_STRUCT_INACTIVE.                  215          * WORK_STRUCT_INACTIVE.
272          *                                        216          *
273          * All work items marked with WORK_STR !! 217          * All work items marked with WORK_STRUCT_INACTIVE do not participate
274          * nr_active and all work items in pwq !! 218          * in pwq->nr_active and all work items in pwq->inactive_works are
275          * WORK_STRUCT_INACTIVE. But not all W !! 219          * marked with WORK_STRUCT_INACTIVE.  But not all WORK_STRUCT_INACTIVE
276          * in pwq->inactive_works. Some of the !! 220          * work items are in pwq->inactive_works.  Some of them are ready to
277          * pool->worklist or worker->scheduled !! 221          * run in pool->worklist or worker->scheduled.  Those work itmes are
278          * wq_barrier which is used for flush_ !! 222          * only struct wq_barrier which is used for flush_work() and should
279          * in nr_active. For non-barrier work  !! 223          * not participate in pwq->nr_active.  For non-barrier work item, it
280          * WORK_STRUCT_INACTIVE iff it is in p !! 224          * is marked with WORK_STRUCT_INACTIVE iff it is in pwq->inactive_works.
281          */                                       225          */
282         int                     nr_active;        226         int                     nr_active;      /* L: nr of active works */
                                                   >> 227         int                     max_active;     /* L: max active works */
283         struct list_head        inactive_works    228         struct list_head        inactive_works; /* L: inactive works */
284         struct list_head        pending_node;  << 
285         struct list_head        pwqs_node;        229         struct list_head        pwqs_node;      /* WR: node on wq->pwqs */
286         struct list_head        mayday_node;      230         struct list_head        mayday_node;    /* MD: node on wq->maydays */
287                                                   231 
288         u64                     stats[PWQ_NR_S << 
289                                                << 
290         /*                                        232         /*
291          * Release of unbound pwq is punted to !! 233          * Release of unbound pwq is punted to system_wq.  See put_pwq()
292          * and pwq_release_workfn() for detail !! 234          * and pwq_unbound_release_workfn() for details.  pool_workqueue
293          * RCU protected so that the first pwq !! 235          * itself is also RCU protected so that the first pwq can be
294          * grabbing wq->mutex.                 !! 236          * determined without grabbing wq->mutex.
295          */                                       237          */
296         struct kthread_work     release_work;  !! 238         struct work_struct      unbound_release_work;
297         struct rcu_head         rcu;              239         struct rcu_head         rcu;
298 } __aligned(1 << WORK_STRUCT_PWQ_SHIFT);       !! 240 } __aligned(1 << WORK_STRUCT_FLAG_BITS);
299                                                   241 
300 /*                                                242 /*
301  * Structure used to wait for workqueue flush.    243  * Structure used to wait for workqueue flush.
302  */                                               244  */
303 struct wq_flusher {                               245 struct wq_flusher {
304         struct list_head        list;             246         struct list_head        list;           /* WQ: list of flushers */
305         int                     flush_color;      247         int                     flush_color;    /* WQ: flush color waiting for */
306         struct completion       done;             248         struct completion       done;           /* flush completion */
307 };                                                249 };
308                                                   250 
309 struct wq_device;                                 251 struct wq_device;
310                                                   252 
311 /*                                                253 /*
312  * Unlike in a per-cpu workqueue where max_act << 
313  * on each CPU, in an unbound workqueue, max_a << 
314  * As sharing a single nr_active across multip << 
315  * the counting and enforcement is per NUMA no << 
316  *                                             << 
317  * The following struct is used to enforce per << 
318  * to start executing a work item, it should i << 
319  * tryinc_node_nr_active(). If acquisition fai << 
320  * ->max, the pwq is queued on ->pending_pwqs. << 
321  * and decrement ->nr, node_activate_pending_p << 
322  * round-robin order.                          << 
323  */                                            << 
324 struct wq_node_nr_active {                     << 
325         int                     max;           << 
326         atomic_t                nr;            << 
327         raw_spinlock_t          lock;          << 
328         struct list_head        pending_pwqs;  << 
329 };                                             << 
330                                                << 
331 /*                                             << 
332  * The externally visible workqueue.  It relay    254  * The externally visible workqueue.  It relays the issued work items to
333  * the appropriate worker_pool through its poo    255  * the appropriate worker_pool through its pool_workqueues.
334  */                                               256  */
335 struct workqueue_struct {                         257 struct workqueue_struct {
336         struct list_head        pwqs;             258         struct list_head        pwqs;           /* WR: all pwqs of this wq */
337         struct list_head        list;             259         struct list_head        list;           /* PR: list of all workqueues */
338                                                   260 
339         struct mutex            mutex;            261         struct mutex            mutex;          /* protects this wq */
340         int                     work_color;       262         int                     work_color;     /* WQ: current work color */
341         int                     flush_color;      263         int                     flush_color;    /* WQ: current flush color */
342         atomic_t                nr_pwqs_to_flu    264         atomic_t                nr_pwqs_to_flush; /* flush in progress */
343         struct wq_flusher       *first_flusher    265         struct wq_flusher       *first_flusher; /* WQ: first flusher */
344         struct list_head        flusher_queue;    266         struct list_head        flusher_queue;  /* WQ: flush waiters */
345         struct list_head        flusher_overfl    267         struct list_head        flusher_overflow; /* WQ: flush overflow list */
346                                                   268 
347         struct list_head        maydays;          269         struct list_head        maydays;        /* MD: pwqs requesting rescue */
348         struct worker           *rescuer;         270         struct worker           *rescuer;       /* MD: rescue worker */
349                                                   271 
350         int                     nr_drainers;      272         int                     nr_drainers;    /* WQ: drain in progress */
351                                                !! 273         int                     saved_max_active; /* WQ: saved pwq max_active */
352         /* See alloc_workqueue() function comm << 
353         int                     max_active;    << 
354         int                     min_active;    << 
355         int                     saved_max_acti << 
356         int                     saved_min_acti << 
357                                                   274 
358         struct workqueue_attrs  *unbound_attrs    275         struct workqueue_attrs  *unbound_attrs; /* PW: only for unbound wqs */
359         struct pool_workqueue __rcu *dfl_pwq;  !! 276         struct pool_workqueue   *dfl_pwq;       /* PW: only for unbound wqs */
360                                                   277 
361 #ifdef CONFIG_SYSFS                               278 #ifdef CONFIG_SYSFS
362         struct wq_device        *wq_dev;          279         struct wq_device        *wq_dev;        /* I: for sysfs interface */
363 #endif                                            280 #endif
364 #ifdef CONFIG_LOCKDEP                             281 #ifdef CONFIG_LOCKDEP
365         char                    *lock_name;       282         char                    *lock_name;
366         struct lock_class_key   key;              283         struct lock_class_key   key;
367         struct lockdep_map      lockdep_map;      284         struct lockdep_map      lockdep_map;
368 #endif                                            285 #endif
369         char                    name[WQ_NAME_L    286         char                    name[WQ_NAME_LEN]; /* I: workqueue name */
370                                                   287 
371         /*                                        288         /*
372          * Destruction of workqueue_struct is     289          * Destruction of workqueue_struct is RCU protected to allow walking
373          * the workqueues list without grabbin    290          * the workqueues list without grabbing wq_pool_mutex.
374          * This is used to dump all workqueues    291          * This is used to dump all workqueues from sysrq.
375          */                                       292          */
376         struct rcu_head         rcu;              293         struct rcu_head         rcu;
377                                                   294 
378         /* hot fields used during command issu    295         /* hot fields used during command issue, aligned to cacheline */
379         unsigned int            flags ____cach    296         unsigned int            flags ____cacheline_aligned; /* WQ: WQ_* flags */
380         struct pool_workqueue __rcu * __percpu !! 297         struct pool_workqueue __percpu *cpu_pwqs; /* I: per-cpu pwqs */
381         struct wq_node_nr_active *node_nr_acti !! 298         struct pool_workqueue __rcu *numa_pwq_tbl[]; /* PWR: unbound pwqs indexed by node */
382 };                                                299 };
383                                                   300 
384 /*                                             !! 301 static struct kmem_cache *pwq_cache;
385  * Each pod type describes how CPUs should be  << 
386  * See the comment above workqueue_attrs->affn << 
387  */                                            << 
388 struct wq_pod_type {                           << 
389         int                     nr_pods;       << 
390         cpumask_var_t           *pod_cpus;     << 
391         int                     *pod_node;     << 
392         int                     *cpu_pod;      << 
393 };                                             << 
394                                                   302 
395 struct work_offq_data {                        !! 303 static cpumask_var_t *wq_numa_possible_cpumask;
396         u32                     pool_id;       !! 304                                         /* possible CPUs of each node */
397         u32                     disable;       << 
398         u32                     flags;         << 
399 };                                             << 
400                                                   305 
401 static const char *wq_affn_names[WQ_AFFN_NR_TY !! 306 static bool wq_disable_numa;
402         [WQ_AFFN_DFL]           = "default",   !! 307 module_param_named(disable_numa, wq_disable_numa, bool, 0444);
403         [WQ_AFFN_CPU]           = "cpu",       << 
404         [WQ_AFFN_SMT]           = "smt",       << 
405         [WQ_AFFN_CACHE]         = "cache",     << 
406         [WQ_AFFN_NUMA]          = "numa",      << 
407         [WQ_AFFN_SYSTEM]        = "system",    << 
408 };                                             << 
409                                                << 
410 /*                                             << 
411  * Per-cpu work items which run for longer tha << 
412  * automatically considered CPU intensive and  << 
413  * management to prevent them from noticeably  << 
414  * ULONG_MAX indicates that the user hasn't ov << 
415  * The actual value is initialized in wq_cpu_i << 
416  */                                            << 
417 static unsigned long wq_cpu_intensive_thresh_u << 
418 module_param_named(cpu_intensive_thresh_us, wq << 
419 #ifdef CONFIG_WQ_CPU_INTENSIVE_REPORT          << 
420 static unsigned int wq_cpu_intensive_warning_t << 
421 module_param_named(cpu_intensive_warning_thres << 
422 #endif                                         << 
423                                                   308 
424 /* see the comment above the definition of WQ_    309 /* see the comment above the definition of WQ_POWER_EFFICIENT */
425 static bool wq_power_efficient = IS_ENABLED(CO    310 static bool wq_power_efficient = IS_ENABLED(CONFIG_WQ_POWER_EFFICIENT_DEFAULT);
426 module_param_named(power_efficient, wq_power_e    311 module_param_named(power_efficient, wq_power_efficient, bool, 0444);
427                                                   312 
428 static bool wq_online;                  /* can    313 static bool wq_online;                  /* can kworkers be created yet? */
429 static bool wq_topo_initialized __read_mostly  << 
430                                                   314 
431 static struct kmem_cache *pwq_cache;           !! 315 static bool wq_numa_enabled;            /* unbound NUMA affinity enabled */
432                                                   316 
433 static struct wq_pod_type wq_pod_types[WQ_AFFN !! 317 /* buf for wq_update_unbound_numa_attrs(), protected by CPU hotplug exclusion */
434 static enum wq_affn_scope wq_affn_dfl = WQ_AFF !! 318 static struct workqueue_attrs *wq_update_unbound_numa_attrs_buf;
435                                                << 
436 /* buf for wq_update_unbound_pod_attrs(), prot << 
437 static struct workqueue_attrs *unbound_wq_upda << 
438                                                   319 
439 static DEFINE_MUTEX(wq_pool_mutex);     /* pro    320 static DEFINE_MUTEX(wq_pool_mutex);     /* protects pools and workqueues list */
440 static DEFINE_MUTEX(wq_pool_attach_mutex); /*     321 static DEFINE_MUTEX(wq_pool_attach_mutex); /* protects worker attach/detach */
441 static DEFINE_RAW_SPINLOCK(wq_mayday_lock);       322 static DEFINE_RAW_SPINLOCK(wq_mayday_lock);     /* protects wq->maydays list */
442 /* wait for manager to go away */                 323 /* wait for manager to go away */
443 static struct rcuwait manager_wait = __RCUWAIT    324 static struct rcuwait manager_wait = __RCUWAIT_INITIALIZER(manager_wait);
444                                                   325 
445 static LIST_HEAD(workqueues);           /* PR:    326 static LIST_HEAD(workqueues);           /* PR: list of all workqueues */
446 static bool workqueue_freezing;         /* PL:    327 static bool workqueue_freezing;         /* PL: have wqs started freezing? */
447                                                   328 
448 /* PL: mirror the cpu_online_mask excluding th !! 329 /* PL: allowable cpus for unbound wqs and work items */
449 static cpumask_var_t wq_online_cpumask;        << 
450                                                << 
451 /* PL&A: allowable cpus for unbound wqs and wo << 
452 static cpumask_var_t wq_unbound_cpumask;          330 static cpumask_var_t wq_unbound_cpumask;
453                                                   331 
454 /* PL: user requested unbound cpumask via sysf << 
455 static cpumask_var_t wq_requested_unbound_cpum << 
456                                                << 
457 /* PL: isolated cpumask to be excluded from un << 
458 static cpumask_var_t wq_isolated_cpumask;      << 
459                                                << 
460 /* for further constrain wq_unbound_cpumask by << 
461 static struct cpumask wq_cmdline_cpumask __ini << 
462                                                << 
463 /* CPU where unbound work was last round robin    332 /* CPU where unbound work was last round robin scheduled from this CPU */
464 static DEFINE_PER_CPU(int, wq_rr_cpu_last);       333 static DEFINE_PER_CPU(int, wq_rr_cpu_last);
465                                                   334 
466 /*                                                335 /*
467  * Local execution of unbound work items is no    336  * Local execution of unbound work items is no longer guaranteed.  The
468  * following always forces round-robin CPU sel    337  * following always forces round-robin CPU selection on unbound work items
469  * to uncover usages which depend on it.          338  * to uncover usages which depend on it.
470  */                                               339  */
471 #ifdef CONFIG_DEBUG_WQ_FORCE_RR_CPU               340 #ifdef CONFIG_DEBUG_WQ_FORCE_RR_CPU
472 static bool wq_debug_force_rr_cpu = true;         341 static bool wq_debug_force_rr_cpu = true;
473 #else                                             342 #else
474 static bool wq_debug_force_rr_cpu = false;        343 static bool wq_debug_force_rr_cpu = false;
475 #endif                                            344 #endif
476 module_param_named(debug_force_rr_cpu, wq_debu    345 module_param_named(debug_force_rr_cpu, wq_debug_force_rr_cpu, bool, 0644);
477                                                   346 
478 /* to raise softirq for the BH worker pools on << 
479 static DEFINE_PER_CPU_SHARED_ALIGNED(struct ir << 
480                                      bh_pool_i << 
481                                                << 
482 /* the BH worker pools */                      << 
483 static DEFINE_PER_CPU_SHARED_ALIGNED(struct wo << 
484                                      bh_worker << 
485                                                << 
486 /* the per-cpu worker pools */                    347 /* the per-cpu worker pools */
487 static DEFINE_PER_CPU_SHARED_ALIGNED(struct wo !! 348 static DEFINE_PER_CPU_SHARED_ALIGNED(struct worker_pool [NR_STD_WORKER_POOLS], cpu_worker_pools);
488                                      cpu_worke << 
489                                                   349 
490 static DEFINE_IDR(worker_pool_idr);     /* PR:    350 static DEFINE_IDR(worker_pool_idr);     /* PR: idr of all pools */
491                                                   351 
492 /* PL: hash of all unbound pools keyed by pool    352 /* PL: hash of all unbound pools keyed by pool->attrs */
493 static DEFINE_HASHTABLE(unbound_pool_hash, UNB    353 static DEFINE_HASHTABLE(unbound_pool_hash, UNBOUND_POOL_HASH_ORDER);
494                                                   354 
495 /* I: attributes used when instantiating stand    355 /* I: attributes used when instantiating standard unbound pools on demand */
496 static struct workqueue_attrs *unbound_std_wq_    356 static struct workqueue_attrs *unbound_std_wq_attrs[NR_STD_WORKER_POOLS];
497                                                   357 
498 /* I: attributes used when instantiating order    358 /* I: attributes used when instantiating ordered pools on demand */
499 static struct workqueue_attrs *ordered_wq_attr    359 static struct workqueue_attrs *ordered_wq_attrs[NR_STD_WORKER_POOLS];
500                                                   360 
501 /*                                             !! 361 struct workqueue_struct *system_wq __read_mostly;
502  * I: kthread_worker to release pwq's. pwq rel << 
503  * process context while holding a pool lock.  << 
504  * worker to avoid A-A deadlocks.              << 
505  */                                            << 
506 static struct kthread_worker *pwq_release_work << 
507                                                << 
508 struct workqueue_struct *system_wq __ro_after_ << 
509 EXPORT_SYMBOL(system_wq);                         362 EXPORT_SYMBOL(system_wq);
510 struct workqueue_struct *system_highpri_wq __r !! 363 struct workqueue_struct *system_highpri_wq __read_mostly;
511 EXPORT_SYMBOL_GPL(system_highpri_wq);             364 EXPORT_SYMBOL_GPL(system_highpri_wq);
512 struct workqueue_struct *system_long_wq __ro_a !! 365 struct workqueue_struct *system_long_wq __read_mostly;
513 EXPORT_SYMBOL_GPL(system_long_wq);                366 EXPORT_SYMBOL_GPL(system_long_wq);
514 struct workqueue_struct *system_unbound_wq __r !! 367 struct workqueue_struct *system_unbound_wq __read_mostly;
515 EXPORT_SYMBOL_GPL(system_unbound_wq);             368 EXPORT_SYMBOL_GPL(system_unbound_wq);
516 struct workqueue_struct *system_freezable_wq _ !! 369 struct workqueue_struct *system_freezable_wq __read_mostly;
517 EXPORT_SYMBOL_GPL(system_freezable_wq);           370 EXPORT_SYMBOL_GPL(system_freezable_wq);
518 struct workqueue_struct *system_power_efficien !! 371 struct workqueue_struct *system_power_efficient_wq __read_mostly;
519 EXPORT_SYMBOL_GPL(system_power_efficient_wq);     372 EXPORT_SYMBOL_GPL(system_power_efficient_wq);
520 struct workqueue_struct *system_freezable_powe !! 373 struct workqueue_struct *system_freezable_power_efficient_wq __read_mostly;
521 EXPORT_SYMBOL_GPL(system_freezable_power_effic    374 EXPORT_SYMBOL_GPL(system_freezable_power_efficient_wq);
522 struct workqueue_struct *system_bh_wq;         << 
523 EXPORT_SYMBOL_GPL(system_bh_wq);               << 
524 struct workqueue_struct *system_bh_highpri_wq; << 
525 EXPORT_SYMBOL_GPL(system_bh_highpri_wq);       << 
526                                                   375 
527 static int worker_thread(void *__worker);         376 static int worker_thread(void *__worker);
528 static void workqueue_sysfs_unregister(struct     377 static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
529 static void show_pwq(struct pool_workqueue *pw    378 static void show_pwq(struct pool_workqueue *pwq);
530 static void show_one_worker_pool(struct worker    379 static void show_one_worker_pool(struct worker_pool *pool);
531                                                   380 
532 #define CREATE_TRACE_POINTS                       381 #define CREATE_TRACE_POINTS
533 #include <trace/events/workqueue.h>               382 #include <trace/events/workqueue.h>
534                                                   383 
535 #define assert_rcu_or_pool_mutex()                384 #define assert_rcu_or_pool_mutex()                                      \
536         RCU_LOCKDEP_WARN(!rcu_read_lock_any_he !! 385         RCU_LOCKDEP_WARN(!rcu_read_lock_held() &&                       \
537                          !lockdep_is_held(&wq_    386                          !lockdep_is_held(&wq_pool_mutex),              \
538                          "RCU or wq_pool_mutex    387                          "RCU or wq_pool_mutex should be held")
539                                                   388 
540 #define assert_rcu_or_wq_mutex_or_pool_mutex(w    389 #define assert_rcu_or_wq_mutex_or_pool_mutex(wq)                        \
541         RCU_LOCKDEP_WARN(!rcu_read_lock_any_he !! 390         RCU_LOCKDEP_WARN(!rcu_read_lock_held() &&                       \
542                          !lockdep_is_held(&wq-    391                          !lockdep_is_held(&wq->mutex) &&                \
543                          !lockdep_is_held(&wq_    392                          !lockdep_is_held(&wq_pool_mutex),              \
544                          "RCU, wq->mutex or wq    393                          "RCU, wq->mutex or wq_pool_mutex should be held")
545                                                   394 
546 #define for_each_bh_worker_pool(pool, cpu)     << 
547         for ((pool) = &per_cpu(bh_worker_pools << 
548              (pool) < &per_cpu(bh_worker_pools << 
549              (pool)++)                         << 
550                                                << 
551 #define for_each_cpu_worker_pool(pool, cpu)       395 #define for_each_cpu_worker_pool(pool, cpu)                             \
552         for ((pool) = &per_cpu(cpu_worker_pool    396         for ((pool) = &per_cpu(cpu_worker_pools, cpu)[0];               \
553              (pool) < &per_cpu(cpu_worker_pool    397              (pool) < &per_cpu(cpu_worker_pools, cpu)[NR_STD_WORKER_POOLS]; \
554              (pool)++)                            398              (pool)++)
555                                                   399 
556 /**                                               400 /**
557  * for_each_pool - iterate through all worker_    401  * for_each_pool - iterate through all worker_pools in the system
558  * @pool: iteration cursor                        402  * @pool: iteration cursor
559  * @pi: integer used for iteration                403  * @pi: integer used for iteration
560  *                                                404  *
561  * This must be called either with wq_pool_mut    405  * This must be called either with wq_pool_mutex held or RCU read
562  * locked.  If the pool needs to be used beyon    406  * locked.  If the pool needs to be used beyond the locking in effect, the
563  * caller is responsible for guaranteeing that    407  * caller is responsible for guaranteeing that the pool stays online.
564  *                                                408  *
565  * The if/else clause exists only for the lock    409  * The if/else clause exists only for the lockdep assertion and can be
566  * ignored.                                       410  * ignored.
567  */                                               411  */
568 #define for_each_pool(pool, pi)                   412 #define for_each_pool(pool, pi)                                         \
569         idr_for_each_entry(&worker_pool_idr, p    413         idr_for_each_entry(&worker_pool_idr, pool, pi)                  \
570                 if (({ assert_rcu_or_pool_mute    414                 if (({ assert_rcu_or_pool_mutex(); false; })) { }       \
571                 else                              415                 else
572                                                   416 
573 /**                                               417 /**
574  * for_each_pool_worker - iterate through all     418  * for_each_pool_worker - iterate through all workers of a worker_pool
575  * @worker: iteration cursor                      419  * @worker: iteration cursor
576  * @pool: worker_pool to iterate workers of       420  * @pool: worker_pool to iterate workers of
577  *                                                421  *
578  * This must be called with wq_pool_attach_mut    422  * This must be called with wq_pool_attach_mutex.
579  *                                                423  *
580  * The if/else clause exists only for the lock    424  * The if/else clause exists only for the lockdep assertion and can be
581  * ignored.                                       425  * ignored.
582  */                                               426  */
583 #define for_each_pool_worker(worker, pool)        427 #define for_each_pool_worker(worker, pool)                              \
584         list_for_each_entry((worker), &(pool)-    428         list_for_each_entry((worker), &(pool)->workers, node)           \
585                 if (({ lockdep_assert_held(&wq    429                 if (({ lockdep_assert_held(&wq_pool_attach_mutex); false; })) { } \
586                 else                              430                 else
587                                                   431 
588 /**                                               432 /**
589  * for_each_pwq - iterate through all pool_wor    433  * for_each_pwq - iterate through all pool_workqueues of the specified workqueue
590  * @pwq: iteration cursor                         434  * @pwq: iteration cursor
591  * @wq: the target workqueue                      435  * @wq: the target workqueue
592  *                                                436  *
593  * This must be called either with wq->mutex h    437  * This must be called either with wq->mutex held or RCU read locked.
594  * If the pwq needs to be used beyond the lock    438  * If the pwq needs to be used beyond the locking in effect, the caller is
595  * responsible for guaranteeing that the pwq s    439  * responsible for guaranteeing that the pwq stays online.
596  *                                                440  *
597  * The if/else clause exists only for the lock    441  * The if/else clause exists only for the lockdep assertion and can be
598  * ignored.                                       442  * ignored.
599  */                                               443  */
600 #define for_each_pwq(pwq, wq)                     444 #define for_each_pwq(pwq, wq)                                           \
601         list_for_each_entry_rcu((pwq), &(wq)->    445         list_for_each_entry_rcu((pwq), &(wq)->pwqs, pwqs_node,          \
602                                  lockdep_is_he    446                                  lockdep_is_held(&(wq->mutex)))
603                                                   447 
604 #ifdef CONFIG_DEBUG_OBJECTS_WORK                  448 #ifdef CONFIG_DEBUG_OBJECTS_WORK
605                                                   449 
606 static const struct debug_obj_descr work_debug    450 static const struct debug_obj_descr work_debug_descr;
607                                                   451 
608 static void *work_debug_hint(void *addr)          452 static void *work_debug_hint(void *addr)
609 {                                                 453 {
610         return ((struct work_struct *) addr)->    454         return ((struct work_struct *) addr)->func;
611 }                                                 455 }
612                                                   456 
613 static bool work_is_static_object(void *addr)     457 static bool work_is_static_object(void *addr)
614 {                                                 458 {
615         struct work_struct *work = addr;          459         struct work_struct *work = addr;
616                                                   460 
617         return test_bit(WORK_STRUCT_STATIC_BIT    461         return test_bit(WORK_STRUCT_STATIC_BIT, work_data_bits(work));
618 }                                                 462 }
619                                                   463 
620 /*                                                464 /*
621  * fixup_init is called when:                     465  * fixup_init is called when:
622  * - an active object is initialized              466  * - an active object is initialized
623  */                                               467  */
624 static bool work_fixup_init(void *addr, enum d    468 static bool work_fixup_init(void *addr, enum debug_obj_state state)
625 {                                                 469 {
626         struct work_struct *work = addr;          470         struct work_struct *work = addr;
627                                                   471 
628         switch (state) {                          472         switch (state) {
629         case ODEBUG_STATE_ACTIVE:                 473         case ODEBUG_STATE_ACTIVE:
630                 cancel_work_sync(work);           474                 cancel_work_sync(work);
631                 debug_object_init(work, &work_    475                 debug_object_init(work, &work_debug_descr);
632                 return true;                      476                 return true;
633         default:                                  477         default:
634                 return false;                     478                 return false;
635         }                                         479         }
636 }                                                 480 }
637                                                   481 
638 /*                                                482 /*
639  * fixup_free is called when:                     483  * fixup_free is called when:
640  * - an active object is freed                    484  * - an active object is freed
641  */                                               485  */
642 static bool work_fixup_free(void *addr, enum d    486 static bool work_fixup_free(void *addr, enum debug_obj_state state)
643 {                                                 487 {
644         struct work_struct *work = addr;          488         struct work_struct *work = addr;
645                                                   489 
646         switch (state) {                          490         switch (state) {
647         case ODEBUG_STATE_ACTIVE:                 491         case ODEBUG_STATE_ACTIVE:
648                 cancel_work_sync(work);           492                 cancel_work_sync(work);
649                 debug_object_free(work, &work_    493                 debug_object_free(work, &work_debug_descr);
650                 return true;                      494                 return true;
651         default:                                  495         default:
652                 return false;                     496                 return false;
653         }                                         497         }
654 }                                                 498 }
655                                                   499 
656 static const struct debug_obj_descr work_debug    500 static const struct debug_obj_descr work_debug_descr = {
657         .name           = "work_struct",          501         .name           = "work_struct",
658         .debug_hint     = work_debug_hint,        502         .debug_hint     = work_debug_hint,
659         .is_static_object = work_is_static_obj    503         .is_static_object = work_is_static_object,
660         .fixup_init     = work_fixup_init,        504         .fixup_init     = work_fixup_init,
661         .fixup_free     = work_fixup_free,        505         .fixup_free     = work_fixup_free,
662 };                                                506 };
663                                                   507 
664 static inline void debug_work_activate(struct     508 static inline void debug_work_activate(struct work_struct *work)
665 {                                                 509 {
666         debug_object_activate(work, &work_debu    510         debug_object_activate(work, &work_debug_descr);
667 }                                                 511 }
668                                                   512 
669 static inline void debug_work_deactivate(struc    513 static inline void debug_work_deactivate(struct work_struct *work)
670 {                                                 514 {
671         debug_object_deactivate(work, &work_de    515         debug_object_deactivate(work, &work_debug_descr);
672 }                                                 516 }
673                                                   517 
674 void __init_work(struct work_struct *work, int    518 void __init_work(struct work_struct *work, int onstack)
675 {                                                 519 {
676         if (onstack)                              520         if (onstack)
677                 debug_object_init_on_stack(wor    521                 debug_object_init_on_stack(work, &work_debug_descr);
678         else                                      522         else
679                 debug_object_init(work, &work_    523                 debug_object_init(work, &work_debug_descr);
680 }                                                 524 }
681 EXPORT_SYMBOL_GPL(__init_work);                   525 EXPORT_SYMBOL_GPL(__init_work);
682                                                   526 
683 void destroy_work_on_stack(struct work_struct     527 void destroy_work_on_stack(struct work_struct *work)
684 {                                                 528 {
685         debug_object_free(work, &work_debug_de    529         debug_object_free(work, &work_debug_descr);
686 }                                                 530 }
687 EXPORT_SYMBOL_GPL(destroy_work_on_stack);         531 EXPORT_SYMBOL_GPL(destroy_work_on_stack);
688                                                   532 
689 void destroy_delayed_work_on_stack(struct dela    533 void destroy_delayed_work_on_stack(struct delayed_work *work)
690 {                                                 534 {
691         destroy_timer_on_stack(&work->timer);     535         destroy_timer_on_stack(&work->timer);
692         debug_object_free(&work->work, &work_d    536         debug_object_free(&work->work, &work_debug_descr);
693 }                                                 537 }
694 EXPORT_SYMBOL_GPL(destroy_delayed_work_on_stac    538 EXPORT_SYMBOL_GPL(destroy_delayed_work_on_stack);
695                                                   539 
696 #else                                             540 #else
697 static inline void debug_work_activate(struct     541 static inline void debug_work_activate(struct work_struct *work) { }
698 static inline void debug_work_deactivate(struc    542 static inline void debug_work_deactivate(struct work_struct *work) { }
699 #endif                                            543 #endif
700                                                   544 
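The on-stack helpers above pair with INIT_WORK_ONSTACK() so that debugobjects can track a stack-allocated work item over its whole lifetime. A minimal sketch, assuming a caller that may sleep (the work function name and the use of schedule_work()/flush_work() are illustrative, not taken from this file):

	static void demo_fn(struct work_struct *work)
	{
		pr_info("on-stack work executed\n");
	}

	static void run_onstack_work(void)
	{
		struct work_struct work;

		INIT_WORK_ONSTACK(&work, demo_fn);
		schedule_work(&work);
		flush_work(&work);		/* must complete before the stack frame goes away */
		destroy_work_on_stack(&work);	/* releases the debugobjects state */
	}
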
701 /**                                               545 /**
702  * worker_pool_assign_id - allocate ID and ass    546  * worker_pool_assign_id - allocate ID and assign it to @pool
703  * @pool: the pool pointer of interest            547  * @pool: the pool pointer of interest
704  *                                                548  *
705  * Returns 0 if ID in [0, WORK_OFFQ_POOL_NONE)    549  * Returns 0 if ID in [0, WORK_OFFQ_POOL_NONE) is allocated and assigned
706  * successfully, -errno on failure.               550  * successfully, -errno on failure.
707  */                                               551  */
708 static int worker_pool_assign_id(struct worker    552 static int worker_pool_assign_id(struct worker_pool *pool)
709 {                                                 553 {
710         int ret;                                  554         int ret;
711                                                   555 
712         lockdep_assert_held(&wq_pool_mutex);      556         lockdep_assert_held(&wq_pool_mutex);
713                                                   557 
714         ret = idr_alloc(&worker_pool_idr, pool    558         ret = idr_alloc(&worker_pool_idr, pool, 0, WORK_OFFQ_POOL_NONE,
715                         GFP_KERNEL);              559                         GFP_KERNEL);
716         if (ret >= 0) {                           560         if (ret >= 0) {
717                 pool->id = ret;                   561                 pool->id = ret;
718                 return 0;                         562                 return 0;
719         }                                         563         }
720         return ret;                               564         return ret;
721 }                                                 565 }
722                                                   566 
723 static struct pool_workqueue __rcu **          << 
724 unbound_pwq_slot(struct workqueue_struct *wq,  << 
725 {                                              << 
726        if (cpu >= 0)                           << 
727                return per_cpu_ptr(wq->cpu_pwq, << 
728        else                                    << 
729                return &wq->dfl_pwq;            << 
730 }                                              << 
731                                                << 
732 /* @cpu < 0 for dfl_pwq */                     << 
733 static struct pool_workqueue *unbound_pwq(stru << 
734 {                                              << 
735         return rcu_dereference_check(*unbound_ << 
736                                      lockdep_i << 
737                                      lockdep_i << 
738 }                                              << 
739                                                << 
740 /**                                               567 /**
741  * unbound_effective_cpumask - effective cpuma !! 568  * unbound_pwq_by_node - return the unbound pool_workqueue for the given node
742  * @wq: workqueue of interest                  !! 569  * @wq: the target workqueue
                                                   >> 570  * @node: the node ID
                                                   >> 571  *
                                                   >> 572  * This must be called with any of wq_pool_mutex, wq->mutex or RCU
                                                   >> 573  * read locked.
                                                   >> 574  * If the pwq needs to be used beyond the locking in effect, the caller is
                                                   >> 575  * responsible for guaranteeing that the pwq stays online.
743  *                                                576  *
744  * @wq->unbound_attrs->cpumask contains the cp !! 577  * Return: The unbound pool_workqueue for @node.
745  * is masked with wq_unbound_cpumask to determ << 
746  * default pwq is always mapped to the pool wi << 
747  */                                               578  */
748 static struct cpumask *unbound_effective_cpuma !! 579 static struct pool_workqueue *unbound_pwq_by_node(struct workqueue_struct *wq,
                                                   >> 580                                                   int node)
749 {                                                 581 {
750         return unbound_pwq(wq, -1)->pool->attr !! 582         assert_rcu_or_wq_mutex_or_pool_mutex(wq);
                                                   >> 583 
                                                   >> 584         /*
                                                   >> 585          * XXX: @node can be NUMA_NO_NODE if CPU goes offline while a
                                                   >> 586          * delayed item is pending.  The plan is to keep CPU -> NODE
                                                   >> 587          * mapping valid and stable across CPU on/offlines.  Once that
                                                   >> 588          * happens, this workaround can be removed.
                                                   >> 589          */
                                                   >> 590         if (unlikely(node == NUMA_NO_NODE))
                                                   >> 591                 return wq->dfl_pwq;
                                                   >> 592 
                                                   >> 593         return rcu_dereference_raw(wq->numa_pwq_tbl[node]);
751 }                                                 594 }
752                                                   595 
753 static unsigned int work_color_to_flags(int co    596 static unsigned int work_color_to_flags(int color)
754 {                                                 597 {
755         return color << WORK_STRUCT_COLOR_SHIF    598         return color << WORK_STRUCT_COLOR_SHIFT;
756 }                                                 599 }
757                                                   600 
758 static int get_work_color(unsigned long work_d    601 static int get_work_color(unsigned long work_data)
759 {                                                 602 {
760         return (work_data >> WORK_STRUCT_COLOR    603         return (work_data >> WORK_STRUCT_COLOR_SHIFT) &
761                 ((1 << WORK_STRUCT_COLOR_BITS)    604                 ((1 << WORK_STRUCT_COLOR_BITS) - 1);
762 }                                                 605 }
763                                                   606 
764 static int work_next_color(int color)             607 static int work_next_color(int color)
765 {                                                 608 {
766         return (color + 1) % WORK_NR_COLORS;      609         return (color + 1) % WORK_NR_COLORS;
767 }                                                 610 }
768                                                   611 
769 static unsigned long pool_offq_flags(struct wo << 
770 {                                              << 
771         return (pool->flags & POOL_BH) ? WORK_ << 
772 }                                              << 
773                                                << 
774 /*                                                612 /*
775  * While queued, %WORK_STRUCT_PWQ is set and n    613  * While queued, %WORK_STRUCT_PWQ is set and non flag bits of a work's data
776  * contain the pointer to the queued pwq.  Onc    614  * contain the pointer to the queued pwq.  Once execution starts, the flag
777  * is cleared and the high bits contain OFFQ f    615  * is cleared and the high bits contain OFFQ flags and pool ID.
778  *                                                616  *
779  * set_work_pwq(), set_work_pool_and_clear_pen !! 617  * set_work_pwq(), set_work_pool_and_clear_pending(), mark_work_canceling()
780  * can be used to set the pwq, pool or clear w !! 618  * and clear_work_data() can be used to set the pwq, pool or clear
781  * only be called while the work is owned - ie !! 619  * work->data.  These functions should only be called while the work is
                                                   >> 620  * owned - ie. while the PENDING bit is set.
782  *                                                621  *
783  * get_work_pool() and get_work_pwq() can be u    622  * get_work_pool() and get_work_pwq() can be used to obtain the pool or pwq
784  * corresponding to a work.  Pool is available    623  * corresponding to a work.  Pool is available once the work has been
785  * queued anywhere after initialization until     624  * queued anywhere after initialization until it is sync canceled.  pwq is
786  * available only while the work item is queue    625  * available only while the work item is queued.
                                                   >> 626  *
                                                   >> 627  * %WORK_OFFQ_CANCELING is used to mark a work item which is being
                                                   >> 628  * canceled.  While being canceled, a work item may have its PENDING set
                                                   >> 629  * but stay off timer and worklist for arbitrarily long and nobody should
                                                   >> 630  * try to steal the PENDING bit.
787  */                                               631  */
788 static inline void set_work_data(struct work_s !! 632 static inline void set_work_data(struct work_struct *work, unsigned long data,
                                                   >> 633                                  unsigned long flags)
789 {                                                 634 {
790         WARN_ON_ONCE(!work_pending(work));        635         WARN_ON_ONCE(!work_pending(work));
791         atomic_long_set(&work->data, data | wo !! 636         atomic_long_set(&work->data, data | flags | work_static(work));
792 }                                                 637 }
793                                                   638 
794 static void set_work_pwq(struct work_struct *w    639 static void set_work_pwq(struct work_struct *work, struct pool_workqueue *pwq,
795                          unsigned long flags)  !! 640                          unsigned long extra_flags)
796 {                                                 641 {
797         set_work_data(work, (unsigned long)pwq !! 642         set_work_data(work, (unsigned long)pwq,
798                       WORK_STRUCT_PWQ | flags) !! 643                       WORK_STRUCT_PENDING | WORK_STRUCT_PWQ | extra_flags);
799 }                                                 644 }
800                                                   645 
801 static void set_work_pool_and_keep_pending(str    646 static void set_work_pool_and_keep_pending(struct work_struct *work,
802                                            int !! 647                                            int pool_id)
803 {                                                 648 {
804         set_work_data(work, ((unsigned long)po !! 649         set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT,
805                       WORK_STRUCT_PENDING | fl !! 650                       WORK_STRUCT_PENDING);
806 }                                                 651 }
807                                                   652 
808 static void set_work_pool_and_clear_pending(st    653 static void set_work_pool_and_clear_pending(struct work_struct *work,
809                                             in !! 654                                             int pool_id)
810 {                                                 655 {
811         /*                                        656         /*
812          * The following wmb is paired with th    657          * The following wmb is paired with the implied mb in
813          * test_and_set_bit(PENDING) and ensur    658          * test_and_set_bit(PENDING) and ensures all updates to @work made
814          * here are visible to and precede any    659          * here are visible to and precede any updates by the next PENDING
815          * owner.                                 660          * owner.
816          */                                       661          */
817         smp_wmb();                                662         smp_wmb();
818         set_work_data(work, ((unsigned long)po !! 663         set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT, 0);
819                       flags);                  << 
820         /*                                        664         /*
821          * The following mb guarantees that pr    665          * The following mb guarantees that previous clear of a PENDING bit
822          * will not be reordered with any spec    666          * will not be reordered with any speculative LOADS or STORES from
823          * work->current_func, which is execut    667          * work->current_func, which is executed afterwards.  This possible
824          * reordering can lead to a missed exe    668          * reordering can lead to a missed execution on attempt to queue
825          * the same @work.  E.g. consider this    669          * the same @work.  E.g. consider this case:
826          *                                        670          *
827          *   CPU#0                         CPU    671          *   CPU#0                         CPU#1
828          *   ----------------------------  ---    672          *   ----------------------------  --------------------------------
829          *                                        673          *
830          * 1  STORE event_indicated               674          * 1  STORE event_indicated
831          * 2  queue_work_on() {                   675          * 2  queue_work_on() {
832          * 3    test_and_set_bit(PENDING)         676          * 3    test_and_set_bit(PENDING)
833          * 4 }                             set    677          * 4 }                             set_..._and_clear_pending() {
834          * 5                                 s    678          * 5                                 set_work_data() # clear bit
835          * 6                                 s    679          * 6                                 smp_mb()
836          * 7                               wor    680          * 7                               work->current_func() {
837          * 8                                      681          * 8                                  LOAD event_indicated
838          *                                 }      682          *                                 }
839          *                                        683          *
840          * Without an explicit full barrier sp    684          * Without an explicit full barrier speculative LOAD on line 8 can
841          * be executed before CPU#0 does STORE    685          * be executed before CPU#0 does STORE on line 1.  If that happens,
842          * CPU#0 observes the PENDING bit is s    686          * CPU#0 observes the PENDING bit is still set and new execution of
843          * a @work is not queued in the hope t    687          * a @work is not queued in the hope that CPU#1 will eventually
844          * finish the queued @work.  Meanwhile    688          * finish the queued @work.  Meanwhile CPU#1 does not see
845          * event_indicated is set, because spe    689          * event_indicated is set, because speculative LOAD was executed
846          * before actual STORE.                   690          * before actual STORE.
847          */                                       691          */
848         smp_mb();                                 692         smp_mb();
849 }                                                 693 }
850                                                   694 
851 static inline struct pool_workqueue *work_stru !! 695 static void clear_work_data(struct work_struct *work)
852 {                                                 696 {
853         return (struct pool_workqueue *)(data  !! 697         smp_wmb();      /* see set_work_pool_and_clear_pending() */
                                                   >> 698         set_work_data(work, WORK_STRUCT_NO_POOL, 0);
854 }                                                 699 }
855                                                   700 
856 static struct pool_workqueue *get_work_pwq(str    701 static struct pool_workqueue *get_work_pwq(struct work_struct *work)
857 {                                                 702 {
858         unsigned long data = atomic_long_read(    703         unsigned long data = atomic_long_read(&work->data);
859                                                   704 
860         if (data & WORK_STRUCT_PWQ)               705         if (data & WORK_STRUCT_PWQ)
861                 return work_struct_pwq(data);  !! 706                 return (void *)(data & WORK_STRUCT_WQ_DATA_MASK);
862         else                                      707         else
863                 return NULL;                      708                 return NULL;
864 }                                                 709 }
865                                                   710 
866 /**                                               711 /**
867  * get_work_pool - return the worker_pool a gi    712  * get_work_pool - return the worker_pool a given work was associated with
868  * @work: the work item of interest               713  * @work: the work item of interest
869  *                                                714  *
870  * Pools are created and destroyed under wq_po    715  * Pools are created and destroyed under wq_pool_mutex, and allow read
871  * access under RCU read lock.  As such, this     716  * access under RCU read lock.  As such, this function should be
872  * called under wq_pool_mutex or inside of a r    717  * called under wq_pool_mutex or inside of a rcu_read_lock() region.
873  *                                                718  *
874  * All fields of the returned pool are accessi    719  * All fields of the returned pool are accessible as long as the above
875  * mentioned locking is in effect.  If the ret    720  * mentioned locking is in effect.  If the returned pool needs to be used
876  * beyond the critical section, the caller is     721  * beyond the critical section, the caller is responsible for ensuring the
877  * returned pool is and stays online.             722  * returned pool is and stays online.
878  *                                                723  *
879  * Return: The worker_pool @work was last asso    724  * Return: The worker_pool @work was last associated with.  %NULL if none.
880  */                                               725  */
881 static struct worker_pool *get_work_pool(struc    726 static struct worker_pool *get_work_pool(struct work_struct *work)
882 {                                                 727 {
883         unsigned long data = atomic_long_read(    728         unsigned long data = atomic_long_read(&work->data);
884         int pool_id;                              729         int pool_id;
885                                                   730 
886         assert_rcu_or_pool_mutex();               731         assert_rcu_or_pool_mutex();
887                                                   732 
888         if (data & WORK_STRUCT_PWQ)               733         if (data & WORK_STRUCT_PWQ)
889                 return work_struct_pwq(data)-> !! 734                 return ((struct pool_workqueue *)
                                                   >> 735                         (data & WORK_STRUCT_WQ_DATA_MASK))->pool;
890                                                   736 
891         pool_id = data >> WORK_OFFQ_POOL_SHIFT    737         pool_id = data >> WORK_OFFQ_POOL_SHIFT;
892         if (pool_id == WORK_OFFQ_POOL_NONE)       738         if (pool_id == WORK_OFFQ_POOL_NONE)
893                 return NULL;                      739                 return NULL;
894                                                   740 
895         return idr_find(&worker_pool_idr, pool    741         return idr_find(&worker_pool_idr, pool_id);
896 }                                                 742 }
897                                                   743 
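A hedged sketch of the contract documented above: a caller that only needs the pool for the duration of the lookup can rely on an RCU read-side section (the work pointer is a placeholder):

	struct worker_pool *pool;

	rcu_read_lock();
	pool = get_work_pool(work);
	if (pool)
		pr_info("work %p last associated with pool %d\n", work, pool->id);
	rcu_read_unlock();
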
898 static unsigned long shift_and_mask(unsigned l !! 744 /**
                                                   >> 745  * get_work_pool_id - return the worker pool ID a given work is associated with
                                                   >> 746  * @work: the work item of interest
                                                   >> 747  *
                                                   >> 748  * Return: The worker_pool ID @work was last associated with.
                                                   >> 749  * %WORK_OFFQ_POOL_NONE if none.
                                                   >> 750  */
                                                   >> 751 static int get_work_pool_id(struct work_struct *work)
899 {                                                 752 {
900         return (v >> shift) & ((1U << bits) -  !! 753         unsigned long data = atomic_long_read(&work->data);
                                                   >> 754 
                                                   >> 755         if (data & WORK_STRUCT_PWQ)
                                                   >> 756                 return ((struct pool_workqueue *)
                                                   >> 757                         (data & WORK_STRUCT_WQ_DATA_MASK))->pool->id;
                                                   >> 758 
                                                   >> 759         return data >> WORK_OFFQ_POOL_SHIFT;
901 }                                                 760 }
902                                                   761 
903 static void work_offqd_unpack(struct work_offq !! 762 static void mark_work_canceling(struct work_struct *work)
904 {                                                 763 {
905         WARN_ON_ONCE(data & WORK_STRUCT_PWQ);  !! 764         unsigned long pool_id = get_work_pool_id(work);
906                                                   765 
907         offqd->pool_id = shift_and_mask(data,  !! 766         pool_id <<= WORK_OFFQ_POOL_SHIFT;
908                                         WORK_O !! 767         set_work_data(work, pool_id | WORK_OFFQ_CANCELING, WORK_STRUCT_PENDING);
909         offqd->disable = shift_and_mask(data,  << 
910                                         WORK_O << 
911         offqd->flags = data & WORK_OFFQ_FLAG_M << 
912 }                                                 768 }
913                                                   769 
914 static unsigned long work_offqd_pack_flags(str !! 770 static bool work_is_canceling(struct work_struct *work)
915 {                                                 771 {
916         return ((unsigned long)offqd->disable  !! 772         unsigned long data = atomic_long_read(&work->data);
917                 ((unsigned long)offqd->flags); !! 773 
                                                   >> 774         return !(data & WORK_STRUCT_PWQ) && (data & WORK_OFFQ_CANCELING);
918 }                                                 775 }
919                                                   776 
920 /*                                                777 /*
921  * Policy functions.  These define the policie    778  * Policy functions.  These define the policies on how the global worker
922  * pools are managed.  Unless noted otherwise,    779  * pools are managed.  Unless noted otherwise, these functions assume that
923  * they're being called with pool->lock held.     780  * they're being called with pool->lock held.
924  */                                               781  */
925                                                   782 
                                                   >> 783 static bool __need_more_worker(struct worker_pool *pool)
                                                   >> 784 {
                                                   >> 785         return !pool->nr_running;
                                                   >> 786 }
                                                   >> 787 
926 /*                                                788 /*
927  * Need to wake up a worker?  Called from anyt    789  * Need to wake up a worker?  Called from anything but currently
928  * running workers.                               790  * running workers.
929  *                                                791  *
930  * Note that, because unbound workers never co    792  * Note that, because unbound workers never contribute to nr_running, this
931  * function will always return %true for unbou    793  * function will always return %true for unbound pools as long as the
932  * worklist isn't empty.                          794  * worklist isn't empty.
933  */                                               795  */
934 static bool need_more_worker(struct worker_poo    796 static bool need_more_worker(struct worker_pool *pool)
935 {                                                 797 {
936         return !list_empty(&pool->worklist) && !! 798         return !list_empty(&pool->worklist) && __need_more_worker(pool);
937 }                                                 799 }
938                                                   800 
939 /* Can I start working?  Called from busy but     801 /* Can I start working?  Called from busy but !running workers. */
940 static bool may_start_working(struct worker_po    802 static bool may_start_working(struct worker_pool *pool)
941 {                                                 803 {
942         return pool->nr_idle;                     804         return pool->nr_idle;
943 }                                                 805 }
944                                                   806 
945 /* Do I need to keep working?  Called from cur    807 /* Do I need to keep working?  Called from currently running workers. */
946 static bool keep_working(struct worker_pool *p    808 static bool keep_working(struct worker_pool *pool)
947 {                                                 809 {
948         return !list_empty(&pool->worklist) &&    810         return !list_empty(&pool->worklist) && (pool->nr_running <= 1);
949 }                                                 811 }
950                                                   812 
951 /* Do we need a new worker?  Called from manag    813 /* Do we need a new worker?  Called from manager. */
952 static bool need_to_create_worker(struct worke    814 static bool need_to_create_worker(struct worker_pool *pool)
953 {                                                 815 {
954         return need_more_worker(pool) && !may_    816         return need_more_worker(pool) && !may_start_working(pool);
955 }                                                 817 }
956                                                   818 
957 /* Do we have too many workers and should some    819 /* Do we have too many workers and should some go away? */
958 static bool too_many_workers(struct worker_poo    820 static bool too_many_workers(struct worker_pool *pool)
959 {                                                 821 {
960         bool managing = pool->flags & POOL_MAN    822         bool managing = pool->flags & POOL_MANAGER_ACTIVE;
961         int nr_idle = pool->nr_idle + managing    823         int nr_idle = pool->nr_idle + managing; /* manager is considered idle */
962         int nr_busy = pool->nr_workers - nr_id    824         int nr_busy = pool->nr_workers - nr_idle;
963                                                   825 
964         return nr_idle > 2 && (nr_idle - 2) *     826         return nr_idle > 2 && (nr_idle - 2) * MAX_IDLE_WORKERS_RATIO >= nr_busy;
965 }                                                 827 }
966                                                   828 
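Worked example, assuming MAX_IDLE_WORKERS_RATIO is 4 as in mainline: with nr_idle = 6 and no active manager, (6 - 2) * 4 = 16, so too_many_workers() returns true whenever at most 16 workers are actually busy, i.e. the pool is considered over-provisioned and surplus idle workers become candidates for removal.
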
967 /**                                            !! 829 /*
968  * worker_set_flags - set worker flags and adj !! 830  * Wake up functions.
969  * @worker: self                               << 
970  * @flags: flags to set                        << 
971  *                                             << 
972  * Set @flags in @worker->flags and adjust nr_ << 
973  */                                            << 
974 static inline void worker_set_flags(struct wor << 
975 {                                              << 
976         struct worker_pool *pool = worker->poo << 
977                                                << 
978         lockdep_assert_held(&pool->lock);      << 
979                                                << 
980         /* If transitioning into NOT_RUNNING,  << 
981         if ((flags & WORKER_NOT_RUNNING) &&    << 
982             !(worker->flags & WORKER_NOT_RUNNI << 
983                 pool->nr_running--;            << 
984         }                                      << 
985                                                << 
986         worker->flags |= flags;                << 
987 }                                              << 
988                                                << 
989 /**                                            << 
990  * worker_clr_flags - clear worker flags and a << 
991  * @worker: self                               << 
992  * @flags: flags to clear                      << 
993  *                                             << 
994  * Clear @flags in @worker->flags and adjust n << 
995  */                                               831  */
996 static inline void worker_clr_flags(struct wor << 
997 {                                              << 
998         struct worker_pool *pool = worker->poo << 
999         unsigned int oflags = worker->flags;   << 
1000                                               << 
1001         lockdep_assert_held(&pool->lock);     << 
1002                                               << 
1003         worker->flags &= ~flags;              << 
1004                                               << 
1005         /*                                    << 
1006          * If transitioning out of NOT_RUNNIN << 
1007          * that the nested NOT_RUNNING is not << 
1008          * of multiple flags, not a single fl << 
1009          */                                   << 
1010         if ((flags & WORKER_NOT_RUNNING) && ( << 
1011                 if (!(worker->flags & WORKER_ << 
1012                         pool->nr_running++;   << 
1013 }                                             << 
1014                                                  832 
1015 /* Return the first idle worker.  Called with    833 /* Return the first idle worker.  Called with pool->lock held. */
1016 static struct worker *first_idle_worker(struc    834 static struct worker *first_idle_worker(struct worker_pool *pool)
1017 {                                                835 {
1018         if (unlikely(list_empty(&pool->idle_l    836         if (unlikely(list_empty(&pool->idle_list)))
1019                 return NULL;                     837                 return NULL;
1020                                                  838 
1021         return list_first_entry(&pool->idle_l    839         return list_first_entry(&pool->idle_list, struct worker, entry);
1022 }                                                840 }
1023                                                  841 
1024 /**                                              842 /**
1025  * worker_enter_idle - enter idle state       !! 843  * wake_up_worker - wake up an idle worker
1026  * @worker: worker which is entering idle sta !! 844  * @pool: worker pool to wake worker from
1027  *                                               845  *
1028  * @worker is entering idle state.  Update st !! 846  * Wake up the first idle worker of @pool.
1029  * necessary.                                 << 
1030  *                                            << 
1031  * LOCKING:                                   << 
1032  * raw_spin_lock_irq(pool->lock).             << 
1033  */                                           << 
1034 static void worker_enter_idle(struct worker * << 
1035 {                                             << 
1036         struct worker_pool *pool = worker->po << 
1037                                               << 
1038         if (WARN_ON_ONCE(worker->flags & WORK << 
1039             WARN_ON_ONCE(!list_empty(&worker- << 
1040                          (worker->hentry.next << 
1041                 return;                       << 
1042                                               << 
1043         /* can't use worker_set_flags(), also << 
1044         worker->flags |= WORKER_IDLE;         << 
1045         pool->nr_idle++;                      << 
1046         worker->last_active = jiffies;        << 
1047                                               << 
1048         /* idle_list is LIFO */               << 
1049         list_add(&worker->entry, &pool->idle_ << 
1050                                               << 
1051         if (too_many_workers(pool) && !timer_ << 
1052                 mod_timer(&pool->idle_timer,  << 
1053                                               << 
1054         /* Sanity check nr_running. */        << 
1055         WARN_ON_ONCE(pool->nr_workers == pool << 
1056 }                                             << 
1057                                               << 
1058 /**                                           << 
1059  * worker_leave_idle - leave idle state       << 
1060  * @worker: worker which is leaving idle stat << 
1061  *                                            << 
1062  * @worker is leaving idle state.  Update sta << 
1063  *                                            << 
1064  * LOCKING:                                   << 
1065  * raw_spin_lock_irq(pool->lock).             << 
1066  */                                           << 
1067 static void worker_leave_idle(struct worker * << 
1068 {                                             << 
1069         struct worker_pool *pool = worker->po << 
1070                                               << 
1071         if (WARN_ON_ONCE(!(worker->flags & WO << 
1072                 return;                       << 
1073         worker_clr_flags(worker, WORKER_IDLE) << 
1074         pool->nr_idle--;                      << 
1075         list_del_init(&worker->entry);        << 
1076 }                                             << 
1077                                               << 
1078 /**                                           << 
1079  * find_worker_executing_work - find worker w << 
1080  * @pool: pool of interest                    << 
1081  * @work: work to find worker for             << 
1082  *                                            << 
1083  * Find a worker which is executing @work on  << 
1084  * @pool->busy_hash which is keyed by the add << 
1085  * to match, its current execution should mat << 
1086  * its work function.  This is to avoid unwan << 
1087  * unrelated work executions through a work i << 
1088  * being executed.                            << 
1089  *                                            << 
1090  * This is a bit tricky.  A work item may be  << 
1091  * starts and nothing prevents the freed area << 
1092  * another work item.  If the same work item  << 
1093  * before the original execution finishes, wo << 
1094  * recycled work item as currently executing  << 
1095  * current execution finishes, introducing an << 
1096  *                                            << 
1097  * This function checks the work item address << 
1098  * false positives.  Note that this isn't com << 
1099  * work function which can introduce dependen << 
1100  * recycled work item.  Well, if somebody wan << 
1101  * foot that badly, there's only so much we c << 
1102  * actually occurs, it should be easy to loca << 
1103  *                                               847  *
1104  * CONTEXT:                                      848  * CONTEXT:
1105  * raw_spin_lock_irq(pool->lock).                849  * raw_spin_lock_irq(pool->lock).
1106  *                                            << 
1107  * Return:                                    << 
1108  * Pointer to worker which is executing @work << 
1109  * otherwise.                                 << 
1110  */                                              850  */
1111 static struct worker *find_worker_executing_w !! 851 static void wake_up_worker(struct worker_pool *pool)
1112                                               << 
1113 {                                             << 
1114         struct worker *worker;                << 
1115                                               << 
1116         hash_for_each_possible(pool->busy_has << 
1117                                (unsigned long << 
1118                 if (worker->current_work == w << 
1119                     worker->current_func == w << 
1120                         return worker;        << 
1121                                               << 
1122         return NULL;                          << 
1123 }                                             << 
1124                                               << 
1125 /**                                           << 
1126  * move_linked_works - move linked works to a << 
1127  * @work: start of series of works to be sche << 
1128  * @head: target list to append @work to      << 
1129  * @nextp: out parameter for nested worklist  << 
1130  *                                            << 
1131  * Schedule linked works starting from @work  << 
1132  * scheduled starts at @work and includes any << 
1133  * WORK_STRUCT_LINKED set in its predecessor. << 
1134  * @nextp.                                    << 
1135  *                                            << 
1136  * CONTEXT:                                   << 
1137  * raw_spin_lock_irq(pool->lock).             << 
1138  */                                           << 
1139 static void move_linked_works(struct work_str << 
1140                               struct work_str << 
1141 {                                             << 
1142         struct work_struct *n;                << 
1143                                               << 
1144         /*                                    << 
1145          * Linked worklist will always end be << 
1146          * use NULL for list head.            << 
1147          */                                   << 
1148         list_for_each_entry_safe_from(work, n << 
1149                 list_move_tail(&work->entry,  << 
1150                 if (!(*work_data_bits(work) & << 
1151                         break;                << 
1152         }                                     << 
1153                                               << 
1154         /*                                    << 
1155          * If we're already inside safe list  << 
1156          * multiple works to the scheduled qu << 
1157          * needs to be updated.               << 
1158          */                                   << 
1159         if (nextp)                            << 
1160                 *nextp = n;                   << 
1161 }                                             << 
1162                                               << 
1163 /**                                           << 
1164  * assign_work - assign a work item and its l << 
1165  * @work: work to assign                      << 
1166  * @worker: worker to assign to               << 
1167  * @nextp: out parameter for nested worklist  << 
1168  *                                            << 
1169  * Assign @work and its linked work items to  << 
1170  * executed by another worker in the same poo << 
1171  *                                            << 
1172  * If @nextp is not NULL, it's updated to poi << 
1173  * scheduled work. This allows assign_work()  << 
1174  * list_for_each_entry_safe().                << 
1175  *                                            << 
1176  * Returns %true if @work was successfully as << 
1177  * was punted to another worker already execu << 
1178  */                                           << 
1179 static bool assign_work(struct work_struct *w << 
1180                         struct work_struct ** << 
1181 {                                             << 
1182         struct worker_pool *pool = worker->po << 
1183         struct worker *collision;             << 
1184                                               << 
1185         lockdep_assert_held(&pool->lock);     << 
1186                                               << 
1187         /*                                    << 
1188          * A single work shouldn't be execute << 
1189          * __queue_work() ensures that @work  << 
1190          * while still running in the previou << 
1191          * @work is not executed concurrently << 
1192          * pool. Check whether anyone is alre << 
1193          * defer the work to the currently ex << 
1194          */                                   << 
1195         collision = find_worker_executing_wor << 
1196         if (unlikely(collision)) {            << 
1197                 move_linked_works(work, &coll << 
1198                 return false;                 << 
1199         }                                     << 
1200                                               << 
1201         move_linked_works(work, &worker->sche << 
1202         return true;                          << 
1203 }                                             << 
1204                                               << 
1205 static struct irq_work *bh_pool_irq_work(stru << 
1206 {                                             << 
1207         int high = pool->attrs->nice == HIGHP << 
1208                                               << 
1209         return &per_cpu(bh_pool_irq_works, po << 
1210 }                                             << 
1211                                               << 
1212 static void kick_bh_pool(struct worker_pool * << 
1213 {                                             << 
1214 #ifdef CONFIG_SMP                             << 
1215         /* see drain_dead_softirq_workfn() fo << 
1216         if (unlikely(pool->cpu != smp_process << 
1217                      !(pool->flags & POOL_BH_ << 
1218                 irq_work_queue_on(bh_pool_irq << 
1219                 return;                       << 
1220         }                                     << 
1221 #endif                                        << 
1222         if (pool->attrs->nice == HIGHPRI_NICE << 
1223                 raise_softirq_irqoff(HI_SOFTI << 
1224         else                                  << 
1225                 raise_softirq_irqoff(TASKLET_ << 
1226 }                                             << 
1227                                               << 
1228 /**                                           << 
1229  * kick_pool - wake up an idle worker if nece << 
1230  * @pool: pool to kick                        << 
1231  *                                            << 
1232  * @pool may have pending work items. Wake up << 
1233  * whether a worker was woken up.             << 
1234  */                                           << 
1235 static bool kick_pool(struct worker_pool *poo << 
1236 {                                                852 {
1237         struct worker *worker = first_idle_wo    853         struct worker *worker = first_idle_worker(pool);
1238         struct task_struct *p;                << 
1239                                               << 
1240         lockdep_assert_held(&pool->lock);     << 
1241                                               << 
1242         if (!need_more_worker(pool) || !worke << 
1243                 return false;                 << 
1244                                               << 
1245         if (pool->flags & POOL_BH) {          << 
1246                 kick_bh_pool(pool);           << 
1247                 return true;                  << 
1248         }                                     << 
1249                                               << 
1250         p = worker->task;                     << 
1251                                               << 
1252 #ifdef CONFIG_SMP                             << 
1253         /*                                    << 
1254          * Idle @worker is about to execute @ << 
1255          * opportunity to migrate @worker at  << 
1256          * wake_cpu field. Let's see if we wa << 
1257          * execution locality.                << 
1258          *                                    << 
1259          * We're waking the worker that went  << 
1260          * chance that @worker is marked idle << 
1261          * so, setting the wake_cpu won't do  << 
1262          * optimization and the race window i << 
1263          * now. If this becomes pronounced, w << 
1264          * still on cpu when picking an idle  << 
1265          *                                    << 
1266          * If @pool has non-strict affinity,  << 
1267          * its affinity scope. Repatriate.    << 
1268          */                                   << 
1269         if (!pool->attrs->affn_strict &&      << 
1270             !cpumask_test_cpu(p->wake_cpu, po << 
1271                 struct work_struct *work = li << 
1272                                               << 
1273                 int wake_cpu = cpumask_any_an << 
1274                                               << 
1275                 if (wake_cpu < nr_cpu_ids) {  << 
1276                         p->wake_cpu = wake_cp << 
1277                         get_work_pwq(work)->s << 
1278                 }                             << 
1279         }                                     << 
1280 #endif                                        << 
1281         wake_up_process(p);                   << 
1282         return true;                          << 
1283 }                                             << 
1284                                               << 
1285 #ifdef CONFIG_WQ_CPU_INTENSIVE_REPORT         << 
1286                                               << 
1287 /*                                            << 
1288  * Concurrency-managed per-cpu work items tha << 
1289  * wq_cpu_intensive_thresh_us trigger the aut << 
1290  * which prevents them from stalling other co << 
1291  * work function keeps triggering this mechan << 
1292  * should be using an unbound workqueue inste << 
1293  *                                            << 
1294  * wq_cpu_intensive_report() tracks work func << 
1295  * and report them so that they can be examin << 
1296  * workqueues as appropriate. To avoid floodi << 
1297  * function is tracked and reported with expo << 
1298  */                                           << 
1299 #define WCI_MAX_ENTS 128                      << 
1300                                               << 
1301 struct wci_ent {                              << 
1302         work_func_t             func;         << 
1303         atomic64_t              cnt;          << 
1304         struct hlist_node       hash_node;    << 
1305 };                                            << 
1306                                               << 
1307 static struct wci_ent wci_ents[WCI_MAX_ENTS]; << 
1308 static int wci_nr_ents;                       << 
1309 static DEFINE_RAW_SPINLOCK(wci_lock);         << 
1310 static DEFINE_HASHTABLE(wci_hash, ilog2(WCI_M << 
1311                                               << 
1312 static struct wci_ent *wci_find_ent(work_func << 
1313 {                                             << 
1314         struct wci_ent *ent;                  << 
1315                                               << 
1316         hash_for_each_possible_rcu(wci_hash,  << 
1317                                    (unsigned  << 
1318                 if (ent->func == func)        << 
1319                         return ent;           << 
1320         }                                     << 
1321         return NULL;                          << 
1322 }                                             << 
1323                                               << 
1324 static void wq_cpu_intensive_report(work_func << 
1325 {                                             << 
1326         struct wci_ent *ent;                  << 
1327                                               << 
1328 restart:                                      << 
1329         ent = wci_find_ent(func);             << 
1330         if (ent) {                            << 
1331                 u64 cnt;                      << 
1332                                               << 
1333                 /*                            << 
1334                  * Start reporting from the w << 
1335                  * exponentially.             << 
1336                  */                           << 
1337                 cnt = atomic64_inc_return_rel << 
1338                 if (wq_cpu_intensive_warning_ << 
1339                     cnt >= wq_cpu_intensive_w << 
1340                     is_power_of_2(cnt + 1 - w << 
1341                         printk_deferred(KERN_ << 
1342                                         ent-> << 
1343                                         atomi << 
1344                 return;                       << 
1345         }                                     << 
1346                                                  854 
1347         /*                                    !! 855         if (likely(worker))
1348          * @func is a new violation. Allocate !! 856                 wake_up_process(worker->task);
1349          * is exhausted, something went reall << 
1350          * noise already.                     << 
1351          */                                   << 
1352         if (wci_nr_ents >= WCI_MAX_ENTS)      << 
1353                 return;                       << 
1354                                               << 
1355         raw_spin_lock(&wci_lock);             << 
1356                                               << 
1357         if (wci_nr_ents >= WCI_MAX_ENTS) {    << 
1358                 raw_spin_unlock(&wci_lock);   << 
1359                 return;                       << 
1360         }                                     << 
1361                                               << 
1362         if (wci_find_ent(func)) {             << 
1363                 raw_spin_unlock(&wci_lock);   << 
1364                 goto restart;                 << 
1365         }                                     << 
1366                                               << 
1367         ent = &wci_ents[wci_nr_ents++];       << 
1368         ent->func = func;                     << 
1369         atomic64_set(&ent->cnt, 0);           << 
1370         hash_add_rcu(wci_hash, &ent->hash_node, (unsigned long)func);        << 
1371                                               << 
1372         raw_spin_unlock(&wci_lock);           << 
1373                                               << 
1374         goto restart;                         << 
1375 }                                                857 }
1376                                                  858 
1377 #else   /* CONFIG_WQ_CPU_INTENSIVE_REPORT */  << 
1378 static void wq_cpu_intensive_report(work_func_t func) {}                     << 
1379 #endif  /* CONFIG_WQ_CPU_INTENSIVE_REPORT */  << 
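
/*
 * Editorial sketch (userspace C, not kernel code) of the rate limiting used
 * by wq_cpu_intensive_report() above: once a function has crossed the warning
 * threshold, a warning is only emitted when the excess count plus one is a
 * power of two, so the reporting interval doubles each time.  The threshold
 * value 4 below is an arbitrary demo value, not the kernel default.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool is_pow2(uint64_t v)
{
	return v && !(v & (v - 1));
}

int main(void)
{
	const uint64_t warning_thresh = 4;	/* assumed demo value */

	for (uint64_t cnt = 1; cnt <= 64; cnt++) {
		/* same shape as the condition in wq_cpu_intensive_report() */
		if (cnt >= warning_thresh &&
		    is_pow2(cnt + 1 - warning_thresh))
			printf("report at violation #%llu\n",
			       (unsigned long long)cnt);
	}
	return 0;	/* reports at 4, 5, 7, 11, 19, 35: intervals double */
}
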
1380                                               << 
1381 /**                                              859 /**
1382  * wq_worker_running - a worker is running ag    860  * wq_worker_running - a worker is running again
1383  * @task: task waking up                         861  * @task: task waking up
1384  *                                               862  *
1385  * This function is called when a worker retu    863  * This function is called when a worker returns from schedule()
1386  */                                              864  */
1387 void wq_worker_running(struct task_struct *ta    865 void wq_worker_running(struct task_struct *task)
1388 {                                                866 {
1389         struct worker *worker = kthread_data(    867         struct worker *worker = kthread_data(task);
1390                                                  868 
1391         if (!READ_ONCE(worker->sleeping))     !! 869         if (!worker->sleeping)
1392                 return;                          870                 return;
1393                                                  871 
1394         /*                                       872         /*
1395          * If preempted by unbind_workers() b    873          * If preempted by unbind_workers() between the WORKER_NOT_RUNNING check
1396          * and the nr_running increment below    874          * and the nr_running increment below, we may ruin the nr_running reset
1397          * and leave with an unexpected pool-    875          * and leave with an unexpected pool->nr_running == 1 on the newly unbound
1398          * pool. Protect against such race.      876          * pool. Protect against such race.
1399          */                                      877          */
1400         preempt_disable();                       878         preempt_disable();
1401         if (!(worker->flags & WORKER_NOT_RUNN    879         if (!(worker->flags & WORKER_NOT_RUNNING))
1402                 worker->pool->nr_running++;      880                 worker->pool->nr_running++;
1403         preempt_enable();                        881         preempt_enable();
1404                                               !! 882         worker->sleeping = 0;
1405         /*                                    << 
1406          * CPU intensive auto-detection cares about how long a work item hogged << 
1407          * CPU without sleeping. Reset the starting timestamp on wakeup.     << 
1408          */                                   << 
1409         worker->current_at = worker->task->se.sum_exec_runtime;              << 
1410                                               << 
1411         WRITE_ONCE(worker->sleeping, 0);      << 
1412 }                                                883 }
1413                                                  884 
1414 /**                                              885 /**
1415  * wq_worker_sleeping - a worker is going to     886  * wq_worker_sleeping - a worker is going to sleep
1416  * @task: task going to sleep                    887  * @task: task going to sleep
1417  *                                               888  *
1418  * This function is called from schedule() wh    889  * This function is called from schedule() when a busy worker is
1419  * going to sleep.                               890  * going to sleep.
1420  */                                              891  */
1421 void wq_worker_sleeping(struct task_struct *t    892 void wq_worker_sleeping(struct task_struct *task)
1422 {                                                893 {
1423         struct worker *worker = kthread_data(    894         struct worker *worker = kthread_data(task);
1424         struct worker_pool *pool;                895         struct worker_pool *pool;
1425                                                  896 
1426         /*                                       897         /*
1427          * Rescuers, which may not have all t    898          * Rescuers, which may not have all the fields set up like normal
1428          * workers, also reach here, let's no    899          * workers, also reach here, let's not access anything before
1429          * checking NOT_RUNNING.                 900          * checking NOT_RUNNING.
1430          */                                      901          */
1431         if (worker->flags & WORKER_NOT_RUNNIN    902         if (worker->flags & WORKER_NOT_RUNNING)
1432                 return;                          903                 return;
1433                                                  904 
1434         pool = worker->pool;                     905         pool = worker->pool;
1435                                                  906 
1436         /* Return if preempted before wq_work    907         /* Return if preempted before wq_worker_running() was reached */
1437         if (READ_ONCE(worker->sleeping))      !! 908         if (worker->sleeping)
1438                 return;                          909                 return;
1439                                                  910 
1440         WRITE_ONCE(worker->sleeping, 1);      !! 911         worker->sleeping = 1;
1441         raw_spin_lock_irq(&pool->lock);          912         raw_spin_lock_irq(&pool->lock);
1442                                                  913 
1443         /*                                       914         /*
1444          * Recheck in case unbind_workers() p    915          * Recheck in case unbind_workers() preempted us. We don't
1445          * want to decrement nr_running after    916          * want to decrement nr_running after the worker is unbound
1446          * and nr_running has been reset.        917          * and nr_running has been reset.
1447          */                                      918          */
1448         if (worker->flags & WORKER_NOT_RUNNIN    919         if (worker->flags & WORKER_NOT_RUNNING) {
1449                 raw_spin_unlock_irq(&pool->lo    920                 raw_spin_unlock_irq(&pool->lock);
1450                 return;                          921                 return;
1451         }                                        922         }
1452                                                  923 
1453         pool->nr_running--;                      924         pool->nr_running--;
1454         if (kick_pool(pool))                  !! 925         if (need_more_worker(pool))
1455                 worker->current_pwq->stats[PWQ_STAT_CM_WAKEUP]++; !! 926                 wake_up_worker(pool);
1456                                               << 
1457         raw_spin_unlock_irq(&pool->lock);        927         raw_spin_unlock_irq(&pool->lock);
1458 }                                                928 }
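
/*
 * Editorial sketch (single-threaded userspace C, not kernel code) of the
 * bookkeeping wq_worker_sleeping()/wq_worker_running() implement: a pool-wide
 * count of concurrency-managed workers that are currently runnable, with a
 * per-worker "sleeping" flag keeping the decrement/increment strictly paired
 * even if the sleep notification arrives more than once before the wakeup.
 * All names here are invented for the demo.
 */
#include <stdbool.h>
#include <stdio.h>

struct demo_pool   { int nr_running; };
struct demo_worker { bool sleeping; struct demo_pool *pool; };

static void demo_sleeping(struct demo_worker *w)
{
	if (w->sleeping)		/* already accounted for */
		return;
	w->sleeping = true;
	w->pool->nr_running--;
}

static void demo_running(struct demo_worker *w)
{
	if (!w->sleeping)
		return;
	w->pool->nr_running++;
	w->sleeping = false;
}

int main(void)
{
	struct demo_pool pool = { .nr_running = 1 };
	struct demo_worker w = { .sleeping = false, .pool = &pool };

	demo_sleeping(&w);	/* schedule() called: 1 -> 0 */
	demo_sleeping(&w);	/* duplicate notification: no double decrement */
	demo_running(&w);	/* woken up: 0 -> 1 */
	printf("nr_running = %d\n", pool.nr_running);	/* prints 1 */
	return 0;
}
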
1459                                                  929 
1460 /**                                              930 /**
1461  * wq_worker_tick - a scheduler tick occurred while a kworker is running     << 
1462  * @task: task currently running              << 
1463  *                                            << 
1464  * Called from sched_tick(). We're in the IRQ context and the current        << 
1465  * worker's fields which follow the 'K' locking rule can be accessed safely. << 
1466  */                                           << 
1467 void wq_worker_tick(struct task_struct *task) << 
1468 {                                             << 
1469         struct worker *worker = kthread_data(task);                          << 
1470         struct pool_workqueue *pwq = worker->current_pwq;                    << 
1471         struct worker_pool *pool = worker->pool;                             << 
1472                                               << 
1473         if (!pwq)                             << 
1474                 return;                       << 
1475                                               << 
1476         pwq->stats[PWQ_STAT_CPU_TIME] += TICK_USEC;                          << 
1477                                               << 
1478         if (!wq_cpu_intensive_thresh_us)      << 
1479                 return;                       << 
1480                                               << 
1481         /*                                    << 
1482          * If the current worker is concurrency managed and hogged the CPU for << 
1483          * longer than wq_cpu_intensive_thresh_us, it's automatically marked << 
1484          * CPU_INTENSIVE to avoid stalling other concurrency-managed work items. << 
1485          *                                    << 
1486          * Set @worker->sleeping means that @worker is in the process of     << 
1487          * switching out voluntarily and won't be contributing to            << 
1488          * @pool->nr_running until it wakes up. As wq_worker_sleeping() also << 
1489          * decrements ->nr_running, setting CPU_INTENSIVE here can lead to   << 
1490          * double decrements. The task is releasing the CPU anyway. Let's skip. << 
1491          * We probably want to make this prettier in the future.             << 
1492          */                                   << 
1493         if ((worker->flags & WORKER_NOT_RUNNING) || READ_ONCE(worker->sleeping) || << 
1494             worker->task->se.sum_exec_runtime - worker->current_at <         << 
1495             wq_cpu_intensive_thresh_us * NSEC_PER_USEC)                      << 
1496                 return;                       << 
1497                                               << 
1498         raw_spin_lock(&pool->lock);           << 
1499                                               << 
1500         worker_set_flags(worker, WORKER_CPU_INTENSIVE);                      << 
1501         wq_cpu_intensive_report(worker->current_func);                       << 
1502         pwq->stats[PWQ_STAT_CPU_INTENSIVE]++; << 
1503                                               << 
1504         if (kick_pool(pool))                  << 
1505                 pwq->stats[PWQ_STAT_CM_WAKEUP]++;                            << 
1506                                               << 
1507         raw_spin_unlock(&pool->lock);         << 
1508 }                                             << 
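
/*
 * Editorial sketch (userspace C, not kernel code) of the detection idea in
 * wq_worker_tick(): snapshot the thread's consumed CPU time when a "work
 * item" starts, then on each periodic check compare the delta against a
 * threshold.  The threshold, loop counts and message are arbitrary demo
 * values; CLOCK_THREAD_CPUTIME_ID stands in for se.sum_exec_runtime.
 */
#include <stdio.h>
#include <time.h>

static long long thread_cputime_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ts);
	return ts.tv_sec * 1000000000LL + ts.tv_nsec;
}

int main(void)
{
	const long long thresh_ns = 10 * 1000 * 1000;	/* assumed 10ms */
	long long current_at = thread_cputime_ns();	/* taken when the item starts */
	volatile unsigned long spin = 0;

	for (;;) {
		for (int i = 0; i < 1000000; i++)	/* stands in for one tick */
			spin++;
		if (thread_cputime_ns() - current_at >= thresh_ns) {
			printf("work item hogged the CPU, would mark CPU_INTENSIVE\n");
			break;
		}
	}
	return 0;
}
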
1509                                               << 
1510 /**                                           << 
1511  * wq_worker_last_func - retrieve worker's la    931  * wq_worker_last_func - retrieve worker's last work function
1512  * @task: Task to retrieve last work function    932  * @task: Task to retrieve last work function of.
1513  *                                               933  *
1514  * Determine the last function a worker execu    934  * Determine the last function a worker executed. This is called from
1515  * the scheduler to get a worker's last known    935  * the scheduler to get a worker's last known identity.
1516  *                                               936  *
1517  * CONTEXT:                                      937  * CONTEXT:
1518  * raw_spin_lock_irq(rq->lock)                   938  * raw_spin_lock_irq(rq->lock)
1519  *                                               939  *
1520  * This function is called during schedule()     940  * This function is called during schedule() when a kworker is going
1521  * to sleep. It's used by psi to identify agg    941  * to sleep. It's used by psi to identify aggregation workers during
1522  * dequeuing, to allow periodic aggregation t    942  * dequeuing, to allow periodic aggregation to shut-off when that
1523  * worker is the last task in the system or c    943  * worker is the last task in the system or cgroup to go to sleep.
1524  *                                               944  *
1525  * As this function doesn't involve any workq    945  * As this function doesn't involve any workqueue-related locking, it
1526  * only returns stable values when called fro    946  * only returns stable values when called from inside the scheduler's
1527  * queuing and dequeuing paths, when @task, w    947  * queuing and dequeuing paths, when @task, which must be a kworker,
1528  * is guaranteed to not be processing any wor    948  * is guaranteed to not be processing any works.
1529  *                                               949  *
1530  * Return:                                       950  * Return:
1531  * The last work function %current executed a    951  * The last work function %current executed as a worker, NULL if it
1532  * hasn't executed any work yet.                 952  * hasn't executed any work yet.
1533  */                                              953  */
1534 work_func_t wq_worker_last_func(struct task_s    954 work_func_t wq_worker_last_func(struct task_struct *task)
1535 {                                                955 {
1536         struct worker *worker = kthread_data(    956         struct worker *worker = kthread_data(task);
1537                                                  957 
1538         return worker->last_func;                958         return worker->last_func;
1539 }                                                959 }
1540                                                  960 
1541 /**                                              961 /**
1542  * wq_node_nr_active - Determine wq_node_nr_a !! 962  * worker_set_flags - set worker flags and adjust nr_running accordingly
1543  * @wq: workqueue of interest                 !! 963  * @worker: self
1544  * @node: NUMA node, can be %NUMA_NO_NODE     !! 964  * @flags: flags to set
1545  *                                            << 
1546  * Determine wq_node_nr_active to use for @wq << 
1547  *                                            << 
1548  * - %NULL for per-cpu workqueues as they don << 
1549  *                                               965  *
1550  * - node_nr_active[nr_node_ids] if @node is  !! 966  * Set @flags in @worker->flags and adjust nr_running accordingly.
1551  *                                               967  *
1552  * - Otherwise, node_nr_active[@node].        !! 968  * CONTEXT:
                                                   >> 969  * raw_spin_lock_irq(pool->lock)
1553  */                                              970  */
1554 static struct wq_node_nr_active *wq_node_nr_a !! 971 static inline void worker_set_flags(struct worker *worker, unsigned int flags)
1555                                               << 
1556 {                                                972 {
1557         if (!(wq->flags & WQ_UNBOUND))        !! 973         struct worker_pool *pool = worker->pool;
1558                 return NULL;                  !! 974 
                                                   >> 975         WARN_ON_ONCE(worker->task != current);
1559                                                  976 
1560         if (node == NUMA_NO_NODE)             !! 977         /* If transitioning into NOT_RUNNING, adjust nr_running. */
1561                 node = nr_node_ids;           !! 978         if ((flags & WORKER_NOT_RUNNING) &&
                                                   >> 979             !(worker->flags & WORKER_NOT_RUNNING)) {
                                                   >> 980                 pool->nr_running--;
                                                   >> 981         }
1562                                                  982 
1563         return wq->node_nr_active[node];      !! 983         worker->flags |= flags;
1564 }                                                984 }
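
/*
 * Editorial sketch (userspace C, not kernel code) of the lookup rule in
 * wq_node_nr_active(): an unbound workqueue keeps one nr_active counter per
 * NUMA node plus an extra slot at index nr_node_ids used when no node is
 * specified, while per-cpu workqueues get no shared counter at all.  Types,
 * sizes and names below are invented for the demo.
 */
#include <stdio.h>

#define DEMO_NR_NODES	2
#define DEMO_NO_NODE	(-1)

struct demo_nr_active { int nr; int max; };

struct demo_wq {
	int unbound;					/* 0: per-cpu, 1: unbound */
	struct demo_nr_active node_nr_active[DEMO_NR_NODES + 1];
};

static struct demo_nr_active *demo_node_nr_active(struct demo_wq *wq, int node)
{
	if (!wq->unbound)
		return NULL;			/* per-cpu wqs don't share nr_active */
	if (node == DEMO_NO_NODE)
		node = DEMO_NR_NODES;		/* the extra "no node" slot */
	return &wq->node_nr_active[node];
}

int main(void)
{
	struct demo_wq wq = { .unbound = 1 };

	printf("node 0 slot:  %p\n", (void *)demo_node_nr_active(&wq, 0));
	printf("no-node slot: %p\n", (void *)demo_node_nr_active(&wq, DEMO_NO_NODE));
	return 0;
}
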
1565                                                  985 
1566 /**                                              986 /**
1567  * wq_update_node_max_active - Update per-nod !! 987  * worker_clr_flags - clear worker flags and adjust nr_running accordingly
1568  * @wq: workqueue to update                   !! 988  * @worker: self
1569  * @off_cpu: CPU that's going down, -1 if a C !! 989  * @flags: flags to clear
                                                   >> 990  *
                                                   >> 991  * Clear @flags in @worker->flags and adjust nr_running accordingly.
1570  *                                               992  *
1571  * Update @wq->node_nr_active[]->max. @wq mus !! 993  * CONTEXT:
1572  * distributed among nodes according to the p !! 994  * raw_spin_lock_irq(pool->lock)
1573  * cpus. The result is always between @wq->mi << 
1574  */                                              995  */
1575 static void wq_update_node_max_active(struct  !! 996 static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
1576 {                                                997 {
1577         struct cpumask *effective = unbound_e !! 998         struct worker_pool *pool = worker->pool;
1578         int min_active = READ_ONCE(wq->min_ac !! 999         unsigned int oflags = worker->flags;
1579         int max_active = READ_ONCE(wq->max_ac << 
1580         int total_cpus, node;                 << 
1581                                               << 
1582         lockdep_assert_held(&wq->mutex);      << 
1583                                                  1000 
1584         if (!wq_topo_initialized)             !! 1001         WARN_ON_ONCE(worker->task != current);
1585                 return;                       << 
1586                                                  1002 
1587         if (off_cpu >= 0 && !cpumask_test_cpu !! 1003         worker->flags &= ~flags;
1588                 off_cpu = -1;                 << 
1589                                                  1004 
1590         total_cpus = cpumask_weight_and(effec !! 1005         /*
1591         if (off_cpu >= 0)                     !! 1006          * If transitioning out of NOT_RUNNING, increment nr_running.  Note
1592                 total_cpus--;                 !! 1007          * that the nested NOT_RUNNING is not a noop.  NOT_RUNNING is mask
                                                   >> 1008          * of multiple flags, not a single flag.
                                                   >> 1009          */
                                                   >> 1010         if ((flags & WORKER_NOT_RUNNING) && (oflags & WORKER_NOT_RUNNING))
                                                   >> 1011                 if (!(worker->flags & WORKER_NOT_RUNNING))
                                                   >> 1012                         pool->nr_running++;
                                                   >> 1013 }
1593                                                  1014 
1594         /* If all CPUs of the wq get offline, !! 1015 /**
1595         if (unlikely(!total_cpus)) {          !! 1016  * find_worker_executing_work - find worker which is executing a work
1596                 for_each_node(node)           !! 1017  * @pool: pool of interest
1597                         wq_node_nr_active(wq, !! 1018  * @work: work to find worker for
                                                   >> 1019  *
                                                   >> 1020  * Find a worker which is executing @work on @pool by searching
                                                   >> 1021  * @pool->busy_hash which is keyed by the address of @work.  For a worker
                                                   >> 1022  * to match, its current execution should match the address of @work and
                                                   >> 1023  * its work function.  This is to avoid unwanted dependency between
                                                   >> 1024  * unrelated work executions through a work item being recycled while still
                                                   >> 1025  * being executed.
                                                   >> 1026  *
                                                   >> 1027  * This is a bit tricky.  A work item may be freed once its execution
                                                   >> 1028  * starts and nothing prevents the freed area from being recycled for
                                                   >> 1029  * another work item.  If the same work item address ends up being reused
                                                   >> 1030  * before the original execution finishes, workqueue will identify the
                                                   >> 1031  * recycled work item as currently executing and make it wait until the
                                                   >> 1032  * current execution finishes, introducing an unwanted dependency.
                                                   >> 1033  *
                                                   >> 1034  * This function checks the work item address and work function to avoid
                                                   >> 1035  * false positives.  Note that this isn't complete as one may construct a
                                                   >> 1036  * work function which can introduce dependency onto itself through a
                                                   >> 1037  * recycled work item.  Well, if somebody wants to shoot oneself in the
                                                   >> 1038  * foot that badly, there's only so much we can do, and if such deadlock
                                                   >> 1039  * actually occurs, it should be easy to locate the culprit work function.
                                                   >> 1040  *
                                                   >> 1041  * CONTEXT:
                                                   >> 1042  * raw_spin_lock_irq(pool->lock).
                                                   >> 1043  *
                                                   >> 1044  * Return:
                                                   >> 1045  * Pointer to worker which is executing @work if found, %NULL
                                                   >> 1046  * otherwise.
                                                   >> 1047  */
                                                   >> 1048 static struct worker *find_worker_executing_work(struct worker_pool *pool,
                                                   >> 1049                                                  struct work_struct *work)
                                                   >> 1050 {
                                                   >> 1051         struct worker *worker;
1598                                                  1052 
1599                 wq_node_nr_active(wq, NUMA_NO !! 1053         hash_for_each_possible(pool->busy_hash, worker, hentry,
1600                 return;                       !! 1054                                (unsigned long)work)
1601         }                                     !! 1055                 if (worker->current_work == work &&
                                                   >> 1056                     worker->current_func == work->func)
                                                   >> 1057                         return worker;
1602                                                  1058 
1603         for_each_node(node) {                 !! 1059         return NULL;
1604                 int node_cpus;                !! 1060 }
1605                                                  1061 
1606                 node_cpus = cpumask_weight_an !! 1062 /**
1607                 if (off_cpu >= 0 && cpu_to_no !! 1063  * move_linked_works - move linked works to a list
1608                         node_cpus--;          !! 1064  * @work: start of series of works to be scheduled
                                                   >> 1065  * @head: target list to append @work to
                                                   >> 1066  * @nextp: out parameter for nested worklist walking
                                                   >> 1067  *
                                                   >> 1068  * Schedule linked works starting from @work to @head.  Work series to
                                                   >> 1069  * be scheduled starts at @work and includes any consecutive work with
                                                   >> 1070  * WORK_STRUCT_LINKED set in its predecessor.
                                                   >> 1071  *
                                                   >> 1072  * If @nextp is not NULL, it's updated to point to the next work of
                                                   >> 1073  * the last scheduled work.  This allows move_linked_works() to be
                                                   >> 1074  * nested inside outer list_for_each_entry_safe().
                                                   >> 1075  *
                                                   >> 1076  * CONTEXT:
                                                   >> 1077  * raw_spin_lock_irq(pool->lock).
                                                   >> 1078  */
                                                   >> 1079 static void move_linked_works(struct work_struct *work, struct list_head *head,
                                                   >> 1080                               struct work_struct **nextp)
                                                   >> 1081 {
                                                   >> 1082         struct work_struct *n;
1609                                                  1083 
1610                 wq_node_nr_active(wq, node)-> !! 1084         /*
1611                         clamp(DIV_ROUND_UP(ma !! 1085          * Linked worklist will always end before the end of the list,
1612                               min_active, max !! 1086          * use NULL for list head.
                                                   >> 1087          */
                                                   >> 1088         list_for_each_entry_safe_from(work, n, NULL, entry) {
                                                   >> 1089                 list_move_tail(&work->entry, head);
                                                   >> 1090                 if (!(*work_data_bits(work) & WORK_STRUCT_LINKED))
                                                   >> 1091                         break;
1613         }                                        1092         }
1614                                                  1093 
1615         wq_node_nr_active(wq, NUMA_NO_NODE)-> !! 1094         /*
                                                   >> 1095          * If we're already inside safe list traversal and have moved
                                                   >> 1096          * multiple works to the scheduled queue, the next position
                                                   >> 1097          * needs to be updated.
                                                   >> 1098          */
                                                   >> 1099         if (nextp)
                                                   >> 1100                 *nextp = n;
1616 }                                                1101 }
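
/*
 * Editorial sketch (userspace C, not kernel code) of the per-node arithmetic
 * in wq_update_node_max_active(): each node gets max_active scaled by its
 * share of the workqueue's online CPUs, rounded up, and clamped between
 * min_active and max_active.  The CPU counts and limits below are arbitrary
 * demo values, not the kernel defaults.
 */
#include <stdio.h>

static int div_round_up(int a, int b) { return (a + b - 1) / b; }

static int clamp_int(int v, int lo, int hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

int main(void)
{
	const int min_active = 8, max_active = 256;	/* assumed demo limits */
	const int node_cpus[] = { 12, 4 };		/* demo: two NUMA nodes */
	const int total_cpus = 16;

	for (int node = 0; node < 2; node++) {
		int node_max = clamp_int(div_round_up(max_active * node_cpus[node],
						      total_cpus),
					 min_active, max_active);
		printf("node %d: max_active = %d\n", node, node_max);
	}
	return 0;	/* prints 192 for node 0 and 64 for node 1 */
}
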
1617                                                  1102 
1618 /**                                              1103 /**
1619  * get_pwq - get an extra reference on the sp    1104  * get_pwq - get an extra reference on the specified pool_workqueue
1620  * @pwq: pool_workqueue to get                   1105  * @pwq: pool_workqueue to get
1621  *                                               1106  *
1622  * Obtain an extra reference on @pwq.  The ca    1107  * Obtain an extra reference on @pwq.  The caller should guarantee that
1623  * @pwq has positive refcnt and be holding th    1108  * @pwq has positive refcnt and be holding the matching pool->lock.
1624  */                                              1109  */
1625 static void get_pwq(struct pool_workqueue *pw    1110 static void get_pwq(struct pool_workqueue *pwq)
1626 {                                                1111 {
1627         lockdep_assert_held(&pwq->pool->lock)    1112         lockdep_assert_held(&pwq->pool->lock);
1628         WARN_ON_ONCE(pwq->refcnt <= 0);          1113         WARN_ON_ONCE(pwq->refcnt <= 0);
1629         pwq->refcnt++;                           1114         pwq->refcnt++;
1630 }                                                1115 }
1631                                                  1116 
1632 /**                                              1117 /**
1633  * put_pwq - put a pool_workqueue reference      1118  * put_pwq - put a pool_workqueue reference
1634  * @pwq: pool_workqueue to put                   1119  * @pwq: pool_workqueue to put
1635  *                                               1120  *
1636  * Drop a reference of @pwq.  If its refcnt r    1121  * Drop a reference of @pwq.  If its refcnt reaches zero, schedule its
1637  * destruction.  The caller should be holding    1122  * destruction.  The caller should be holding the matching pool->lock.
1638  */                                              1123  */
1639 static void put_pwq(struct pool_workqueue *pw    1124 static void put_pwq(struct pool_workqueue *pwq)
1640 {                                                1125 {
1641         lockdep_assert_held(&pwq->pool->lock)    1126         lockdep_assert_held(&pwq->pool->lock);
1642         if (likely(--pwq->refcnt))               1127         if (likely(--pwq->refcnt))
1643                 return;                          1128                 return;
                                                   >> 1129         if (WARN_ON_ONCE(!(pwq->wq->flags & WQ_UNBOUND)))
                                                   >> 1130                 return;
1644         /*                                       1131         /*
1645          * @pwq can't be released under pool->lock, bounce to a dedicated   !! 1132          * @pwq can't be released under pool->lock, bounce to
1646          * kthread_worker to avoid A-A deadlocks.                            !! 1133          * pwq_unbound_release_workfn().  This never recurses on the same
                                                   >> 1134          * pool->lock as this path is taken only for unbound workqueues and
                                                   >> 1135          * the release work item is scheduled on a per-cpu workqueue.  To
                                                   >> 1136          * avoid lockdep warning, unbound pool->locks are given lockdep
                                                   >> 1137          * subclass of 1 in get_unbound_pool().
1647          */                                      1138          */
1648         kthread_queue_work(pwq_release_worker, &pwq->release_work);          !! 1139         schedule_work(&pwq->unbound_release_work);
1649 }                                                1140 }
1650                                                  1141 
1651 /**                                              1142 /**
1652  * put_pwq_unlocked - put_pwq() with surround    1143  * put_pwq_unlocked - put_pwq() with surrounding pool lock/unlock
1653  * @pwq: pool_workqueue to put (can be %NULL)    1144  * @pwq: pool_workqueue to put (can be %NULL)
1654  *                                               1145  *
1655  * put_pwq() with locking.  This function als    1146  * put_pwq() with locking.  This function also allows %NULL @pwq.
1656  */                                              1147  */
1657 static void put_pwq_unlocked(struct pool_work    1148 static void put_pwq_unlocked(struct pool_workqueue *pwq)
1658 {                                                1149 {
1659         if (pwq) {                               1150         if (pwq) {
1660                 /*                               1151                 /*
1661                  * As both pwqs and pools are    1152                  * As both pwqs and pools are RCU protected, the
1662                  * following lock operations     1153                  * following lock operations are safe.
1663                  */                              1154                  */
1664                 raw_spin_lock_irq(&pwq->pool-    1155                 raw_spin_lock_irq(&pwq->pool->lock);
1665                 put_pwq(pwq);                    1156                 put_pwq(pwq);
1666                 raw_spin_unlock_irq(&pwq->poo    1157                 raw_spin_unlock_irq(&pwq->pool->lock);
1667         }                                        1158         }
1668 }                                                1159 }
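
/*
 * Editorial sketch (userspace C, not kernel code) of the "put under a lock,
 * free later" pattern used by put_pwq(): the final reference drop only moves
 * the object to a release list while the lock is held, and the actual free
 * happens later from a context that does not hold that lock.  All names are
 * invented; a mutex stands in for pool->lock and a plain list for the
 * release kthread_worker.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct demo_obj {
	int refcnt;
	struct demo_obj *release_next;
};

static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;
static struct demo_obj *release_list;

static void demo_put(struct demo_obj *obj)	/* caller holds demo_lock */
{
	if (--obj->refcnt)
		return;
	obj->release_next = release_list;	/* defer, don't free under the lock */
	release_list = obj;
}

static void demo_release_work(void)
{
	struct demo_obj *list;

	pthread_mutex_lock(&demo_lock);
	list = release_list;			/* detach under the lock */
	release_list = NULL;
	pthread_mutex_unlock(&demo_lock);

	while (list) {				/* free outside the lock */
		struct demo_obj *obj = list;

		list = obj->release_next;
		free(obj);
		printf("released one object\n");
	}
}

int main(void)
{
	struct demo_obj *obj = calloc(1, sizeof(*obj));

	obj->refcnt = 2;
	pthread_mutex_lock(&demo_lock);
	demo_put(obj);
	demo_put(obj);				/* last put: queued for release */
	pthread_mutex_unlock(&demo_lock);
	demo_release_work();
	return 0;
}
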
1669                                                  1160 
1670 static bool pwq_is_empty(struct pool_workqueue *pwq)                         !! 1161 static void pwq_activate_inactive_work(struct work_struct *work)
1671 {                                                1162 {
1672         return !pwq->nr_active && list_empty(&pwq->inactive_works);          !! 1163         struct pool_workqueue *pwq = get_work_pwq(work);
1673 }                                             << 
1674                                               << 
1675 static void __pwq_activate_work(struct pool_workqueue *pwq,                  << 
1676                                 struct work_struct *work)                    << 
1677 {                                             << 
1678         unsigned long *wdb = work_data_bits(work);                           << 
1679                                                  1164 
1680         WARN_ON_ONCE(!(*wdb & WORK_STRUCT_INACTIVE));                        << 
1681         trace_workqueue_activate_work(work);     1165         trace_workqueue_activate_work(work);
1682         if (list_empty(&pwq->pool->worklist))    1166         if (list_empty(&pwq->pool->worklist))
1683                 pwq->pool->watchdog_ts = jiff    1167                 pwq->pool->watchdog_ts = jiffies;
1684         move_linked_works(work, &pwq->pool->w    1168         move_linked_works(work, &pwq->pool->worklist, NULL);
1685         __clear_bit(WORK_STRUCT_INACTIVE_BIT, wdb);                          !! 1169         __clear_bit(WORK_STRUCT_INACTIVE_BIT, work_data_bits(work));
1686 }                                             !! 1170         pwq->nr_active++;
1687                                               << 
1688 static bool tryinc_node_nr_active(struct wq_n << 
1689 {                                             << 
1690         int max = READ_ONCE(nna->max);        << 
1691                                               << 
1692         while (true) {                        << 
1693                 int old, tmp;                 << 
1694                                               << 
1695                 old = atomic_read(&nna->nr);  << 
1696                 if (old >= max)               << 
1697                         return false;         << 
1698                 tmp = atomic_cmpxchg_relaxed(&nna->nr, old, old + 1);        << 
1699                 if (tmp == old)               << 
1700                         return true;          << 
1701         }                                     << 
1702 }                                             << 
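
/*
 * Editorial sketch (userspace C, not kernel code) of the lockless bounded
 * increment in tryinc_node_nr_active(): read the current value, fail if it is
 * already at the limit, otherwise try to install value+1 with compare-and-swap
 * and retry on races.  C11 atomics stand in for the kernel's atomic_t helpers;
 * the names and limits are invented for the demo.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static bool try_inc_below(atomic_int *nr, int max)
{
	int old = atomic_load_explicit(nr, memory_order_relaxed);

	while (old < max) {
		/* on failure, 'old' is reloaded with the current value */
		if (atomic_compare_exchange_weak_explicit(nr, &old, old + 1,
							  memory_order_relaxed,
							  memory_order_relaxed))
			return true;
	}
	return false;
}

int main(void)
{
	atomic_int nr = 0;
	int granted = 0;

	for (int i = 0; i < 5; i++)
		granted += try_inc_below(&nr, 3);
	printf("granted %d of 5 requests (max 3)\n", granted);	/* granted 3 */
	return 0;
}
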
1703                                               << 
1704 /**                                           << 
1705  * pwq_tryinc_nr_active - Try to increment nr << 
1706  * @pwq: pool_workqueue of interest           << 
1707  * @fill: max_active may have increased, try  << 
1708  *                                            << 
1709  * Try to increment nr_active for @pwq. Retur << 
1710  * successfully obtained. %false otherwise.   << 
1711  */                                           << 
1712 static bool pwq_tryinc_nr_active(struct pool_ << 
1713 {                                             << 
1714         struct workqueue_struct *wq = pwq->wq << 
1715         struct worker_pool *pool = pwq->pool; << 
1716         struct wq_node_nr_active *nna = wq_no << 
1717         bool obtained = false;                << 
1718                                               << 
1719         lockdep_assert_held(&pool->lock);     << 
1720                                               << 
1721         if (!nna) {                           << 
1722                 /* BH or per-cpu workqueue, p << 
1723                 obtained = pwq->nr_active < R << 
1724                 goto out;                     << 
1725         }                                     << 
1726                                               << 
1727         if (unlikely(pwq->plugged))           << 
1728                 return false;                 << 
1729                                               << 
1730         /*                                    << 
1731          * Unbound workqueue uses per-node sh << 
1732          * already waiting on $nna, pwq_dec_n << 
1733          * concurrency level. Don't jump the  << 
1734          *                                    << 
1735          * We need to ignore the pending test << 
1736          * pwq_dec_nr_active() can only maint << 
1737          * increase it. This is indicated by  << 
1738          */                                   << 
1739         if (!list_empty(&pwq->pending_node) & << 
1740                 goto out;                     << 
1741                                               << 
1742         obtained = tryinc_node_nr_active(nna) << 
1743         if (obtained)                         << 
1744                 goto out;                     << 
1745                                               << 
1746         /*                                    << 
1747          * Lockless acquisition failed. Lock, << 
1748          * and try again. The smp_mb() is pai << 
1749          * of atomic_dec_return() in pwq_dec_ << 
1750          * we see the decremented $nna->nr or << 
1751          * $nna->pending_pwqs.                << 
1752          */                                   << 
1753         raw_spin_lock(&nna->lock);            << 
1754                                               << 
1755         if (list_empty(&pwq->pending_node))   << 
1756                 list_add_tail(&pwq->pending_n << 
1757         else if (likely(!fill))               << 
1758                 goto out_unlock;              << 
1759                                               << 
1760         smp_mb();                             << 
1761                                               << 
1762         obtained = tryinc_node_nr_active(nna) << 
1763                                               << 
1764         /*                                    << 
1765          * If @fill, @pwq might have already  << 
1766          * pending in cold paths doesn't affe << 
1767          */                                   << 
1768         if (obtained && likely(!fill))        << 
1769                 list_del_init(&pwq->pending_n << 
1770                                               << 
1771 out_unlock:                                   << 
1772         raw_spin_unlock(&nna->lock);          << 
1773 out:                                          << 
1774         if (obtained)                         << 
1775                 pwq->nr_active++;             << 
1776         return obtained;                      << 
1777 }                                             << 
1778                                               << 
1779 /**                                           << 
1780  * pwq_activate_first_inactive - Activate the << 
1781  * @pwq: pool_workqueue of interest           << 
1782  * @fill: max_active may have increased, try  << 
1783  *                                            << 
1784  * Activate the first inactive work item of @ << 
1785  * max_active limit.                          << 
1786  *                                            << 
1787  * Returns %true if an inactive work item has << 
1788  * inactive work item is found or max_active  << 
1789  */                                           << 
1790 static bool pwq_activate_first_inactive(struc << 
1791 {                                             << 
1792         struct work_struct *work =            << 
1793                 list_first_entry_or_null(&pwq << 
1794                                          stru << 
1795                                               << 
1796         if (work && pwq_tryinc_nr_active(pwq, << 
1797                 __pwq_activate_work(pwq, work << 
1798                 return true;                  << 
1799         } else {                              << 
1800                 return false;                 << 
1801         }                                     << 
1802 }                                             << 
1803                                               << 
1804 /**                                           << 
1805  * unplug_oldest_pwq - unplug the oldest pool << 
1806  * @wq: workqueue_struct where its oldest pwq << 
1807  *                                            << 
1808  * This function should only be called for or << 
1809  * oldest pwq is unplugged, the others are pl << 
1810  * ensure proper work item ordering::         << 
1811  *                                            << 
1812  *    dfl_pwq --------------+     [P] - plugg << 
1813  *                          |                 << 
1814  *                          v                 << 
1815  *    pwqs -> A -> B [P] -> C [P] (newest)    << 
1816  *            |    |        |                 << 
1817  *            1    3        5                 << 
1818  *            |    |        |                 << 
1819  *            2    4        6                 << 
1820  *                                            << 
1821  * When the oldest pwq is drained and removed << 
1822  * to unplug the next oldest one to start its << 
1823  * pwq's are linked into wq->pwqs with the ol << 
1824  * the list is the oldest.                    << 
1825  */                                           << 
1826 static void unplug_oldest_pwq(struct workqueu << 
1827 {                                             << 
1828         struct pool_workqueue *pwq;           << 
1829                                               << 
1830         lockdep_assert_held(&wq->mutex);      << 
1831                                               << 
1832         /* Caller should make sure that pwqs  << 
1833         pwq = list_first_entry_or_null(&wq->p << 
1834                                        pwqs_n << 
1835         raw_spin_lock_irq(&pwq->pool->lock);  << 
1836         if (pwq->plugged) {                   << 
1837                 pwq->plugged = false;         << 
1838                 if (pwq_activate_first_inacti << 
1839                         kick_pool(pwq->pool); << 
1840         }                                     << 
1841         raw_spin_unlock_irq(&pwq->pool->lock) << 
1842 }                                             << 
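
/*
 * Editorial sketch (userspace C, not kernel code) of the plugging scheme
 * pictured above for ordered workqueues: pwqs are kept oldest-first, only the
 * head is "unplugged" and allowed to execute work, and draining the head
 * unplugs the next oldest entry.  The array below stands in for wq->pwqs and
 * all names are invented for the demo.
 */
#include <stdbool.h>
#include <stdio.h>

struct demo_pwq { const char *name; bool plugged; };

int main(void)
{
	struct demo_pwq pwqs[] = {
		{ "A", false },		/* oldest: starts unplugged */
		{ "B", true },
		{ "C", true },		/* newest */
	};
	int head = 0, nr = 3;

	while (head < nr) {
		printf("executing works on pwq %s\n", pwqs[head].name);
		/* head drained and released: unplug the next oldest */
		head++;
		if (head < nr && pwqs[head].plugged) {
			pwqs[head].plugged = false;
			printf("unplugged pwq %s\n", pwqs[head].name);
		}
	}
	return 0;
}
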
1843                                               << 
1844 /**                                           << 
1845  * node_activate_pending_pwq - Activate a pen << 
1846  * @nna: wq_node_nr_active to activate a pend << 
1847  * @caller_pool: worker_pool the caller is lo << 
1848  *                                            << 
1849  * Activate a pwq in @nna->pending_pwqs. Call << 
1850  * @caller_pool may be unlocked and relocked  << 
1851  */                                           << 
1852 static void node_activate_pending_pwq(struct  << 
1853                                       struct  << 
1854 {                                             << 
1855         struct worker_pool *locked_pool = cal << 
1856         struct pool_workqueue *pwq;           << 
1857         struct work_struct *work;             << 
1858                                               << 
1859         lockdep_assert_held(&caller_pool->loc << 
1860                                               << 
1861         raw_spin_lock(&nna->lock);            << 
1862 retry:                                        << 
1863         pwq = list_first_entry_or_null(&nna-> << 
1864                                        struct << 
1865         if (!pwq)                             << 
1866                 goto out_unlock;              << 
1867                                               << 
1868         /*                                    << 
1869          * If @pwq is for a different pool th << 
1870          * @pwq->pool->lock. Let's trylock fi << 
1871          * / lock dance. For that, we also ne << 
1872          * nested inside pool locks.          << 
1873          */                                   << 
1874         if (pwq->pool != locked_pool) {       << 
1875                 raw_spin_unlock(&locked_pool- << 
1876                 locked_pool = pwq->pool;      << 
1877                 if (!raw_spin_trylock(&locked << 
1878                         raw_spin_unlock(&nna- << 
1879                         raw_spin_lock(&locked << 
1880                         raw_spin_lock(&nna->l << 
1881                         goto retry;           << 
1882                 }                             << 
1883         }                                     << 
1884                                               << 
1885         /*                                    << 
1886          * $pwq may not have any inactive wor << 
1887          * Drop it from pending_pwqs and see  << 
1888          */                                   << 
1889         work = list_first_entry_or_null(&pwq- << 
1890                                         struc << 
1891         if (!work) {                          << 
1892                 list_del_init(&pwq->pending_n << 
1893                 goto retry;                   << 
1894         }                                     << 
1895                                               << 
1896         /*                                    << 
1897          * Acquire an nr_active count and act << 
1898          * $pwq still has inactive work items << 
1899          * pending_pwqs so that we round-robi << 
1900          * inactive work items are not activa << 
1901          * given that there has never been an << 
1902          */                                   << 
1903         if (likely(tryinc_node_nr_active(nna) << 
1904                 pwq->nr_active++;             << 
1905                 __pwq_activate_work(pwq, work << 
1906                                               << 
1907                 if (list_empty(&pwq->inactive << 
1908                         list_del_init(&pwq->p << 
1909                 else                          << 
1910                         list_move_tail(&pwq-> << 
1911                                               << 
1912                 /* if activating a foreign po << 
1913                 if (pwq->pool != caller_pool) << 
1914                         kick_pool(pwq->pool); << 
1915         }                                     << 
1916                                               << 
1917 out_unlock:                                   << 
1918         raw_spin_unlock(&nna->lock);          << 
1919         if (locked_pool != caller_pool) {     << 
1920                 raw_spin_unlock(&locked_pool- << 
1921                 raw_spin_lock(&caller_pool->l << 
1922         }                                     << 
1923 }                                                1171 }
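
/*
 * Editorial sketch (userspace C, not kernel code) of the lock-ordering dance
 * in node_activate_pending_pwq(): while holding an "outer" lock, try to take
 * a second lock opportunistically; if that fails, drop the outer lock, take
 * the second lock in blocking mode, retake the outer lock and re-validate
 * whatever was read before.  Mutexes and names are invented for the demo.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t outer = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t inner = PTHREAD_MUTEX_INITIALIZER;

static void lock_both(void)
{
	pthread_mutex_lock(&outer);
	if (pthread_mutex_trylock(&inner) != 0) {
		/*
		 * Contention: fall back to the safe ordering.  Anything read
		 * under 'outer' before this point must be re-checked by the
		 * caller, exactly like the "goto retry" above.
		 */
		pthread_mutex_unlock(&outer);
		pthread_mutex_lock(&inner);
		pthread_mutex_lock(&outer);
	}
}

int main(void)
{
	lock_both();
	printf("holding both locks\n");
	pthread_mutex_unlock(&inner);
	pthread_mutex_unlock(&outer);
	return 0;
}
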
1924                                                  1172 
1925 /**                                           !! 1173 static void pwq_activate_first_inactive(struct pool_workqueue *pwq)
1926  * pwq_dec_nr_active - Retire an active count << 
1927  * @pwq: pool_workqueue of interest           << 
1928  *                                            << 
1929  * Decrement @pwq's nr_active and try to acti << 
1930  * For unbound workqueues, this function may  << 
1931  */                                           << 
1932 static void pwq_dec_nr_active(struct pool_wor << 
1933 {                                                1174 {
1934         struct worker_pool *pool = pwq->pool; !! 1175         struct work_struct *work = list_first_entry(&pwq->inactive_works,
1935         struct wq_node_nr_active *nna = wq_no !! 1176                                                     struct work_struct, entry);
1936                                               << 
1937         lockdep_assert_held(&pool->lock);     << 
1938                                               << 
1939         /*                                    << 
1940          * @pwq->nr_active should be decremen << 
1941          * workqueues.                        << 
1942          */                                   << 
1943         pwq->nr_active--;                     << 
1944                                               << 
1945         /*                                    << 
1946          * For a percpu workqueue, it's simpl << 
1947          * inactive work item on @pwq itself. << 
1948          */                                   << 
1949         if (!nna) {                           << 
1950                 pwq_activate_first_inactive(p << 
1951                 return;                       << 
1952         }                                     << 
1953                                               << 
1954         /*                                    << 
1955          * If @pwq is for an unbound workqueu << 
1956          * multiple pwqs and pools may be sha << 
1957          * pwq needs to wait for an nr_active << 
1958          * $nna->pending_pwqs. The following  << 
1959          * memory barrier is paired with smp_ << 
1960          * guarantee that either we see non-e << 
1961          * decremented $nna->nr.              << 
1962          *                                    << 
1963          * $nna->max may change as CPUs come  << 
1964          * max_active gets updated. However,  << 
1965          * larger than @pwq->wq->min_active w << 
1966          * This maintains the forward progres << 
1967          */                                   << 
1968         if (atomic_dec_return(&nna->nr) >= RE << 
1969                 return;                       << 
1970                                                  1177 
1971         if (!list_empty(&nna->pending_pwqs))  !! 1178         pwq_activate_inactive_work(work);
1972                 node_activate_pending_pwq(nna << 
1973 }                                                1179 }
1974                                                  1180 
1975 /**                                              1181 /**
1976  * pwq_dec_nr_in_flight - decrement pwq's nr_    1182  * pwq_dec_nr_in_flight - decrement pwq's nr_in_flight
1977  * @pwq: pwq of interest                         1183  * @pwq: pwq of interest
1978  * @work_data: work_data of work which left t    1184  * @work_data: work_data of work which left the queue
1979  *                                               1185  *
1980  * A work either has completed or is removed     1186  * A work either has completed or is removed from pending queue,
1981  * decrement nr_in_flight of its pwq and hand    1187  * decrement nr_in_flight of its pwq and handle workqueue flushing.
1982  *                                               1188  *
1983  * NOTE:                                      << 
1984  * For unbound workqueues, this function may  << 
1985  * and thus should be called after all other  << 
1986  * work item is complete.                     << 
1987  *                                            << 
1988  * CONTEXT:                                      1189  * CONTEXT:
1989  * raw_spin_lock_irq(pool->lock).                1190  * raw_spin_lock_irq(pool->lock).
1990  */                                              1191  */
1991 static void pwq_dec_nr_in_flight(struct pool_    1192 static void pwq_dec_nr_in_flight(struct pool_workqueue *pwq, unsigned long work_data)
1992 {                                                1193 {
1993         int color = get_work_color(work_data)    1194         int color = get_work_color(work_data);
1994                                                  1195 
1995         if (!(work_data & WORK_STRUCT_INACTIV !! 1196         if (!(work_data & WORK_STRUCT_INACTIVE)) {
1996                 pwq_dec_nr_active(pwq);       !! 1197                 pwq->nr_active--;
                                                   >> 1198                 if (!list_empty(&pwq->inactive_works)) {
                                                   >> 1199                         /* one down, submit an inactive one */
                                                   >> 1200                         if (pwq->nr_active < pwq->max_active)
                                                   >> 1201                                 pwq_activate_first_inactive(pwq);
                                                   >> 1202                 }
                                                   >> 1203         }
1997                                                  1204 
1998         pwq->nr_in_flight[color]--;              1205         pwq->nr_in_flight[color]--;
1999                                                  1206 
2000         /* is flush in progress and are we at    1207         /* is flush in progress and are we at the flushing tip? */
2001         if (likely(pwq->flush_color != color)    1208         if (likely(pwq->flush_color != color))
2002                 goto out_put;                    1209                 goto out_put;
2003                                                  1210 
2004         /* are there still in-flight works? *    1211         /* are there still in-flight works? */
2005         if (pwq->nr_in_flight[color])            1212         if (pwq->nr_in_flight[color])
2006                 goto out_put;                    1213                 goto out_put;
2007                                                  1214 
2008         /* this pwq is done, clear flush_colo    1215         /* this pwq is done, clear flush_color */
2009         pwq->flush_color = -1;                   1216         pwq->flush_color = -1;
2010                                                  1217 
2011         /*                                       1218         /*
2012          * If this was the last pwq, wake up     1219          * If this was the last pwq, wake up the first flusher.  It
2013          * will handle the rest.                 1220          * will handle the rest.
2014          */                                      1221          */
2015         if (atomic_dec_and_test(&pwq->wq->nr_    1222         if (atomic_dec_and_test(&pwq->wq->nr_pwqs_to_flush))
2016                 complete(&pwq->wq->first_flus    1223                 complete(&pwq->wq->first_flusher->done);
2017 out_put:                                         1224 out_put:
2018         put_pwq(pwq);                            1225         put_pwq(pwq);
2019 }                                                1226 }
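
/*
 * Editorial sketch (userspace C, not kernel code) of the flush bookkeeping in
 * pwq_dec_nr_in_flight(): a flusher sets up a count of pool_workqueues that
 * still have works of the flush colour in flight, each pwq decrements it when
 * its last such work retires, and whoever performs the final decrement signals
 * completion.  A mutex/condvar pair stands in for the kernel's atomic counter
 * and completion; names are invented for the demo.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t flush_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t flush_done = PTHREAD_COND_INITIALIZER;
static int nr_pwqs_to_flush;

static void pwq_finished_flush_colour(void)
{
	pthread_mutex_lock(&flush_lock);
	if (--nr_pwqs_to_flush == 0)		/* "dec and test" */
		pthread_cond_signal(&flush_done);
	pthread_mutex_unlock(&flush_lock);
}

int main(void)
{
	pthread_mutex_lock(&flush_lock);
	nr_pwqs_to_flush = 3;			/* three pwqs still busy */
	pthread_mutex_unlock(&flush_lock);

	for (int i = 0; i < 3; i++)
		pwq_finished_flush_colour();	/* the last call signals */

	pthread_mutex_lock(&flush_lock);
	while (nr_pwqs_to_flush)
		pthread_cond_wait(&flush_done, &flush_lock);
	pthread_mutex_unlock(&flush_lock);
	printf("flush complete\n");
	return 0;
}
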
2020                                                  1227 
2021 /**                                              1228 /**
2022  * try_to_grab_pending - steal work item from    1229  * try_to_grab_pending - steal work item from worklist and disable irq
2023  * @work: work item to steal                     1230  * @work: work item to steal
2024  * @cflags: %WORK_CANCEL_ flags               !! 1231  * @is_dwork: @work is a delayed_work
2025  * @irq_flags: place to store irq state       !! 1232  * @flags: place to store irq state
2026  *                                               1233  *
2027  * Try to grab PENDING bit of @work.  This fu    1234  * Try to grab PENDING bit of @work.  This function can handle @work in any
2028  * stable state - idle, on timer or on workli    1235  * stable state - idle, on timer or on worklist.
2029  *                                               1236  *
2030  * Return:                                       1237  * Return:
2031  *                                               1238  *
2032  *  ========    =============================    1239  *  ========    ================================================================
2033  *  1           if @work was pending and we s    1240  *  1           if @work was pending and we successfully stole PENDING
2034  *  0           if @work was idle and we clai    1241  *  0           if @work was idle and we claimed PENDING
2035  *  -EAGAIN     if PENDING couldn't be grabbe    1242  *  -EAGAIN     if PENDING couldn't be grabbed at the moment, safe to busy-retry
                                                   >> 1243  *  -ENOENT     if someone else is canceling @work, this state may persist
                                                   >> 1244  *              for arbitrarily long
2036  *  ========    =============================    1245  *  ========    ================================================================
2037  *                                               1246  *
2038  * Note:                                         1247  * Note:
2039  * On >= 0 return, the caller owns @work's PE    1248  * On >= 0 return, the caller owns @work's PENDING bit.  To avoid getting
2040  * interrupted while holding PENDING and @wor    1249  * interrupted while holding PENDING and @work off queue, irq must be
2041  * disabled on entry.  This, combined with de    1250  * disabled on entry.  This, combined with delayed_work->timer being
2042  * irqsafe, ensures that we return -EAGAIN fo    1251  * irqsafe, ensures that we return -EAGAIN for finite short period of time.
2043  *                                               1252  *
2044  * On successful return, >= 0, irq is disable    1253  * On successful return, >= 0, irq is disabled and the caller is
2045  * responsible for releasing it using local_i !! 1254  * responsible for releasing it using local_irq_restore(*@flags).
2046  *                                               1255  *
2047  * This function is safe to call from any con    1256  * This function is safe to call from any context including IRQ handler.
2048  */                                              1257  */
2049 static int try_to_grab_pending(struct work_st !! 1258 static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
2050                                unsigned long  !! 1259                                unsigned long *flags)
2051 {                                                1260 {
2052         struct worker_pool *pool;                1261         struct worker_pool *pool;
2053         struct pool_workqueue *pwq;              1262         struct pool_workqueue *pwq;
2054                                                  1263 
2055         local_irq_save(*irq_flags);           !! 1264         local_irq_save(*flags);
2056                                                  1265 
2057         /* try to steal the timer if it exist    1266         /* try to steal the timer if it exists */
2058         if (cflags & WORK_CANCEL_DELAYED) {   !! 1267         if (is_dwork) {
2059                 struct delayed_work *dwork =     1268                 struct delayed_work *dwork = to_delayed_work(work);
2060                                                  1269 
2061                 /*                               1270                 /*
2062                  * dwork->timer is irqsafe.      1271                  * dwork->timer is irqsafe.  If del_timer() fails, it's
2063                  * guaranteed that the timer     1272                  * guaranteed that the timer is not queued anywhere and not
2064                  * running on the local CPU.     1273                  * running on the local CPU.
2065                  */                              1274                  */
2066                 if (likely(del_timer(&dwork->    1275                 if (likely(del_timer(&dwork->timer)))
2067                         return 1;                1276                         return 1;
2068         }                                        1277         }
2069                                                  1278 
2070         /* try to claim PENDING the normal wa    1279         /* try to claim PENDING the normal way */
2071         if (!test_and_set_bit(WORK_STRUCT_PEN    1280         if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
2072                 return 0;                        1281                 return 0;
2073                                                  1282 
2074         rcu_read_lock();                         1283         rcu_read_lock();
2075         /*                                       1284         /*
2076          * The queueing is in progress, or it    1285          * The queueing is in progress, or it is already queued. Try to
2077          * steal it from ->worklist without c    1286          * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
2078          */                                      1287          */
2079         pool = get_work_pool(work);              1288         pool = get_work_pool(work);
2080         if (!pool)                               1289         if (!pool)
2081                 goto fail;                       1290                 goto fail;
2082                                                  1291 
2083         raw_spin_lock(&pool->lock);              1292         raw_spin_lock(&pool->lock);
2084         /*                                       1293         /*
2085          * work->data is guaranteed to point     1294          * work->data is guaranteed to point to pwq only while the work
2086          * item is queued on pwq->wq, and bot    1295          * item is queued on pwq->wq, and both updating work->data to point
2087          * to pwq on queueing and to pool on     1296          * to pwq on queueing and to pool on dequeueing are done under
2088          * pwq->pool->lock.  This in turn gua    1297          * pwq->pool->lock.  This in turn guarantees that, if work->data
2089          * points to pwq which is associated     1298          * points to pwq which is associated with a locked pool, the work
2090          * item is currently queued on that p    1299          * item is currently queued on that pool.
2091          */                                      1300          */
2092         pwq = get_work_pwq(work);                1301         pwq = get_work_pwq(work);
2093         if (pwq && pwq->pool == pool) {          1302         if (pwq && pwq->pool == pool) {
2094                 unsigned long work_data = *wo << 
2095                                               << 
2096                 debug_work_deactivate(work);     1303                 debug_work_deactivate(work);
2097                                                  1304 
2098                 /*                               1305                 /*
2099                  * A cancelable inactive work    1306                  * A cancelable inactive work item must be in the
2100                  * pwq->inactive_works since     1307                  * pwq->inactive_works since a queued barrier can't be
2101                  * canceled (see the comments    1308                  * canceled (see the comments in insert_wq_barrier()).
2102                  *                               1309                  *
2103                  * An inactive work item cann !! 1310                  * An inactive work item cannot be grabbed directly because
2104                  * it might have linked barri    1311                  * it might have linked barrier work items which, if left
2105                  * on the inactive_works list    1312                  * on the inactive_works list, will confuse pwq->nr_active
2106                  * management later on and ca !! 1313                  * management later on and cause stall.  Make sure the work
2107                  * barrier work items to the  !! 1314                  * item is activated before grabbing.
2108                  * item. Also keep WORK_STRUC << 
2109                  * it doesn't participate in  << 
2110                  * pwq_dec_nr_in_flight().    << 
2111                  */                              1315                  */
2112                 if (work_data & WORK_STRUCT_I !! 1316                 if (*work_data_bits(work) & WORK_STRUCT_INACTIVE)
2113                         move_linked_works(wor !! 1317                         pwq_activate_inactive_work(work);
2114                                                  1318 
2115                 list_del_init(&work->entry);     1319                 list_del_init(&work->entry);
                                                   >> 1320                 pwq_dec_nr_in_flight(pwq, *work_data_bits(work));
2116                                                  1321 
2117                 /*                            !! 1322                 /* work->data points to pwq iff queued, point to pool */
2118                  * work->data points to pwq i !! 1323                 set_work_pool_and_keep_pending(work, pool->id);
2119                  * this destroys work->data n << 
2120                  */                           << 
2121                 set_work_pool_and_keep_pendin << 
2122                                               << 
2123                                               << 
2124                 /* must be the last step, see << 
2125                 pwq_dec_nr_in_flight(pwq, wor << 
2126                                                  1324 
2127                 raw_spin_unlock(&pool->lock);    1325                 raw_spin_unlock(&pool->lock);
2128                 rcu_read_unlock();               1326                 rcu_read_unlock();
2129                 return 1;                        1327                 return 1;
2130         }                                        1328         }
2131         raw_spin_unlock(&pool->lock);            1329         raw_spin_unlock(&pool->lock);
2132 fail:                                            1330 fail:
2133         rcu_read_unlock();                       1331         rcu_read_unlock();
2134         local_irq_restore(*irq_flags);        !! 1332         local_irq_restore(*flags);
                                                   >> 1333         if (work_is_canceling(work))
                                                   >> 1334                 return -ENOENT;
                                                   >> 1335         cpu_relax();
2135         return -EAGAIN;                          1336         return -EAGAIN;
2136 }                                                1337 }
2137                                                  1338 
2138 /**                                              1339 /**
2139  * work_grab_pending - steal work item from w << 
2140  * @work: work item to steal                  << 
2141  * @cflags: %WORK_CANCEL_ flags               << 
2142  * @irq_flags: place to store IRQ state       << 
2143  *                                            << 
2144  * Grab PENDING bit of @work. @work can be in << 
2145  * or on worklist.                            << 
2146  *                                            << 
2147  * Can be called from any context. IRQ is dis << 
2148  * stored in *@irq_flags. The caller is respo << 
2149  * local_irq_restore().                       << 
2150  *                                            << 
2151  * Returns %true if @work was pending. %false << 
2152  */                                           << 
2153 static bool work_grab_pending(struct work_str << 
2154                               unsigned long * << 
2155 {                                             << 
2156         int ret;                              << 
2157                                               << 
2158         while (true) {                        << 
2159                 ret = try_to_grab_pending(wor << 
2160                 if (ret >= 0)                 << 
2161                         return ret;           << 
2162                 cpu_relax();                  << 
2163         }                                     << 
2164 }                                             << 
2165                                               << 
2166 /**                                           << 
2167  * insert_work - insert a work into a pool       1340  * insert_work - insert a work into a pool
2168  * @pwq: pwq @work belongs to                    1341  * @pwq: pwq @work belongs to
2169  * @work: work to insert                         1342  * @work: work to insert
2170  * @head: insertion point                        1343  * @head: insertion point
2171  * @extra_flags: extra WORK_STRUCT_* flags to    1344  * @extra_flags: extra WORK_STRUCT_* flags to set
2172  *                                               1345  *
2173  * Insert @work which belongs to @pwq after @    1346  * Insert @work which belongs to @pwq after @head.  @extra_flags is or'd to
2174  * work_struct flags.                            1347  * work_struct flags.
2175  *                                               1348  *
2176  * CONTEXT:                                      1349  * CONTEXT:
2177  * raw_spin_lock_irq(pool->lock).                1350  * raw_spin_lock_irq(pool->lock).
2178  */                                              1351  */
2179 static void insert_work(struct pool_workqueue    1352 static void insert_work(struct pool_workqueue *pwq, struct work_struct *work,
2180                         struct list_head *hea    1353                         struct list_head *head, unsigned int extra_flags)
2181 {                                                1354 {
2182         debug_work_activate(work);            !! 1355         struct worker_pool *pool = pwq->pool;
2183                                                  1356 
2184         /* record the work call stack in orde    1357         /* record the work call stack in order to print it in KASAN reports */
2185         kasan_record_aux_stack_noalloc(work);    1358         kasan_record_aux_stack_noalloc(work);
2186                                                  1359 
2187         /* we own @work, set data and link */    1360         /* we own @work, set data and link */
2188         set_work_pwq(work, pwq, extra_flags);    1361         set_work_pwq(work, pwq, extra_flags);
2189         list_add_tail(&work->entry, head);       1362         list_add_tail(&work->entry, head);
2190         get_pwq(pwq);                            1363         get_pwq(pwq);
                                                   >> 1364 
                                                   >> 1365         if (__need_more_worker(pool))
                                                   >> 1366                 wake_up_worker(pool);
2191 }                                                1367 }
2192                                                  1368 
2193 /*                                               1369 /*
2194  * Test whether @work is being queued from an    1370  * Test whether @work is being queued from another work executing on the
2195  * same workqueue.                               1371  * same workqueue.
2196  */                                              1372  */
2197 static bool is_chained_work(struct workqueue_    1373 static bool is_chained_work(struct workqueue_struct *wq)
2198 {                                                1374 {
2199         struct worker *worker;                   1375         struct worker *worker;
2200                                                  1376 
2201         worker = current_wq_worker();            1377         worker = current_wq_worker();
2202         /*                                       1378         /*
2203          * Return %true iff I'm a worker exec    1379          * Return %true iff I'm a worker executing a work item on @wq.  If
2204          * I'm @worker, it's safe to derefere    1380          * I'm @worker, it's safe to dereference it without locking.
2205          */                                      1381          */
2206         return worker && worker->current_pwq-    1382         return worker && worker->current_pwq->wq == wq;
2207 }                                                1383 }
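
/*
 * Illustrative sketch, not part of workqueue.c: a self-requeueing work item
 * is exactly the "chained work" case tested above - it is queued from a
 * worker that is currently executing on the same workqueue, so it is still
 * admitted while drain_workqueue() has marked the wq as draining.  All
 * example_* names below are hypothetical.
 */
#include <linux/atomic.h>
#include <linux/workqueue.h>

static struct workqueue_struct *example_wq;     /* assumed allocated elsewhere */
static void example_fn(struct work_struct *work);
static DECLARE_WORK(example_work, example_fn);
static atomic_t example_budget = ATOMIC_INIT(16);

static void example_fn(struct work_struct *work)
{
        /* requeueing from inside the handler is chained work on example_wq */
        if (atomic_dec_return(&example_budget) > 0)
                queue_work(example_wq, work);
}
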
2208                                                  1384 
2209 /*                                               1385 /*
2210  * When queueing an unbound work item to a wq    1386  * When queueing an unbound work item to a wq, prefer local CPU if allowed
2211  * by wq_unbound_cpumask.  Otherwise, round r    1387  * by wq_unbound_cpumask.  Otherwise, round robin among the allowed ones to
2212  * avoid perturbing sensitive tasks.             1388  * avoid perturbing sensitive tasks.
2213  */                                              1389  */
2214 static int wq_select_unbound_cpu(int cpu)        1390 static int wq_select_unbound_cpu(int cpu)
2215 {                                                1391 {
                                                   >> 1392         static bool printed_dbg_warning;
2216         int new_cpu;                             1393         int new_cpu;
2217                                                  1394 
2218         if (likely(!wq_debug_force_rr_cpu)) {    1395         if (likely(!wq_debug_force_rr_cpu)) {
2219                 if (cpumask_test_cpu(cpu, wq_    1396                 if (cpumask_test_cpu(cpu, wq_unbound_cpumask))
2220                         return cpu;              1397                         return cpu;
2221         } else {                              !! 1398         } else if (!printed_dbg_warning) {
2222                 pr_warn_once("workqueue: roun !! 1399                 pr_warn("workqueue: round-robin CPU selection forced, expect performance impact\n");
                                                   >> 1400                 printed_dbg_warning = true;
2223         }                                        1401         }
2224                                                  1402 
                                                   >> 1403         if (cpumask_empty(wq_unbound_cpumask))
                                                   >> 1404                 return cpu;
                                                   >> 1405 
2225         new_cpu = __this_cpu_read(wq_rr_cpu_l    1406         new_cpu = __this_cpu_read(wq_rr_cpu_last);
2226         new_cpu = cpumask_next_and(new_cpu, w    1407         new_cpu = cpumask_next_and(new_cpu, wq_unbound_cpumask, cpu_online_mask);
2227         if (unlikely(new_cpu >= nr_cpu_ids))     1408         if (unlikely(new_cpu >= nr_cpu_ids)) {
2228                 new_cpu = cpumask_first_and(w    1409                 new_cpu = cpumask_first_and(wq_unbound_cpumask, cpu_online_mask);
2229                 if (unlikely(new_cpu >= nr_cp    1410                 if (unlikely(new_cpu >= nr_cpu_ids))
2230                         return cpu;              1411                         return cpu;
2231         }                                        1412         }
2232         __this_cpu_write(wq_rr_cpu_last, new_    1413         __this_cpu_write(wq_rr_cpu_last, new_cpu);
2233                                                  1414 
2234         return new_cpu;                          1415         return new_cpu;
2235 }                                                1416 }
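
/*
 * Illustrative sketch, not part of workqueue.c: the wraparound idiom used by
 * wq_select_unbound_cpu() above, written as a stand-alone helper that
 * round-robins over the intersection of an allowed mask and cpu_online_mask.
 * example_rr_last and example_pick_cpu are hypothetical names.
 */
#include <linux/cpumask.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(int, example_rr_last);

static int example_pick_cpu(const struct cpumask *allowed, int fallback_cpu)
{
        int cpu = __this_cpu_read(example_rr_last);

        cpu = cpumask_next_and(cpu, allowed, cpu_online_mask);
        if (cpu >= nr_cpu_ids) {
                /* ran off the end of the mask, wrap to the first allowed CPU */
                cpu = cpumask_first_and(allowed, cpu_online_mask);
                if (cpu >= nr_cpu_ids)
                        return fallback_cpu;    /* no allowed CPU is online */
        }
        __this_cpu_write(example_rr_last, cpu);
        return cpu;
}
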
2236                                                  1417 
2237 static void __queue_work(int cpu, struct work    1418 static void __queue_work(int cpu, struct workqueue_struct *wq,
2238                          struct work_struct *    1419                          struct work_struct *work)
2239 {                                                1420 {
2240         struct pool_workqueue *pwq;              1421         struct pool_workqueue *pwq;
2241         struct worker_pool *last_pool, *pool; !! 1422         struct worker_pool *last_pool;
                                                   >> 1423         struct list_head *worklist;
2242         unsigned int work_flags;                 1424         unsigned int work_flags;
2243         unsigned int req_cpu = cpu;              1425         unsigned int req_cpu = cpu;
2244                                                  1426 
2245         /*                                       1427         /*
2246          * While a work item is PENDING && of    1428          * While a work item is PENDING && off queue, a task trying to
2247          * steal the PENDING will busy-loop w    1429          * steal the PENDING will busy-loop waiting for it to either get
2248          * queued or lose PENDING.  Grabbing     1430          * queued or lose PENDING.  Grabbing PENDING and queueing should
2249          * happen with IRQ disabled.             1431          * happen with IRQ disabled.
2250          */                                      1432          */
2251         lockdep_assert_irqs_disabled();          1433         lockdep_assert_irqs_disabled();
2252                                                  1434 
2253         /*                                    !! 1435 
2254          * For a draining wq, only works from !! 1436         /* if draining, only works from the same workqueue are allowed */
2255          * allowed. The __WQ_DESTROYING helps !! 1437         if (unlikely(wq->flags & __WQ_DRAINING) &&
2256          * queues a new work item to a wq aft !! 1438             WARN_ON_ONCE(!is_chained_work(wq)))
2257          */                                   << 
2258         if (unlikely(wq->flags & (__WQ_DESTRO << 
2259                      WARN_ON_ONCE(!is_chained << 
2260                 return;                          1439                 return;
2261         rcu_read_lock();                         1440         rcu_read_lock();
2262 retry:                                           1441 retry:
2263         /* pwq which will be used unless @wor    1442         /* pwq which will be used unless @work is executing elsewhere */
2264         if (req_cpu == WORK_CPU_UNBOUND) {    !! 1443         if (wq->flags & WQ_UNBOUND) {
2265                 if (wq->flags & WQ_UNBOUND)   !! 1444                 if (req_cpu == WORK_CPU_UNBOUND)
2266                         cpu = wq_select_unbou    1445                         cpu = wq_select_unbound_cpu(raw_smp_processor_id());
2267                 else                          !! 1446                 pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu));
                                                   >> 1447         } else {
                                                   >> 1448                 if (req_cpu == WORK_CPU_UNBOUND)
2268                         cpu = raw_smp_process    1449                         cpu = raw_smp_processor_id();
                                                   >> 1450                 pwq = per_cpu_ptr(wq->cpu_pwqs, cpu);
2269         }                                        1451         }
2270                                                  1452 
2271         pwq = rcu_dereference(*per_cpu_ptr(wq << 
2272         pool = pwq->pool;                     << 
2273                                               << 
2274         /*                                       1453         /*
2275          * If @work was previously on a diffe    1454          * If @work was previously on a different pool, it might still be
2276          * running there, in which case the w    1455          * running there, in which case the work needs to be queued on that
2277          * pool to guarantee non-reentrancy.     1456          * pool to guarantee non-reentrancy.
2278          *                                    << 
2279          * For ordered workqueue, work items  << 
2280          * for accurate order management.  Gu << 
2281          * non-reentrancy.  See the comments  << 
2282          */                                      1457          */
2283         last_pool = get_work_pool(work);         1458         last_pool = get_work_pool(work);
2284         if (last_pool && last_pool != pool && !! 1459         if (last_pool && last_pool != pwq->pool) {
2285                 struct worker *worker;           1460                 struct worker *worker;
2286                                                  1461 
2287                 raw_spin_lock(&last_pool->loc    1462                 raw_spin_lock(&last_pool->lock);
2288                                                  1463 
2289                 worker = find_worker_executin    1464                 worker = find_worker_executing_work(last_pool, work);
2290                                                  1465 
2291                 if (worker && worker->current    1466                 if (worker && worker->current_pwq->wq == wq) {
2292                         pwq = worker->current    1467                         pwq = worker->current_pwq;
2293                         pool = pwq->pool;     << 
2294                         WARN_ON_ONCE(pool !=  << 
2295                 } else {                         1468                 } else {
2296                         /* meh... not running    1469                         /* meh... not running there, queue here */
2297                         raw_spin_unlock(&last    1470                         raw_spin_unlock(&last_pool->lock);
2298                         raw_spin_lock(&pool-> !! 1471                         raw_spin_lock(&pwq->pool->lock);
2299                 }                                1472                 }
2300         } else {                                 1473         } else {
2301                 raw_spin_lock(&pool->lock);   !! 1474                 raw_spin_lock(&pwq->pool->lock);
2302         }                                        1475         }
2303                                                  1476 
2304         /*                                       1477         /*
2305          * pwq is determined and locked. For  !! 1478          * pwq is determined and locked.  For unbound pools, we could have
2306          * with pwq release and it could alre !! 1479          * raced with pwq release and it could already be dead.  If its
2307          * repeat pwq selection. Note that un !! 1480          * refcnt is zero, repeat pwq selection.  Note that pwqs never die
2308          * another pwq replacing it in cpu_pw !! 1481          * without another pwq replacing it in the numa_pwq_tbl or while
2309          * on it, so the retrying is guarante !! 1482          * work items are executing on it, so the retrying is guaranteed to
                                                   >> 1483          * make forward-progress.
2310          */                                      1484          */
2311         if (unlikely(!pwq->refcnt)) {            1485         if (unlikely(!pwq->refcnt)) {
2312                 if (wq->flags & WQ_UNBOUND) {    1486                 if (wq->flags & WQ_UNBOUND) {
2313                         raw_spin_unlock(&pool !! 1487                         raw_spin_unlock(&pwq->pool->lock);
2314                         cpu_relax();             1488                         cpu_relax();
2315                         goto retry;              1489                         goto retry;
2316                 }                                1490                 }
2317                 /* oops */                       1491                 /* oops */
2318                 WARN_ONCE(true, "workqueue: p    1492                 WARN_ONCE(true, "workqueue: per-cpu pwq for %s on cpu%d has 0 refcnt",
2319                           wq->name, cpu);        1493                           wq->name, cpu);
2320         }                                        1494         }
2321                                                  1495 
2322         /* pwq determined, queue */              1496         /* pwq determined, queue */
2323         trace_workqueue_queue_work(req_cpu, p    1497         trace_workqueue_queue_work(req_cpu, pwq, work);
2324                                                  1498 
2325         if (WARN_ON(!list_empty(&work->entry)    1499         if (WARN_ON(!list_empty(&work->entry)))
2326                 goto out;                        1500                 goto out;
2327                                                  1501 
2328         pwq->nr_in_flight[pwq->work_color]++;    1502         pwq->nr_in_flight[pwq->work_color]++;
2329         work_flags = work_color_to_flags(pwq-    1503         work_flags = work_color_to_flags(pwq->work_color);
2330                                                  1504 
2331         /*                                    !! 1505         if (likely(pwq->nr_active < pwq->max_active)) {
2332          * Limit the number of concurrently a << 
2333          * @work must also queue behind exist << 
2334          * ordering when max_active changes.  << 
2335          */                                   << 
2336         if (list_empty(&pwq->inactive_works)  << 
2337                 if (list_empty(&pool->worklis << 
2338                         pool->watchdog_ts = j << 
2339                                               << 
2340                 trace_workqueue_activate_work    1506                 trace_workqueue_activate_work(work);
2341                 insert_work(pwq, work, &pool- !! 1507                 pwq->nr_active++;
2342                 kick_pool(pool);              !! 1508                 worklist = &pwq->pool->worklist;
                                                   >> 1509                 if (list_empty(worklist))
                                                   >> 1510                         pwq->pool->watchdog_ts = jiffies;
2343         } else {                                 1511         } else {
2344                 work_flags |= WORK_STRUCT_INA    1512                 work_flags |= WORK_STRUCT_INACTIVE;
2345                 insert_work(pwq, work, &pwq-> !! 1513                 worklist = &pwq->inactive_works;
2346         }                                        1514         }
2347                                                  1515 
                                                   >> 1516         debug_work_activate(work);
                                                   >> 1517         insert_work(pwq, work, worklist, work_flags);
                                                   >> 1518 
2348 out:                                             1519 out:
2349         raw_spin_unlock(&pool->lock);         !! 1520         raw_spin_unlock(&pwq->pool->lock);
2350         rcu_read_unlock();                       1521         rcu_read_unlock();
2351 }                                                1522 }
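
/*
 * Illustrative sketch, not part of workqueue.c: one user-visible consequence
 * of the last_pool handling in __queue_work() above is that a given work
 * item is never executed by two workers at the same time, so per-item state
 * touched only by the handler needs no locking against concurrent
 * self-execution.  The example_* names are hypothetical.
 */
#include <linux/kernel.h>
#include <linux/workqueue.h>

struct example_ctx {
        struct work_struct work;
        unsigned long runs;             /* touched only by example_fn() */
};

static void example_fn(struct work_struct *work)
{
        struct example_ctx *ctx = container_of(work, struct example_ctx, work);

        ctx->runs++;                    /* safe: the item is non-reentrant */
}
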
2352                                                  1523 
2353 static bool clear_pending_if_disabled(struct  << 
2354 {                                             << 
2355         unsigned long data = *work_data_bits( << 
2356         struct work_offq_data offqd;          << 
2357                                               << 
2358         if (likely((data & WORK_STRUCT_PWQ) | << 
2359                    !(data & WORK_OFFQ_DISABLE << 
2360                 return false;                 << 
2361                                               << 
2362         work_offqd_unpack(&offqd, data);      << 
2363         set_work_pool_and_clear_pending(work, << 
2364                                         work_ << 
2365         return true;                          << 
2366 }                                             << 
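
/*
 * Illustrative sketch, not part of workqueue.c: clear_pending_if_disabled()
 * is what makes queueing a no-op for a disabled work item.  Assuming the
 * disable_work()/enable_work() interface of this kernel generation, a caller
 * might pause and resume a work item like this (example_* names are
 * hypothetical):
 */
#include <linux/workqueue.h>

static void example_fn(struct work_struct *work) { }
static DECLARE_WORK(example_work, example_fn);

static void example_pause_and_resume(struct workqueue_struct *wq)
{
        disable_work_sync(&example_work);  /* block queueing, wait out a running instance */
        queue_work(wq, &example_work);     /* rejected: returns false while disabled */
        enable_work(&example_work);        /* re-enable; does not queue by itself */
        queue_work(wq, &example_work);     /* queued normally again */
}
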
2367                                               << 
2368 /**                                              1524 /**
2369  * queue_work_on - queue work on specific cpu    1525  * queue_work_on - queue work on specific cpu
2370  * @cpu: CPU number to execute work on           1526  * @cpu: CPU number to execute work on
2371  * @wq: workqueue to use                         1527  * @wq: workqueue to use
2372  * @work: work to queue                          1528  * @work: work to queue
2373  *                                               1529  *
2374  * We queue the work to a specific CPU; the c    1530  * We queue the work to a specific CPU; the caller must ensure it
2375  * can't go away.  Callers that fail to ensur    1531  * can't go away.  Callers that fail to ensure that the specified
2376  * CPU cannot go away will execute on a rando    1532  * CPU cannot go away will execute on a randomly chosen CPU.
2377  * But note well that callers specifying a CP << 
2378  * online will get a splat.                   << 
2379  *                                               1533  *
2380  * Return: %false if @work was already on a q    1534  * Return: %false if @work was already on a queue, %true otherwise.
2381  */                                              1535  */
2382 bool queue_work_on(int cpu, struct workqueue_    1536 bool queue_work_on(int cpu, struct workqueue_struct *wq,
2383                    struct work_struct *work)     1537                    struct work_struct *work)
2384 {                                                1538 {
2385         bool ret = false;                        1539         bool ret = false;
2386         unsigned long irq_flags;              !! 1540         unsigned long flags;
2387                                                  1541 
2388         local_irq_save(irq_flags);            !! 1542         local_irq_save(flags);
2389                                                  1543 
2390         if (!test_and_set_bit(WORK_STRUCT_PEN !! 1544         if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
2391             !clear_pending_if_disabled(work)) << 
2392                 __queue_work(cpu, wq, work);     1545                 __queue_work(cpu, wq, work);
2393                 ret = true;                      1546                 ret = true;
2394         }                                        1547         }
2395                                                  1548 
2396         local_irq_restore(irq_flags);         !! 1549         local_irq_restore(flags);
2397         return ret;                              1550         return ret;
2398 }                                                1551 }
2399 EXPORT_SYMBOL(queue_work_on);                    1552 EXPORT_SYMBOL(queue_work_on);
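
/*
 * Usage sketch for queue_work_on(), not part of workqueue.c: queue_work()
 * from linux/workqueue.h is the common wrapper that passes WORK_CPU_UNBOUND;
 * a caller that pins work to a CPU is responsible for keeping that CPU from
 * going away, e.g. by holding cpus_read_lock().  example_* names are
 * hypothetical.
 */
#include <linux/cpu.h>
#include <linux/workqueue.h>

static void example_fn(struct work_struct *work) { }
static DECLARE_WORK(example_work, example_fn);

static void example_queue_on_cpu(struct workqueue_struct *wq, int cpu)
{
        cpus_read_lock();               /* keep @cpu from being offlined */
        if (cpu_online(cpu))
                queue_work_on(cpu, wq, &example_work);
        else
                queue_work(wq, &example_work);  /* fall back to any CPU */
        cpus_read_unlock();
}
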
2400                                                  1553 
2401 /**                                              1554 /**
2402  * select_numa_node_cpu - Select a CPU based  !! 1555  * workqueue_select_cpu_near - Select a CPU based on NUMA node
2403  * @node: NUMA node ID that we want to select    1556  * @node: NUMA node ID that we want to select a CPU from
2404  *                                               1557  *
2405  * This function will attempt to find a "rand    1558  * This function will attempt to find a "random" cpu available on a given
2406  * node. If there are no CPUs available on th    1559  * node. If there are no CPUs available on the given node it will return
2407  * WORK_CPU_UNBOUND indicating that we should    1560  * WORK_CPU_UNBOUND indicating that we should just schedule to any
2408  * available CPU if we need to schedule this     1561  * available CPU if we need to schedule this work.
2409  */                                              1562  */
2410 static int select_numa_node_cpu(int node)     !! 1563 static int workqueue_select_cpu_near(int node)
2411 {                                                1564 {
2412         int cpu;                                 1565         int cpu;
2413                                                  1566 
                                                   >> 1567         /* No point in doing this if NUMA isn't enabled for workqueues */
                                                   >> 1568         if (!wq_numa_enabled)
                                                   >> 1569                 return WORK_CPU_UNBOUND;
                                                   >> 1570 
2414         /* Delay binding to CPU if node is no    1571         /* Delay binding to CPU if node is not valid or online */
2415         if (node < 0 || node >= MAX_NUMNODES     1572         if (node < 0 || node >= MAX_NUMNODES || !node_online(node))
2416                 return WORK_CPU_UNBOUND;         1573                 return WORK_CPU_UNBOUND;
2417                                                  1574 
2418         /* Use local node/cpu if we are alrea    1575         /* Use local node/cpu if we are already there */
2419         cpu = raw_smp_processor_id();            1576         cpu = raw_smp_processor_id();
2420         if (node == cpu_to_node(cpu))            1577         if (node == cpu_to_node(cpu))
2421                 return cpu;                      1578                 return cpu;
2422                                                  1579 
2423         /* Use "random" otherwise known as "f    1580         /* Use "random" otherwise known as "first" online CPU of node */
2424         cpu = cpumask_any_and(cpumask_of_node    1581         cpu = cpumask_any_and(cpumask_of_node(node), cpu_online_mask);
2425                                                  1582 
2426         /* If CPU is valid return that, other    1583         /* If CPU is valid return that, otherwise just defer */
2427         return cpu < nr_cpu_ids ? cpu : WORK_    1584         return cpu < nr_cpu_ids ? cpu : WORK_CPU_UNBOUND;
2428 }                                                1585 }
2429                                                  1586 
2430 /**                                              1587 /**
2431  * queue_work_node - queue work on a "random"    1588  * queue_work_node - queue work on a "random" cpu for a given NUMA node
2432  * @node: NUMA node that we are targeting the    1589  * @node: NUMA node that we are targeting the work for
2433  * @wq: workqueue to use                         1590  * @wq: workqueue to use
2434  * @work: work to queue                          1591  * @work: work to queue
2435  *                                               1592  *
2436  * We queue the work to a "random" CPU within    1593  * We queue the work to a "random" CPU within a given NUMA node. The basic
2437  * idea here is to provide a way to somehow a    1594  * idea here is to provide a way to somehow associate work with a given
2438  * NUMA node.                                    1595  * NUMA node.
2439  *                                               1596  *
2440  * This function will only make a best effort    1597  * This function will only make a best effort attempt at getting this onto
2441  * the right NUMA node. If no node is request    1598  * the right NUMA node. If no node is requested or the requested node is
2442  * offline then we just fall back to standard    1599  * offline then we just fall back to standard queue_work behavior.
2443  *                                               1600  *
2444  * Currently the "random" CPU ends up being t    1601  * Currently the "random" CPU ends up being the first available CPU in the
2445  * intersection of cpu_online_mask and the cp    1602  * intersection of cpu_online_mask and the cpumask of the node, unless we
2446  * are running on the node. In that case we j    1603  * are running on the node. In that case we just use the current CPU.
2447  *                                               1604  *
2448  * Return: %false if @work was already on a q    1605  * Return: %false if @work was already on a queue, %true otherwise.
2449  */                                              1606  */
2450 bool queue_work_node(int node, struct workque    1607 bool queue_work_node(int node, struct workqueue_struct *wq,
2451                      struct work_struct *work    1608                      struct work_struct *work)
2452 {                                                1609 {
2453         unsigned long irq_flags;              !! 1610         unsigned long flags;
2454         bool ret = false;                        1611         bool ret = false;
2455                                                  1612 
2456         /*                                       1613         /*
2457          * This current implementation is spe    1614          * This current implementation is specific to unbound workqueues.
2458          * Specifically we only return the fi    1615          * Specifically we only return the first available CPU for a given
2459          * node instead of cycling through in    1616          * node instead of cycling through individual CPUs within the node.
2460          *                                       1617          *
2461          * If this is used with a per-cpu wor    1618          * If this is used with a per-cpu workqueue then the logic in
2462          * workqueue_select_cpu_near would ne    1619          * workqueue_select_cpu_near would need to be updated to allow for
2463          * some round robin type logic.          1620          * some round robin type logic.
2464          */                                      1621          */
2465         WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND    1622         WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND));
2466                                                  1623 
2467         local_irq_save(irq_flags);            !! 1624         local_irq_save(flags);
2468                                                  1625 
2469         if (!test_and_set_bit(WORK_STRUCT_PEN !! 1626         if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
2470             !clear_pending_if_disabled(work)) !! 1627                 int cpu = workqueue_select_cpu_near(node);
2471                 int cpu = select_numa_node_cp << 
2472                                                  1628 
2473                 __queue_work(cpu, wq, work);     1629                 __queue_work(cpu, wq, work);
2474                 ret = true;                      1630                 ret = true;
2475         }                                        1631         }
2476                                                  1632 
2477         local_irq_restore(irq_flags);         !! 1633         local_irq_restore(flags);
2478         return ret;                              1634         return ret;
2479 }                                                1635 }
2480 EXPORT_SYMBOL_GPL(queue_work_node);              1636 EXPORT_SYMBOL_GPL(queue_work_node);
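
/*
 * Usage sketch for queue_work_node(), not part of workqueue.c: keep the work
 * near a device's memory by targeting the device's NUMA node on an unbound
 * workqueue.  If the node has no online CPU, queueing silently falls back to
 * WORK_CPU_UNBOUND behaviour as described above.  example_* names are
 * hypothetical.
 */
#include <linux/device.h>
#include <linux/workqueue.h>

static void example_fn(struct work_struct *work) { }
static DECLARE_WORK(example_work, example_fn);

static void example_queue_near_device(struct workqueue_struct *unbound_wq,
                                      struct device *dev)
{
        queue_work_node(dev_to_node(dev), unbound_wq, &example_work);
}
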
2481                                                  1637 
2482 void delayed_work_timer_fn(struct timer_list     1638 void delayed_work_timer_fn(struct timer_list *t)
2483 {                                                1639 {
2484         struct delayed_work *dwork = from_tim    1640         struct delayed_work *dwork = from_timer(dwork, t, timer);
2485                                                  1641 
2486         /* should have been called from irqsa    1642         /* should have been called from irqsafe timer with irq already off */
2487         __queue_work(dwork->cpu, dwork->wq, &    1643         __queue_work(dwork->cpu, dwork->wq, &dwork->work);
2488 }                                                1644 }
2489 EXPORT_SYMBOL(delayed_work_timer_fn);            1645 EXPORT_SYMBOL(delayed_work_timer_fn);
2490                                                  1646 
2491 static void __queue_delayed_work(int cpu, str    1647 static void __queue_delayed_work(int cpu, struct workqueue_struct *wq,
2492                                 struct delaye    1648                                 struct delayed_work *dwork, unsigned long delay)
2493 {                                                1649 {
2494         struct timer_list *timer = &dwork->ti    1650         struct timer_list *timer = &dwork->timer;
2495         struct work_struct *work = &dwork->wo    1651         struct work_struct *work = &dwork->work;
2496                                                  1652 
2497         WARN_ON_ONCE(!wq);                       1653         WARN_ON_ONCE(!wq);
2498         WARN_ON_ONCE(timer->function != delay !! 1654         WARN_ON_FUNCTION_MISMATCH(timer->function, delayed_work_timer_fn);
2499         WARN_ON_ONCE(timer_pending(timer));      1655         WARN_ON_ONCE(timer_pending(timer));
2500         WARN_ON_ONCE(!list_empty(&work->entry    1656         WARN_ON_ONCE(!list_empty(&work->entry));
2501                                                  1657 
2502         /*                                       1658         /*
2503          * If @delay is 0, queue @dwork->work    1659          * If @delay is 0, queue @dwork->work immediately.  This is for
2504          * both optimization and correctness.    1660          * both optimization and correctness.  The earliest @timer can
2505          * expire is on the closest next tick    1661          * expire is on the closest next tick and delayed_work users depend
2506          * on that there's no such delay when    1662          * on that there's no such delay when @delay is 0.
2507          */                                      1663          */
2508         if (!delay) {                            1664         if (!delay) {
2509                 __queue_work(cpu, wq, &dwork-    1665                 __queue_work(cpu, wq, &dwork->work);
2510                 return;                          1666                 return;
2511         }                                        1667         }
2512                                                  1668 
2513         dwork->wq = wq;                          1669         dwork->wq = wq;
2514         dwork->cpu = cpu;                        1670         dwork->cpu = cpu;
2515         timer->expires = jiffies + delay;        1671         timer->expires = jiffies + delay;
2516                                                  1672 
2517         if (housekeeping_enabled(HK_TYPE_TIME !! 1673         if (unlikely(cpu != WORK_CPU_UNBOUND))
2518                 /* If the current cpu is a ho << 
2519                 cpu = smp_processor_id();     << 
2520                 if (!housekeeping_test_cpu(cp << 
2521                         cpu = housekeeping_an << 
2522                 add_timer_on(timer, cpu);        1674                 add_timer_on(timer, cpu);
2523         } else {                              !! 1675         else
2524                 if (likely(cpu == WORK_CPU_UN !! 1676                 add_timer(timer);
2525                         add_timer_global(time << 
2526                 else                          << 
2527                         add_timer_on(timer, c << 
2528         }                                     << 
2529 }                                                1677 }
2530                                                  1678 
2531 /**                                              1679 /**
2532  * queue_delayed_work_on - queue work on spec    1680  * queue_delayed_work_on - queue work on specific CPU after delay
2533  * @cpu: CPU number to execute work on           1681  * @cpu: CPU number to execute work on
2534  * @wq: workqueue to use                         1682  * @wq: workqueue to use
2535  * @dwork: work to queue                         1683  * @dwork: work to queue
2536  * @delay: number of jiffies to wait before q    1684  * @delay: number of jiffies to wait before queueing
2537  *                                               1685  *
2538  * Return: %false if @work was already on a q    1686  * Return: %false if @work was already on a queue, %true otherwise.  If
2539  * @delay is zero and @dwork is idle, it will    1687  * @delay is zero and @dwork is idle, it will be scheduled for immediate
2540  * execution.                                    1688  * execution.
2541  */                                              1689  */
2542 bool queue_delayed_work_on(int cpu, struct wo    1690 bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
2543                            struct delayed_wor    1691                            struct delayed_work *dwork, unsigned long delay)
2544 {                                                1692 {
2545         struct work_struct *work = &dwork->wo    1693         struct work_struct *work = &dwork->work;
2546         bool ret = false;                        1694         bool ret = false;
2547         unsigned long irq_flags;              !! 1695         unsigned long flags;
2548                                                  1696 
2549         /* read the comment in __queue_work()    1697         /* read the comment in __queue_work() */
2550         local_irq_save(irq_flags);            !! 1698         local_irq_save(flags);
2551                                                  1699 
2552         if (!test_and_set_bit(WORK_STRUCT_PEN !! 1700         if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
2553             !clear_pending_if_disabled(work)) << 
2554                 __queue_delayed_work(cpu, wq,    1701                 __queue_delayed_work(cpu, wq, dwork, delay);
2555                 ret = true;                      1702                 ret = true;
2556         }                                        1703         }
2557                                                  1704 
2558         local_irq_restore(irq_flags);         !! 1705         local_irq_restore(flags);
2559         return ret;                              1706         return ret;
2560 }                                                1707 }
2561 EXPORT_SYMBOL(queue_delayed_work_on);            1708 EXPORT_SYMBOL(queue_delayed_work_on);
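
/*
 * Usage sketch for queue_delayed_work()/queue_delayed_work_on(), not part of
 * workqueue.c: a periodic poll that rearms itself from its own handler.  As
 * noted above, a zero delay queues the work immediately.  example_* names
 * are hypothetical.
 */
#include <linux/jiffies.h>
#include <linux/workqueue.h>

#define EXAMPLE_POLL_MS         500

static void example_poll_fn(struct work_struct *work);
static DECLARE_DELAYED_WORK(example_poll, example_poll_fn);

static void example_poll_fn(struct work_struct *work)
{
        struct delayed_work *dwork = to_delayed_work(work);

        /* ... do the periodic work, then rearm ... */
        queue_delayed_work(system_wq, dwork, msecs_to_jiffies(EXAMPLE_POLL_MS));
}

static void example_start_polling(void)
{
        /* delay of 0: runs as soon as a worker picks it up */
        queue_delayed_work(system_wq, &example_poll, 0);
}
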
2562                                                  1709 
2563 /**                                              1710 /**
2564  * mod_delayed_work_on - modify delay of or q    1711  * mod_delayed_work_on - modify delay of or queue a delayed work on specific CPU
2565  * @cpu: CPU number to execute work on           1712  * @cpu: CPU number to execute work on
2566  * @wq: workqueue to use                         1713  * @wq: workqueue to use
2567  * @dwork: work to queue                         1714  * @dwork: work to queue
2568  * @delay: number of jiffies to wait before q    1715  * @delay: number of jiffies to wait before queueing
2569  *                                               1716  *
2570  * If @dwork is idle, equivalent to queue_del    1717  * If @dwork is idle, equivalent to queue_delayed_work_on(); otherwise,
2571  * modify @dwork's timer so that it expires a    1718  * modify @dwork's timer so that it expires after @delay.  If @delay is
2572  * zero, @work is guaranteed to be scheduled     1719  * zero, @work is guaranteed to be scheduled immediately regardless of its
2573  * current state.                                1720  * current state.
2574  *                                               1721  *
2575  * Return: %false if @dwork was idle and queu    1722  * Return: %false if @dwork was idle and queued, %true if @dwork was
2576  * pending and its timer was modified.           1723  * pending and its timer was modified.
2577  *                                               1724  *
2578  * This function is safe to call from any con    1725  * This function is safe to call from any context including IRQ handler.
2579  * See try_to_grab_pending() for details.        1726  * See try_to_grab_pending() for details.
2580  */                                              1727  */
2581 bool mod_delayed_work_on(int cpu, struct work    1728 bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
2582                          struct delayed_work     1729                          struct delayed_work *dwork, unsigned long delay)
2583 {                                                1730 {
2584         unsigned long irq_flags;              !! 1731         unsigned long flags;
2585         bool ret;                             !! 1732         int ret;
2586                                                  1733 
2587         ret = work_grab_pending(&dwork->work, !! 1734         do {
                                                   >> 1735                 ret = try_to_grab_pending(&dwork->work, true, &flags);
                                                   >> 1736         } while (unlikely(ret == -EAGAIN));
2588                                                  1737 
2589         if (!clear_pending_if_disabled(&dwork !! 1738         if (likely(ret >= 0)) {
2590                 __queue_delayed_work(cpu, wq,    1739                 __queue_delayed_work(cpu, wq, dwork, delay);
                                                   >> 1740                 local_irq_restore(flags);
                                                   >> 1741         }
2591                                                  1742 
2592         local_irq_restore(irq_flags);         !! 1743         /* -ENOENT from try_to_grab_pending() becomes %true */
2593         return ret;                              1744         return ret;
2594 }                                                1745 }
2595 EXPORT_SYMBOL_GPL(mod_delayed_work_on);          1746 EXPORT_SYMBOL_GPL(mod_delayed_work_on);
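
/*
 * Usage sketch for mod_delayed_work()/mod_delayed_work_on(), not part of
 * workqueue.c: a debounce pattern - every call pushes execution out to
 * "now + delay" whether or not the work was already pending, and it is safe
 * from any context including hard IRQ handlers.  example_* names are
 * hypothetical.
 */
#include <linux/jiffies.h>
#include <linux/workqueue.h>

static void example_flush_fn(struct work_struct *work) { /* write out state */ }
static DECLARE_DELAYED_WORK(example_flush, example_flush_fn);

static void example_note_activity(void)
{
        /* restart the 100ms quiet period on every call */
        mod_delayed_work(system_wq, &example_flush, msecs_to_jiffies(100));
}
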
2596                                                  1747 
2597 static void rcu_work_rcufn(struct rcu_head *r    1748 static void rcu_work_rcufn(struct rcu_head *rcu)
2598 {                                                1749 {
2599         struct rcu_work *rwork = container_of    1750         struct rcu_work *rwork = container_of(rcu, struct rcu_work, rcu);
2600                                                  1751 
2601         /* read the comment in __queue_work()    1752         /* read the comment in __queue_work() */
2602         local_irq_disable();                     1753         local_irq_disable();
2603         __queue_work(WORK_CPU_UNBOUND, rwork-    1754         __queue_work(WORK_CPU_UNBOUND, rwork->wq, &rwork->work);
2604         local_irq_enable();                      1755         local_irq_enable();
2605 }                                                1756 }
2606                                                  1757 
2607 /**                                              1758 /**
2608  * queue_rcu_work - queue work after a RCU gr    1759  * queue_rcu_work - queue work after a RCU grace period
2609  * @wq: workqueue to use                         1760  * @wq: workqueue to use
2610  * @rwork: work to queue                         1761  * @rwork: work to queue
2611  *                                               1762  *
2612  * Return: %false if @rwork was already pendi    1763  * Return: %false if @rwork was already pending, %true otherwise.  Note
2613  * that a full RCU grace period is guaranteed    1764  * that a full RCU grace period is guaranteed only after a %true return.
2614  * While @rwork is guaranteed to be executed     1765  * While @rwork is guaranteed to be executed after a %false return, the
2615  * execution may happen before a full RCU gra    1766  * execution may happen before a full RCU grace period has passed.
2616  */                                              1767  */
2617 bool queue_rcu_work(struct workqueue_struct *    1768 bool queue_rcu_work(struct workqueue_struct *wq, struct rcu_work *rwork)
2618 {                                                1769 {
2619         struct work_struct *work = &rwork->wo    1770         struct work_struct *work = &rwork->work;
2620                                                  1771 
2621         /*                                    !! 1772         if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
2622          * rcu_work can't be canceled or disa << 
2623          * inside @rwork and disabled the inn << 
2624          */                                   << 
2625         if (!test_and_set_bit(WORK_STRUCT_PEN << 
2626             !WARN_ON_ONCE(clear_pending_if_di << 
2627                 rwork->wq = wq;                  1773                 rwork->wq = wq;
2628                 call_rcu_hurry(&rwork->rcu, r !! 1774                 call_rcu(&rwork->rcu, rcu_work_rcufn);
2629                 return true;                     1775                 return true;
2630         }                                        1776         }
2631                                                  1777 
2632         return false;                            1778         return false;
2633 }                                                1779 }
2634 EXPORT_SYMBOL(queue_rcu_work);                   1780 EXPORT_SYMBOL(queue_rcu_work);
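
/*
 * Usage sketch for queue_rcu_work(), not part of workqueue.c: free an object
 * in process context only after an RCU grace period, without blocking the
 * caller.  The rcu_work must not be reused until the handler has run.
 * example_* names are hypothetical.
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct example_obj {
        struct rcu_work rwork;
        /* ... payload ... */
};

static void example_free_fn(struct work_struct *work)
{
        struct example_obj *obj =
                container_of(to_rcu_work(work), struct example_obj, rwork);

        kfree(obj);     /* runs after a full grace period has elapsed */
}

static void example_defer_free(struct example_obj *obj)
{
        INIT_RCU_WORK(&obj->rwork, example_free_fn);
        queue_rcu_work(system_wq, &obj->rwork);
}
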
2635                                                  1781 
                                                   >> 1782 /**
                                                   >> 1783  * worker_enter_idle - enter idle state
                                                   >> 1784  * @worker: worker which is entering idle state
                                                   >> 1785  *
                                                   >> 1786  * @worker is entering idle state.  Update stats and idle timer if
                                                   >> 1787  * necessary.
                                                   >> 1788  *
                                                   >> 1789  * LOCKING:
                                                   >> 1790  * raw_spin_lock_irq(pool->lock).
                                                   >> 1791  */
                                                   >> 1792 static void worker_enter_idle(struct worker *worker)
                                                   >> 1793 {
                                                   >> 1794         struct worker_pool *pool = worker->pool;
                                                   >> 1795 
                                                   >> 1796         if (WARN_ON_ONCE(worker->flags & WORKER_IDLE) ||
                                                   >> 1797             WARN_ON_ONCE(!list_empty(&worker->entry) &&
                                                   >> 1798                          (worker->hentry.next || worker->hentry.pprev)))
                                                   >> 1799                 return;
                                                   >> 1800 
                                                   >> 1801         /* can't use worker_set_flags(), also called from create_worker() */
                                                   >> 1802         worker->flags |= WORKER_IDLE;
                                                   >> 1803         pool->nr_idle++;
                                                   >> 1804         worker->last_active = jiffies;
                                                   >> 1805 
                                                   >> 1806         /* idle_list is LIFO */
                                                   >> 1807         list_add(&worker->entry, &pool->idle_list);
                                                   >> 1808 
                                                   >> 1809         if (too_many_workers(pool) && !timer_pending(&pool->idle_timer))
                                                   >> 1810                 mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT);
                                                   >> 1811 
                                                   >> 1812         /* Sanity check nr_running. */
                                                   >> 1813         WARN_ON_ONCE(pool->nr_workers == pool->nr_idle && pool->nr_running);
                                                   >> 1814 }
                                                   >> 1815 
                                                   >> 1816 /**
                                                   >> 1817  * worker_leave_idle - leave idle state
                                                   >> 1818  * @worker: worker which is leaving idle state
                                                   >> 1819  *
                                                   >> 1820  * @worker is leaving idle state.  Update stats.
                                                   >> 1821  *
                                                   >> 1822  * LOCKING:
                                                   >> 1823  * raw_spin_lock_irq(pool->lock).
                                                   >> 1824  */
                                                   >> 1825 static void worker_leave_idle(struct worker *worker)
                                                   >> 1826 {
                                                   >> 1827         struct worker_pool *pool = worker->pool;
                                                   >> 1828 
                                                   >> 1829         if (WARN_ON_ONCE(!(worker->flags & WORKER_IDLE)))
                                                   >> 1830                 return;
                                                   >> 1831         worker_clr_flags(worker, WORKER_IDLE);
                                                   >> 1832         pool->nr_idle--;
                                                   >> 1833         list_del_init(&worker->entry);
                                                   >> 1834 }
                                                   >> 1835 
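The two helpers above keep idle_list in LIFO order: the most-recently-idle worker sits at the head and the longest-idle one at the tail, which is what the idle timer later inspects. A standalone sketch of that invariant, with my_idler and friends as invented names:

#include <linux/list.h>
#include <linux/jiffies.h>

struct my_idler {
	struct list_head entry;
	unsigned long last_active;
};

static LIST_HEAD(my_idle_list);		/* LIFO: head is most recently idle */

static void my_enter_idle(struct my_idler *w)
{
	w->last_active = jiffies;
	list_add(&w->entry, &my_idle_list);	/* push onto the head */
}

static struct my_idler *my_longest_idle(void)
{
	if (list_empty(&my_idle_list))
		return NULL;
	/* the tail entry has been idle the longest */
	return list_last_entry(&my_idle_list, struct my_idler, entry);
}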
2636 static struct worker *alloc_worker(int node)     1836 static struct worker *alloc_worker(int node)
2637 {                                                1837 {
2638         struct worker *worker;                   1838         struct worker *worker;
2639                                                  1839 
2640         worker = kzalloc_node(sizeof(*worker)    1840         worker = kzalloc_node(sizeof(*worker), GFP_KERNEL, node);
2641         if (worker) {                            1841         if (worker) {
2642                 INIT_LIST_HEAD(&worker->entry    1842                 INIT_LIST_HEAD(&worker->entry);
2643                 INIT_LIST_HEAD(&worker->sched    1843                 INIT_LIST_HEAD(&worker->scheduled);
2644                 INIT_LIST_HEAD(&worker->node)    1844                 INIT_LIST_HEAD(&worker->node);
2645                 /* on creation a worker is in    1845                 /* on creation a worker is in !idle && prep state */
2646                 worker->flags = WORKER_PREP;     1846                 worker->flags = WORKER_PREP;
2647         }                                        1847         }
2648         return worker;                           1848         return worker;
2649 }                                                1849 }
2650                                                  1850 
2651 static cpumask_t *pool_allowed_cpus(struct wo << 
2652 {                                             << 
2653         if (pool->cpu < 0 && pool->attrs->aff << 
2654                 return pool->attrs->__pod_cpu << 
2655         else                                  << 
2656                 return pool->attrs->cpumask;  << 
2657 }                                             << 
2658                                               << 
2659 /**                                              1851 /**
2660  * worker_attach_to_pool() - attach a worker     1852  * worker_attach_to_pool() - attach a worker to a pool
2661  * @worker: worker to be attached                1853  * @worker: worker to be attached
2662  * @pool: the target pool                        1854  * @pool: the target pool
2663  *                                               1855  *
2664  * Attach @worker to @pool.  Once attached, t    1856  * Attach @worker to @pool.  Once attached, the %WORKER_UNBOUND flag and
2665  * cpu-binding of @worker are kept coordinate    1857  * cpu-binding of @worker are kept coordinated with the pool across
2666  * cpu-[un]hotplugs.                             1858  * cpu-[un]hotplugs.
2667  */                                              1859  */
2668 static void worker_attach_to_pool(struct work    1860 static void worker_attach_to_pool(struct worker *worker,
2669                                   struct work !! 1861                                    struct worker_pool *pool)
2670 {                                                1862 {
2671         mutex_lock(&wq_pool_attach_mutex);       1863         mutex_lock(&wq_pool_attach_mutex);
2672                                                  1864 
2673         /*                                       1865         /*
2674          * The wq_pool_attach_mutex ensures % !! 1866          * The wq_pool_attach_mutex ensures %POOL_DISASSOCIATED remains
2675          * across this function. See the comm !! 1867          * stable across this function.  See the comments above the flag
2676          * details. BH workers are, while per !! 1868          * definition for details.
2677          */                                      1869          */
2678         if (pool->flags & POOL_DISASSOCIATED) !! 1870         if (pool->flags & POOL_DISASSOCIATED)
2679                 worker->flags |= WORKER_UNBOU    1871                 worker->flags |= WORKER_UNBOUND;
2680         } else {                              !! 1872         else
2681                 WARN_ON_ONCE(pool->flags & PO << 
2682                 kthread_set_per_cpu(worker->t    1873                 kthread_set_per_cpu(worker->task, pool->cpu);
2683         }                                     << 
2684                                                  1874 
2685         if (worker->rescue_wq)                   1875         if (worker->rescue_wq)
2686                 set_cpus_allowed_ptr(worker-> !! 1876                 set_cpus_allowed_ptr(worker->task, pool->attrs->cpumask);
2687                                                  1877 
2688         list_add_tail(&worker->node, &pool->w    1878         list_add_tail(&worker->node, &pool->workers);
2689         worker->pool = pool;                     1879         worker->pool = pool;
2690                                                  1880 
2691         mutex_unlock(&wq_pool_attach_mutex);     1881         mutex_unlock(&wq_pool_attach_mutex);
2692 }                                                1882 }
2693                                                  1883 
2694 static void unbind_worker(struct worker *work << 
2695 {                                             << 
2696         lockdep_assert_held(&wq_pool_attach_m << 
2697                                               << 
2698         kthread_set_per_cpu(worker->task, -1) << 
2699         if (cpumask_intersects(wq_unbound_cpu << 
2700                 WARN_ON_ONCE(set_cpus_allowed << 
2701         else                                  << 
2702                 WARN_ON_ONCE(set_cpus_allowed << 
2703 }                                             << 
2704                                               << 
2705                                               << 
2706 static void detach_worker(struct worker *work << 
2707 {                                             << 
2708         lockdep_assert_held(&wq_pool_attach_m << 
2709                                               << 
2710         unbind_worker(worker);                << 
2711         list_del(&worker->node);              << 
2712 }                                             << 
2713                                               << 
2714 /**                                              1884 /**
2715  * worker_detach_from_pool() - detach a worke    1885  * worker_detach_from_pool() - detach a worker from its pool
2716  * @worker: worker which is attached to its p    1886  * @worker: worker which is attached to its pool
2717  *                                               1887  *
2718  * Undo the attaching which had been done in     1888  * Undo the attaching which had been done in worker_attach_to_pool().  The
2719  * caller worker shouldn't access the pool       1889  * caller worker shouldn't access the pool after detaching unless it has
2720  * another reference to the pool.                 1890  * another reference to the pool.
2721  */                                              1891  */
2722 static void worker_detach_from_pool(struct wo    1892 static void worker_detach_from_pool(struct worker *worker)
2723 {                                                1893 {
2724         struct worker_pool *pool = worker->po    1894         struct worker_pool *pool = worker->pool;
2725                                               !! 1895         struct completion *detach_completion = NULL;
2726         /* there is one permanent BH worker p << 
2727         WARN_ON_ONCE(pool->flags & POOL_BH);  << 
2728                                                  1896 
2729         mutex_lock(&wq_pool_attach_mutex);       1897         mutex_lock(&wq_pool_attach_mutex);
2730         detach_worker(worker);                !! 1898 
                                                   >> 1899         kthread_set_per_cpu(worker->task, -1);
                                                   >> 1900         list_del(&worker->node);
2731         worker->pool = NULL;                     1901         worker->pool = NULL;
                                                   >> 1902 
                                                   >> 1903         if (list_empty(&pool->workers))
                                                   >> 1904                 detach_completion = pool->detach_completion;
2732         mutex_unlock(&wq_pool_attach_mutex);     1905         mutex_unlock(&wq_pool_attach_mutex);
2733                                                  1906 
2734         /* clear leftover flags without pool-    1907         /* clear leftover flags without pool->lock after it is detached */
2735         worker->flags &= ~(WORKER_UNBOUND | W    1908         worker->flags &= ~(WORKER_UNBOUND | WORKER_REBOUND);
2736 }                                             << 
2737                                               << 
2738 static int format_worker_id(char *buf, size_t << 
2739                             struct worker_poo << 
2740 {                                             << 
2741         if (worker->rescue_wq)                << 
2742                 return scnprintf(buf, size, " << 
2743                                  worker->resc << 
2744                                                  1909 
2745         if (pool) {                           !! 1910         if (detach_completion)
2746                 if (pool->cpu >= 0)           !! 1911                 complete(detach_completion);
2747                         return scnprintf(buf, << 
2748                                          pool << 
2749                                          pool << 
2750                 else                          << 
2751                         return scnprintf(buf, << 
2752                                          pool << 
2753         } else {                              << 
2754                 return scnprintf(buf, size, " << 
2755         }                                     << 
2756 }                                                1912 }
2757                                                  1913 
2758 /**                                              1914 /**
2759  * create_worker - create a new workqueue wor    1915  * create_worker - create a new workqueue worker
2760  * @pool: pool the new worker will belong to     1916  * @pool: pool the new worker will belong to
2761  *                                               1917  *
2762  * Create and start a new worker which is att    1918  * Create and start a new worker which is attached to @pool.
2763  *                                               1919  *
2764  * CONTEXT:                                      1920  * CONTEXT:
2765  * Might sleep.  Does GFP_KERNEL allocations.    1921  * Might sleep.  Does GFP_KERNEL allocations.
2766  *                                               1922  *
2767  * Return:                                       1923  * Return:
2768  * Pointer to the newly created worker.          1924  * Pointer to the newly created worker.
2769  */                                              1925  */
2770 static struct worker *create_worker(struct wo    1926 static struct worker *create_worker(struct worker_pool *pool)
2771 {                                                1927 {
2772         struct worker *worker;                   1928         struct worker *worker;
2773         int id;                                  1929         int id;
                                                   >> 1930         char id_buf[16];
2774                                                  1931 
2775         /* ID is needed to determine kthread     1932         /* ID is needed to determine kthread name */
2776         id = ida_alloc(&pool->worker_ida, GFP    1933         id = ida_alloc(&pool->worker_ida, GFP_KERNEL);
2777         if (id < 0) {                         !! 1934         if (id < 0)
2778                 pr_err_once("workqueue: Faile << 
2779                             ERR_PTR(id));     << 
2780                 return NULL;                     1935                 return NULL;
2781         }                                     << 
2782                                                  1936 
2783         worker = alloc_worker(pool->node);       1937         worker = alloc_worker(pool->node);
2784         if (!worker) {                        !! 1938         if (!worker)
2785                 pr_err_once("workqueue: Faile << 
2786                 goto fail;                       1939                 goto fail;
2787         }                                     << 
2788                                                  1940 
2789         worker->id = id;                         1941         worker->id = id;
2790                                                  1942 
2791         if (!(pool->flags & POOL_BH)) {       !! 1943         if (pool->cpu >= 0)
2792                 char id_buf[WORKER_ID_LEN];   !! 1944                 snprintf(id_buf, sizeof(id_buf), "%d:%d%s", pool->cpu, id,
                                                   >> 1945                          pool->attrs->nice < 0  ? "H" : "");
                                                   >> 1946         else
                                                   >> 1947                 snprintf(id_buf, sizeof(id_buf), "u%d:%d", pool->id, id);
2793                                                  1948 
2794                 format_worker_id(id_buf, size !! 1949         worker->task = kthread_create_on_node(worker_thread, worker, pool->node,
2795                 worker->task = kthread_create !! 1950                                               "kworker/%s", id_buf);
2796                                               !! 1951         if (IS_ERR(worker->task))
2797                 if (IS_ERR(worker->task)) {   !! 1952                 goto fail;
2798                         if (PTR_ERR(worker->t << 
2799                                 pr_err("workq << 
2800                                        id_buf << 
2801                         } else {              << 
2802                                 pr_err_once(" << 
2803                                             w << 
2804                         }                     << 
2805                         goto fail;            << 
2806                 }                             << 
2807                                                  1953 
2808                 set_user_nice(worker->task, p !! 1954         set_user_nice(worker->task, pool->attrs->nice);
2809                 kthread_bind_mask(worker->tas !! 1955         kthread_bind_mask(worker->task, pool->attrs->cpumask);
2810         }                                     << 
2811                                                  1956 
2812         /* successful, attach the worker to t    1957         /* successful, attach the worker to the pool */
2813         worker_attach_to_pool(worker, pool);     1958         worker_attach_to_pool(worker, pool);
2814                                                  1959 
2815         /* start the newly created worker */     1960         /* start the newly created worker */
2816         raw_spin_lock_irq(&pool->lock);          1961         raw_spin_lock_irq(&pool->lock);
2817                                               << 
2818         worker->pool->nr_workers++;              1962         worker->pool->nr_workers++;
2819         worker_enter_idle(worker);               1963         worker_enter_idle(worker);
2820                                               !! 1964         wake_up_process(worker->task);
2821         /*                                    << 
2822          * @worker is waiting on a completion << 
2823          * check if not woken up soon. As kic << 
2824          * wake it up explicitly.             << 
2825          */                                   << 
2826         if (worker->task)                     << 
2827                 wake_up_process(worker->task) << 
2828                                               << 
2829         raw_spin_unlock_irq(&pool->lock);        1965         raw_spin_unlock_irq(&pool->lock);
2830                                                  1966 
2831         return worker;                           1967         return worker;
2832                                                  1968 
2833 fail:                                            1969 fail:
2834         ida_free(&pool->worker_ida, id);         1970         ida_free(&pool->worker_ida, id);
2835         kfree(worker);                           1971         kfree(worker);
2836         return NULL;                             1972         return NULL;
2837 }                                                1973 }
2838                                                  1974 
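For illustration, a stripped-down sketch of the kthread bring-up pattern used by create_worker() (create on a NUMA node, set the nice level, pin, wake); my_spawn_pinned_thread() and its arguments are invented names, and kthread_bind() stands in for the cpumask-based binding done here:

#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/err.h>

static struct task_struct *my_spawn_pinned_thread(int cpu, int node,
						  int (*my_threadfn)(void *),
						  void *my_data)
{
	struct task_struct *task;

	task = kthread_create_on_node(my_threadfn, my_data, node,
				      "my_worker/%d", cpu);
	if (IS_ERR(task))
		return task;

	set_user_nice(task, -5);	/* arbitrary example nice level */
	kthread_bind(task, cpu);	/* single-CPU analogue of kthread_bind_mask() */
	wake_up_process(task);		/* the thread starts executing my_threadfn() */
	return task;
}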
2839 static void detach_dying_workers(struct list_ << 
2840 {                                             << 
2841         struct worker *worker;                << 
2842                                               << 
2843         list_for_each_entry(worker, cull_list << 
2844                 detach_worker(worker);        << 
2845 }                                             << 
2846                                               << 
2847 static void reap_dying_workers(struct list_he << 
2848 {                                             << 
2849         struct worker *worker, *tmp;          << 
2850                                               << 
2851         list_for_each_entry_safe(worker, tmp, << 
2852                 list_del_init(&worker->entry) << 
2853                 kthread_stop_put(worker->task << 
2854                 kfree(worker);                << 
2855         }                                     << 
2856 }                                             << 
2857                                               << 
2858 /**                                              1975 /**
2859  * set_worker_dying - Tag a worker for destru !! 1976  * destroy_worker - destroy a workqueue worker
2860  * @worker: worker to be destroyed               1977  * @worker: worker to be destroyed
2861  * @list: transfer worker away from its pool- << 
2862  *                                               1978  *
2863  * Tag @worker for destruction and adjust @po !! 1979  * Destroy @worker and adjust @pool stats accordingly.  The worker should
2864  * should be idle.                            !! 1980  * be idle.
2865  *                                               1981  *
2866  * CONTEXT:                                      1982  * CONTEXT:
2867  * raw_spin_lock_irq(pool->lock).                1983  * raw_spin_lock_irq(pool->lock).
2868  */                                              1984  */
2869 static void set_worker_dying(struct worker *w !! 1985 static void destroy_worker(struct worker *worker)
2870 {                                                1986 {
2871         struct worker_pool *pool = worker->po    1987         struct worker_pool *pool = worker->pool;
2872                                                  1988 
2873         lockdep_assert_held(&pool->lock);        1989         lockdep_assert_held(&pool->lock);
2874         lockdep_assert_held(&wq_pool_attach_m << 
2875                                                  1990 
2876         /* sanity check frenzy */                1991         /* sanity check frenzy */
2877         if (WARN_ON(worker->current_work) ||     1992         if (WARN_ON(worker->current_work) ||
2878             WARN_ON(!list_empty(&worker->sche    1993             WARN_ON(!list_empty(&worker->scheduled)) ||
2879             WARN_ON(!(worker->flags & WORKER_    1994             WARN_ON(!(worker->flags & WORKER_IDLE)))
2880                 return;                          1995                 return;
2881                                                  1996 
2882         pool->nr_workers--;                      1997         pool->nr_workers--;
2883         pool->nr_idle--;                         1998         pool->nr_idle--;
2884                                                  1999 
                                                   >> 2000         list_del_init(&worker->entry);
2885         worker->flags |= WORKER_DIE;             2001         worker->flags |= WORKER_DIE;
2886                                               !! 2002         wake_up_process(worker->task);
2887         list_move(&worker->entry, list);      << 
2888                                               << 
2889         /* get an extra task struct reference << 
2890         get_task_struct(worker->task);        << 
2891 }                                                2003 }
2892                                                  2004 
2893 /**                                           << 
2894  * idle_worker_timeout - check if some idle w << 
2895  * @t: The pool's idle_timer that just expire << 
2896  *                                            << 
2897  * The timer is armed in worker_enter_idle(). << 
2898  * worker_leave_idle(), as a worker flicking  << 
2899  * pool is at the too_many_workers() tipping  << 
2900  * housekeeping overhead. Since IDLE_WORKER_T << 
2901  * it expire and re-evaluate things from ther << 
2902  */                                           << 
2903 static void idle_worker_timeout(struct timer_    2005 static void idle_worker_timeout(struct timer_list *t)
2904 {                                                2006 {
2905         struct worker_pool *pool = from_timer    2007         struct worker_pool *pool = from_timer(pool, t, idle_timer);
2906         bool do_cull = false;                 << 
2907                                               << 
2908         if (work_pending(&pool->idle_cull_wor << 
2909                 return;                       << 
2910                                               << 
2911         raw_spin_lock_irq(&pool->lock);       << 
2912                                               << 
2913         if (too_many_workers(pool)) {         << 
2914                 struct worker *worker;        << 
2915                 unsigned long expires;        << 
2916                                               << 
2917                 /* idle_list is kept in LIFO  << 
2918                 worker = list_last_entry(&poo << 
2919                 expires = worker->last_active << 
2920                 do_cull = !time_before(jiffie << 
2921                                               << 
2922                 if (!do_cull)                 << 
2923                         mod_timer(&pool->idle << 
2924         }                                     << 
2925         raw_spin_unlock_irq(&pool->lock);     << 
2926                                               << 
2927         if (do_cull)                          << 
2928                 queue_work(system_unbound_wq, << 
2929 }                                             << 
2930                                               << 
2931 /**                                           << 
2932  * idle_cull_fn - cull workers that have been << 
2933  * @work: the pool's work for handling these  << 
2934  *                                            << 
2935  * This goes through a pool's idle workers an << 
2936  * idle for at least IDLE_WORKER_TIMEOUT seco << 
2937  *                                            << 
2938  * We don't want to disturb isolated CPUs bec << 
2939  * culled, so this also resets worker affinit << 
2940  * context, hence the split between timer cal << 
2941  */                                           << 
2942 static void idle_cull_fn(struct work_struct * << 
2943 {                                             << 
2944         struct worker_pool *pool = container_ << 
2945         LIST_HEAD(cull_list);                 << 
2946                                                  2008 
2947         /*                                    << 
2948          * Grabbing wq_pool_attach_mutex here << 
2949          * cannot proceed beyond set_pf_worke << 
2950          * This is required as a previously-p << 
2951          * set_worker_dying() has happened bu << 
2952          */                                   << 
2953         mutex_lock(&wq_pool_attach_mutex);    << 
2954         raw_spin_lock_irq(&pool->lock);          2009         raw_spin_lock_irq(&pool->lock);
2955                                                  2010 
2956         while (too_many_workers(pool)) {         2011         while (too_many_workers(pool)) {
2957                 struct worker *worker;           2012                 struct worker *worker;
2958                 unsigned long expires;           2013                 unsigned long expires;
2959                                                  2014 
2960                 worker = list_last_entry(&poo !! 2015                 /* idle_list is kept in LIFO order, check the last one */
                                                   >> 2016                 worker = list_entry(pool->idle_list.prev, struct worker, entry);
2961                 expires = worker->last_active    2017                 expires = worker->last_active + IDLE_WORKER_TIMEOUT;
2962                                                  2018 
2963                 if (time_before(jiffies, expi    2019                 if (time_before(jiffies, expires)) {
2964                         mod_timer(&pool->idle    2020                         mod_timer(&pool->idle_timer, expires);
2965                         break;                   2021                         break;
2966                 }                                2022                 }
2967                                                  2023 
2968                 set_worker_dying(worker, &cul !! 2024                 destroy_worker(worker);
2969         }                                        2025         }
2970                                                  2026 
2971         raw_spin_unlock_irq(&pool->lock);        2027         raw_spin_unlock_irq(&pool->lock);
2972         detach_dying_workers(&cull_list);     << 
2973         mutex_unlock(&wq_pool_attach_mutex);  << 
2974                                               << 
2975         reap_dying_workers(&cull_list);       << 
2976 }                                                2028 }
2977                                                  2029 
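The timer/work split above is a general pattern: idle_worker_timeout() runs in atomic (timer) context and therefore only decides whether culling is due, while idle_cull_fn() runs in process context where it may take sleeping locks such as wq_pool_attach_mutex. A generic sketch of the handoff, with my_pool and its members as assumed names:

#include <linux/timer.h>
#include <linux/workqueue.h>

struct my_pool {
	struct timer_list expiry_timer;
	struct work_struct cleanup_work;
};

static void my_cleanup_workfn(struct work_struct *work)
{
	/* process context: mutexes and other sleeping operations are fine */
}

static void my_expiry_timerfn(struct timer_list *t)
{
	struct my_pool *pool = from_timer(pool, t, expiry_timer);

	/* atomic context: only decide and hand off, never sleep */
	queue_work(system_unbound_wq, &pool->cleanup_work);
}

static void my_pool_init(struct my_pool *pool)
{
	INIT_WORK(&pool->cleanup_work, my_cleanup_workfn);
	timer_setup(&pool->expiry_timer, my_expiry_timerfn, 0);
	mod_timer(&pool->expiry_timer, jiffies + HZ);	/* fire in ~1 second */
}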
2978 static void send_mayday(struct work_struct *w    2030 static void send_mayday(struct work_struct *work)
2979 {                                                2031 {
2980         struct pool_workqueue *pwq = get_work    2032         struct pool_workqueue *pwq = get_work_pwq(work);
2981         struct workqueue_struct *wq = pwq->wq    2033         struct workqueue_struct *wq = pwq->wq;
2982                                                  2034 
2983         lockdep_assert_held(&wq_mayday_lock);    2035         lockdep_assert_held(&wq_mayday_lock);
2984                                                  2036 
2985         if (!wq->rescuer)                        2037         if (!wq->rescuer)
2986                 return;                          2038                 return;
2987                                                  2039 
2988         /* mayday mayday mayday */               2040         /* mayday mayday mayday */
2989         if (list_empty(&pwq->mayday_node)) {     2041         if (list_empty(&pwq->mayday_node)) {
2990                 /*                               2042                 /*
2991                  * If @pwq is for an unbound     2043                  * If @pwq is for an unbound wq, its base ref may be put at
2992                  * any time due to an attribu    2044                  * any time due to an attribute change.  Pin @pwq until the
2993                  * rescuer is done with it.      2045                  * rescuer is done with it.
2994                  */                              2046                  */
2995                 get_pwq(pwq);                    2047                 get_pwq(pwq);
2996                 list_add_tail(&pwq->mayday_no    2048                 list_add_tail(&pwq->mayday_node, &wq->maydays);
2997                 wake_up_process(wq->rescuer->    2049                 wake_up_process(wq->rescuer->task);
2998                 pwq->stats[PWQ_STAT_MAYDAY]++ << 
2999         }                                        2050         }
3000 }                                                2051 }
3001                                                  2052 
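The rescuer woken by send_mayday() only exists for workqueues allocated with WQ_MEM_RECLAIM. A short, hedged example of creating such a workqueue from driver code; my_reclaim_wq is an invented name:

#include <linux/workqueue.h>
#include <linux/errno.h>

static struct workqueue_struct *my_reclaim_wq;

static int my_reclaim_wq_init(void)
{
	/* WQ_MEM_RECLAIM guarantees a dedicated rescuer thread for this wq */
	my_reclaim_wq = alloc_workqueue("my_reclaim_wq", WQ_MEM_RECLAIM, 0);
	if (!my_reclaim_wq)
		return -ENOMEM;
	return 0;
}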
3002 static void pool_mayday_timeout(struct timer_    2053 static void pool_mayday_timeout(struct timer_list *t)
3003 {                                                2054 {
3004         struct worker_pool *pool = from_timer    2055         struct worker_pool *pool = from_timer(pool, t, mayday_timer);
3005         struct work_struct *work;                2056         struct work_struct *work;
3006                                                  2057 
3007         raw_spin_lock_irq(&pool->lock);          2058         raw_spin_lock_irq(&pool->lock);
3008         raw_spin_lock(&wq_mayday_lock);          2059         raw_spin_lock(&wq_mayday_lock);         /* for wq->maydays */
3009                                                  2060 
3010         if (need_to_create_worker(pool)) {       2061         if (need_to_create_worker(pool)) {
3011                 /*                               2062                 /*
3012                  * We've been trying to creat    2063                  * We've been trying to create a new worker but
3013                  * haven't been successful.      2064                  * haven't been successful.  We might be hitting an
3014                  * allocation deadlock.  Send    2065                  * allocation deadlock.  Send distress signals to
3015                  * rescuers.                     2066                  * rescuers.
3016                  */                              2067                  */
3017                 list_for_each_entry(work, &po    2068                 list_for_each_entry(work, &pool->worklist, entry)
3018                         send_mayday(work);       2069                         send_mayday(work);
3019         }                                        2070         }
3020                                                  2071 
3021         raw_spin_unlock(&wq_mayday_lock);        2072         raw_spin_unlock(&wq_mayday_lock);
3022         raw_spin_unlock_irq(&pool->lock);        2073         raw_spin_unlock_irq(&pool->lock);
3023                                                  2074 
3024         mod_timer(&pool->mayday_timer, jiffie    2075         mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INTERVAL);
3025 }                                                2076 }
3026                                                  2077 
3027 /**                                              2078 /**
3028  * maybe_create_worker - create a new worker     2079  * maybe_create_worker - create a new worker if necessary
3029  * @pool: pool to create a new worker for        2080  * @pool: pool to create a new worker for
3030  *                                               2081  *
3031  * Create a new worker for @pool if necessary    2082  * Create a new worker for @pool if necessary.  @pool is guaranteed to
3032  * have at least one idle worker on return fr    2083  * have at least one idle worker on return from this function.  If
3033  * creating a new worker takes longer than MA    2084  * creating a new worker takes longer than MAYDAY_INTERVAL, mayday is
3034  * sent to all rescuers with works scheduled     2085  * sent to all rescuers with works scheduled on @pool to resolve
3035  * possible allocation deadlock.                 2086  * possible allocation deadlock.
3036  *                                               2087  *
3037  * On return, need_to_create_worker() is guar    2088  * On return, need_to_create_worker() is guaranteed to be %false and
3038  * may_start_working() %true.                    2089  * may_start_working() %true.
3039  *                                               2090  *
3040  * LOCKING:                                      2091  * LOCKING:
3041  * raw_spin_lock_irq(pool->lock) which may be    2092  * raw_spin_lock_irq(pool->lock) which may be released and regrabbed
3042  * multiple times.  Does GFP_KERNEL allocatio    2093  * multiple times.  Does GFP_KERNEL allocations.  Called only from
3043  * manager.                                      2094  * manager.
3044  */                                              2095  */
3045 static void maybe_create_worker(struct worker    2096 static void maybe_create_worker(struct worker_pool *pool)
3046 __releases(&pool->lock)                          2097 __releases(&pool->lock)
3047 __acquires(&pool->lock)                          2098 __acquires(&pool->lock)
3048 {                                                2099 {
3049 restart:                                         2100 restart:
3050         raw_spin_unlock_irq(&pool->lock);        2101         raw_spin_unlock_irq(&pool->lock);
3051                                                  2102 
3052         /* if we don't make progress in MAYDA    2103         /* if we don't make progress in MAYDAY_INITIAL_TIMEOUT, call for help */
3053         mod_timer(&pool->mayday_timer, jiffie    2104         mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT);
3054                                                  2105 
3055         while (true) {                           2106         while (true) {
3056                 if (create_worker(pool) || !n    2107                 if (create_worker(pool) || !need_to_create_worker(pool))
3057                         break;                   2108                         break;
3058                                                  2109 
3059                 schedule_timeout_interruptibl    2110                 schedule_timeout_interruptible(CREATE_COOLDOWN);
3060                                                  2111 
3061                 if (!need_to_create_worker(po    2112                 if (!need_to_create_worker(pool))
3062                         break;                   2113                         break;
3063         }                                        2114         }
3064                                                  2115 
3065         del_timer_sync(&pool->mayday_timer);     2116         del_timer_sync(&pool->mayday_timer);
3066         raw_spin_lock_irq(&pool->lock);          2117         raw_spin_lock_irq(&pool->lock);
3067         /*                                       2118         /*
3068          * This is necessary even after a new    2119          * This is necessary even after a new worker was just successfully
3069          * created as @pool->lock was dropped    2120          * created as @pool->lock was dropped and the new worker might have
3070          * already become busy.                  2121          * already become busy.
3071          */                                      2122          */
3072         if (need_to_create_worker(pool))         2123         if (need_to_create_worker(pool))
3073                 goto restart;                    2124                 goto restart;
3074 }                                                2125 }
3075                                                  2126 
3076 /**                                              2127 /**
3077  * manage_workers - manage worker pool           2128  * manage_workers - manage worker pool
3078  * @worker: self                                 2129  * @worker: self
3079  *                                               2130  *
3080  * Assume the manager role and manage the wor    2131  * Assume the manager role and manage the worker pool @worker belongs
3081  * to.  At any given time, there can be only     2132  * to.  At any given time, there can be only zero or one manager per
3082  * pool.  The exclusion is handled automatica    2133  * pool.  The exclusion is handled automatically by this function.
3083  *                                               2134  *
3084  * The caller can safely start processing wor    2135  * The caller can safely start processing works on false return.  On
3085  * true return, it's guaranteed that need_to_    2136  * true return, it's guaranteed that need_to_create_worker() is false
3086  * and may_start_working() is true.              2137  * and may_start_working() is true.
3087  *                                               2138  *
3088  * CONTEXT:                                      2139  * CONTEXT:
3089  * raw_spin_lock_irq(pool->lock) which may be    2140  * raw_spin_lock_irq(pool->lock) which may be released and regrabbed
3090  * multiple times.  Does GFP_KERNEL allocatio    2141  * multiple times.  Does GFP_KERNEL allocations.
3091  *                                               2142  *
3092  * Return:                                       2143  * Return:
3093  * %false if the pool doesn't need management    2144  * %false if the pool doesn't need management and the caller can safely
3094  * start processing works, %true if managemen    2145  * start processing works, %true if management function was performed and
3095  * the conditions that the caller verified be    2146  * the conditions that the caller verified before calling the function may
3096  * no longer be true.                            2147  * no longer be true.
3097  */                                              2148  */
3098 static bool manage_workers(struct worker *wor    2149 static bool manage_workers(struct worker *worker)
3099 {                                                2150 {
3100         struct worker_pool *pool = worker->po    2151         struct worker_pool *pool = worker->pool;
3101                                                  2152 
3102         if (pool->flags & POOL_MANAGER_ACTIVE    2153         if (pool->flags & POOL_MANAGER_ACTIVE)
3103                 return false;                    2154                 return false;
3104                                                  2155 
3105         pool->flags |= POOL_MANAGER_ACTIVE;      2156         pool->flags |= POOL_MANAGER_ACTIVE;
3106         pool->manager = worker;                  2157         pool->manager = worker;
3107                                                  2158 
3108         maybe_create_worker(pool);               2159         maybe_create_worker(pool);
3109                                                  2160 
3110         pool->manager = NULL;                    2161         pool->manager = NULL;
3111         pool->flags &= ~POOL_MANAGER_ACTIVE;     2162         pool->flags &= ~POOL_MANAGER_ACTIVE;
3112         rcuwait_wake_up(&manager_wait);          2163         rcuwait_wake_up(&manager_wait);
3113         return true;                             2164         return true;
3114 }                                                2165 }
3115                                                  2166 
3116 /**                                              2167 /**
3117  * process_one_work - process single work        2168  * process_one_work - process single work
3118  * @worker: self                                 2169  * @worker: self
3119  * @work: work to process                        2170  * @work: work to process
3120  *                                               2171  *
3121  * Process @work.  This function contains all    2172  * Process @work.  This function contains all the logic necessary to
3122  * process a single work including synchroniz    2173  * process a single work including synchronization against and
3123  * interaction with other workers on the same    2174  * interaction with other workers on the same cpu, queueing and
3124  * flushing.  As long as context requirement     2175  * flushing.  As long as context requirement is met, any worker can
3125  * call this function to process a work.         2176  * call this function to process a work.
3126  *                                               2177  *
3127  * CONTEXT:                                      2178  * CONTEXT:
3128  * raw_spin_lock_irq(pool->lock) which is rel    2179  * raw_spin_lock_irq(pool->lock) which is released and regrabbed.
3129  */                                              2180  */
3130 static void process_one_work(struct worker *w    2181 static void process_one_work(struct worker *worker, struct work_struct *work)
3131 __releases(&pool->lock)                          2182 __releases(&pool->lock)
3132 __acquires(&pool->lock)                          2183 __acquires(&pool->lock)
3133 {                                                2184 {
3134         struct pool_workqueue *pwq = get_work    2185         struct pool_workqueue *pwq = get_work_pwq(work);
3135         struct worker_pool *pool = worker->po    2186         struct worker_pool *pool = worker->pool;
                                                   >> 2187         bool cpu_intensive = pwq->wq->flags & WQ_CPU_INTENSIVE;
3136         unsigned long work_data;                 2188         unsigned long work_data;
3137         int lockdep_start_depth, rcu_start_de !! 2189         struct worker *collision;
3138         bool bh_draining = pool->flags & POOL << 
3139 #ifdef CONFIG_LOCKDEP                            2190 #ifdef CONFIG_LOCKDEP
3140         /*                                       2191         /*
3141          * It is permissible to free the stru    2192          * It is permissible to free the struct work_struct from
3142          * inside the function that is called    2193          * inside the function that is called from it, this we need to
3143          * take into account for lockdep too.    2194          * take into account for lockdep too.  To avoid bogus "held
3144          * lock freed" warnings as well as pr    2195          * lock freed" warnings as well as problems when looking into
3145          * work->lockdep_map, make a copy and    2196          * work->lockdep_map, make a copy and use that here.
3146          */                                      2197          */
3147         struct lockdep_map lockdep_map;          2198         struct lockdep_map lockdep_map;
3148                                                  2199 
3149         lockdep_copy_map(&lockdep_map, &work-    2200         lockdep_copy_map(&lockdep_map, &work->lockdep_map);
3150 #endif                                           2201 #endif
3151         /* ensure we're on the correct CPU */    2202         /* ensure we're on the correct CPU */
3152         WARN_ON_ONCE(!(pool->flags & POOL_DIS    2203         WARN_ON_ONCE(!(pool->flags & POOL_DISASSOCIATED) &&
3153                      raw_smp_processor_id() !    2204                      raw_smp_processor_id() != pool->cpu);
3154                                                  2205 
                                                   >> 2206         /*
                                                   >> 2207          * A single work shouldn't be executed concurrently by
                                                   >> 2208          * multiple workers on a single cpu.  Check whether anyone is
                                                   >> 2209          * already processing the work.  If so, defer the work to the
                                                   >> 2210          * currently executing one.
                                                   >> 2211          */
                                                   >> 2212         collision = find_worker_executing_work(pool, work);
                                                   >> 2213         if (unlikely(collision)) {
                                                   >> 2214                 move_linked_works(work, &collision->scheduled, NULL);
                                                   >> 2215                 return;
                                                   >> 2216         }
                                                   >> 2217 
3155         /* claim and dequeue */                  2218         /* claim and dequeue */
3156         debug_work_deactivate(work);             2219         debug_work_deactivate(work);
3157         hash_add(pool->busy_hash, &worker->he    2220         hash_add(pool->busy_hash, &worker->hentry, (unsigned long)work);
3158         worker->current_work = work;             2221         worker->current_work = work;
3159         worker->current_func = work->func;       2222         worker->current_func = work->func;
3160         worker->current_pwq = pwq;               2223         worker->current_pwq = pwq;
3161         if (worker->task)                     << 
3162                 worker->current_at = worker-> << 
3163         work_data = *work_data_bits(work);       2224         work_data = *work_data_bits(work);
3164         worker->current_color = get_work_colo    2225         worker->current_color = get_work_color(work_data);
3165                                                  2226 
3166         /*                                       2227         /*
3167          * Record wq name for cmdline and deb    2228          * Record wq name for cmdline and debug reporting, may get
3168          * overridden through set_worker_desc    2229          * overridden through set_worker_desc().
3169          */                                      2230          */
3170         strscpy(worker->desc, pwq->wq->name,     2231         strscpy(worker->desc, pwq->wq->name, WORKER_DESC_LEN);
3171                                                  2232 
3172         list_del_init(&work->entry);             2233         list_del_init(&work->entry);
3173                                                  2234 
3174         /*                                       2235         /*
3175          * CPU intensive works don't particip    2236          * CPU intensive works don't participate in concurrency management.
3176          * They're the scheduler's responsibi    2237          * They're the scheduler's responsibility.  This takes @worker out
3177          * of concurrency management and the     2238          * of concurrency management and the next code block will chain
3178          * execution of the pending work item    2239          * execution of the pending work items.
3179          */                                      2240          */
3180         if (unlikely(pwq->wq->flags & WQ_CPU_ !! 2241         if (unlikely(cpu_intensive))
3181                 worker_set_flags(worker, WORK    2242                 worker_set_flags(worker, WORKER_CPU_INTENSIVE);
3182                                                  2243 
3183         /*                                       2244         /*
3184          * Kick @pool if necessary. It's alwa !! 2245          * Wake up another worker if necessary.  The condition is always
3185          * since nr_running would always be > !! 2246          * false for normal per-cpu workers since nr_running would always
3186          * chain execution of the pending wor !! 2247          * be >= 1 at this point.  This is used to chain execution of the
3187          * workers such as the UNBOUND and CP !! 2248          * pending work items for WORKER_NOT_RUNNING workers such as the
                                                   >> 2249          * UNBOUND and CPU_INTENSIVE ones.
3188          */                                      2250          */
3189         kick_pool(pool);                      !! 2251         if (need_more_worker(pool))
                                                   >> 2252                 wake_up_worker(pool);
3190                                                  2253 
3191         /*                                       2254         /*
3192          * Record the last pool and clear PEN    2255          * Record the last pool and clear PENDING which should be the last
3193          * update to @work.  Also, do this in    2256          * update to @work.  Also, do this inside @pool->lock so that
3194          * PENDING and queued state changes h    2257          * PENDING and queued state changes happen together while IRQ is
3195          * disabled.                             2258          * disabled.
3196          */                                      2259          */
3197         set_work_pool_and_clear_pending(work, !! 2260         set_work_pool_and_clear_pending(work, pool->id);
3198                                                  2261 
3199         pwq->stats[PWQ_STAT_STARTED]++;       << 
3200         raw_spin_unlock_irq(&pool->lock);        2262         raw_spin_unlock_irq(&pool->lock);
3201                                                  2263 
3202         rcu_start_depth = rcu_preempt_depth() !! 2264         lock_map_acquire(&pwq->wq->lockdep_map);
3203         lockdep_start_depth = lockdep_depth(c << 
3204         /* see drain_dead_softirq_workfn() */ << 
3205         if (!bh_draining)                     << 
3206                 lock_map_acquire(&pwq->wq->lo << 
3207         lock_map_acquire(&lockdep_map);          2265         lock_map_acquire(&lockdep_map);
3208         /*                                       2266         /*
3209          * Strictly speaking we should mark t    2267          * Strictly speaking we should mark the invariant state without holding
3210          * any locks, that is, before these t    2268          * any locks, that is, before these two lock_map_acquire()'s.
3211          *                                       2269          *
3212          * However, that would result in:        2270          * However, that would result in:
3213          *                                       2271          *
3214          *   A(W1)                               2272          *   A(W1)
3215          *   WFC(C)                              2273          *   WFC(C)
3216          *              A(W1)                    2274          *              A(W1)
3217          *              C(C)                     2275          *              C(C)
3218          *                                       2276          *
3219          * Which would create W1->C->W1 depen    2277          * Which would create W1->C->W1 dependencies, even though there is no
3220          * actual deadlock possible. There ar    2278          * actual deadlock possible. There are two solutions, using a
3221          * read-recursive acquire on the work    2279          * read-recursive acquire on the work(queue) 'locks', but this will then
3222          * hit the lockdep limitation on recu    2280          * hit the lockdep limitation on recursive locks, or simply discard
3223          * these locks.                          2281          * these locks.
3224          *                                       2282          *
3225          * AFAICT there is no possible deadlo    2283          * AFAICT there is no possible deadlock scenario between the
3226          * flush_work() and complete() primit    2284          * flush_work() and complete() primitives (except for single-threaded
3227          * workqueues), so hiding them isn't     2285          * workqueues), so hiding them isn't a problem.
3228          */                                      2286          */
3229         lockdep_invariant_state(true);           2287         lockdep_invariant_state(true);
3230         trace_workqueue_execute_start(work);     2288         trace_workqueue_execute_start(work);
3231         worker->current_func(work);              2289         worker->current_func(work);
3232         /*                                       2290         /*
3233          * While we must be careful to not us    2291          * While we must be careful to not use "work" after this, the trace
3234          * point will only record its address    2292          * point will only record its address.
3235          */                                      2293          */
3236         trace_workqueue_execute_end(work, wor    2294         trace_workqueue_execute_end(work, worker->current_func);
3237         pwq->stats[PWQ_STAT_COMPLETED]++;     << 
3238         lock_map_release(&lockdep_map);          2295         lock_map_release(&lockdep_map);
3239         if (!bh_draining)                     !! 2296         lock_map_release(&pwq->wq->lockdep_map);
3240                 lock_map_release(&pwq->wq->lo << 
3241                                                  2297 
3242         if (unlikely((worker->task && in_atom !! 2298         if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
3243                      lockdep_depth(current) ! !! 2299                 pr_err("BUG: workqueue leaked lock or atomic: %s/0x%08x/%d\n"
3244                      rcu_preempt_depth() != r !! 2300                        "     last function: %ps\n",
3245                 pr_err("BUG: workqueue leaked !! 2301                        current->comm, preempt_count(), task_pid_nr(current),
3246                        "     preempt=0x%08x l << 
3247                        current->comm, task_pi << 
3248                        lockdep_start_depth, l << 
3249                        rcu_start_depth, rcu_p << 
3250                        worker->current_func);    2302                        worker->current_func);
3251                 debug_show_held_locks(current    2303                 debug_show_held_locks(current);
3252                 dump_stack();                    2304                 dump_stack();
3253         }                                        2305         }
3254                                                  2306 
3255         /*                                       2307         /*
3256          * The following prevents a kworker f    2308          * The following prevents a kworker from hogging CPU on !PREEMPTION
3257          * kernels, where a requeueing work i    2309          * kernels, where a requeueing work item waiting for something to
3258          * happen could deadlock with stop_ma    2310          * happen could deadlock with stop_machine as such a work item could
3259          * indefinitely requeue itself while     2311          * indefinitely requeue itself while all other CPUs are trapped in
3260          * stop_machine. At the same time, re    2312          * stop_machine. At the same time, report a quiescent RCU state so
3261          * the same condition doesn't freeze     2313          * the same condition doesn't freeze RCU.
3262          */                                      2314          */
3263         if (worker->task)                     !! 2315         cond_resched();
3264                 cond_resched();               << 
3265                                                  2316 
3266         raw_spin_lock_irq(&pool->lock);          2317         raw_spin_lock_irq(&pool->lock);
3267                                                  2318 
3268         /*                                    !! 2319         /* clear cpu intensive status */
3269          * In addition to %WQ_CPU_INTENSIVE,  !! 2320         if (unlikely(cpu_intensive))
3270          * CPU intensive by wq_worker_tick()  !! 2321                 worker_clr_flags(worker, WORKER_CPU_INTENSIVE);
3271          * wq_cpu_intensive_thresh_us. Clear  << 
3272          */                                   << 
3273         worker_clr_flags(worker, WORKER_CPU_I << 
3274                                                  2322 
3275         /* tag the worker for identification     2323         /* tag the worker for identification in schedule() */
3276         worker->last_func = worker->current_f    2324         worker->last_func = worker->current_func;
3277                                                  2325 
3278         /* we're done with it, release */        2326         /* we're done with it, release */
3279         hash_del(&worker->hentry);               2327         hash_del(&worker->hentry);
3280         worker->current_work = NULL;             2328         worker->current_work = NULL;
3281         worker->current_func = NULL;             2329         worker->current_func = NULL;
3282         worker->current_pwq = NULL;              2330         worker->current_pwq = NULL;
3283         worker->current_color = INT_MAX;         2331         worker->current_color = INT_MAX;
3284                                               << 
3285         /* must be the last step, see the fun << 
3286         pwq_dec_nr_in_flight(pwq, work_data);    2332         pwq_dec_nr_in_flight(pwq, work_data);
3287 }                                                2333 }
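
The comment above explains why the worker calls cond_resched() between work items on !PREEMPTION kernels. A work function that does a lot of work per invocation can also yield voluntarily from inside its own loop; a minimal, hypothetical sketch (the frob_* helper is made up, only cond_resched() and the work_struct API are real):

        #include <linux/workqueue.h>
        #include <linux/sched.h>

        /* Hypothetical work function that processes many items per invocation. */
        static void frob_batch_workfn(struct work_struct *work)
        {
                int i;

                for (i = 0; i < 10000; i++) {
                        frob_process_item(i);   /* assumed driver helper */
                        cond_resched();         /* yield on !PREEMPTION kernels */
                }
        }
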
3288                                                  2334 
3289 /**                                              2335 /**
3290  * process_scheduled_works - process schedule    2336  * process_scheduled_works - process scheduled works
3291  * @worker: self                                 2337  * @worker: self
3292  *                                               2338  *
3293  * Process all scheduled works.  Please note     2339  * Process all scheduled works.  Please note that the scheduled list
3294  * may change while processing a work, so thi    2340  * may change while processing a work, so this function repeatedly
3295  * fetches a work from the top and executes i    2341  * fetches a work from the top and executes it.
3296  *                                               2342  *
3297  * CONTEXT:                                      2343  * CONTEXT:
3298  * raw_spin_lock_irq(pool->lock) which may be    2344  * raw_spin_lock_irq(pool->lock) which may be released and regrabbed
3299  * multiple times.                               2345  * multiple times.
3300  */                                              2346  */
3301 static void process_scheduled_works(struct wo    2347 static void process_scheduled_works(struct worker *worker)
3302 {                                                2348 {
3303         struct work_struct *work;             !! 2349         while (!list_empty(&worker->scheduled)) {
3304         bool first = true;                    !! 2350                 struct work_struct *work = list_first_entry(&worker->scheduled,
3305                                               !! 2351                                                 struct work_struct, entry);
3306         while ((work = list_first_entry_or_nu << 
3307                                               << 
3308                 if (first) {                  << 
3309                         worker->pool->watchdo << 
3310                         first = false;        << 
3311                 }                             << 
3312                 process_one_work(worker, work    2352                 process_one_work(worker, work);
3313         }                                        2353         }
3314 }                                                2354 }
3315                                                  2355 
3316 static void set_pf_worker(bool val)              2356 static void set_pf_worker(bool val)
3317 {                                                2357 {
3318         mutex_lock(&wq_pool_attach_mutex);       2358         mutex_lock(&wq_pool_attach_mutex);
3319         if (val)                                 2359         if (val)
3320                 current->flags |= PF_WQ_WORKE    2360                 current->flags |= PF_WQ_WORKER;
3321         else                                     2361         else
3322                 current->flags &= ~PF_WQ_WORK    2362                 current->flags &= ~PF_WQ_WORKER;
3323         mutex_unlock(&wq_pool_attach_mutex);     2363         mutex_unlock(&wq_pool_attach_mutex);
3324 }                                                2364 }
3325                                                  2365 
3326 /**                                              2366 /**
3327  * worker_thread - the worker thread function    2367  * worker_thread - the worker thread function
3328  * @__worker: self                               2368  * @__worker: self
3329  *                                               2369  *
3330  * The worker thread function.  All workers b    2370  * The worker thread function.  All workers belong to a worker_pool -
3331  * either a per-cpu one or dynamic unbound on    2371  * either a per-cpu one or dynamic unbound one.  These workers process all
3332  * work items regardless of their specific ta    2372  * work items regardless of their specific target workqueue.  The only
3333  * exception is work items which belong to wo    2373  * exception is work items which belong to workqueues with a rescuer which
3334  * will be explained in rescuer_thread().        2374  * will be explained in rescuer_thread().
3335  *                                               2375  *
3336  * Return: 0                                     2376  * Return: 0
3337  */                                              2377  */
3338 static int worker_thread(void *__worker)         2378 static int worker_thread(void *__worker)
3339 {                                                2379 {
3340         struct worker *worker = __worker;        2380         struct worker *worker = __worker;
3341         struct worker_pool *pool = worker->po    2381         struct worker_pool *pool = worker->pool;
3342                                                  2382 
3343         /* tell the scheduler that this is a     2383         /* tell the scheduler that this is a workqueue worker */
3344         set_pf_worker(true);                     2384         set_pf_worker(true);
3345 woke_up:                                         2385 woke_up:
3346         raw_spin_lock_irq(&pool->lock);          2386         raw_spin_lock_irq(&pool->lock);
3347                                                  2387 
3348         /* am I supposed to die? */              2388         /* am I supposed to die? */
3349         if (unlikely(worker->flags & WORKER_D    2389         if (unlikely(worker->flags & WORKER_DIE)) {
3350                 raw_spin_unlock_irq(&pool->lo    2390                 raw_spin_unlock_irq(&pool->lock);
                                                   >> 2391                 WARN_ON_ONCE(!list_empty(&worker->entry));
3351                 set_pf_worker(false);            2392                 set_pf_worker(false);
3352                 /*                            !! 2393 
3353                  * The worker is dead and PF_ !! 2394                 set_task_comm(worker->task, "kworker/dying");
3354                  * shouldn't be accessed, res << 
3355                  */                           << 
3356                 worker->pool = NULL;          << 
3357                 ida_free(&pool->worker_ida, w    2395                 ida_free(&pool->worker_ida, worker->id);
                                                   >> 2396                 worker_detach_from_pool(worker);
                                                   >> 2397                 kfree(worker);
3358                 return 0;                        2398                 return 0;
3359         }                                        2399         }
3360                                                  2400 
3361         worker_leave_idle(worker);               2401         worker_leave_idle(worker);
3362 recheck:                                         2402 recheck:
3363         /* no more worker necessary? */          2403         /* no more worker necessary? */
3364         if (!need_more_worker(pool))             2404         if (!need_more_worker(pool))
3365                 goto sleep;                      2405                 goto sleep;
3366                                                  2406 
3367         /* do we need to manage? */              2407         /* do we need to manage? */
3368         if (unlikely(!may_start_working(pool)    2408         if (unlikely(!may_start_working(pool)) && manage_workers(worker))
3369                 goto recheck;                    2409                 goto recheck;
3370                                                  2410 
3371         /*                                       2411         /*
3372          * ->scheduled list can only be fille    2412          * ->scheduled list can only be filled while a worker is
3373          * preparing to process a work or act    2413          * preparing to process a work or actually processing it.
3374          * Make sure nobody diddled with it w    2414          * Make sure nobody diddled with it while I was sleeping.
3375          */                                      2415          */
3376         WARN_ON_ONCE(!list_empty(&worker->sch    2416         WARN_ON_ONCE(!list_empty(&worker->scheduled));
3377                                                  2417 
3378         /*                                       2418         /*
3379          * Finish PREP stage.  We're guarante    2419          * Finish PREP stage.  We're guaranteed to have at least one idle
3380          * worker or that someone else has al    2420          * worker or that someone else has already assumed the manager
3381          * role.  This is where @worker start    2421          * role.  This is where @worker starts participating in concurrency
3382          * management if applicable and concu    2422          * management if applicable and concurrency management is restored
3383          * after being rebound.  See rebind_w    2423          * after being rebound.  See rebind_workers() for details.
3384          */                                      2424          */
3385         worker_clr_flags(worker, WORKER_PREP     2425         worker_clr_flags(worker, WORKER_PREP | WORKER_REBOUND);
3386                                                  2426 
3387         do {                                     2427         do {
3388                 struct work_struct *work =       2428                 struct work_struct *work =
3389                         list_first_entry(&poo    2429                         list_first_entry(&pool->worklist,
3390                                          stru    2430                                          struct work_struct, entry);
3391                                                  2431 
3392                 if (assign_work(work, worker, !! 2432                 pool->watchdog_ts = jiffies;
                                                   >> 2433 
                                                   >> 2434                 if (likely(!(*work_data_bits(work) & WORK_STRUCT_LINKED))) {
                                                   >> 2435                         /* optimization path, not strictly necessary */
                                                   >> 2436                         process_one_work(worker, work);
                                                   >> 2437                         if (unlikely(!list_empty(&worker->scheduled)))
                                                   >> 2438                                 process_scheduled_works(worker);
                                                   >> 2439                 } else {
                                                   >> 2440                         move_linked_works(work, &worker->scheduled, NULL);
3393                         process_scheduled_wor    2441                         process_scheduled_works(worker);
                                                   >> 2442                 }
3394         } while (keep_working(pool));            2443         } while (keep_working(pool));
3395                                                  2444 
3396         worker_set_flags(worker, WORKER_PREP)    2445         worker_set_flags(worker, WORKER_PREP);
3397 sleep:                                           2446 sleep:
3398         /*                                       2447         /*
3399          * pool->lock is held and there's no     2448          * pool->lock is held and there's no work to process and no need to
3400          * manage, sleep.  Workers are woken     2449          * manage, sleep.  Workers are woken up only while holding
3401          * pool->lock or from local cpu, so s    2450          * pool->lock or from local cpu, so setting the current state
3402          * before releasing pool->lock is eno    2451          * before releasing pool->lock is enough to prevent losing any
3403          * event.                                2452          * event.
3404          */                                      2453          */
3405         worker_enter_idle(worker);               2454         worker_enter_idle(worker);
3406         __set_current_state(TASK_IDLE);          2455         __set_current_state(TASK_IDLE);
3407         raw_spin_unlock_irq(&pool->lock);        2456         raw_spin_unlock_irq(&pool->lock);
3408         schedule();                              2457         schedule();
3409         goto woke_up;                            2458         goto woke_up;
3410 }                                                2459 }
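
worker_thread() above is the consumer side; a queueing user only needs a work item and queue_work(). A minimal sketch against the public workqueue API (struct frob_dev and the frob_* helpers are illustrative, not from this file):

        #include <linux/workqueue.h>

        struct frob_dev {
                struct work_struct      work;
                /* ... device state ... */
        };

        static void frob_workfn(struct work_struct *work)
        {
                struct frob_dev *fd = container_of(work, struct frob_dev, work);

                frob_handle_event(fd);  /* assumed helper; runs in process context */
        }

        static void frob_init(struct frob_dev *fd)
        {
                INIT_WORK(&fd->work, frob_workfn);
        }

        static void frob_notify(struct frob_dev *fd)
        {
                /* lands on a pool->worklist and is executed by worker_thread() */
                queue_work(system_wq, &fd->work);
        }
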
3411                                                  2460 
3412 /**                                              2461 /**
3413  * rescuer_thread - the rescuer thread functi    2462  * rescuer_thread - the rescuer thread function
3414  * @__rescuer: self                              2463  * @__rescuer: self
3415  *                                               2464  *
3416  * Workqueue rescuer thread function.  There'    2465  * Workqueue rescuer thread function.  There's one rescuer for each
3417  * workqueue which has WQ_MEM_RECLAIM set.       2466  * workqueue which has WQ_MEM_RECLAIM set.
3418  *                                               2467  *
3419  * Regular work processing on a pool may bloc    2468  * Regular work processing on a pool may block trying to create a new
3420  * worker which uses GFP_KERNEL allocation wh    2469  * worker which uses GFP_KERNEL allocation which has a slight chance of
3421  * developing into deadlock if some works cur    2470  * developing into deadlock if some works currently on the same queue
3422  * need to be processed to satisfy the GFP_KE    2471  * need to be processed to satisfy the GFP_KERNEL allocation.  This is
3423  * the problem rescuer solves.                   2472  * the problem rescuer solves.
3424  *                                               2473  *
3425  * When such condition is possible, the pool     2474  * When such condition is possible, the pool summons rescuers of all
3426  * workqueues which have works queued on the     2475  * workqueues which have works queued on the pool and let them process
3427  * those works so that forward progress can b    2476  * those works so that forward progress can be guaranteed.
3428  *                                               2477  *
3429  * This should happen rarely.                    2478  * This should happen rarely.
3430  *                                               2479  *
3431  * Return: 0                                     2480  * Return: 0
3432  */                                              2481  */
3433 static int rescuer_thread(void *__rescuer)       2482 static int rescuer_thread(void *__rescuer)
3434 {                                                2483 {
3435         struct worker *rescuer = __rescuer;      2484         struct worker *rescuer = __rescuer;
3436         struct workqueue_struct *wq = rescuer    2485         struct workqueue_struct *wq = rescuer->rescue_wq;
                                                   >> 2486         struct list_head *scheduled = &rescuer->scheduled;
3437         bool should_stop;                        2487         bool should_stop;
3438                                                  2488 
3439         set_user_nice(current, RESCUER_NICE_L    2489         set_user_nice(current, RESCUER_NICE_LEVEL);
3440                                                  2490 
3441         /*                                       2491         /*
3442          * Mark rescuer as worker too.  As WO    2492          * Mark rescuer as worker too.  As WORKER_PREP is never cleared, it
3443          * doesn't participate in concurrency    2493          * doesn't participate in concurrency management.
3444          */                                      2494          */
3445         set_pf_worker(true);                     2495         set_pf_worker(true);
3446 repeat:                                          2496 repeat:
3447         set_current_state(TASK_IDLE);            2497         set_current_state(TASK_IDLE);
3448                                                  2498 
3449         /*                                       2499         /*
3450          * By the time the rescuer is request    2500          * By the time the rescuer is requested to stop, the workqueue
3451          * shouldn't have any work pending, b    2501          * shouldn't have any work pending, but @wq->maydays may still have
3452          * pwq(s) queued.  This can happen by    2502          * pwq(s) queued.  This can happen by non-rescuer workers consuming
3453          * all the work items before the resc    2503          * all the work items before the rescuer got to them.  Go through
3454          * @wq->maydays processing before act    2504          * @wq->maydays processing before acting on should_stop so that the
3455          * list is always empty on exit.         2505          * list is always empty on exit.
3456          */                                      2506          */
3457         should_stop = kthread_should_stop();     2507         should_stop = kthread_should_stop();
3458                                                  2508 
3459         /* see whether any pwq is asking for     2509         /* see whether any pwq is asking for help */
3460         raw_spin_lock_irq(&wq_mayday_lock);      2510         raw_spin_lock_irq(&wq_mayday_lock);
3461                                                  2511 
3462         while (!list_empty(&wq->maydays)) {      2512         while (!list_empty(&wq->maydays)) {
3463                 struct pool_workqueue *pwq =     2513                 struct pool_workqueue *pwq = list_first_entry(&wq->maydays,
3464                                         struc    2514                                         struct pool_workqueue, mayday_node);
3465                 struct worker_pool *pool = pw    2515                 struct worker_pool *pool = pwq->pool;
3466                 struct work_struct *work, *n;    2516                 struct work_struct *work, *n;
                                                   >> 2517                 bool first = true;
3467                                                  2518 
3468                 __set_current_state(TASK_RUNN    2519                 __set_current_state(TASK_RUNNING);
3469                 list_del_init(&pwq->mayday_no    2520                 list_del_init(&pwq->mayday_node);
3470                                                  2521 
3471                 raw_spin_unlock_irq(&wq_mayda    2522                 raw_spin_unlock_irq(&wq_mayday_lock);
3472                                                  2523 
3473                 worker_attach_to_pool(rescuer    2524                 worker_attach_to_pool(rescuer, pool);
3474                                                  2525 
3475                 raw_spin_lock_irq(&pool->lock    2526                 raw_spin_lock_irq(&pool->lock);
3476                                                  2527 
3477                 /*                               2528                 /*
3478                  * Slurp in all works issued     2529                  * Slurp in all works issued via this workqueue and
3479                  * process'em.                   2530                  * process'em.
3480                  */                              2531                  */
3481                 WARN_ON_ONCE(!list_empty(&res !! 2532                 WARN_ON_ONCE(!list_empty(scheduled));
3482                 list_for_each_entry_safe(work    2533                 list_for_each_entry_safe(work, n, &pool->worklist, entry) {
3483                         if (get_work_pwq(work !! 2534                         if (get_work_pwq(work) == pwq) {
3484                             assign_work(work, !! 2535                                 if (first)
3485                                 pwq->stats[PW !! 2536                                         pool->watchdog_ts = jiffies;
                                                   >> 2537                                 move_linked_works(work, scheduled, &n);
                                                   >> 2538                         }
                                                   >> 2539                         first = false;
3486                 }                                2540                 }
3487                                                  2541 
3488                 if (!list_empty(&rescuer->sch !! 2542                 if (!list_empty(scheduled)) {
3489                         process_scheduled_wor    2543                         process_scheduled_works(rescuer);
3490                                                  2544 
3491                         /*                       2545                         /*
3492                          * The above executio    2546                          * The above execution of rescued work items could
3493                          * have created more     2547                          * have created more to rescue through
3494                          * pwq_activate_first    2548                          * pwq_activate_first_inactive() or chained
3495                          * queueing.  Let's p    2549                          * queueing.  Let's put @pwq back on mayday list so
3496                          * that such back-to-    2550                          * that such back-to-back work items, which may be
3497                          * being used to reli    2551                          * being used to relieve memory pressure, don't
3498                          * incur MAYDAY_INTER    2552                          * incur MAYDAY_INTERVAL delay in between.
3499                          */                      2553                          */
3500                         if (pwq->nr_active &&    2554                         if (pwq->nr_active && need_to_create_worker(pool)) {
3501                                 raw_spin_lock    2555                                 raw_spin_lock(&wq_mayday_lock);
3502                                 /*               2556                                 /*
3503                                  * Queue iff     2557                                  * Queue iff we aren't racing destruction
3504                                  * and somebo    2558                                  * and somebody else hasn't queued it already.
3505                                  */              2559                                  */
3506                                 if (wq->rescu    2560                                 if (wq->rescuer && list_empty(&pwq->mayday_node)) {
3507                                         get_p    2561                                         get_pwq(pwq);
3508                                         list_    2562                                         list_add_tail(&pwq->mayday_node, &wq->maydays);
3509                                 }                2563                                 }
3510                                 raw_spin_unlo    2564                                 raw_spin_unlock(&wq_mayday_lock);
3511                         }                        2565                         }
3512                 }                                2566                 }
3513                                                  2567 
3514                 /*                               2568                 /*
3515                  * Put the reference grabbed     2569                  * Put the reference grabbed by send_mayday().  @pool won't
3516                  * go away while we're still     2570                  * go away while we're still attached to it.
3517                  */                              2571                  */
3518                 put_pwq(pwq);                    2572                 put_pwq(pwq);
3519                                                  2573 
3520                 /*                               2574                 /*
3521                  * Leave this pool. Notify re !! 2575                  * Leave this pool.  If need_more_worker() is %true, notify a
3522                  * with 0 concurrency and sta !! 2576                  * regular worker; otherwise, we end up with 0 concurrency
                                                   >> 2577                  * and stalling the execution.
3523                  */                              2578                  */
3524                 kick_pool(pool);              !! 2579                 if (need_more_worker(pool))
                                                   >> 2580                         wake_up_worker(pool);
3525                                                  2581 
3526                 raw_spin_unlock_irq(&pool->lo    2582                 raw_spin_unlock_irq(&pool->lock);
3527                                                  2583 
3528                 worker_detach_from_pool(rescu    2584                 worker_detach_from_pool(rescuer);
3529                                                  2585 
3530                 raw_spin_lock_irq(&wq_mayday_    2586                 raw_spin_lock_irq(&wq_mayday_lock);
3531         }                                        2587         }
3532                                                  2588 
3533         raw_spin_unlock_irq(&wq_mayday_lock);    2589         raw_spin_unlock_irq(&wq_mayday_lock);
3534                                                  2590 
3535         if (should_stop) {                       2591         if (should_stop) {
3536                 __set_current_state(TASK_RUNN    2592                 __set_current_state(TASK_RUNNING);
3537                 set_pf_worker(false);            2593                 set_pf_worker(false);
3538                 return 0;                        2594                 return 0;
3539         }                                        2595         }
3540                                                  2596 
3541         /* rescuers should never participate     2597         /* rescuers should never participate in concurrency management */
3542         WARN_ON_ONCE(!(rescuer->flags & WORKE    2598         WARN_ON_ONCE(!(rescuer->flags & WORKER_NOT_RUNNING));
3543         schedule();                              2599         schedule();
3544         goto repeat;                             2600         goto repeat;
3545 }                                                2601 }
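
Rescuers exist only for workqueues allocated with WQ_MEM_RECLAIM. A hedged sketch of how a user on the memory-reclaim path might request one (the workqueue name and variables are illustrative):

        static struct workqueue_struct *frob_reclaim_wq;

        static int __init frob_wq_init(void)
        {
                /*
                 * WQ_MEM_RECLAIM guarantees a rescuer thread, so work queued
                 * here can make forward progress even when new workers cannot
                 * be created under memory pressure.
                 */
                frob_reclaim_wq = alloc_workqueue("frob_reclaim", WQ_MEM_RECLAIM, 0);
                if (!frob_reclaim_wq)
                        return -ENOMEM;
                return 0;
        }
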
3546                                                  2602 
3547 static void bh_worker(struct worker *worker)  << 
3548 {                                             << 
3549         struct worker_pool *pool = worker->po << 
3550         int nr_restarts = BH_WORKER_RESTARTS; << 
3551         unsigned long end = jiffies + BH_WORK << 
3552                                               << 
3553         raw_spin_lock_irq(&pool->lock);       << 
3554         worker_leave_idle(worker);            << 
3555                                               << 
3556         /*                                    << 
3557          * This function follows the structur << 
3558          * explanations on each step.         << 
3559          */                                   << 
3560         if (!need_more_worker(pool))          << 
3561                 goto done;                    << 
3562                                               << 
3563         WARN_ON_ONCE(!list_empty(&worker->sch << 
3564         worker_clr_flags(worker, WORKER_PREP  << 
3565                                               << 
3566         do {                                  << 
3567                 struct work_struct *work =    << 
3568                         list_first_entry(&poo << 
3569                                          stru << 
3570                                               << 
3571                 if (assign_work(work, worker, << 
3572                         process_scheduled_wor << 
3573         } while (keep_working(pool) &&        << 
3574                  --nr_restarts && time_before << 
3575                                               << 
3576         worker_set_flags(worker, WORKER_PREP) << 
3577 done:                                         << 
3578         worker_enter_idle(worker);            << 
3579         kick_pool(pool);                      << 
3580         raw_spin_unlock_irq(&pool->lock);     << 
3581 }                                             << 
3582                                               << 
3583 /*                                            << 
3584  * TODO: Convert all tasklet users to workque << 
3585  *                                            << 
3586  * This is currently called from tasklet[_hi] << 
3587  * whenever there are tasklets to run. Let's  << 
3588  * queued. Once conversion from tasklet is co << 
3589  * can be dropped.                            << 
3590  *                                            << 
3591  * After full conversion, we'll add worker->s << 
3592  * softirq action and obtain the worker point << 
3593  */                                           << 
3594 void workqueue_softirq_action(bool highpri)   << 
3595 {                                             << 
3596         struct worker_pool *pool =            << 
3597                 &per_cpu(bh_worker_pools, smp << 
3598         if (need_more_worker(pool))           << 
3599                 bh_worker(list_first_entry(&p << 
3600 }                                             << 
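
The TODO above describes the tasklet-to-workqueue conversion; a driver moving off tasklets queues on the global BH workqueues instead, and its work function then runs in softirq context via bh_worker(). A hedged sketch (the driver-side names are made up; system_bh_wq is one of the global BH workqueues introduced alongside this code):

        #include <linux/interrupt.h>
        #include <linux/workqueue.h>

        static void frob_bh_workfn(struct work_struct *work)
        {
                /* runs in BH (softirq) context, like the tasklet it replaces */
        }

        static DECLARE_WORK(frob_bh_work, frob_bh_workfn);

        static irqreturn_t frob_irq(int irq, void *dev_id)
        {
                /* replaces tasklet_schedule() */
                queue_work(system_bh_wq, &frob_bh_work);
                return IRQ_HANDLED;
        }
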
3601                                               << 
3602 struct wq_drain_dead_softirq_work {           << 
3603         struct work_struct      work;         << 
3604         struct worker_pool      *pool;        << 
3605         struct completion       done;         << 
3606 };                                            << 
3607                                               << 
3608 static void drain_dead_softirq_workfn(struct  << 
3609 {                                             << 
3610         struct wq_drain_dead_softirq_work *de << 
3611                 container_of(work, struct wq_ << 
3612         struct worker_pool *pool = dead_work- << 
3613         bool repeat;                          << 
3614                                               << 
3615         /*                                    << 
3616          * @pool's CPU is dead and we want to << 
3617          * items from this BH work item which << 
3618          * its CPU is dead, @pool can't be ki << 
3619          * will be nested, a lockdep annotati << 
3620          * @pool with %POOL_BH_DRAINING for t << 
3621          */                                   << 
3622         raw_spin_lock_irq(&pool->lock);       << 
3623         pool->flags |= POOL_BH_DRAINING;      << 
3624         raw_spin_unlock_irq(&pool->lock);     << 
3625                                               << 
3626         bh_worker(list_first_entry(&pool->wor << 
3627                                               << 
3628         raw_spin_lock_irq(&pool->lock);       << 
3629         pool->flags &= ~POOL_BH_DRAINING;     << 
3630         repeat = need_more_worker(pool);      << 
3631         raw_spin_unlock_irq(&pool->lock);     << 
3632                                               << 
3633         /*                                    << 
3634          * bh_worker() might hit consecutive  << 
3635          * still are pending work items, resc << 
3636          * don't hog this CPU's BH.           << 
3637          */                                   << 
3638         if (repeat) {                         << 
3639                 if (pool->attrs->nice == HIGH << 
3640                         queue_work(system_bh_ << 
3641                 else                          << 
3642                         queue_work(system_bh_ << 
3643         } else {                              << 
3644                 complete(&dead_work->done);   << 
3645         }                                     << 
3646 }                                             << 
3647                                               << 
3648 /*                                            << 
3649  * @cpu is dead. Drain the remaining BH work  << 
3650  * possible to allocate dead_work per CPU and << 
3651  * have to worry about draining overlapping w << 
3652  * nesting (one CPU's dead_work queued on ano << 
3653  * on). Let's keep it simple and drain them s << 
3654  * items which shouldn't be requeued on the s << 
3655  */                                           << 
3656 void workqueue_softirq_dead(unsigned int cpu) << 
3657 {                                             << 
3658         int i;                                << 
3659                                               << 
3660         for (i = 0; i < NR_STD_WORKER_POOLS;  << 
3661                 struct worker_pool *pool = &p << 
3662                 struct wq_drain_dead_softirq_ << 
3663                                               << 
3664                 if (!need_more_worker(pool))  << 
3665                         continue;             << 
3666                                               << 
3667                 INIT_WORK_ONSTACK(&dead_work. << 
3668                 dead_work.pool = pool;        << 
3669                 init_completion(&dead_work.do << 
3670                                               << 
3671                 if (pool->attrs->nice == HIGH << 
3672                         queue_work(system_bh_ << 
3673                 else                          << 
3674                         queue_work(system_bh_ << 
3675                                               << 
3676                 wait_for_completion(&dead_wor << 
3677                 destroy_work_on_stack(&dead_w << 
3678         }                                     << 
3679 }                                             << 
3680                                               << 
3681 /**                                              2603 /**
3682  * check_flush_dependency - check for flush d    2604  * check_flush_dependency - check for flush dependency sanity
3683  * @target_wq: workqueue being flushed           2605  * @target_wq: workqueue being flushed
3684  * @target_work: work item being flushed (NUL    2606  * @target_work: work item being flushed (NULL for workqueue flushes)
3685  *                                               2607  *
3686  * %current is trying to flush the whole @tar    2608  * %current is trying to flush the whole @target_wq or @target_work on it.
3687  * If @target_wq doesn't have %WQ_MEM_RECLAIM    2609  * If @target_wq doesn't have %WQ_MEM_RECLAIM, verify that %current is not
3688  * reclaiming memory or running on a workqueu    2610  * reclaiming memory or running on a workqueue which doesn't have
3689  * %WQ_MEM_RECLAIM as that can break forward-    2611  * %WQ_MEM_RECLAIM as that can break forward-progress guarantee leading to
3690  * a deadlock.                                   2612  * a deadlock.
3691  */                                              2613  */
3692 static void check_flush_dependency(struct wor    2614 static void check_flush_dependency(struct workqueue_struct *target_wq,
3693                                    struct wor    2615                                    struct work_struct *target_work)
3694 {                                                2616 {
3695         work_func_t target_func = target_work    2617         work_func_t target_func = target_work ? target_work->func : NULL;
3696         struct worker *worker;                   2618         struct worker *worker;
3697                                                  2619 
3698         if (target_wq->flags & WQ_MEM_RECLAIM    2620         if (target_wq->flags & WQ_MEM_RECLAIM)
3699                 return;                          2621                 return;
3700                                                  2622 
3701         worker = current_wq_worker();            2623         worker = current_wq_worker();
3702                                                  2624 
3703         WARN_ONCE(current->flags & PF_MEMALLO    2625         WARN_ONCE(current->flags & PF_MEMALLOC,
3704                   "workqueue: PF_MEMALLOC tas    2626                   "workqueue: PF_MEMALLOC task %d(%s) is flushing !WQ_MEM_RECLAIM %s:%ps",
3705                   current->pid, current->comm    2627                   current->pid, current->comm, target_wq->name, target_func);
3706         WARN_ONCE(worker && ((worker->current    2628         WARN_ONCE(worker && ((worker->current_pwq->wq->flags &
3707                               (WQ_MEM_RECLAIM    2629                               (WQ_MEM_RECLAIM | __WQ_LEGACY)) == WQ_MEM_RECLAIM),
3708                   "workqueue: WQ_MEM_RECLAIM     2630                   "workqueue: WQ_MEM_RECLAIM %s:%ps is flushing !WQ_MEM_RECLAIM %s:%ps",
3709                   worker->current_pwq->wq->na    2631                   worker->current_pwq->wq->name, worker->current_func,
3710                   target_wq->name, target_fun    2632                   target_wq->name, target_func);
3711 }                                                2633 }
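
check_flush_dependency() warns when reclaim-critical context waits on a workqueue that lacks WQ_MEM_RECLAIM. A hedged sketch of the anti-pattern it catches (both workqueues and the work function are illustrative):

        static struct workqueue_struct *frob_plain_wq; /* alloc_workqueue("frob_plain", 0, 0) */

        /*
         * ANTI-PATTERN: this work item is assumed to run on a WQ_MEM_RECLAIM
         * workqueue.  Flushing a !WQ_MEM_RECLAIM workqueue from here can stall
         * reclaim indefinitely, and check_flush_dependency() emits a WARN for it.
         */
        static void frob_reclaim_workfn(struct work_struct *work)
        {
                flush_workqueue(frob_plain_wq);
        }
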
3712                                                  2634 
3713 struct wq_barrier {                              2635 struct wq_barrier {
3714         struct work_struct      work;            2636         struct work_struct      work;
3715         struct completion       done;            2637         struct completion       done;
3716         struct task_struct      *task;  /* pu    2638         struct task_struct      *task;  /* purely informational */
3717 };                                               2639 };
3718                                                  2640 
3719 static void wq_barrier_func(struct work_struc    2641 static void wq_barrier_func(struct work_struct *work)
3720 {                                                2642 {
3721         struct wq_barrier *barr = container_o    2643         struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
3722         complete(&barr->done);                   2644         complete(&barr->done);
3723 }                                                2645 }
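
wq_barrier is the internal building block behind flush_work(): a dummy work item whose only job is to complete a completion. The same on-stack idiom is available to ordinary callers who just need to wait for a marker item to run; a minimal sketch, assuming nothing beyond the workqueue and completion APIs:

        #include <linux/completion.h>
        #include <linux/workqueue.h>

        struct frob_marker {
                struct work_struct      work;
                struct completion       done;
        };

        static void frob_marker_workfn(struct work_struct *work)
        {
                struct frob_marker *m = container_of(work, struct frob_marker, work);

                complete(&m->done);
        }

        /* Queue a marker work item and wait until it has executed. */
        static void frob_wait_for_marker(struct workqueue_struct *wq)
        {
                struct frob_marker m;

                INIT_WORK_ONSTACK(&m.work, frob_marker_workfn);
                init_completion(&m.done);
                queue_work(wq, &m.work);
                wait_for_completion(&m.done);
                destroy_work_on_stack(&m.work);
        }
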
3724                                                  2646 
3725 /**                                              2647 /**
3726  * insert_wq_barrier - insert a barrier work     2648  * insert_wq_barrier - insert a barrier work
3727  * @pwq: pwq to insert barrier into              2649  * @pwq: pwq to insert barrier into
3728  * @barr: wq_barrier to insert                   2650  * @barr: wq_barrier to insert
3729  * @target: target work to attach @barr to       2651  * @target: target work to attach @barr to
3730  * @worker: worker currently executing @targe    2652  * @worker: worker currently executing @target, NULL if @target is not executing
3731  *                                               2653  *
3732  * @barr is linked to @target such that @barr    2654  * @barr is linked to @target such that @barr is completed only after
3733  * @target finishes execution.  Please note t    2655  * @target finishes execution.  Please note that the ordering
3734  * guarantee is observed only with respect to    2656  * guarantee is observed only with respect to @target and on the local
3735  * cpu.                                          2657  * cpu.
3736  *                                               2658  *
3737  * Currently, a queued barrier can't be cance    2659  * Currently, a queued barrier can't be canceled.  This is because
3738  * try_to_grab_pending() can't determine whet    2660  * try_to_grab_pending() can't determine whether the work to be
3739  * grabbed is at the head of the queue and th    2661  * grabbed is at the head of the queue and thus can't clear LINKED
3740  * flag of the previous work while there must    2662  * flag of the previous work while there must be a valid next work
3741  * after a work with LINKED flag set.            2663  * after a work with LINKED flag set.
3742  *                                               2664  *
3743  * Note that when @worker is non-NULL, @targe    2665  * Note that when @worker is non-NULL, @target may be modified
3744  * underneath us, so we can't reliably determ    2666  * underneath us, so we can't reliably determine pwq from @target.
3745  *                                               2667  *
3746  * CONTEXT:                                      2668  * CONTEXT:
3747  * raw_spin_lock_irq(pool->lock).                2669  * raw_spin_lock_irq(pool->lock).
3748  */                                              2670  */
3749 static void insert_wq_barrier(struct pool_wor    2671 static void insert_wq_barrier(struct pool_workqueue *pwq,
3750                               struct wq_barri    2672                               struct wq_barrier *barr,
3751                               struct work_str    2673                               struct work_struct *target, struct worker *worker)
3752 {                                                2674 {
3753         static __maybe_unused struct lock_cla << 
3754         unsigned int work_flags = 0;             2675         unsigned int work_flags = 0;
3755         unsigned int work_color;                 2676         unsigned int work_color;
3756         struct list_head *head;                  2677         struct list_head *head;
3757                                                  2678 
3758         /*                                       2679         /*
3759          * debugobject calls are safe here ev    2680          * debugobject calls are safe here even with pool->lock locked
3760          * as we know for sure that this will    2681          * as we know for sure that this will not trigger any of the
3761          * checks and call back into the fixu    2682          * checks and call back into the fixup functions where we
3762          * might deadlock.                       2683          * might deadlock.
3763          *                                    << 
3764          * BH and threaded workqueues need se << 
3765          * spuriously triggering "inconsisten << 
3766          * usage".                            << 
3767          */                                      2684          */
3768         INIT_WORK_ONSTACK_KEY(&barr->work, wq !! 2685         INIT_WORK_ONSTACK(&barr->work, wq_barrier_func);
3769                               (pwq->wq->flags << 
3770         __set_bit(WORK_STRUCT_PENDING_BIT, wo    2686         __set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work));
3771                                                  2687 
3772         init_completion_map(&barr->done, &tar    2688         init_completion_map(&barr->done, &target->lockdep_map);
3773                                                  2689 
3774         barr->task = current;                    2690         barr->task = current;
3775                                                  2691 
3776         /* The barrier work item does not par !! 2692         /* The barrier work item does not participate in pwq->nr_active. */
3777         work_flags |= WORK_STRUCT_INACTIVE;      2693         work_flags |= WORK_STRUCT_INACTIVE;
3778                                                  2694 
3779         /*                                       2695         /*
3780          * If @target is currently being exec    2696          * If @target is currently being executed, schedule the
3781          * barrier to the worker; otherwise,     2697          * barrier to the worker; otherwise, put it after @target.
3782          */                                      2698          */
3783         if (worker) {                            2699         if (worker) {
3784                 head = worker->scheduled.next    2700                 head = worker->scheduled.next;
3785                 work_color = worker->current_    2701                 work_color = worker->current_color;
3786         } else {                                 2702         } else {
3787                 unsigned long *bits = work_da    2703                 unsigned long *bits = work_data_bits(target);
3788                                                  2704 
3789                 head = target->entry.next;       2705                 head = target->entry.next;
3790                 /* there can already be other    2706                 /* there can already be other linked works, inherit and set */
3791                 work_flags |= *bits & WORK_ST    2707                 work_flags |= *bits & WORK_STRUCT_LINKED;
3792                 work_color = get_work_color(*    2708                 work_color = get_work_color(*bits);
3793                 __set_bit(WORK_STRUCT_LINKED_    2709                 __set_bit(WORK_STRUCT_LINKED_BIT, bits);
3794         }                                        2710         }
3795                                                  2711 
3796         pwq->nr_in_flight[work_color]++;         2712         pwq->nr_in_flight[work_color]++;
3797         work_flags |= work_color_to_flags(wor    2713         work_flags |= work_color_to_flags(work_color);
3798                                                  2714 
                                                   >> 2715         debug_work_activate(&barr->work);
3799         insert_work(pwq, &barr->work, head, w    2716         insert_work(pwq, &barr->work, head, work_flags);
3800 }                                                2717 }
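
From the caller's side, the barrier above is what makes flush_work() wait for one specific item. A hedged teardown sketch (struct frob_dev with an embedded work_struct is the illustrative type from the earlier sketch):

        static void frob_remove(struct frob_dev *fd)
        {
                /*
                 * Waits until any pending or in-flight execution of fd->work
                 * has finished; internally a wq_barrier is inserted right
                 * behind it.  Use cancel_work_sync() instead if a still
                 * pending item should not run at all.
                 */
                flush_work(&fd->work);
        }
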
3801                                                  2718 
3802 /**                                              2719 /**
3803  * flush_workqueue_prep_pwqs - prepare pwqs f    2720  * flush_workqueue_prep_pwqs - prepare pwqs for workqueue flushing
3804  * @wq: workqueue being flushed                  2721  * @wq: workqueue being flushed
3805  * @flush_color: new flush color, < 0 for no-    2722  * @flush_color: new flush color, < 0 for no-op
3806  * @work_color: new work color, < 0 for no-op    2723  * @work_color: new work color, < 0 for no-op
3807  *                                               2724  *
3808  * Prepare pwqs for workqueue flushing.          2725  * Prepare pwqs for workqueue flushing.
3809  *                                               2726  *
3810  * If @flush_color is non-negative, flush_col    2727  * If @flush_color is non-negative, flush_color on all pwqs should be
3811  * -1.  If no pwq has in-flight commands at t    2728  * -1.  If no pwq has in-flight commands at the specified color, all
3812  * pwq->flush_color's stay at -1 and %false i    2729  * pwq->flush_color's stay at -1 and %false is returned.  If any pwq
3813  * has in flight commands, its pwq->flush_col    2730  * has in flight commands, its pwq->flush_color is set to
3814  * @flush_color, @wq->nr_pwqs_to_flush is upd    2731  * @flush_color, @wq->nr_pwqs_to_flush is updated accordingly, pwq
3815  * wakeup logic is armed and %true is returne    2732  * wakeup logic is armed and %true is returned.
3816  *                                               2733  *
3817  * The caller should have initialized @wq->fi    2734  * The caller should have initialized @wq->first_flusher prior to
3818  * calling this function with non-negative @f    2735  * calling this function with non-negative @flush_color.  If
3819  * @flush_color is negative, no flush color u    2736  * @flush_color is negative, no flush color update is done and %false
3820  * is returned.                                  2737  * is returned.
3821  *                                               2738  *
3822  * If @work_color is non-negative, all pwqs s    2739  * If @work_color is non-negative, all pwqs should have the same
3823  * work_color which is previous to @work_colo    2740  * work_color which is previous to @work_color and all will be
3824  * advanced to @work_color.                      2741  * advanced to @work_color.
3825  *                                               2742  *
3826  * CONTEXT:                                      2743  * CONTEXT:
3827  * mutex_lock(wq->mutex).                        2744  * mutex_lock(wq->mutex).
3828  *                                               2745  *
3829  * Return:                                       2746  * Return:
3830  * %true if @flush_color >= 0 and there's som    2747  * %true if @flush_color >= 0 and there's something to flush.  %false
3831  * otherwise.                                    2748  * otherwise.
3832  */                                              2749  */
3833 static bool flush_workqueue_prep_pwqs(struct     2750 static bool flush_workqueue_prep_pwqs(struct workqueue_struct *wq,
3834                                       int flu    2751                                       int flush_color, int work_color)
3835 {                                                2752 {
3836         bool wait = false;                       2753         bool wait = false;
3837         struct pool_workqueue *pwq;              2754         struct pool_workqueue *pwq;
3838                                                  2755 
3839         if (flush_color >= 0) {                  2756         if (flush_color >= 0) {
3840                 WARN_ON_ONCE(atomic_read(&wq-    2757                 WARN_ON_ONCE(atomic_read(&wq->nr_pwqs_to_flush));
3841                 atomic_set(&wq->nr_pwqs_to_fl    2758                 atomic_set(&wq->nr_pwqs_to_flush, 1);
3842         }                                        2759         }
3843                                                  2760 
3844         for_each_pwq(pwq, wq) {                  2761         for_each_pwq(pwq, wq) {
3845                 struct worker_pool *pool = pw    2762                 struct worker_pool *pool = pwq->pool;
3846                                                  2763 
3847                 raw_spin_lock_irq(&pool->lock    2764                 raw_spin_lock_irq(&pool->lock);
3848                                                  2765 
3849                 if (flush_color >= 0) {          2766                 if (flush_color >= 0) {
3850                         WARN_ON_ONCE(pwq->flu    2767                         WARN_ON_ONCE(pwq->flush_color != -1);
3851                                                  2768 
3852                         if (pwq->nr_in_flight    2769                         if (pwq->nr_in_flight[flush_color]) {
3853                                 pwq->flush_co    2770                                 pwq->flush_color = flush_color;
3854                                 atomic_inc(&w    2771                                 atomic_inc(&wq->nr_pwqs_to_flush);
3855                                 wait = true;     2772                                 wait = true;
3856                         }                        2773                         }
3857                 }                                2774                 }
3858                                                  2775 
3859                 if (work_color >= 0) {           2776                 if (work_color >= 0) {
3860                         WARN_ON_ONCE(work_col    2777                         WARN_ON_ONCE(work_color != work_next_color(pwq->work_color));
3861                         pwq->work_color = wor    2778                         pwq->work_color = work_color;
3862                 }                                2779                 }
3863                                                  2780 
3864                 raw_spin_unlock_irq(&pool->lo    2781                 raw_spin_unlock_irq(&pool->lock);
3865         }                                        2782         }
3866                                                  2783 
3867         if (flush_color >= 0 && atomic_dec_an    2784         if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_pwqs_to_flush))
3868                 complete(&wq->first_flusher->    2785                 complete(&wq->first_flusher->done);
3869                                                  2786 
3870         return wait;                             2787         return wait;
3871 }                                                2788 }
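
The flush-color bookkeeping above is driven by __flush_workqueue() below; most callers only see the flush_workqueue() wrapper. A minimal usage sketch (frob_wq is an illustrative workqueue allocated elsewhere):

        static struct workqueue_struct *frob_wq;

        static void frob_quiesce(void)
        {
                /*
                 * Returns once every work item queued on frob_wq before this
                 * call has finished; items queued afterwards are not waited
                 * for, which is what keeps the flush from livelocking.
                 */
                flush_workqueue(frob_wq);
        }
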
3872                                                  2789 
3873 static void touch_wq_lockdep_map(struct workq << 
3874 {                                             << 
3875 #ifdef CONFIG_LOCKDEP                         << 
3876         if (wq->flags & WQ_BH)                << 
3877                 local_bh_disable();           << 
3878                                               << 
3879         lock_map_acquire(&wq->lockdep_map);   << 
3880         lock_map_release(&wq->lockdep_map);   << 
3881                                               << 
3882         if (wq->flags & WQ_BH)                << 
3883                 local_bh_enable();            << 
3884 #endif                                        << 
3885 }                                             << 
3886                                               << 
3887 static void touch_work_lockdep_map(struct wor << 
3888                                    struct wor << 
3889 {                                             << 
3890 #ifdef CONFIG_LOCKDEP                         << 
3891         if (wq->flags & WQ_BH)                << 
3892                 local_bh_disable();           << 
3893                                               << 
3894         lock_map_acquire(&work->lockdep_map); << 
3895         lock_map_release(&work->lockdep_map); << 
3896                                               << 
3897         if (wq->flags & WQ_BH)                << 
3898                 local_bh_enable();            << 
3899 #endif                                        << 
3900 }                                             << 
3901                                               << 
3902 /**                                              2790 /**
3903  * __flush_workqueue - ensure that any schedu    2791  * __flush_workqueue - ensure that any scheduled work has run to completion.
3904  * @wq: workqueue to flush                       2792  * @wq: workqueue to flush
3905  *                                               2793  *
3906  * This function sleeps until all work items     2794  * This function sleeps until all work items which were queued on entry
3907  * have finished execution, but it is not liv    2795  * have finished execution, but it is not livelocked by new incoming ones.
3908  */                                              2796  */
3909 void __flush_workqueue(struct workqueue_struc    2797 void __flush_workqueue(struct workqueue_struct *wq)
3910 {                                                2798 {
3911         struct wq_flusher this_flusher = {       2799         struct wq_flusher this_flusher = {
3912                 .list = LIST_HEAD_INIT(this_f    2800                 .list = LIST_HEAD_INIT(this_flusher.list),
3913                 .flush_color = -1,               2801                 .flush_color = -1,
3914                 .done = COMPLETION_INITIALIZE    2802                 .done = COMPLETION_INITIALIZER_ONSTACK_MAP(this_flusher.done, wq->lockdep_map),
3915         };                                       2803         };
3916         int next_color;                          2804         int next_color;
3917                                                  2805 
3918         if (WARN_ON(!wq_online))                 2806         if (WARN_ON(!wq_online))
3919                 return;                          2807                 return;
3920                                                  2808 
3921         touch_wq_lockdep_map(wq);             !! 2809         lock_map_acquire(&wq->lockdep_map);
                                                   >> 2810         lock_map_release(&wq->lockdep_map);
3922                                                  2811 
3923         mutex_lock(&wq->mutex);                  2812         mutex_lock(&wq->mutex);
3924                                                  2813 
3925         /*                                       2814         /*
3926          * Start-to-wait phase                   2815          * Start-to-wait phase
3927          */                                      2816          */
3928         next_color = work_next_color(wq->work    2817         next_color = work_next_color(wq->work_color);
3929                                                  2818 
3930         if (next_color != wq->flush_color) {     2819         if (next_color != wq->flush_color) {
3931                 /*                               2820                 /*
3932                  * Color space is not full.      2821                  * Color space is not full.  The current work_color
3933                  * becomes our flush_color an    2822                  * becomes our flush_color and work_color is advanced
3934                  * by one.                       2823                  * by one.
3935                  */                              2824                  */
3936                 WARN_ON_ONCE(!list_empty(&wq-    2825                 WARN_ON_ONCE(!list_empty(&wq->flusher_overflow));
3937                 this_flusher.flush_color = wq    2826                 this_flusher.flush_color = wq->work_color;
3938                 wq->work_color = next_color;     2827                 wq->work_color = next_color;
3939                                                  2828 
3940                 if (!wq->first_flusher) {        2829                 if (!wq->first_flusher) {
3941                         /* no flush in progre    2830                         /* no flush in progress, become the first flusher */
3942                         WARN_ON_ONCE(wq->flus    2831                         WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color);
3943                                                  2832 
3944                         wq->first_flusher = &    2833                         wq->first_flusher = &this_flusher;
3945                                                  2834 
3946                         if (!flush_workqueue_    2835                         if (!flush_workqueue_prep_pwqs(wq, wq->flush_color,
3947                                                  2836                                                        wq->work_color)) {
3948                                 /* nothing to    2837                                 /* nothing to flush, done */
3949                                 wq->flush_col    2838                                 wq->flush_color = next_color;
3950                                 wq->first_flu    2839                                 wq->first_flusher = NULL;
3951                                 goto out_unlo    2840                                 goto out_unlock;
3952                         }                        2841                         }
3953                 } else {                         2842                 } else {
3954                         /* wait in queue */      2843                         /* wait in queue */
3955                         WARN_ON_ONCE(wq->flus    2844                         WARN_ON_ONCE(wq->flush_color == this_flusher.flush_color);
3956                         list_add_tail(&this_f    2845                         list_add_tail(&this_flusher.list, &wq->flusher_queue);
3957                         flush_workqueue_prep_    2846                         flush_workqueue_prep_pwqs(wq, -1, wq->work_color);
3958                 }                                2847                 }
3959         } else {                                 2848         } else {
3960                 /*                               2849                 /*
3961                  * Oops, color space is full,    2850                  * Oops, color space is full, wait on overflow queue.
3962                  * The next flush completion     2851                  * The next flush completion will assign us
3963                  * flush_color and transfer t    2852                  * flush_color and transfer to flusher_queue.
3964                  */                              2853                  */
3965                 list_add_tail(&this_flusher.l    2854                 list_add_tail(&this_flusher.list, &wq->flusher_overflow);
3966         }                                        2855         }
3967                                                  2856 
3968         check_flush_dependency(wq, NULL);        2857         check_flush_dependency(wq, NULL);
3969                                                  2858 
3970         mutex_unlock(&wq->mutex);                2859         mutex_unlock(&wq->mutex);
3971                                                  2860 
3972         wait_for_completion(&this_flusher.don    2861         wait_for_completion(&this_flusher.done);
3973                                                  2862 
3974         /*                                       2863         /*
3975          * Wake-up-and-cascade phase             2864          * Wake-up-and-cascade phase
3976          *                                       2865          *
3977          * First flushers are responsible for    2866          * First flushers are responsible for cascading flushes and
3978          * handling overflow.  Non-first flus    2867          * handling overflow.  Non-first flushers can simply return.
3979          */                                      2868          */
3980         if (READ_ONCE(wq->first_flusher) != &    2869         if (READ_ONCE(wq->first_flusher) != &this_flusher)
3981                 return;                          2870                 return;
3982                                                  2871 
3983         mutex_lock(&wq->mutex);                  2872         mutex_lock(&wq->mutex);
3984                                                  2873 
3985         /* we might have raced, check again w    2874         /* we might have raced, check again with mutex held */
3986         if (wq->first_flusher != &this_flushe    2875         if (wq->first_flusher != &this_flusher)
3987                 goto out_unlock;                 2876                 goto out_unlock;
3988                                                  2877 
3989         WRITE_ONCE(wq->first_flusher, NULL);     2878         WRITE_ONCE(wq->first_flusher, NULL);
3990                                                  2879 
3991         WARN_ON_ONCE(!list_empty(&this_flushe    2880         WARN_ON_ONCE(!list_empty(&this_flusher.list));
3992         WARN_ON_ONCE(wq->flush_color != this_    2881         WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color);
3993                                                  2882 
3994         while (true) {                           2883         while (true) {
3995                 struct wq_flusher *next, *tmp    2884                 struct wq_flusher *next, *tmp;
3996                                                  2885 
3997                 /* complete all the flushers     2886                 /* complete all the flushers sharing the current flush color */
3998                 list_for_each_entry_safe(next    2887                 list_for_each_entry_safe(next, tmp, &wq->flusher_queue, list) {
3999                         if (next->flush_color    2888                         if (next->flush_color != wq->flush_color)
4000                                 break;           2889                                 break;
4001                         list_del_init(&next->    2890                         list_del_init(&next->list);
4002                         complete(&next->done)    2891                         complete(&next->done);
4003                 }                                2892                 }
4004                                                  2893 
4005                 WARN_ON_ONCE(!list_empty(&wq-    2894                 WARN_ON_ONCE(!list_empty(&wq->flusher_overflow) &&
4006                              wq->flush_color     2895                              wq->flush_color != work_next_color(wq->work_color));
4007                                                  2896 
4008                 /* this flush_color is finish    2897                 /* this flush_color is finished, advance by one */
4009                 wq->flush_color = work_next_c    2898                 wq->flush_color = work_next_color(wq->flush_color);
4010                                                  2899 
4011                 /* one color has been freed,     2900                 /* one color has been freed, handle overflow queue */
4012                 if (!list_empty(&wq->flusher_    2901                 if (!list_empty(&wq->flusher_overflow)) {
4013                         /*                       2902                         /*
4014                          * Assign the same co    2903                          * Assign the same color to all overflowed
4015                          * flushers, advance     2904                          * flushers, advance work_color and append to
4016                          * flusher_queue.  Th    2905                          * flusher_queue.  This is the start-to-wait
4017                          * phase for these ov    2906                          * phase for these overflowed flushers.
4018                          */                      2907                          */
4019                         list_for_each_entry(t    2908                         list_for_each_entry(tmp, &wq->flusher_overflow, list)
4020                                 tmp->flush_co    2909                                 tmp->flush_color = wq->work_color;
4021                                                  2910 
4022                         wq->work_color = work    2911                         wq->work_color = work_next_color(wq->work_color);
4023                                                  2912 
4024                         list_splice_tail_init    2913                         list_splice_tail_init(&wq->flusher_overflow,
4025                                                  2914                                               &wq->flusher_queue);
4026                         flush_workqueue_prep_    2915                         flush_workqueue_prep_pwqs(wq, -1, wq->work_color);
4027                 }                                2916                 }
4028                                                  2917 
4029                 if (list_empty(&wq->flusher_q    2918                 if (list_empty(&wq->flusher_queue)) {
4030                         WARN_ON_ONCE(wq->flus    2919                         WARN_ON_ONCE(wq->flush_color != wq->work_color);
4031                         break;                   2920                         break;
4032                 }                                2921                 }
4033                                                  2922 
4034                 /*                               2923                 /*
4035                  * Need to flush more colors.    2924                  * Need to flush more colors.  Make the next flusher
4036                  * the new first flusher and     2925                  * the new first flusher and arm pwqs.
4037                  */                              2926                  */
4038                 WARN_ON_ONCE(wq->flush_color     2927                 WARN_ON_ONCE(wq->flush_color == wq->work_color);
4039                 WARN_ON_ONCE(wq->flush_color     2928                 WARN_ON_ONCE(wq->flush_color != next->flush_color);
4040                                                  2929 
4041                 list_del_init(&next->list);      2930                 list_del_init(&next->list);
4042                 wq->first_flusher = next;        2931                 wq->first_flusher = next;
4043                                                  2932 
4044                 if (flush_workqueue_prep_pwqs    2933                 if (flush_workqueue_prep_pwqs(wq, wq->flush_color, -1))
4045                         break;                   2934                         break;
4046                                                  2935 
4047                 /*                               2936                 /*
4048                  * Meh... this color is alrea    2937                  * Meh... this color is already done, clear first
4049                  * flusher and repeat cascadi    2938                  * flusher and repeat cascading.
4050                  */                              2939                  */
4051                 wq->first_flusher = NULL;        2940                 wq->first_flusher = NULL;
4052         }                                        2941         }
4053                                                  2942 
4054 out_unlock:                                      2943 out_unlock:
4055         mutex_unlock(&wq->mutex);                2944         mutex_unlock(&wq->mutex);
4056 }                                                2945 }
4057 EXPORT_SYMBOL(__flush_workqueue);                2946 EXPORT_SYMBOL(__flush_workqueue);
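
/*
 * Editor's note: an illustrative, hypothetical caller sketch, not part of
 * kernel/workqueue.c.  Drivers normally reach __flush_workqueue() through the
 * flush_workqueue() wrapper in <linux/workqueue.h>.  All names below
 * (example_wq, example_work, ...) are made up, and example_wq is assumed to
 * have been created with alloc_workqueue() elsewhere.
 */
#include <linux/workqueue.h>

static struct workqueue_struct *example_wq;     /* assumed: alloc_workqueue()'d */

static void example_work_fn(struct work_struct *work)
{
        /* deferred processing would go here */
}
static DECLARE_WORK(example_work, example_work_fn);

static void example_sync_point(void)
{
        queue_work(example_wq, &example_work);

        /*
         * Sleep until everything queued on example_wq before this call has
         * finished.  Work queued afterwards is not waited for, which is what
         * keeps the flush from being livelocked by new incoming items.
         */
        flush_workqueue(example_wq);
}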
4058                                                  2947 
4059 /**                                              2948 /**
4060  * drain_workqueue - drain a workqueue           2949  * drain_workqueue - drain a workqueue
4061  * @wq: workqueue to drain                       2950  * @wq: workqueue to drain
4062  *                                               2951  *
4063  * Wait until the workqueue becomes empty.  W    2952  * Wait until the workqueue becomes empty.  While draining is in progress,
4064  * only chain queueing is allowed.  IOW, only    2953  * only chain queueing is allowed.  IOW, only currently pending or running
4065  * work items on @wq can queue further work i    2954  * work items on @wq can queue further work items on it.  @wq is flushed
4066  * repeatedly until it becomes empty.  The nu    2955  * repeatedly until it becomes empty.  The number of flushes is determined
4067  * by the depth of chaining and should be rel    2956  * by the depth of chaining and should be relatively small.  Whine if it
4068  * takes too long.                               2957  * takes too long.
4069  */                                              2958  */
4070 void drain_workqueue(struct workqueue_struct     2959 void drain_workqueue(struct workqueue_struct *wq)
4071 {                                                2960 {
4072         unsigned int flush_cnt = 0;              2961         unsigned int flush_cnt = 0;
4073         struct pool_workqueue *pwq;              2962         struct pool_workqueue *pwq;
4074                                                  2963 
4075         /*                                       2964         /*
4076          * __queue_work() needs to test wheth    2965          * __queue_work() needs to test whether there are drainers; it is much
4077          * hotter than drain_workqueue() and     2966          * hotter than drain_workqueue() and already looks at @wq->flags.
4078          * Use __WQ_DRAINING so that queue do    2967          * Use __WQ_DRAINING so that queue doesn't have to check nr_drainers.
4079          */                                      2968          */
4080         mutex_lock(&wq->mutex);                  2969         mutex_lock(&wq->mutex);
4081         if (!wq->nr_drainers++)                  2970         if (!wq->nr_drainers++)
4082                 wq->flags |= __WQ_DRAINING;      2971                 wq->flags |= __WQ_DRAINING;
4083         mutex_unlock(&wq->mutex);                2972         mutex_unlock(&wq->mutex);
4084 reflush:                                         2973 reflush:
4085         __flush_workqueue(wq);                   2974         __flush_workqueue(wq);
4086                                                  2975 
4087         mutex_lock(&wq->mutex);                  2976         mutex_lock(&wq->mutex);
4088                                                  2977 
4089         for_each_pwq(pwq, wq) {                  2978         for_each_pwq(pwq, wq) {
4090                 bool drained;                    2979                 bool drained;
4091                                                  2980 
4092                 raw_spin_lock_irq(&pwq->pool-    2981                 raw_spin_lock_irq(&pwq->pool->lock);
4093                 drained = pwq_is_empty(pwq);  !! 2982                 drained = !pwq->nr_active && list_empty(&pwq->inactive_works);
4094                 raw_spin_unlock_irq(&pwq->poo    2983                 raw_spin_unlock_irq(&pwq->pool->lock);
4095                                                  2984 
4096                 if (drained)                     2985                 if (drained)
4097                         continue;                2986                         continue;
4098                                                  2987 
4099                 if (++flush_cnt == 10 ||         2988                 if (++flush_cnt == 10 ||
4100                     (flush_cnt % 100 == 0 &&     2989                     (flush_cnt % 100 == 0 && flush_cnt <= 1000))
4101                         pr_warn("workqueue %s    2990                         pr_warn("workqueue %s: %s() isn't complete after %u tries\n",
4102                                 wq->name, __f    2991                                 wq->name, __func__, flush_cnt);
4103                                                  2992 
4104                 mutex_unlock(&wq->mutex);        2993                 mutex_unlock(&wq->mutex);
4105                 goto reflush;                    2994                 goto reflush;
4106         }                                        2995         }
4107                                                  2996 
4108         if (!--wq->nr_drainers)                  2997         if (!--wq->nr_drainers)
4109                 wq->flags &= ~__WQ_DRAINING;     2998                 wq->flags &= ~__WQ_DRAINING;
4110         mutex_unlock(&wq->mutex);                2999         mutex_unlock(&wq->mutex);
4111 }                                                3000 }
4112 EXPORT_SYMBOL_GPL(drain_workqueue);              3001 EXPORT_SYMBOL_GPL(drain_workqueue);
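
/*
 * Editor's note: an illustrative, hypothetical sketch, not part of
 * kernel/workqueue.c.  drain_workqueue() is the tool when queued items may
 * chain-queue follow-up items on the same workqueue; destroy_workqueue()
 * already drains internally, so an explicit drain is only needed when the
 * queue must become empty but stay usable.  chain_wq is assumed to have been
 * created with alloc_workqueue() elsewhere.
 */
#include <linux/workqueue.h>

static struct workqueue_struct *chain_wq;       /* assumed: alloc_workqueue()'d */
static struct work_struct chain_step1, chain_step2;

static void chain_step2_fn(struct work_struct *work) { }

static void chain_step1_fn(struct work_struct *work)
{
        /* Chain queueing: still permitted while chain_wq is draining. */
        queue_work(chain_wq, &chain_step2);
}

static void example_quiesce(void)
{
        INIT_WORK(&chain_step1, chain_step1_fn);
        INIT_WORK(&chain_step2, chain_step2_fn);
        queue_work(chain_wq, &chain_step1);

        /*
         * Flush repeatedly until chain_wq is empty.  Non-chained queueing
         * attempts are rejected (and warned about) while __WQ_DRAINING is set.
         */
        drain_workqueue(chain_wq);
}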
4113                                                  3002 
4114 static bool start_flush_work(struct work_stru    3003 static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
4115                              bool from_cancel    3004                              bool from_cancel)
4116 {                                                3005 {
4117         struct worker *worker = NULL;            3006         struct worker *worker = NULL;
4118         struct worker_pool *pool;                3007         struct worker_pool *pool;
4119         struct pool_workqueue *pwq;              3008         struct pool_workqueue *pwq;
4120         struct workqueue_struct *wq;          !! 3009 
                                                   >> 3010         might_sleep();
4121                                                  3011 
4122         rcu_read_lock();                         3012         rcu_read_lock();
4123         pool = get_work_pool(work);              3013         pool = get_work_pool(work);
4124         if (!pool) {                             3014         if (!pool) {
4125                 rcu_read_unlock();               3015                 rcu_read_unlock();
4126                 return false;                    3016                 return false;
4127         }                                        3017         }
4128                                                  3018 
4129         raw_spin_lock_irq(&pool->lock);          3019         raw_spin_lock_irq(&pool->lock);
4130         /* see the comment in try_to_grab_pen    3020         /* see the comment in try_to_grab_pending() with the same code */
4131         pwq = get_work_pwq(work);                3021         pwq = get_work_pwq(work);
4132         if (pwq) {                               3022         if (pwq) {
4133                 if (unlikely(pwq->pool != poo    3023                 if (unlikely(pwq->pool != pool))
4134                         goto already_gone;       3024                         goto already_gone;
4135         } else {                                 3025         } else {
4136                 worker = find_worker_executin    3026                 worker = find_worker_executing_work(pool, work);
4137                 if (!worker)                     3027                 if (!worker)
4138                         goto already_gone;       3028                         goto already_gone;
4139                 pwq = worker->current_pwq;       3029                 pwq = worker->current_pwq;
4140         }                                        3030         }
4141                                                  3031 
4142         wq = pwq->wq;                         !! 3032         check_flush_dependency(pwq->wq, work);
4143         check_flush_dependency(wq, work);     << 
4144                                                  3033 
4145         insert_wq_barrier(pwq, barr, work, wo    3034         insert_wq_barrier(pwq, barr, work, worker);
4146         raw_spin_unlock_irq(&pool->lock);        3035         raw_spin_unlock_irq(&pool->lock);
4147                                                  3036 
4148         touch_work_lockdep_map(work, wq);     << 
4149                                               << 
4150         /*                                       3037         /*
4151          * Force a lock recursion deadlock wh    3038          * Force a lock recursion deadlock when using flush_work() inside a
4152          * single-threaded or rescuer equippe    3039          * single-threaded or rescuer equipped workqueue.
4153          *                                       3040          *
4154          * For single threaded workqueues the    3041          * For single threaded workqueues the deadlock happens when the work
4155          * is after the work issuing the flus    3042          * is after the work issuing the flush_work(). For rescuer equipped
4156          * workqueues the deadlock happens wh    3043          * workqueues the deadlock happens when the rescuer stalls, blocking
4157          * forward progress.                     3044          * forward progress.
4158          */                                      3045          */
4159         if (!from_cancel && (wq->saved_max_ac !! 3046         if (!from_cancel &&
4160                 touch_wq_lockdep_map(wq);     !! 3047             (pwq->wq->saved_max_active == 1 || pwq->wq->rescuer)) {
4161                                               !! 3048                 lock_map_acquire(&pwq->wq->lockdep_map);
                                                   >> 3049                 lock_map_release(&pwq->wq->lockdep_map);
                                                   >> 3050         }
4162         rcu_read_unlock();                       3051         rcu_read_unlock();
4163         return true;                             3052         return true;
4164 already_gone:                                    3053 already_gone:
4165         raw_spin_unlock_irq(&pool->lock);        3054         raw_spin_unlock_irq(&pool->lock);
4166         rcu_read_unlock();                       3055         rcu_read_unlock();
4167         return false;                            3056         return false;
4168 }                                                3057 }
4169                                                  3058 
4170 static bool __flush_work(struct work_struct *    3059 static bool __flush_work(struct work_struct *work, bool from_cancel)
4171 {                                                3060 {
4172         struct wq_barrier barr;                  3061         struct wq_barrier barr;
4173                                                  3062 
4174         if (WARN_ON(!wq_online))                 3063         if (WARN_ON(!wq_online))
4175                 return false;                    3064                 return false;
4176                                                  3065 
4177         if (WARN_ON(!work->func))                3066         if (WARN_ON(!work->func))
4178                 return false;                    3067                 return false;
4179                                                  3068 
4180         if (!start_flush_work(work, &barr, fr !! 3069         lock_map_acquire(&work->lockdep_map);
4181                 return false;                 !! 3070         lock_map_release(&work->lockdep_map);
4182                                               << 
4183         /*                                    << 
4184          * start_flush_work() returned %true. << 
4185          * that @work must have been executin << 
4186          * can't currently be queued. Its dat << 
4187          * was queued on a BH workqueue, we a << 
4188          * BH context and thus can be busy-wa << 
4189          */                                   << 
4190         if (from_cancel) {                    << 
4191                 unsigned long data = *work_da << 
4192                                                  3071 
4193                 if (!WARN_ON_ONCE(data & WORK !! 3072         if (start_flush_work(work, &barr, from_cancel)) {
4194                     (data & WORK_OFFQ_BH)) {  !! 3073                 wait_for_completion(&barr.done);
4195                         /*                    !! 3074                 destroy_work_on_stack(&barr.work);
4196                          * On RT, prevent a l !! 3075                 return true;
4197                          * soft interrupt pro !! 3076         } else {
4198                          * running by keeping !! 3077                 return false;
4199                          * runs on a differen << 
4200                          * than doing the BH  << 
4201                          * This is copied fro << 
4202                          * kernel/softirq.c:: << 
4203                          */                   << 
4204                         while (!try_wait_for_ << 
4205                                 if (IS_ENABLE << 
4206                                         local << 
4207                                         local << 
4208                                 } else {      << 
4209                                         cpu_r << 
4210                                 }             << 
4211                         }                     << 
4212                         goto out_destroy;     << 
4213                 }                             << 
4214         }                                        3078         }
4215                                               << 
4216         wait_for_completion(&barr.done);      << 
4217                                               << 
4218 out_destroy:                                  << 
4219         destroy_work_on_stack(&barr.work);    << 
4220         return true;                          << 
4221 }                                                3079 }
4222                                                  3080 
4223 /**                                              3081 /**
4224  * flush_work - wait for a work to finish exe    3082  * flush_work - wait for a work to finish executing the last queueing instance
4225  * @work: the work to flush                      3083  * @work: the work to flush
4226  *                                               3084  *
4227  * Wait until @work has finished execution.      3085  * Wait until @work has finished execution.  @work is guaranteed to be idle
4228  * on return if it hasn't been requeued since    3086  * on return if it hasn't been requeued since flush started.
4229  *                                               3087  *
4230  * Return:                                       3088  * Return:
4231  * %true if flush_work() waited for the work     3089  * %true if flush_work() waited for the work to finish execution,
4232  * %false if it was already idle.                3090  * %false if it was already idle.
4233  */                                              3091  */
4234 bool flush_work(struct work_struct *work)        3092 bool flush_work(struct work_struct *work)
4235 {                                                3093 {
4236         might_sleep();                        << 
4237         return __flush_work(work, false);        3094         return __flush_work(work, false);
4238 }                                                3095 }
4239 EXPORT_SYMBOL_GPL(flush_work);                   3096 EXPORT_SYMBOL_GPL(flush_work);
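
/*
 * Editor's note: an illustrative, hypothetical sketch, not part of
 * kernel/workqueue.c.  flush_work() waits only for the last queueing instance
 * of one specific work item, which is cheaper than flushing the whole
 * workqueue.  All names below are made up.
 */
#include <linux/workqueue.h>
#include <linux/printk.h>

static void update_stats_fn(struct work_struct *work) { }
static DECLARE_WORK(update_stats_work, update_stats_fn);

static void example_read_stats(void)
{
        schedule_work(&update_stats_work);

        /* Sleeps until the pending/running instance has finished. */
        if (!flush_work(&update_stats_work))
                pr_debug("update_stats_work was already idle\n");
}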
4240                                                  3097 
                                                   >> 3098 struct cwt_wait {
                                                   >> 3099         wait_queue_entry_t              wait;
                                                   >> 3100         struct work_struct      *work;
                                                   >> 3101 };
                                                   >> 3102 
                                                   >> 3103 static int cwt_wakefn(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
                                                   >> 3104 {
                                                   >> 3105         struct cwt_wait *cwait = container_of(wait, struct cwt_wait, wait);
                                                   >> 3106 
                                                   >> 3107         if (cwait->work != key)
                                                   >> 3108                 return 0;
                                                   >> 3109         return autoremove_wake_function(wait, mode, sync, key);
                                                   >> 3110 }
                                                   >> 3111 
                                                   >> 3112 static bool __cancel_work_timer(struct work_struct *work, bool is_dwork)
                                                   >> 3113 {
                                                   >> 3114         static DECLARE_WAIT_QUEUE_HEAD(cancel_waitq);
                                                   >> 3115         unsigned long flags;
                                                   >> 3116         int ret;
                                                   >> 3117 
                                                   >> 3118         do {
                                                   >> 3119                 ret = try_to_grab_pending(work, is_dwork, &flags);
                                                   >> 3120                 /*
                                                   >> 3121                  * If someone else is already canceling, wait for it to
                                                   >> 3122                  * finish.  flush_work() doesn't work for PREEMPT_NONE
                                                   >> 3123                  * because we may get scheduled between @work's completion
                                                   >> 3124                  * and the other canceling task resuming and clearing
                                                   >> 3125                  * CANCELING - flush_work() will return false immediately
                                                   >> 3126                  * as @work is no longer busy, try_to_grab_pending() will
                                                   >> 3127                  * return -ENOENT as @work is still being canceled and the
                                                   >> 3128                  * other canceling task won't be able to clear CANCELING as
                                                   >> 3129                  * we're hogging the CPU.
                                                   >> 3130                  *
                                                   >> 3131                  * Let's wait for completion using a waitqueue.  As this
                                                   >> 3132                  * may lead to the thundering herd problem, use a custom
                                                   >> 3133                  * wake function which matches @work along with exclusive
                                                   >> 3134                  * wait and wakeup.
                                                   >> 3135                  */
                                                   >> 3136                 if (unlikely(ret == -ENOENT)) {
                                                   >> 3137                         struct cwt_wait cwait;
                                                   >> 3138 
                                                   >> 3139                         init_wait(&cwait.wait);
                                                   >> 3140                         cwait.wait.func = cwt_wakefn;
                                                   >> 3141                         cwait.work = work;
                                                   >> 3142 
                                                   >> 3143                         prepare_to_wait_exclusive(&cancel_waitq, &cwait.wait,
                                                   >> 3144                                                   TASK_UNINTERRUPTIBLE);
                                                   >> 3145                         if (work_is_canceling(work))
                                                   >> 3146                                 schedule();
                                                   >> 3147                         finish_wait(&cancel_waitq, &cwait.wait);
                                                   >> 3148                 }
                                                   >> 3149         } while (unlikely(ret < 0));
                                                   >> 3150 
                                                   >> 3151         /* tell other tasks trying to grab @work to back off */
                                                   >> 3152         mark_work_canceling(work);
                                                   >> 3153         local_irq_restore(flags);
                                                   >> 3154 
                                                   >> 3155         /*
                                                   >> 3156          * This allows canceling during early boot.  We know that @work
                                                   >> 3157          * isn't executing.
                                                   >> 3158          */
                                                   >> 3159         if (wq_online)
                                                   >> 3160                 __flush_work(work, true);
                                                   >> 3161 
                                                   >> 3162         clear_work_data(work);
                                                   >> 3163 
                                                   >> 3164         /*
                                                   >> 3165          * Paired with prepare_to_wait() above so that either
                                                   >> 3166          * waitqueue_active() is visible here or !work_is_canceling() is
                                                   >> 3167          * visible there.
                                                   >> 3168          */
                                                   >> 3169         smp_mb();
                                                   >> 3170         if (waitqueue_active(&cancel_waitq))
                                                   >> 3171                 __wake_up(&cancel_waitq, TASK_NORMAL, 1, work);
                                                   >> 3172 
                                                   >> 3173         return ret;
                                                   >> 3174 }
                                                   >> 3175 
                                                   >> 3176 /**
                                                   >> 3177  * cancel_work_sync - cancel a work and wait for it to finish
                                                   >> 3178  * @work: the work to cancel
                                                   >> 3179  *
                                                   >> 3180  * Cancel @work and wait for its execution to finish.  This function
                                                   >> 3181  * can be used even if the work re-queues itself or migrates to
                                                   >> 3182  * another workqueue.  On return from this function, @work is
                                                   >> 3183  * guaranteed to be not pending or executing on any CPU.
                                                   >> 3184  *
                                                   >> 3185  * cancel_work_sync(&delayed_work->work) must not be used for
                                                   >> 3186  * delayed_work's.  Use cancel_delayed_work_sync() instead.
                                                   >> 3187  *
                                                   >> 3188  * The caller must ensure that the workqueue on which @work was last
                                                   >> 3189  * queued can't be destroyed before this function returns.
                                                   >> 3190  *
                                                   >> 3191  * Return:
                                                   >> 3192  * %true if @work was pending, %false otherwise.
                                                   >> 3193  */
                                                   >> 3194 bool cancel_work_sync(struct work_struct *work)
                                                   >> 3195 {
                                                   >> 3196         return __cancel_work_timer(work, false);
                                                   >> 3197 }
                                                   >> 3198 EXPORT_SYMBOL_GPL(cancel_work_sync);
                                                   >> 3199 
4241 /**                                              3200 /**
4242  * flush_delayed_work - wait for a dwork to f    3201  * flush_delayed_work - wait for a dwork to finish executing the last queueing
4243  * @dwork: the delayed work to flush             3202  * @dwork: the delayed work to flush
4244  *                                               3203  *
4245  * Delayed timer is cancelled and the pending    3204  * Delayed timer is cancelled and the pending work is queued for
4246  * immediate execution.  Like flush_work(), t    3205  * immediate execution.  Like flush_work(), this function only
4247  * considers the last queueing instance of @d    3206  * considers the last queueing instance of @dwork.
4248  *                                               3207  *
4249  * Return:                                       3208  * Return:
4250  * %true if flush_work() waited for the work     3209  * %true if flush_work() waited for the work to finish execution,
4251  * %false if it was already idle.                3210  * %false if it was already idle.
4252  */                                              3211  */
4253 bool flush_delayed_work(struct delayed_work *    3212 bool flush_delayed_work(struct delayed_work *dwork)
4254 {                                                3213 {
4255         local_irq_disable();                     3214         local_irq_disable();
4256         if (del_timer_sync(&dwork->timer))       3215         if (del_timer_sync(&dwork->timer))
4257                 __queue_work(dwork->cpu, dwor    3216                 __queue_work(dwork->cpu, dwork->wq, &dwork->work);
4258         local_irq_enable();                      3217         local_irq_enable();
4259         return flush_work(&dwork->work);         3218         return flush_work(&dwork->work);
4260 }                                                3219 }
4261 EXPORT_SYMBOL(flush_delayed_work);               3220 EXPORT_SYMBOL(flush_delayed_work);
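
/*
 * Editor's note: an illustrative, hypothetical sketch, not part of
 * kernel/workqueue.c.  flush_delayed_work() first cancels the timer and, if it
 * was still pending, queues the work for immediate execution before flushing
 * it.  All names below are made up.
 */
#include <linux/workqueue.h>
#include <linux/jiffies.h>

static void poll_fn(struct work_struct *work) { }
static DECLARE_DELAYED_WORK(poll_dwork, poll_fn);

static void example_force_poll_now(void)
{
        /* Arm a poll five seconds from now ... */
        schedule_delayed_work(&poll_dwork, msecs_to_jiffies(5000));

        /* ... then decide we cannot wait: run it immediately and wait for it. */
        flush_delayed_work(&poll_dwork);
}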
4262                                                  3221 
4263 /**                                              3222 /**
4264  * flush_rcu_work - wait for a rwork to finis    3223  * flush_rcu_work - wait for a rwork to finish executing the last queueing
4265  * @rwork: the rcu work to flush                 3224  * @rwork: the rcu work to flush
4266  *                                               3225  *
4267  * Return:                                       3226  * Return:
4268  * %true if flush_rcu_work() waited for the w    3227  * %true if flush_rcu_work() waited for the work to finish execution,
4269  * %false if it was already idle.                3228  * %false if it was already idle.
4270  */                                              3229  */
4271 bool flush_rcu_work(struct rcu_work *rwork)      3230 bool flush_rcu_work(struct rcu_work *rwork)
4272 {                                                3231 {
4273         if (test_bit(WORK_STRUCT_PENDING_BIT,    3232         if (test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&rwork->work))) {
4274                 rcu_barrier();                   3233                 rcu_barrier();
4275                 flush_work(&rwork->work);        3234                 flush_work(&rwork->work);
4276                 return true;                     3235                 return true;
4277         } else {                                 3236         } else {
4278                 return flush_work(&rwork->wor    3237                 return flush_work(&rwork->work);
4279         }                                        3238         }
4280 }                                                3239 }
4281 EXPORT_SYMBOL(flush_rcu_work);                   3240 EXPORT_SYMBOL(flush_rcu_work);
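
/*
 * Editor's note: an illustrative, hypothetical sketch, not part of
 * kernel/workqueue.c.  An rcu_work runs its callback only after a full RCU
 * grace period, so flush_rcu_work() waits for the grace period (rcu_barrier())
 * before flushing the underlying work item.  All names below are made up.
 */
#include <linux/workqueue.h>

static void reclaim_fn(struct work_struct *work)
{
        /* runs after an RCU grace period; safe to free RCU-protected data */
}

static struct rcu_work reclaim_rwork;

static void example_deferred_reclaim(void)
{
        INIT_RCU_WORK(&reclaim_rwork, reclaim_fn);
        queue_rcu_work(system_wq, &reclaim_rwork);

        /* Wait for the grace period and for reclaim_fn() to complete. */
        flush_rcu_work(&reclaim_rwork);
}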
4282                                                  3241 
4283 static void work_offqd_disable(struct work_of !! 3242 static bool __cancel_work(struct work_struct *work, bool is_dwork)
4284 {                                             << 
4285         const unsigned long max = (1lu << WOR << 
4286                                               << 
4287         if (likely(offqd->disable < max))     << 
4288                 offqd->disable++;             << 
4289         else                                  << 
4290                 WARN_ONCE(true, "workqueue: w << 
4291 }                                             << 
4292                                               << 
4293 static void work_offqd_enable(struct work_off << 
4294 {                                             << 
4295         if (likely(offqd->disable > 0))       << 
4296                 offqd->disable--;             << 
4297         else                                  << 
4298                 WARN_ONCE(true, "workqueue: w << 
4299 }                                             << 
4300                                               << 
4301 static bool __cancel_work(struct work_struct  << 
4302 {                                                3243 {
4303         struct work_offq_data offqd;          !! 3244         unsigned long flags;
4304         unsigned long irq_flags;              << 
4305         int ret;                                 3245         int ret;
4306                                                  3246 
4307         ret = work_grab_pending(work, cflags, !! 3247         do {
4308                                               !! 3248                 ret = try_to_grab_pending(work, is_dwork, &flags);
4309         work_offqd_unpack(&offqd, *work_data_ !! 3249         } while (unlikely(ret == -EAGAIN));
4310                                               << 
4311         if (cflags & WORK_CANCEL_DISABLE)     << 
4312                 work_offqd_disable(&offqd);   << 
4313                                               << 
4314         set_work_pool_and_clear_pending(work, << 
4315                                         work_ << 
4316         local_irq_restore(irq_flags);         << 
4317         return ret;                           << 
4318 }                                             << 
4319                                               << 
4320 static bool __cancel_work_sync(struct work_st << 
4321 {                                             << 
4322         bool ret;                             << 
4323                                               << 
4324         ret = __cancel_work(work, cflags | WO << 
4325                                               << 
4326         if (*work_data_bits(work) & WORK_OFFQ << 
4327                 WARN_ON_ONCE(in_hardirq());   << 
4328         else                                  << 
4329                 might_sleep();                << 
4330                                               << 
4331         /*                                    << 
4332          * Skip __flush_work() during early b << 
4333          * executing. This allows canceling d << 
4334          */                                   << 
4335         if (wq_online)                        << 
4336                 __flush_work(work, true);     << 
4337                                                  3250 
4338         if (!(cflags & WORK_CANCEL_DISABLE))  !! 3251         if (unlikely(ret < 0))
4339                 enable_work(work);            !! 3252                 return false;
4340                                                  3253 
                                                   >> 3254         set_work_pool_and_clear_pending(work, get_work_pool_id(work));
                                                   >> 3255         local_irq_restore(flags);
4341         return ret;                              3256         return ret;
4342 }                                                3257 }
4343                                                  3258 
4344 /*                                               3259 /*
4345  * See cancel_delayed_work()                     3260  * See cancel_delayed_work()
4346  */                                              3261  */
4347 bool cancel_work(struct work_struct *work)       3262 bool cancel_work(struct work_struct *work)
4348 {                                                3263 {
4349         return __cancel_work(work, 0);        !! 3264         return __cancel_work(work, false);
4350 }                                                3265 }
4351 EXPORT_SYMBOL(cancel_work);                      3266 EXPORT_SYMBOL(cancel_work);
4352                                                  3267 
4353 /**                                              3268 /**
4354  * cancel_work_sync - cancel a work and wait  << 
4355  * @work: the work to cancel                  << 
4356  *                                            << 
4357  * Cancel @work and wait for its execution to << 
4358  * even if the work re-queues itself or migra << 
4359  * from this function, @work is guaranteed to << 
4360  * CPU as long as there aren't racing enqueue << 
4361  *                                            << 
4362  * cancel_work_sync(&delayed_work->work) must << 
4363  * Use cancel_delayed_work_sync() instead.    << 
4364  *                                            << 
4365  * Must be called from a sleepable context if << 
4366  * workqueue. Can also be called from non-har << 
4367  * if @work was last queued on a BH workqueue << 
4368  *                                            << 
4369  * Returns %true if @work was pending, %false << 
4370  */                                           << 
4371 bool cancel_work_sync(struct work_struct *wor << 
4372 {                                             << 
4373         return __cancel_work_sync(work, 0);   << 
4374 }                                             << 
4375 EXPORT_SYMBOL_GPL(cancel_work_sync);          << 
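
/*
 * Editor's note: an illustrative, hypothetical sketch, not part of
 * kernel/workqueue.c.  The canonical use of cancel_work_sync() is a teardown
 * path that must guarantee the handler is neither pending nor running before
 * resources it touches are freed.  As the kernel-doc above notes, a sleepable
 * context is required unless the work was last queued on a BH workqueue.
 * All names below are made up.
 */
#include <linux/workqueue.h>
#include <linux/slab.h>

struct example_dev {
        struct work_struct      irq_work;
        void                    *buf;
};

static void example_remove(struct example_dev *dev)
{
        /*
         * After this returns, dev->irq_work is not pending and not executing
         * on any CPU, so dev->buf can be freed safely (assuming nothing else
         * re-queues the work concurrently).
         */
        cancel_work_sync(&dev->irq_work);
        kfree(dev->buf);
}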
4376                                               << 
4377 /**                                           << 
4378  * cancel_delayed_work - cancel a delayed wor    3269  * cancel_delayed_work - cancel a delayed work
4379  * @dwork: delayed_work to cancel                3270  * @dwork: delayed_work to cancel
4380  *                                               3271  *
4381  * Kill off a pending delayed_work.              3272  * Kill off a pending delayed_work.
4382  *                                               3273  *
4383  * Return: %true if @dwork was pending and ca    3274  * Return: %true if @dwork was pending and canceled; %false if it wasn't
4384  * pending.                                      3275  * pending.
4385  *                                               3276  *
4386  * Note:                                         3277  * Note:
4387  * The work callback function may still be ru    3278  * The work callback function may still be running on return, unless
4388  * it returns %true and the work doesn't re-a    3279  * it returns %true and the work doesn't re-arm itself.  Explicitly flush or
4389  * use cancel_delayed_work_sync() to wait on     3280  * use cancel_delayed_work_sync() to wait on it.
4390  *                                               3281  *
4391  * This function is safe to call from any con    3282  * This function is safe to call from any context including IRQ handler.
4392  */                                              3283  */
4393 bool cancel_delayed_work(struct delayed_work     3284 bool cancel_delayed_work(struct delayed_work *dwork)
4394 {                                                3285 {
4395         return __cancel_work(&dwork->work, WO !! 3286         return __cancel_work(&dwork->work, true);
4396 }                                                3287 }
4397 EXPORT_SYMBOL(cancel_delayed_work);              3288 EXPORT_SYMBOL(cancel_delayed_work);
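
/*
 * Editor's note: an illustrative, hypothetical sketch, not part of
 * kernel/workqueue.c.  cancel_delayed_work() only stops a not-yet-started
 * instance and may return while the callback is still running, which is why it
 * is usable from atomic contexts such as interrupt handlers.  All names below
 * are made up.
 */
#include <linux/workqueue.h>
#include <linux/printk.h>

static void timeout_fn(struct work_struct *work) { }
static DECLARE_DELAYED_WORK(timeout_dwork, timeout_fn);

/* May be called from an interrupt handler. */
static void example_request_completed(void)
{
        if (cancel_delayed_work(&timeout_dwork))
                pr_debug("timeout was still pending and has been cancelled\n");
        /* else: too late, timeout_fn() may already be running or finished */
}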
4398                                                  3289 
4399 /**                                              3290 /**
4400  * cancel_delayed_work_sync - cancel a delaye    3291  * cancel_delayed_work_sync - cancel a delayed work and wait for it to finish
4401  * @dwork: the delayed work to cancel            3292  * @dwork: the delayed work to cancel
4402  *                                               3293  *
4403  * This is cancel_work_sync() for delayed wor    3294  * This is cancel_work_sync() for delayed works.
4404  *                                               3295  *
4405  * Return:                                       3296  * Return:
4406  * %true if @dwork was pending, %false otherw    3297  * %true if @dwork was pending, %false otherwise.
4407  */                                              3298  */
4408 bool cancel_delayed_work_sync(struct delayed_    3299 bool cancel_delayed_work_sync(struct delayed_work *dwork)
4409 {                                                3300 {
4410         return __cancel_work_sync(&dwork->wor !! 3301         return __cancel_work_timer(&dwork->work, true);
4411 }                                                3302 }
4412 EXPORT_SYMBOL(cancel_delayed_work_sync);         3303 EXPORT_SYMBOL(cancel_delayed_work_sync);
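
/*
 * Editor's note: an illustrative, hypothetical sketch, not part of
 * kernel/workqueue.c.  The _sync variant is the one for sleepable teardown
 * paths: it also waits for a running instance to finish and works even if the
 * handler re-arms itself.  watchdog_dwork is assumed to have been set up with
 * INIT_DELAYED_WORK() elsewhere; all names are made up.
 */
#include <linux/workqueue.h>

static struct delayed_work watchdog_dwork;      /* assumed: INIT_DELAYED_WORK()'d */

static void example_shutdown(void)
{
        /* Sleeps if watchdog_dwork is currently executing. */
        cancel_delayed_work_sync(&watchdog_dwork);
}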
4413                                                  3304 
4414 /**                                              3305 /**
4415  * disable_work - Disable and cancel a work i << 
4416  * @work: work item to disable                << 
4417  *                                            << 
4418  * Disable @work by incrementing its disable  << 
4419  * pending. As long as the disable count is n << 
4420  * will fail and return %false. The maximum s << 
4421  * power of %WORK_OFFQ_DISABLE_BITS, currentl << 
4422  *                                            << 
4423  * Can be called from any context. Returns %t << 
4424  * otherwise.                                 << 
4425  */                                           << 
4426 bool disable_work(struct work_struct *work)   << 
4427 {                                             << 
4428         return __cancel_work(work, WORK_CANCE << 
4429 }                                             << 
4430 EXPORT_SYMBOL_GPL(disable_work);              << 
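
/*
 * Editor's note: an illustrative, hypothetical sketch, not part of
 * kernel/workqueue.c.  The disable/enable API exists only on the newer (6.11)
 * side of this diff.  While a work item is disabled, queue_work() and friends
 * fail and return %false, which gives callers a cheap way to shut a hot
 * queueing path off without tearing the work item down.  All names are made up.
 */
#include <linux/workqueue.h>
#include <linux/printk.h>

static void rx_fn(struct work_struct *work) { }
static DECLARE_WORK(rx_work, rx_fn);

static void example_pause_rx(void)
{
        disable_work(&rx_work);         /* disable count 0 -> 1, cancel if pending */

        /* Queueing attempts now fail and return false until re-enabled. */
        if (!queue_work(system_wq, &rx_work))
                pr_debug("rx_work is disabled, queueing rejected\n");
}

static void example_resume_rx(void)
{
        enable_work(&rx_work);          /* disable count 1 -> 0, queueing works again */
}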
4431                                               << 
4432 /**                                           << 
4433  * disable_work_sync - Disable, cancel and dr << 
4434  * @work: work item to disable                << 
4435  *                                            << 
4436  * Similar to disable_work() but also wait fo << 
4437  * executing.                                 << 
4438  *                                            << 
4439  * Must be called from a sleepable context if << 
4440  * workqueue. Can also be called from non-har << 
4441  * if @work was last queued on a BH workqueue << 
4442  *                                            << 
4443  * Returns %true if @work was pending, %false << 
4444  */                                           << 
4445 bool disable_work_sync(struct work_struct *wo << 
4446 {                                             << 
4447         return __cancel_work_sync(work, WORK_ << 
4448 }                                             << 
4449 EXPORT_SYMBOL_GPL(disable_work_sync);         << 
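
/*
 * Editor's note: an illustrative, hypothetical sketch, not part of
 * kernel/workqueue.c.  disable_work_sync() combines "no new instances" with
 * "nothing still running", which is what a suspend-style path wants before
 * touching state the handler uses.  Per the kernel-doc above, work last queued
 * on a BH workqueue may also be disabled from non-hardirq atomic context.
 * All names are made up.
 */
#include <linux/workqueue.h>

static void sensor_fn(struct work_struct *work) { }
static DECLARE_WORK(sensor_work, sensor_fn);

static int example_suspend(void)
{
        /* Returns with sensor_work idle and further queueing disabled. */
        disable_work_sync(&sensor_work);
        return 0;
}

static int example_resume(void)
{
        enable_work(&sensor_work);
        return 0;
}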
4450                                               << 
4451 /**                                           << 
4452  * enable_work - Enable a work item           << 
4453  * @work: work item to enable                 << 
4454  *                                            << 
4455  * Undo disable_work[_sync]() by decrementing << 
4456  * only be queued if its disable count is 0.  << 
4457  *                                            << 
4458  * Can be called from any context. Returns %t << 
4459  * Otherwise, %false.                         << 
4460  */                                           << 
4461 bool enable_work(struct work_struct *work)    << 
4462 {                                             << 
4463         struct work_offq_data offqd;          << 
4464         unsigned long irq_flags;              << 
4465                                               << 
4466         work_grab_pending(work, 0, &irq_flags << 
4467                                               << 
4468         work_offqd_unpack(&offqd, *work_data_ << 
4469         work_offqd_enable(&offqd);            << 
4470         set_work_pool_and_clear_pending(work, << 
4471                                         work_ << 
4472         local_irq_restore(irq_flags);         << 
4473                                               << 
4474         return !offqd.disable;                << 
4475 }                                             << 
4476 EXPORT_SYMBOL_GPL(enable_work);               << 
4477                                               << 
4478 /**                                           << 
4479  * disable_delayed_work - Disable and cancel  << 
4480  * @dwork: delayed work item to disable       << 
4481  *                                            << 
4482  * disable_work() for delayed work items.     << 
4483  */                                           << 
4484 bool disable_delayed_work(struct delayed_work << 
4485 {                                             << 
4486         return __cancel_work(&dwork->work,    << 
4487                              WORK_CANCEL_DELA << 
4488 }                                             << 
4489 EXPORT_SYMBOL_GPL(disable_delayed_work);      << 
4490                                               << 
4491 /**                                           << 
4492  * disable_delayed_work_sync - Disable, cance << 
4493  * @dwork: delayed work item to disable       << 
4494  *                                            << 
4495  * disable_work_sync() for delayed work items << 
4496  */                                           << 
4497 bool disable_delayed_work_sync(struct delayed << 
4498 {                                             << 
4499         return __cancel_work_sync(&dwork->wor << 
4500                                   WORK_CANCEL << 
4501 }                                             << 
4502 EXPORT_SYMBOL_GPL(disable_delayed_work_sync); << 
4503                                               << 
4504 /**                                           << 
4505  * enable_delayed_work - Enable a delayed work item   <<
4506  * @dwork: delayed work item to enable        <<
4507  *                                            <<
4508  * enable_work() for delayed work items.      <<
4509  */                                           <<
4510 bool enable_delayed_work(struct delayed_work *dwork)   <<
4511 {                                             <<
4512         return enable_work(&dwork->work);     <<
4513 }                                             <<
4514 EXPORT_SYMBOL_GPL(enable_delayed_work);       << 
4515                                               << 
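The delayed-work variants follow the same disable/enable pattern. A minimal sketch,
not part of workqueue.c; my_dev_pause_polling() and its poll_work argument are
hypothetical:

static void my_dev_pause_polling(struct delayed_work *poll_work)
{
	/* cancels the timer and the work item, waiting if it is running */
	disable_delayed_work_sync(poll_work);

	/* ... quiescent section: poll_work cannot be queued here ... */

	/* allow queue_delayed_work() to succeed again */
	enable_delayed_work(poll_work);
}
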
4516 /**                                           << 
4517  * schedule_on_each_cpu - execute a function     3306  * schedule_on_each_cpu - execute a function synchronously on each online CPU
4518  * @func: the function to call                   3307  * @func: the function to call
4519  *                                               3308  *
4520  * schedule_on_each_cpu() executes @func on e    3309  * schedule_on_each_cpu() executes @func on each online CPU using the
4521  * system workqueue and blocks until all CPUs    3310  * system workqueue and blocks until all CPUs have completed.
4522  * schedule_on_each_cpu() is very slow.          3311  * schedule_on_each_cpu() is very slow.
4523  *                                               3312  *
4524  * Return:                                       3313  * Return:
4525  * 0 on success, -errno on failure.              3314  * 0 on success, -errno on failure.
4526  */                                              3315  */
4527 int schedule_on_each_cpu(work_func_t func)       3316 int schedule_on_each_cpu(work_func_t func)
4528 {                                                3317 {
4529         int cpu;                                 3318         int cpu;
4530         struct work_struct __percpu *works;      3319         struct work_struct __percpu *works;
4531                                                  3320 
4532         works = alloc_percpu(struct work_stru    3321         works = alloc_percpu(struct work_struct);
4533         if (!works)                              3322         if (!works)
4534                 return -ENOMEM;                  3323                 return -ENOMEM;
4535                                                  3324 
4536         cpus_read_lock();                        3325         cpus_read_lock();
4537                                                  3326 
4538         for_each_online_cpu(cpu) {               3327         for_each_online_cpu(cpu) {
4539                 struct work_struct *work = pe    3328                 struct work_struct *work = per_cpu_ptr(works, cpu);
4540                                                  3329 
4541                 INIT_WORK(work, func);           3330                 INIT_WORK(work, func);
4542                 schedule_work_on(cpu, work);     3331                 schedule_work_on(cpu, work);
4543         }                                        3332         }
4544                                                  3333 
4545         for_each_online_cpu(cpu)                 3334         for_each_online_cpu(cpu)
4546                 flush_work(per_cpu_ptr(works,    3335                 flush_work(per_cpu_ptr(works, cpu));
4547                                                  3336 
4548         cpus_read_unlock();                      3337         cpus_read_unlock();
4549         free_percpu(works);                      3338         free_percpu(works);
4550         return 0;                                3339         return 0;
4551 }                                                3340 }
4552                                                  3341 
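A minimal usage sketch for schedule_on_each_cpu(), not part of workqueue.c;
my_flush_cpu_state() and my_flush_all_cpus() are hypothetical names:

/* runs in process context on the CPU it was queued for */
static void my_flush_cpu_state(struct work_struct *work)
{
	/* per-CPU cleanup goes here */
}

static int my_flush_all_cpus(void)
{
	/* blocks until every online CPU has run the callback; returns 0 or -ENOMEM */
	return schedule_on_each_cpu(my_flush_cpu_state);
}
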
4553 /**                                              3342 /**
4554  * execute_in_process_context - reliably exec    3343  * execute_in_process_context - reliably execute the routine with user context
4555  * @fn:         the function to execute          3344  * @fn:         the function to execute
4556  * @ew:         guaranteed storage for the ex    3345  * @ew:         guaranteed storage for the execute work structure (must
4557  *              be available when the work ex    3346  *              be available when the work executes)
4558  *                                               3347  *
4559  * Executes the function immediately if proce    3348  * Executes the function immediately if process context is available,
4560  * otherwise schedules the function for delay    3349  * otherwise schedules the function for delayed execution.
4561  *                                               3350  *
4562  * Return:      0 - function was executed        3351  * Return:      0 - function was executed
4563  *              1 - function was scheduled fo    3352  *              1 - function was scheduled for execution
4564  */                                              3353  */
4565 int execute_in_process_context(work_func_t fn    3354 int execute_in_process_context(work_func_t fn, struct execute_work *ew)
4566 {                                                3355 {
4567         if (!in_interrupt()) {                   3356         if (!in_interrupt()) {
4568                 fn(&ew->work);                   3357                 fn(&ew->work);
4569                 return 0;                        3358                 return 0;
4570         }                                        3359         }
4571                                                  3360 
4572         INIT_WORK(&ew->work, fn);                3361         INIT_WORK(&ew->work, fn);
4573         schedule_work(&ew->work);                3362         schedule_work(&ew->work);
4574                                                  3363 
4575         return 1;                                3364         return 1;
4576 }                                                3365 }
4577 EXPORT_SYMBOL_GPL(execute_in_process_context)    3366 EXPORT_SYMBOL_GPL(execute_in_process_context);
4578                                                  3367 
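Because @ew must stay valid until the work runs, callers typically embed the
execute_work in a longer-lived object. A minimal sketch, not part of workqueue.c;
'struct my_obj', my_obj_release() and my_obj_put() are hypothetical:

struct my_obj {
	struct execute_work release_ew;
	/* ... */
};

static void my_obj_release(struct work_struct *work)
{
	struct my_obj *obj = container_of(work, struct my_obj, release_ew.work);

	kfree(obj);
}

/* runs the release immediately outside interrupt context, defers it otherwise */
static void my_obj_put(struct my_obj *obj)
{
	execute_in_process_context(my_obj_release, &obj->release_ew);
}
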
4579 /**                                              3368 /**
4580  * free_workqueue_attrs - free a workqueue_at    3369  * free_workqueue_attrs - free a workqueue_attrs
4581  * @attrs: workqueue_attrs to free               3370  * @attrs: workqueue_attrs to free
4582  *                                               3371  *
4583  * Undo alloc_workqueue_attrs().                 3372  * Undo alloc_workqueue_attrs().
4584  */                                              3373  */
4585 void free_workqueue_attrs(struct workqueue_at    3374 void free_workqueue_attrs(struct workqueue_attrs *attrs)
4586 {                                                3375 {
4587         if (attrs) {                             3376         if (attrs) {
4588                 free_cpumask_var(attrs->cpuma    3377                 free_cpumask_var(attrs->cpumask);
4589                 free_cpumask_var(attrs->__pod_cpumask);   <<
4590                 kfree(attrs);                    3378                 kfree(attrs);
4591         }                                        3379         }
4592 }                                                3380 }
4593                                                  3381 
4594 /**                                              3382 /**
4595  * alloc_workqueue_attrs - allocate a workque    3383  * alloc_workqueue_attrs - allocate a workqueue_attrs
4596  *                                               3384  *
4597  * Allocate a new workqueue_attrs, initialize    3385  * Allocate a new workqueue_attrs, initialize with default settings and
4598  * return it.                                    3386  * return it.
4599  *                                               3387  *
4600  * Return: The allocated new workqueue_attr o    3388  * Return: The allocated new workqueue_attr on success. %NULL on failure.
4601  */                                              3389  */
4602 struct workqueue_attrs *alloc_workqueue_attrs    3390 struct workqueue_attrs *alloc_workqueue_attrs(void)
4603 {                                                3391 {
4604         struct workqueue_attrs *attrs;           3392         struct workqueue_attrs *attrs;
4605                                                  3393 
4606         attrs = kzalloc(sizeof(*attrs), GFP_K    3394         attrs = kzalloc(sizeof(*attrs), GFP_KERNEL);
4607         if (!attrs)                              3395         if (!attrs)
4608                 goto fail;                       3396                 goto fail;
4609         if (!alloc_cpumask_var(&attrs->cpumas    3397         if (!alloc_cpumask_var(&attrs->cpumask, GFP_KERNEL))
4610                 goto fail;                       3398                 goto fail;
4611         if (!alloc_cpumask_var(&attrs->__pod_cpumask, GFP_KERNEL))   <<
4612                 goto fail;                    << 
4613                                                  3399 
4614         cpumask_copy(attrs->cpumask, cpu_poss    3400         cpumask_copy(attrs->cpumask, cpu_possible_mask);
4615         attrs->affn_scope = WQ_AFFN_DFL;      << 
4616         return attrs;                            3401         return attrs;
4617 fail:                                            3402 fail:
4618         free_workqueue_attrs(attrs);             3403         free_workqueue_attrs(attrs);
4619         return NULL;                             3404         return NULL;
4620 }                                                3405 }
4621                                                  3406 
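A minimal sketch of the usual attrs lifecycle, not part of workqueue.c: allocate,
adjust, hand the copy to apply_workqueue_attrs() (defined later in this file, for
unbound workqueues), then free it. 'my_wq' and my_pin_wq_to_cpu0() are hypothetical:

static int my_pin_wq_to_cpu0(struct workqueue_struct *my_wq)
{
	struct workqueue_attrs *attrs;
	int ret;

	attrs = alloc_workqueue_attrs();
	if (!attrs)
		return -ENOMEM;

	attrs->nice = -5;				/* higher priority workers */
	cpumask_copy(attrs->cpumask, cpumask_of(0));	/* restrict to CPU 0 */

	ret = apply_workqueue_attrs(my_wq, attrs);	/* attrs are copied, not kept */
	free_workqueue_attrs(attrs);
	return ret;
}
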
4622 static void copy_workqueue_attrs(struct workq    3407 static void copy_workqueue_attrs(struct workqueue_attrs *to,
4623                                  const struct    3408                                  const struct workqueue_attrs *from)
4624 {                                                3409 {
4625         to->nice = from->nice;                   3410         to->nice = from->nice;
4626         cpumask_copy(to->cpumask, from->cpuma    3411         cpumask_copy(to->cpumask, from->cpumask);
4627         cpumask_copy(to->__pod_cpumask, from->__pod_cpumask);   <<
4628         to->affn_strict = from->affn_strict;  << 
4629                                               << 
4630         /*                                       3412         /*
4631          * Unlike hash and equality test, cop !! 3413          * Unlike hash and equality test, this function doesn't ignore
4632          * fields as copying is used for both !! 3414          * ->no_numa as it is used for both pool and wq attrs.  Instead,
4633          * get_unbound_pool() explicitly clea !! 3415          * get_unbound_pool() explicitly clears ->no_numa after copying.
4634          */                                      3416          */
4635         to->affn_scope = from->affn_scope;    !! 3417         to->no_numa = from->no_numa;
4636         to->ordered = from->ordered;          << 
4637 }                                             << 
4638                                               << 
4639 /*                                            <<
4640  * Some attrs fields are workqueue-only. Clear them for worker_pool's. See the   <<
4641  * comments in 'struct workqueue_attrs' definition.   <<
4642  */                                           <<
4643 static void wqattrs_clear_for_pool(struct workqueue_attrs *attrs)   <<
4644 {                                             <<
4645         attrs->affn_scope = WQ_AFFN_NR_TYPES; <<
4646         attrs->ordered = false;               <<
4647         if (attrs->affn_strict)               <<
4648                 cpumask_copy(attrs->cpumask, cpu_possible_mask);   <<
4649 }                                             <<
4650                                                  3419 
4651 /* hash value of the content of @attr */         3420 /* hash value of the content of @attr */
4652 static u32 wqattrs_hash(const struct workqueu    3421 static u32 wqattrs_hash(const struct workqueue_attrs *attrs)
4653 {                                                3422 {
4654         u32 hash = 0;                            3423         u32 hash = 0;
4655                                                  3424 
4656         hash = jhash_1word(attrs->nice, hash)    3425         hash = jhash_1word(attrs->nice, hash);
4657         hash = jhash_1word(attrs->affn_strict !! 3426         hash = jhash(cpumask_bits(attrs->cpumask),
4658         hash = jhash(cpumask_bits(attrs->__pod_cpumask),   <<
4659                      BITS_TO_LONGS(nr_cpumask    3427                      BITS_TO_LONGS(nr_cpumask_bits) * sizeof(long), hash);
4660         if (!attrs->affn_strict)              << 
4661                 hash = jhash(cpumask_bits(attrs->cpumask),   <<
4662                              BITS_TO_LONGS(nr_cpumask_bits) * sizeof(long), hash);   <<
4663         return hash;                             3428         return hash;
4664 }                                                3429 }
4665                                                  3430 
4666 /* content equality test */                      3431 /* content equality test */
4667 static bool wqattrs_equal(const struct workqu    3432 static bool wqattrs_equal(const struct workqueue_attrs *a,
4668                           const struct workqu    3433                           const struct workqueue_attrs *b)
4669 {                                                3434 {
4670         if (a->nice != b->nice)                  3435         if (a->nice != b->nice)
4671                 return false;                    3436                 return false;
4672         if (a->affn_strict != b->affn_strict) !! 3437         if (!cpumask_equal(a->cpumask, b->cpumask))
4673                 return false;                 << 
4674         if (!cpumask_equal(a->__pod_cpumask, b->__pod_cpumask))   <<
4675                 return false;                 <<
4676         if (!a->affn_strict && !cpumask_equal(a->cpumask, b->cpumask))   <<
4677                 return false;                    3438                 return false;
4678         return true;                             3439         return true;
4679 }                                                3440 }
4680                                                  3441 
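wqattrs_hash() and wqattrs_equal() form the usual hash-table contract: equal attrs must
hash identically, so any field folded into the hash has to be compared here as well.
A minimal lookup sketch mirroring get_unbound_pool() below, not part of workqueue.c;
my_find_pool() is hypothetical and assumes wq_pool_mutex is held:

static struct worker_pool *my_find_pool(const struct workqueue_attrs *attrs)
{
	struct worker_pool *pool;
	u32 hash = wqattrs_hash(attrs);

	hash_for_each_possible(unbound_pool_hash, pool, hash_node, hash)
		if (wqattrs_equal(pool->attrs, attrs))
			return pool;
	return NULL;
}
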
4681 /* Update @attrs with actually available CPUs */   <<
4682 static void wqattrs_actualize_cpumask(struct workqueue_attrs *attrs,   <<
4683                                       const cpumask_t *unbound_cpumask)   <<
4684 {                                             <<
4685         /*                                    <<
4686          * Calculate the effective CPU mask of @attrs given @unbound_cpumask. If   <<
4687          * @attrs->cpumask doesn't overlap with @unbound_cpumask, we fallback to   <<
4688          * @unbound_cpumask.                  <<
4689          */                                   <<
4690         cpumask_and(attrs->cpumask, attrs->cpumask, unbound_cpumask);   <<
4691         if (unlikely(cpumask_empty(attrs->cpumask)))   <<
4692                 cpumask_copy(attrs->cpumask, unbound_cpumask);   <<
4693 }                                             <<
4694                                               <<
4695 /* find wq_pod_type to use for @attrs */      <<
4696 static const struct wq_pod_type *             <<
4697 wqattrs_pod_type(const struct workqueue_attrs *attrs)   <<
4698 {                                             <<
4699         enum wq_affn_scope scope;             <<
4700         struct wq_pod_type *pt;               <<
4701                                               <<
4702         /* to synchronize access to wq_affn_dfl */   <<
4703         lockdep_assert_held(&wq_pool_mutex);  <<
4704                                               <<
4705         if (attrs->affn_scope == WQ_AFFN_DFL) <<
4706                 scope = wq_affn_dfl;          <<
4707         else                                  <<
4708                 scope = attrs->affn_scope;    <<
4709                                               <<
4710         pt = &wq_pod_types[scope];            <<
4711                                               <<
4712         if (!WARN_ON_ONCE(attrs->affn_scope == WQ_AFFN_NR_TYPES) &&   <<
4713             likely(pt->nr_pods))              <<
4714                 return pt;                    <<
4715                                               <<
4716         /*                                    <<
4717          * Before workqueue_init_topology(), only SYSTEM is available which is   <<
4718          * initialized in workqueue_init_early().   <<
4719          */                                   <<
4720         pt = &wq_pod_types[WQ_AFFN_SYSTEM];   <<
4721         BUG_ON(!pt->nr_pods);                 <<
4722         return pt;                            <<
4723 }                                             <<
4724                                               << 
4725 /**                                              3442 /**
4726  * init_worker_pool - initialize a newly zall    3443  * init_worker_pool - initialize a newly zalloc'd worker_pool
4727  * @pool: worker_pool to initialize              3444  * @pool: worker_pool to initialize
4728  *                                               3445  *
4729  * Initialize a newly zalloc'd @pool.  It als    3446  * Initialize a newly zalloc'd @pool.  It also allocates @pool->attrs.
4730  *                                               3447  *
4731  * Return: 0 on success, -errno on failure.      3448  * Return: 0 on success, -errno on failure.  Even on failure, all fields
4732  * inside @pool proper are initialized and pu    3449  * inside @pool proper are initialized and put_unbound_pool() can be called
4733  * on @pool safely to release it.                3450  * on @pool safely to release it.
4734  */                                              3451  */
4735 static int init_worker_pool(struct worker_poo    3452 static int init_worker_pool(struct worker_pool *pool)
4736 {                                                3453 {
4737         raw_spin_lock_init(&pool->lock);         3454         raw_spin_lock_init(&pool->lock);
4738         pool->id = -1;                           3455         pool->id = -1;
4739         pool->cpu = -1;                          3456         pool->cpu = -1;
4740         pool->node = NUMA_NO_NODE;               3457         pool->node = NUMA_NO_NODE;
4741         pool->flags |= POOL_DISASSOCIATED;       3458         pool->flags |= POOL_DISASSOCIATED;
4742         pool->watchdog_ts = jiffies;             3459         pool->watchdog_ts = jiffies;
4743         INIT_LIST_HEAD(&pool->worklist);         3460         INIT_LIST_HEAD(&pool->worklist);
4744         INIT_LIST_HEAD(&pool->idle_list);        3461         INIT_LIST_HEAD(&pool->idle_list);
4745         hash_init(pool->busy_hash);              3462         hash_init(pool->busy_hash);
4746                                                  3463 
4747         timer_setup(&pool->idle_timer, idle_w    3464         timer_setup(&pool->idle_timer, idle_worker_timeout, TIMER_DEFERRABLE);
4748         INIT_WORK(&pool->idle_cull_work, idle_cull_fn);   <<
4749                                                  3465 
4750         timer_setup(&pool->mayday_timer, pool    3466         timer_setup(&pool->mayday_timer, pool_mayday_timeout, 0);
4751                                                  3467 
4752         INIT_LIST_HEAD(&pool->workers);          3468         INIT_LIST_HEAD(&pool->workers);
4753                                                  3469 
4754         ida_init(&pool->worker_ida);             3470         ida_init(&pool->worker_ida);
4755         INIT_HLIST_NODE(&pool->hash_node);       3471         INIT_HLIST_NODE(&pool->hash_node);
4756         pool->refcnt = 1;                        3472         pool->refcnt = 1;
4757                                                  3473 
4758         /* shouldn't fail above this point */    3474         /* shouldn't fail above this point */
4759         pool->attrs = alloc_workqueue_attrs()    3475         pool->attrs = alloc_workqueue_attrs();
4760         if (!pool->attrs)                        3476         if (!pool->attrs)
4761                 return -ENOMEM;                  3477                 return -ENOMEM;
4762                                               << 
4763         wqattrs_clear_for_pool(pool->attrs);  << 
4764                                               << 
4765         return 0;                                3478         return 0;
4766 }                                                3479 }
4767                                                  3480 
4768 #ifdef CONFIG_LOCKDEP                            3481 #ifdef CONFIG_LOCKDEP
4769 static void wq_init_lockdep(struct workqueue_    3482 static void wq_init_lockdep(struct workqueue_struct *wq)
4770 {                                                3483 {
4771         char *lock_name;                         3484         char *lock_name;
4772                                                  3485 
4773         lockdep_register_key(&wq->key);          3486         lockdep_register_key(&wq->key);
4774         lock_name = kasprintf(GFP_KERNEL, "%s    3487         lock_name = kasprintf(GFP_KERNEL, "%s%s", "(wq_completion)", wq->name);
4775         if (!lock_name)                          3488         if (!lock_name)
4776                 lock_name = wq->name;            3489                 lock_name = wq->name;
4777                                                  3490 
4778         wq->lock_name = lock_name;               3491         wq->lock_name = lock_name;
4779         lockdep_init_map(&wq->lockdep_map, lo    3492         lockdep_init_map(&wq->lockdep_map, lock_name, &wq->key, 0);
4780 }                                                3493 }
4781                                                  3494 
4782 static void wq_unregister_lockdep(struct work    3495 static void wq_unregister_lockdep(struct workqueue_struct *wq)
4783 {                                                3496 {
4784         lockdep_unregister_key(&wq->key);        3497         lockdep_unregister_key(&wq->key);
4785 }                                                3498 }
4786                                                  3499 
4787 static void wq_free_lockdep(struct workqueue_    3500 static void wq_free_lockdep(struct workqueue_struct *wq)
4788 {                                                3501 {
4789         if (wq->lock_name != wq->name)           3502         if (wq->lock_name != wq->name)
4790                 kfree(wq->lock_name);            3503                 kfree(wq->lock_name);
4791 }                                                3504 }
4792 #else                                            3505 #else
4793 static void wq_init_lockdep(struct workqueue_    3506 static void wq_init_lockdep(struct workqueue_struct *wq)
4794 {                                                3507 {
4795 }                                                3508 }
4796                                                  3509 
4797 static void wq_unregister_lockdep(struct work    3510 static void wq_unregister_lockdep(struct workqueue_struct *wq)
4798 {                                                3511 {
4799 }                                                3512 }
4800                                                  3513 
4801 static void wq_free_lockdep(struct workqueue_    3514 static void wq_free_lockdep(struct workqueue_struct *wq)
4802 {                                                3515 {
4803 }                                                3516 }
4804 #endif                                           3517 #endif
4805                                                  3518 
4806 static void free_node_nr_active(struct wq_node_nr_active **nna_ar)   <<
4807 {                                             <<
4808         int node;                             <<
4809                                               <<
4810         for_each_node(node) {                 <<
4811                 kfree(nna_ar[node]);          <<
4812                 nna_ar[node] = NULL;          <<
4813         }                                     <<
4814                                               <<
4815         kfree(nna_ar[nr_node_ids]);           <<
4816         nna_ar[nr_node_ids] = NULL;           <<
4817 }                                             <<
4818                                               <<
4819 static void init_node_nr_active(struct wq_node_nr_active *nna)   <<
4820 {                                             <<
4821         nna->max = WQ_DFL_MIN_ACTIVE;         <<
4822         atomic_set(&nna->nr, 0);              <<
4823         raw_spin_lock_init(&nna->lock);       <<
4824         INIT_LIST_HEAD(&nna->pending_pwqs);   <<
4825 }                                             <<
4826                                               <<
4827 /*                                            <<
4828  * Each node's nr_active counter will be accessed mostly from its own node and   <<
4829  * should be allocated in the node.           <<
4830  */                                           <<
4831 static int alloc_node_nr_active(struct wq_node_nr_active **nna_ar)   <<
4832 {                                             <<
4833         struct wq_node_nr_active *nna;        <<
4834         int node;                             <<
4835                                               <<
4836         for_each_node(node) {                 <<
4837                 nna = kzalloc_node(sizeof(*nna), GFP_KERNEL, node);   <<
4838                 if (!nna)                     <<
4839                         goto err_free;        <<
4840                 init_node_nr_active(nna);     <<
4841                 nna_ar[node] = nna;           <<
4842         }                                     <<
4843                                               <<
4844         /* [nr_node_ids] is used as the fallback */   <<
4845         nna = kzalloc_node(sizeof(*nna), GFP_KERNEL, NUMA_NO_NODE);   <<
4846         if (!nna)                             <<
4847                 goto err_free;                <<
4848         init_node_nr_active(nna);             <<
4849         nna_ar[nr_node_ids] = nna;            <<
4850                                               <<
4851         return 0;                             <<
4852                                               <<
4853 err_free:                                     <<
4854         free_node_nr_active(nna_ar);          <<
4855         return -ENOMEM;                       <<
4856 }                                             <<
4857                                               << 
4858 static void rcu_free_wq(struct rcu_head *rcu)    3519 static void rcu_free_wq(struct rcu_head *rcu)
4859 {                                                3520 {
4860         struct workqueue_struct *wq =            3521         struct workqueue_struct *wq =
4861                 container_of(rcu, struct work    3522                 container_of(rcu, struct workqueue_struct, rcu);
4862                                                  3523 
4863         if (wq->flags & WQ_UNBOUND)           << 
4864                 free_node_nr_active(wq->node_nr_active);   <<
4865                                               << 
4866         wq_free_lockdep(wq);                     3524         wq_free_lockdep(wq);
4867         free_percpu(wq->cpu_pwq);             !! 3525 
4868         free_workqueue_attrs(wq->unbound_attr !! 3526         if (!(wq->flags & WQ_UNBOUND))
                                                   >> 3527                 free_percpu(wq->cpu_pwqs);
                                                   >> 3528         else
                                                   >> 3529                 free_workqueue_attrs(wq->unbound_attrs);
                                                   >> 3530 
4869         kfree(wq);                               3531         kfree(wq);
4870 }                                                3532 }
4871                                                  3533 
4872 static void rcu_free_pool(struct rcu_head *rc    3534 static void rcu_free_pool(struct rcu_head *rcu)
4873 {                                                3535 {
4874         struct worker_pool *pool = container_    3536         struct worker_pool *pool = container_of(rcu, struct worker_pool, rcu);
4875                                                  3537 
4876         ida_destroy(&pool->worker_ida);          3538         ida_destroy(&pool->worker_ida);
4877         free_workqueue_attrs(pool->attrs);       3539         free_workqueue_attrs(pool->attrs);
4878         kfree(pool);                             3540         kfree(pool);
4879 }                                                3541 }
4880                                                  3542 
                                                   >> 3543 /* This returns with the lock held on success (pool manager is inactive). */
                                                   >> 3544 static bool wq_manager_inactive(struct worker_pool *pool)
                                                   >> 3545 {
                                                   >> 3546         raw_spin_lock_irq(&pool->lock);
                                                   >> 3547 
                                                   >> 3548         if (pool->flags & POOL_MANAGER_ACTIVE) {
                                                   >> 3549                 raw_spin_unlock_irq(&pool->lock);
                                                   >> 3550                 return false;
                                                   >> 3551         }
                                                   >> 3552         return true;
                                                   >> 3553 }
                                                   >> 3554 
4881 /**                                              3555 /**
4882  * put_unbound_pool - put a worker_pool          3556  * put_unbound_pool - put a worker_pool
4883  * @pool: worker_pool to put                     3557  * @pool: worker_pool to put
4884  *                                               3558  *
4885  * Put @pool.  If its refcnt reaches zero, it    3559  * Put @pool.  If its refcnt reaches zero, it gets destroyed in RCU
4886  * safe manner.  get_unbound_pool() calls thi    3560  * safe manner.  get_unbound_pool() calls this function on its failure path
4887  * and this function should be able to releas    3561  * and this function should be able to release pools which went through,
4888  * successfully or not, init_worker_pool().      3562  * successfully or not, init_worker_pool().
4889  *                                               3563  *
4890  * Should be called with wq_pool_mutex held.     3564  * Should be called with wq_pool_mutex held.
4891  */                                              3565  */
4892 static void put_unbound_pool(struct worker_po    3566 static void put_unbound_pool(struct worker_pool *pool)
4893 {                                                3567 {
                                                   >> 3568         DECLARE_COMPLETION_ONSTACK(detach_completion);
4894         struct worker *worker;                   3569         struct worker *worker;
4895         LIST_HEAD(cull_list);                 << 
4896                                                  3570 
4897         lockdep_assert_held(&wq_pool_mutex);     3571         lockdep_assert_held(&wq_pool_mutex);
4898                                                  3572 
4899         if (--pool->refcnt)                      3573         if (--pool->refcnt)
4900                 return;                          3574                 return;
4901                                                  3575 
4902         /* sanity checks */                      3576         /* sanity checks */
4903         if (WARN_ON(!(pool->cpu < 0)) ||         3577         if (WARN_ON(!(pool->cpu < 0)) ||
4904             WARN_ON(!list_empty(&pool->workli    3578             WARN_ON(!list_empty(&pool->worklist)))
4905                 return;                          3579                 return;
4906                                                  3580 
4907         /* release id and unhash */              3581         /* release id and unhash */
4908         if (pool->id >= 0)                       3582         if (pool->id >= 0)
4909                 idr_remove(&worker_pool_idr,     3583                 idr_remove(&worker_pool_idr, pool->id);
4910         hash_del(&pool->hash_node);              3584         hash_del(&pool->hash_node);
4911                                                  3585 
4912         /*                                       3586         /*
4913          * Become the manager and destroy all    3587          * Become the manager and destroy all workers.  This prevents
4914          * @pool's workers from blocking on a    3588          * @pool's workers from blocking on attach_mutex.  We're the last
4915          * manager and @pool gets freed with     3589          * manager and @pool gets freed with the flag set.
4916          *                                    !! 3590          * Because of how wq_manager_inactive() works, we will hold the
4917          * Having a concurrent manager is qui !! 3591          * spinlock after a successful wait.
4918          * only get here with                 <<
4919          *   pwq->refcnt == pool->refcnt == 0 <<
4920          * which implies no work queued to the pool, which implies no worker can   <<
4921          * become the manager. However a worker could have taken the role of   <<
4922          * manager before the refcnts dropped to 0, since maybe_create_worker()   <<
4923          * drops pool->lock                   <<
4924          */                                      3592          */
4925         while (true) {                        !! 3593         rcuwait_wait_event(&manager_wait, wq_manager_inactive(pool),
4926                 rcuwait_wait_event(&manager_w !! 3594                            TASK_UNINTERRUPTIBLE);
4927                                    !(pool->fl !! 3595         pool->flags |= POOL_MANAGER_ACTIVE;
4928                                    TASK_UNINTERRUPTIBLE);   <<
4929                                               <<
4930                 mutex_lock(&wq_pool_attach_mutex);   <<
4931                 raw_spin_lock_irq(&pool->lock);   <<
4932                 if (!(pool->flags & POOL_MANAGER_ACTIVE)) {   <<
4933                         pool->flags |= POOL_MANAGER_ACTIVE;   <<
4934                         break;                <<
4935                 }                             <<
4936                 raw_spin_unlock_irq(&pool->lock);   <<
4937                 mutex_unlock(&wq_pool_attach_mutex);   <<
4938         }                                     << 
4939                                                  3596 
4940         while ((worker = first_idle_worker(po    3597         while ((worker = first_idle_worker(pool)))
4941                 set_worker_dying(worker, &cul !! 3598                 destroy_worker(worker);
4942         WARN_ON(pool->nr_workers || pool->nr_    3599         WARN_ON(pool->nr_workers || pool->nr_idle);
4943         raw_spin_unlock_irq(&pool->lock);        3600         raw_spin_unlock_irq(&pool->lock);
4944                                                  3601 
4945         detach_dying_workers(&cull_list);     !! 3602         mutex_lock(&wq_pool_attach_mutex);
4946                                               !! 3603         if (!list_empty(&pool->workers))
                                                   >> 3604                 pool->detach_completion = &detach_completion;
4947         mutex_unlock(&wq_pool_attach_mutex);     3605         mutex_unlock(&wq_pool_attach_mutex);
4948                                                  3606 
4949         reap_dying_workers(&cull_list);       !! 3607         if (pool->detach_completion)
                                                   >> 3608                 wait_for_completion(pool->detach_completion);
4950                                                  3609 
4951         /* shut down the timers */               3610         /* shut down the timers */
4952         del_timer_sync(&pool->idle_timer);       3611         del_timer_sync(&pool->idle_timer);
4953         cancel_work_sync(&pool->idle_cull_work);   <<
4954         del_timer_sync(&pool->mayday_timer);     3612         del_timer_sync(&pool->mayday_timer);
4955                                                  3613 
4956         /* RCU protected to allow dereference    3614         /* RCU protected to allow dereferences from get_work_pool() */
4957         call_rcu(&pool->rcu, rcu_free_pool);     3615         call_rcu(&pool->rcu, rcu_free_pool);
4958 }                                                3616 }
4959                                                  3617 
4960 /**                                              3618 /**
4961  * get_unbound_pool - get a worker_pool with     3619  * get_unbound_pool - get a worker_pool with the specified attributes
4962  * @attrs: the attributes of the worker_pool     3620  * @attrs: the attributes of the worker_pool to get
4963  *                                               3621  *
4964  * Obtain a worker_pool which has the same at    3622  * Obtain a worker_pool which has the same attributes as @attrs, bump the
4965  * reference count and return it.  If there a    3623  * reference count and return it.  If there already is a matching
4966  * worker_pool, it will be used; otherwise, t    3624  * worker_pool, it will be used; otherwise, this function attempts to
4967  * create a new one.                             3625  * create a new one.
4968  *                                               3626  *
4969  * Should be called with wq_pool_mutex held.     3627  * Should be called with wq_pool_mutex held.
4970  *                                               3628  *
4971  * Return: On success, a worker_pool with the    3629  * Return: On success, a worker_pool with the same attributes as @attrs.
4972  * On failure, %NULL.                            3630  * On failure, %NULL.
4973  */                                              3631  */
4974 static struct worker_pool *get_unbound_pool(c    3632 static struct worker_pool *get_unbound_pool(const struct workqueue_attrs *attrs)
4975 {                                                3633 {
4976         struct wq_pod_type *pt = &wq_pod_types[WQ_AFFN_NUMA];   <<
4977         u32 hash = wqattrs_hash(attrs);          3634         u32 hash = wqattrs_hash(attrs);
4978         struct worker_pool *pool;                3635         struct worker_pool *pool;
4979         int pod, node = NUMA_NO_NODE;         !! 3636         int node;
                                                   >> 3637         int target_node = NUMA_NO_NODE;
4980                                                  3638 
4981         lockdep_assert_held(&wq_pool_mutex);     3639         lockdep_assert_held(&wq_pool_mutex);
4982                                                  3640 
4983         /* do we already have a matching pool    3641         /* do we already have a matching pool? */
4984         hash_for_each_possible(unbound_pool_h    3642         hash_for_each_possible(unbound_pool_hash, pool, hash_node, hash) {
4985                 if (wqattrs_equal(pool->attrs    3643                 if (wqattrs_equal(pool->attrs, attrs)) {
4986                         pool->refcnt++;          3644                         pool->refcnt++;
4987                         return pool;             3645                         return pool;
4988                 }                                3646                 }
4989         }                                        3647         }
4990                                                  3648 
4991         /* If __pod_cpumask is contained insi !! 3649         /* if cpumask is contained inside a NUMA node, we belong to that node */
4992         for (pod = 0; pod < pt->nr_pods; pod+ !! 3650         if (wq_numa_enabled) {
4993                 if (cpumask_subset(attrs->__p !! 3651                 for_each_node(node) {
4994                         node = pt->pod_node[p !! 3652                         if (cpumask_subset(attrs->cpumask,
4995                         break;                !! 3653                                            wq_numa_possible_cpumask[node])) {
                                                   >> 3654                                 target_node = node;
                                                   >> 3655                                 break;
                                                   >> 3656                         }
4996                 }                                3657                 }
4997         }                                        3658         }
4998                                                  3659 
4999         /* nope, create a new one */             3660         /* nope, create a new one */
5000         pool = kzalloc_node(sizeof(*pool), GF !! 3661         pool = kzalloc_node(sizeof(*pool), GFP_KERNEL, target_node);
5001         if (!pool || init_worker_pool(pool) <    3662         if (!pool || init_worker_pool(pool) < 0)
5002                 goto fail;                       3663                 goto fail;
5003                                                  3664 
5004         pool->node = node;                    !! 3665         lockdep_set_subclass(&pool->lock, 1);   /* see put_pwq() */
5005         copy_workqueue_attrs(pool->attrs, att    3666         copy_workqueue_attrs(pool->attrs, attrs);
5006         wqattrs_clear_for_pool(pool->attrs);  !! 3667         pool->node = target_node;
                                                   >> 3668 
                                                   >> 3669         /*
                                                   >> 3670          * no_numa isn't a worker_pool attribute, always clear it.  See
                                                   >> 3671          * 'struct workqueue_attrs' comments for detail.
                                                   >> 3672          */
                                                   >> 3673         pool->attrs->no_numa = false;
5007                                                  3674 
5008         if (worker_pool_assign_id(pool) < 0)     3675         if (worker_pool_assign_id(pool) < 0)
5009                 goto fail;                       3676                 goto fail;
5010                                                  3677 
5011         /* create and start the initial worke    3678         /* create and start the initial worker */
5012         if (wq_online && !create_worker(pool)    3679         if (wq_online && !create_worker(pool))
5013                 goto fail;                       3680                 goto fail;
5014                                                  3681 
5015         /* install */                            3682         /* install */
5016         hash_add(unbound_pool_hash, &pool->ha    3683         hash_add(unbound_pool_hash, &pool->hash_node, hash);
5017                                                  3684 
5018         return pool;                             3685         return pool;
5019 fail:                                            3686 fail:
5020         if (pool)                                3687         if (pool)
5021                 put_unbound_pool(pool);          3688                 put_unbound_pool(pool);
5022         return NULL;                             3689         return NULL;
5023 }                                                3690 }
5024                                                  3691 
                                                   >> 3692 static void rcu_free_pwq(struct rcu_head *rcu)
                                                   >> 3693 {
                                                   >> 3694         kmem_cache_free(pwq_cache,
                                                   >> 3695                         container_of(rcu, struct pool_workqueue, rcu));
                                                   >> 3696 }
                                                   >> 3697 
5025 /*                                               3698 /*
5026  * Scheduled on pwq_release_worker by put_pwq !! 3699  * Scheduled on system_wq by put_pwq() when an unbound pwq hits zero refcnt
5027  * refcnt and needs to be destroyed.          !! 3700  * and needs to be destroyed.
5028  */                                              3701  */
5029 static void pwq_release_workfn(struct kthread !! 3702 static void pwq_unbound_release_workfn(struct work_struct *work)
5030 {                                                3703 {
5031         struct pool_workqueue *pwq = containe    3704         struct pool_workqueue *pwq = container_of(work, struct pool_workqueue,
5032                                               !! 3705                                                   unbound_release_work);
5033         struct workqueue_struct *wq = pwq->wq    3706         struct workqueue_struct *wq = pwq->wq;
5034         struct worker_pool *pool = pwq->pool;    3707         struct worker_pool *pool = pwq->pool;
5035         bool is_last = false;                    3708         bool is_last = false;
5036                                                  3709 
5037         /*                                       3710         /*
5038          * When @pwq is not linked, it doesn' !! 3711          * when @pwq is not linked, it doesn't hold any reference to the
5039          * @wq, and @wq is invalid to access.    3712          * @wq, and @wq is invalid to access.
5040          */                                      3713          */
5041         if (!list_empty(&pwq->pwqs_node)) {      3714         if (!list_empty(&pwq->pwqs_node)) {
                                                   >> 3715                 if (WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND)))
                                                   >> 3716                         return;
                                                   >> 3717 
5042                 mutex_lock(&wq->mutex);          3718                 mutex_lock(&wq->mutex);
5043                 list_del_rcu(&pwq->pwqs_node)    3719                 list_del_rcu(&pwq->pwqs_node);
5044                 is_last = list_empty(&wq->pwq    3720                 is_last = list_empty(&wq->pwqs);
5045                                               << 
5046                 /*                            <<
5047                  * For ordered workqueue with a plugged dfl_pwq, restart it now.   <<
5048                  */                           <<
5049                 if (!is_last && (wq->flags & __WQ_ORDERED))   <<
5050                         unplug_oldest_pwq(wq);   <<
5051                                               << 
5052                 mutex_unlock(&wq->mutex);        3721                 mutex_unlock(&wq->mutex);
5053         }                                        3722         }
5054                                                  3723 
5055         if (wq->flags & WQ_UNBOUND) {         !! 3724         mutex_lock(&wq_pool_mutex);
5056                 mutex_lock(&wq_pool_mutex);   !! 3725         put_unbound_pool(pool);
5057                 put_unbound_pool(pool);       !! 3726         mutex_unlock(&wq_pool_mutex);
5058                 mutex_unlock(&wq_pool_mutex); << 
5059         }                                     << 
5060                                               << 
5061         if (!list_empty(&pwq->pending_node)) {   <<
5062                 struct wq_node_nr_active *nna =   <<
5063                         wq_node_nr_active(pwq->wq, pwq->pool->node);   <<
5064                                               <<
5065                 raw_spin_lock_irq(&nna->lock);   <<
5066                 list_del_init(&pwq->pending_node);   <<
5067                 raw_spin_unlock_irq(&nna->lock);   <<
5068         }                                     <<
5069                                                  3727 
5070         kfree_rcu(pwq, rcu);                  !! 3728         call_rcu(&pwq->rcu, rcu_free_pwq);
5071                                                  3729 
5072         /*                                       3730         /*
5073          * If we're the last pwq going away,     3731          * If we're the last pwq going away, @wq is already dead and no one
5074          * is gonna access it anymore.  Sched    3732          * is gonna access it anymore.  Schedule RCU free.
5075          */                                      3733          */
5076         if (is_last) {                           3734         if (is_last) {
5077                 wq_unregister_lockdep(wq);       3735                 wq_unregister_lockdep(wq);
5078                 call_rcu(&wq->rcu, rcu_free_w    3736                 call_rcu(&wq->rcu, rcu_free_wq);
5079         }                                        3737         }
5080 }                                                3738 }
5081                                                  3739 
                                                   >> 3740 /**
                                                   >> 3741  * pwq_adjust_max_active - update a pwq's max_active to the current setting
                                                   >> 3742  * @pwq: target pool_workqueue
                                                   >> 3743  *
                                                   >> 3744  * If @pwq isn't freezing, set @pwq->max_active to the associated
                                                   >> 3745  * workqueue's saved_max_active and activate inactive work items
                                                   >> 3746  * accordingly.  If @pwq is freezing, clear @pwq->max_active to zero.
                                                   >> 3747  */
                                                   >> 3748 static void pwq_adjust_max_active(struct pool_workqueue *pwq)
                                                   >> 3749 {
                                                   >> 3750         struct workqueue_struct *wq = pwq->wq;
                                                   >> 3751         bool freezable = wq->flags & WQ_FREEZABLE;
                                                   >> 3752         unsigned long flags;
                                                   >> 3753 
                                                   >> 3754         /* for @wq->saved_max_active */
                                                   >> 3755         lockdep_assert_held(&wq->mutex);
                                                   >> 3756 
                                                   >> 3757         /* fast exit for non-freezable wqs */
                                                   >> 3758         if (!freezable && pwq->max_active == wq->saved_max_active)
                                                   >> 3759                 return;
                                                   >> 3760 
                                                   >> 3761         /* this function can be called during early boot w/ irq disabled */
                                                   >> 3762         raw_spin_lock_irqsave(&pwq->pool->lock, flags);
                                                   >> 3763 
                                                   >> 3764         /*
                                                   >> 3765          * During [un]freezing, the caller is responsible for ensuring that
                                                   >> 3766          * this function is called at least once after @workqueue_freezing
                                                   >> 3767          * is updated and visible.
                                                   >> 3768          */
                                                   >> 3769         if (!freezable || !workqueue_freezing) {
                                                   >> 3770                 bool kick = false;
                                                   >> 3771 
                                                   >> 3772                 pwq->max_active = wq->saved_max_active;
                                                   >> 3773 
                                                   >> 3774                 while (!list_empty(&pwq->inactive_works) &&
                                                   >> 3775                        pwq->nr_active < pwq->max_active) {
                                                   >> 3776                         pwq_activate_first_inactive(pwq);
                                                   >> 3777                         kick = true;
                                                   >> 3778                 }
                                                   >> 3779 
                                                   >> 3780                 /*
                                                   >> 3781                  * Need to kick a worker after thawed or an unbound wq's
                                                   >> 3782                  * max_active is bumped. In realtime scenarios, always kicking a
                                                   >> 3783                  * worker will cause interference on the isolated cpu cores, so
                                                   >> 3784                  * let's kick iff work items were activated.
                                                   >> 3785                  */
                                                   >> 3786                 if (kick)
                                                   >> 3787                         wake_up_worker(pwq->pool);
                                                   >> 3788         } else {
                                                   >> 3789                 pwq->max_active = 0;
                                                   >> 3790         }
                                                   >> 3791 
                                                   >> 3792         raw_spin_unlock_irqrestore(&pwq->pool->lock, flags);
                                                   >> 3793 }
                                                   >> 3794 
5082 /* initialize newly allocated @pwq which is a    3795 /* initialize newly allocated @pwq which is associated with @wq and @pool */
5083 static void init_pwq(struct pool_workqueue *p    3796 static void init_pwq(struct pool_workqueue *pwq, struct workqueue_struct *wq,
5084                      struct worker_pool *pool    3797                      struct worker_pool *pool)
5085 {                                                3798 {
5086         BUG_ON((unsigned long)pwq & ~WORK_STR !! 3799         BUG_ON((unsigned long)pwq & WORK_STRUCT_FLAG_MASK);
5087                                                  3800 
5088         memset(pwq, 0, sizeof(*pwq));            3801         memset(pwq, 0, sizeof(*pwq));
5089                                                  3802 
5090         pwq->pool = pool;                        3803         pwq->pool = pool;
5091         pwq->wq = wq;                            3804         pwq->wq = wq;
5092         pwq->flush_color = -1;                   3805         pwq->flush_color = -1;
5093         pwq->refcnt = 1;                         3806         pwq->refcnt = 1;
5094         INIT_LIST_HEAD(&pwq->inactive_works);    3807         INIT_LIST_HEAD(&pwq->inactive_works);
5095         INIT_LIST_HEAD(&pwq->pending_node);   << 
5096         INIT_LIST_HEAD(&pwq->pwqs_node);         3808         INIT_LIST_HEAD(&pwq->pwqs_node);
5097         INIT_LIST_HEAD(&pwq->mayday_node);       3809         INIT_LIST_HEAD(&pwq->mayday_node);
5098         kthread_init_work(&pwq->release_work, !! 3810         INIT_WORK(&pwq->unbound_release_work, pwq_unbound_release_workfn);
5099 }                                                3811 }
5100                                                  3812 
5101 /* sync @pwq with the current state of its as    3813 /* sync @pwq with the current state of its associated wq and link it */
5102 static void link_pwq(struct pool_workqueue *p    3814 static void link_pwq(struct pool_workqueue *pwq)
5103 {                                                3815 {
5104         struct workqueue_struct *wq = pwq->wq    3816         struct workqueue_struct *wq = pwq->wq;
5105                                                  3817 
5106         lockdep_assert_held(&wq->mutex);         3818         lockdep_assert_held(&wq->mutex);
5107                                                  3819 
5108         /* may be called multiple times, igno    3820         /* may be called multiple times, ignore if already linked */
5109         if (!list_empty(&pwq->pwqs_node))        3821         if (!list_empty(&pwq->pwqs_node))
5110                 return;                          3822                 return;
5111                                                  3823 
5112         /* set the matching work_color */        3824         /* set the matching work_color */
5113         pwq->work_color = wq->work_color;        3825         pwq->work_color = wq->work_color;
5114                                                  3826 
                                                   >> 3827         /* sync max_active to the current setting */
                                                   >> 3828         pwq_adjust_max_active(pwq);
                                                   >> 3829 
5115         /* link in @pwq */                       3830         /* link in @pwq */
5116         list_add_tail_rcu(&pwq->pwqs_node, &w !! 3831         list_add_rcu(&pwq->pwqs_node, &wq->pwqs);
5117 }                                                3832 }
5118                                                  3833 
5119 /* obtain a pool matching @attr and create a     3834 /* obtain a pool matching @attr and create a pwq associating the pool and @wq */
5120 static struct pool_workqueue *alloc_unbound_p    3835 static struct pool_workqueue *alloc_unbound_pwq(struct workqueue_struct *wq,
5121                                         const    3836                                         const struct workqueue_attrs *attrs)
5122 {                                                3837 {
5123         struct worker_pool *pool;                3838         struct worker_pool *pool;
5124         struct pool_workqueue *pwq;              3839         struct pool_workqueue *pwq;
5125                                                  3840 
5126         lockdep_assert_held(&wq_pool_mutex);     3841         lockdep_assert_held(&wq_pool_mutex);
5127                                                  3842 
5128         pool = get_unbound_pool(attrs);          3843         pool = get_unbound_pool(attrs);
5129         if (!pool)                               3844         if (!pool)
5130                 return NULL;                     3845                 return NULL;
5131                                                  3846 
5132         pwq = kmem_cache_alloc_node(pwq_cache    3847         pwq = kmem_cache_alloc_node(pwq_cache, GFP_KERNEL, pool->node);
5133         if (!pwq) {                              3848         if (!pwq) {
5134                 put_unbound_pool(pool);          3849                 put_unbound_pool(pool);
5135                 return NULL;                     3850                 return NULL;
5136         }                                        3851         }
5137                                                  3852 
5138         init_pwq(pwq, wq, pool);                 3853         init_pwq(pwq, wq, pool);
5139         return pwq;                              3854         return pwq;
5140 }                                                3855 }
5141                                                  3856 
5142 static void apply_wqattrs_lock(void)          << 
5143 {                                             << 
5144         mutex_lock(&wq_pool_mutex);           << 
5145 }                                             << 
5146                                               << 
5147 static void apply_wqattrs_unlock(void)        << 
5148 {                                             << 
5149         mutex_unlock(&wq_pool_mutex);         << 
5150 }                                             << 
5151                                               << 
5152 /**                                              3857 /**
5153  * wq_calc_pod_cpumask - calculate a wq_attrs !! 3858  * wq_calc_node_cpumask - calculate a wq_attrs' cpumask for the specified node
5154  * @attrs: the wq_attrs of the default pwq of    3859  * @attrs: the wq_attrs of the default pwq of the target workqueue
5155  * @cpu: the target CPU                       !! 3860  * @node: the target NUMA node
                                                   >> 3861  * @cpu_going_down: if >= 0, the CPU to consider as offline
                                                   >> 3862  * @cpumask: outarg, the resulting cpumask
                                                   >> 3863  *
                                                   >> 3864  * Calculate the cpumask a workqueue with @attrs should use on @node.  If
                                                   >> 3865  * @cpu_going_down is >= 0, that cpu is considered offline during
                                                   >> 3866  * calculation.  The result is stored in @cpumask.
5156  *                                               3867  *
5157  * Calculate the cpumask a workqueue with @at !! 3868  * If NUMA affinity is not enabled, @attrs->cpumask is always used.  If
5158  * The result is stored in @attrs->__pod_cpum !! 3869  * enabled and @node has online CPUs requested by @attrs, the returned
                                                   >> 3870  * cpumask is the intersection of the possible CPUs of @node and
                                                   >> 3871  * @attrs->cpumask.
5159  *                                               3872  *
5160  * If pod affinity is not enabled, @attrs->cp !! 3873  * The caller is responsible for ensuring that the cpumask of @node stays
5161  * and @pod has online CPUs requested by @att !! 3874  * stable.
5162  * intersection of the possible CPUs of @pod  << 
5163  *                                               3875  *
5164  * The caller is responsible for ensuring tha !! 3876  * Return: %true if the resulting @cpumask is different from @attrs->cpumask,
                                                   >> 3877  * %false if equal.
5165  */                                              3878  */
5166 static void wq_calc_pod_cpumask(struct workqu !! 3879 static bool wq_calc_node_cpumask(const struct workqueue_attrs *attrs, int node,
                                                   >> 3880                                  int cpu_going_down, cpumask_t *cpumask)
5167 {                                                3881 {
5168         const struct wq_pod_type *pt = wqattr !! 3882         if (!wq_numa_enabled || attrs->no_numa)
5169         int pod = pt->cpu_pod[cpu];           !! 3883                 goto use_dfl;
5170                                                  3884 
5171         /* calculate possible CPUs in @pod th !! 3885         /* does @node have any online CPUs @attrs wants? */
5172         cpumask_and(attrs->__pod_cpumask, pt- !! 3886         cpumask_and(cpumask, cpumask_of_node(node), attrs->cpumask);
5173         /* does @pod have any online CPUs @at !! 3887         if (cpu_going_down >= 0)
5174         if (!cpumask_intersects(attrs->__pod_ !! 3888                 cpumask_clear_cpu(cpu_going_down, cpumask);
5175                 cpumask_copy(attrs->__pod_cpu !! 3889 
5176                 return;                       !! 3890         if (cpumask_empty(cpumask))
                                                   >> 3891                 goto use_dfl;
                                                   >> 3892 
                                                   >> 3893         /* yeap, return possible CPUs in @node that @attrs wants */
                                                   >> 3894         cpumask_and(cpumask, attrs->cpumask, wq_numa_possible_cpumask[node]);
                                                   >> 3895 
                                                   >> 3896         if (cpumask_empty(cpumask)) {
                                                   >> 3897                 pr_warn_once("WARNING: workqueue cpumask: online intersect > "
                                                   >> 3898                                 "possible intersect\n");
                                                   >> 3899                 return false;
5177         }                                        3900         }
                                                   >> 3901 
                                                   >> 3902         return !cpumask_equal(cpumask, attrs->cpumask);
                                                   >> 3903 
                                                   >> 3904 use_dfl:
                                                   >> 3905         cpumask_copy(cpumask, attrs->cpumask);
                                                   >> 3906         return false;
5178 }                                                3907 }
5179                                                  3908 
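/*
 * [Editor's illustration -- not part of workqueue.c in either version]
 * Both variants above implement the same rule: intersect the pod's (or NUMA
 * node's) CPUs with the requested cpumask, and fall back to the full request
 * when nothing usable remains online.  A minimal sketch of that rule in
 * isolation; pod_mask, req_mask and online are hypothetical stand-ins for
 * pt->pod_cpus[pod], attrs->cpumask and the online mask.
 */
#include <linux/cpumask.h>

static void example_pod_cpumask(struct cpumask *out,
				const struct cpumask *pod_mask,
				const struct cpumask *req_mask,
				const struct cpumask *online)
{
	/* possible CPUs in the pod that the attrs ask for */
	cpumask_and(out, pod_mask, req_mask);

	/* no usable online CPU left?  fall back to the full request */
	if (!cpumask_intersects(out, online))
		cpumask_copy(out, req_mask);
}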
5180 /* install @pwq into @wq and return the old p !! 3909 /* install @pwq into @wq's numa_pwq_tbl[] for @node and return the old pwq */
5181 static struct pool_workqueue *install_unbound !! 3910 static struct pool_workqueue *numa_pwq_tbl_install(struct workqueue_struct *wq,
5182                                         int c !! 3911                                                    int node,
                                                   >> 3912                                                    struct pool_workqueue *pwq)
5183 {                                                3913 {
5184         struct pool_workqueue __rcu **slot =  << 
5185         struct pool_workqueue *old_pwq;          3914         struct pool_workqueue *old_pwq;
5186                                                  3915 
5187         lockdep_assert_held(&wq_pool_mutex);     3916         lockdep_assert_held(&wq_pool_mutex);
5188         lockdep_assert_held(&wq->mutex);         3917         lockdep_assert_held(&wq->mutex);
5189                                                  3918 
5190         /* link_pwq() can handle duplicate ca    3919         /* link_pwq() can handle duplicate calls */
5191         link_pwq(pwq);                           3920         link_pwq(pwq);
5192                                                  3921 
5193         old_pwq = rcu_access_pointer(*slot);  !! 3922         old_pwq = rcu_access_pointer(wq->numa_pwq_tbl[node]);
5194         rcu_assign_pointer(*slot, pwq);       !! 3923         rcu_assign_pointer(wq->numa_pwq_tbl[node], pwq);
5195         return old_pwq;                          3924         return old_pwq;
5196 }                                                3925 }
5197                                                  3926 
5198 /* context to store the prepared attrs & pwqs    3927 /* context to store the prepared attrs & pwqs before applying */
5199 struct apply_wqattrs_ctx {                       3928 struct apply_wqattrs_ctx {
5200         struct workqueue_struct *wq;             3929         struct workqueue_struct *wq;            /* target workqueue */
5201         struct workqueue_attrs  *attrs;          3930         struct workqueue_attrs  *attrs;         /* attrs to apply */
5202         struct list_head        list;            3931         struct list_head        list;           /* queued for batching commit */
5203         struct pool_workqueue   *dfl_pwq;        3932         struct pool_workqueue   *dfl_pwq;
5204         struct pool_workqueue   *pwq_tbl[];      3933         struct pool_workqueue   *pwq_tbl[];
5205 };                                               3934 };
5206                                                  3935 
5207 /* free the resources after success or abort     3936 /* free the resources after success or abort */
5208 static void apply_wqattrs_cleanup(struct appl    3937 static void apply_wqattrs_cleanup(struct apply_wqattrs_ctx *ctx)
5209 {                                                3938 {
5210         if (ctx) {                               3939         if (ctx) {
5211                 int cpu;                      !! 3940                 int node;
5212                                                  3941 
5213                 for_each_possible_cpu(cpu)    !! 3942                 for_each_node(node)
5214                         put_pwq_unlocked(ctx- !! 3943                         put_pwq_unlocked(ctx->pwq_tbl[node]);
5215                 put_pwq_unlocked(ctx->dfl_pwq    3944                 put_pwq_unlocked(ctx->dfl_pwq);
5216                                                  3945 
5217                 free_workqueue_attrs(ctx->att    3946                 free_workqueue_attrs(ctx->attrs);
5218                                                  3947 
5219                 kfree(ctx);                      3948                 kfree(ctx);
5220         }                                        3949         }
5221 }                                                3950 }
5222                                                  3951 
5223 /* allocate the attrs and pwqs for later inst    3952 /* allocate the attrs and pwqs for later installation */
5224 static struct apply_wqattrs_ctx *                3953 static struct apply_wqattrs_ctx *
5225 apply_wqattrs_prepare(struct workqueue_struct    3954 apply_wqattrs_prepare(struct workqueue_struct *wq,
5226                       const struct workqueue_ !! 3955                       const struct workqueue_attrs *attrs)
5227                       const cpumask_var_t unb << 
5228 {                                                3956 {
5229         struct apply_wqattrs_ctx *ctx;           3957         struct apply_wqattrs_ctx *ctx;
5230         struct workqueue_attrs *new_attrs;    !! 3958         struct workqueue_attrs *new_attrs, *tmp_attrs;
5231         int cpu;                              !! 3959         int node;
5232                                                  3960 
5233         lockdep_assert_held(&wq_pool_mutex);     3961         lockdep_assert_held(&wq_pool_mutex);
5234                                                  3962 
5235         if (WARN_ON(attrs->affn_scope < 0 ||  !! 3963         ctx = kzalloc(struct_size(ctx, pwq_tbl, nr_node_ids), GFP_KERNEL);
5236                     attrs->affn_scope >= WQ_A << 
5237                 return ERR_PTR(-EINVAL);      << 
5238                                               << 
5239         ctx = kzalloc(struct_size(ctx, pwq_tb << 
5240                                                  3964 
5241         new_attrs = alloc_workqueue_attrs();     3965         new_attrs = alloc_workqueue_attrs();
5242         if (!ctx || !new_attrs)               !! 3966         tmp_attrs = alloc_workqueue_attrs();
                                                   >> 3967         if (!ctx || !new_attrs || !tmp_attrs)
5243                 goto out_free;                   3968                 goto out_free;
5244                                                  3969 
5245         /*                                       3970         /*
                                                   >> 3971          * Calculate the attrs of the default pwq.
                                                   >> 3972          * If the user configured cpumask doesn't overlap with the
                                                   >> 3973          * wq_unbound_cpumask, we fallback to the wq_unbound_cpumask.
                                                   >> 3974          */
                                                   >> 3975         copy_workqueue_attrs(new_attrs, attrs);
                                                   >> 3976         cpumask_and(new_attrs->cpumask, new_attrs->cpumask, wq_unbound_cpumask);
                                                   >> 3977         if (unlikely(cpumask_empty(new_attrs->cpumask)))
                                                   >> 3978                 cpumask_copy(new_attrs->cpumask, wq_unbound_cpumask);
                                                   >> 3979 
                                                   >> 3980         /*
                                                   >> 3981          * We may create multiple pwqs with differing cpumasks.  Make a
                                                   >> 3982          * copy of @new_attrs which will be modified and used to obtain
                                                   >> 3983          * pools.
                                                   >> 3984          */
                                                   >> 3985         copy_workqueue_attrs(tmp_attrs, new_attrs);
                                                   >> 3986 
                                                   >> 3987         /*
5246          * If something goes wrong during CPU    3988          * If something goes wrong during CPU up/down, we'll fall back to
5247          * the default pwq covering whole @at    3989          * the default pwq covering whole @attrs->cpumask.  Always create
5248          * it even if we don't use it immedia    3990          * it even if we don't use it immediately.
5249          */                                      3991          */
5250         copy_workqueue_attrs(new_attrs, attrs << 
5251         wqattrs_actualize_cpumask(new_attrs,  << 
5252         cpumask_copy(new_attrs->__pod_cpumask << 
5253         ctx->dfl_pwq = alloc_unbound_pwq(wq,     3992         ctx->dfl_pwq = alloc_unbound_pwq(wq, new_attrs);
5254         if (!ctx->dfl_pwq)                       3993         if (!ctx->dfl_pwq)
5255                 goto out_free;                   3994                 goto out_free;
5256                                                  3995 
5257         for_each_possible_cpu(cpu) {          !! 3996         for_each_node(node) {
5258                 if (new_attrs->ordered) {     !! 3997                 if (wq_calc_node_cpumask(new_attrs, node, -1, tmp_attrs->cpumask)) {
5259                         ctx->dfl_pwq->refcnt+ !! 3998                         ctx->pwq_tbl[node] = alloc_unbound_pwq(wq, tmp_attrs);
5260                         ctx->pwq_tbl[cpu] = c !! 3999                         if (!ctx->pwq_tbl[node])
5261                 } else {                      << 
5262                         wq_calc_pod_cpumask(n << 
5263                         ctx->pwq_tbl[cpu] = a << 
5264                         if (!ctx->pwq_tbl[cpu << 
5265                                 goto out_free    4000                                 goto out_free;
                                                   >> 4001                 } else {
                                                   >> 4002                         ctx->dfl_pwq->refcnt++;
                                                   >> 4003                         ctx->pwq_tbl[node] = ctx->dfl_pwq;
5266                 }                                4004                 }
5267         }                                        4005         }
5268                                                  4006 
5269         /* save the user configured attrs and    4007         /* save the user configured attrs and sanitize it. */
5270         copy_workqueue_attrs(new_attrs, attrs    4008         copy_workqueue_attrs(new_attrs, attrs);
5271         cpumask_and(new_attrs->cpumask, new_a    4009         cpumask_and(new_attrs->cpumask, new_attrs->cpumask, cpu_possible_mask);
5272         cpumask_copy(new_attrs->__pod_cpumask << 
5273         ctx->attrs = new_attrs;                  4010         ctx->attrs = new_attrs;
5274                                                  4011 
5275         /*                                    << 
5276          * For initialized ordered workqueues << 
5277          * (dfl_pwq). Set the plugged flag of << 
5278          * of newly queued work items until e << 
5279          * the old pwq's have completed.      << 
5280          */                                   << 
5281         if ((wq->flags & __WQ_ORDERED) && !li << 
5282                 ctx->dfl_pwq->plugged = true; << 
5283                                               << 
5284         ctx->wq = wq;                            4012         ctx->wq = wq;
                                                   >> 4013         free_workqueue_attrs(tmp_attrs);
5285         return ctx;                              4014         return ctx;
5286                                                  4015 
5287 out_free:                                        4016 out_free:
                                                   >> 4017         free_workqueue_attrs(tmp_attrs);
5288         free_workqueue_attrs(new_attrs);         4018         free_workqueue_attrs(new_attrs);
5289         apply_wqattrs_cleanup(ctx);              4019         apply_wqattrs_cleanup(ctx);
5290         return ERR_PTR(-ENOMEM);              !! 4020         return NULL;
5291 }                                                4021 }
5292                                                  4022 
5293 /* set attrs and install prepared pwqs, @ctx     4023 /* set attrs and install prepared pwqs, @ctx points to old pwqs on return */
5294 static void apply_wqattrs_commit(struct apply    4024 static void apply_wqattrs_commit(struct apply_wqattrs_ctx *ctx)
5295 {                                                4025 {
5296         int cpu;                              !! 4026         int node;
5297                                                  4027 
5298         /* all pwqs have been created success    4028         /* all pwqs have been created successfully, let's install'em */
5299         mutex_lock(&ctx->wq->mutex);             4029         mutex_lock(&ctx->wq->mutex);
5300                                                  4030 
5301         copy_workqueue_attrs(ctx->wq->unbound    4031         copy_workqueue_attrs(ctx->wq->unbound_attrs, ctx->attrs);
5302                                                  4032 
5303         /* save the previous pwqs and install !! 4033         /* save the previous pwq and install the new one */
5304         for_each_possible_cpu(cpu)            !! 4034         for_each_node(node)
5305                 ctx->pwq_tbl[cpu] = install_u !! 4035                 ctx->pwq_tbl[node] = numa_pwq_tbl_install(ctx->wq, node,
5306                                               !! 4036                                                           ctx->pwq_tbl[node]);
5307         ctx->dfl_pwq = install_unbound_pwq(ct !! 4037 
5308                                               !! 4038         /* @dfl_pwq might not have been used, ensure it's linked */
5309         /* update node_nr_active->max */      !! 4039         link_pwq(ctx->dfl_pwq);
5310         wq_update_node_max_active(ctx->wq, -1 !! 4040         swap(ctx->wq->dfl_pwq, ctx->dfl_pwq);
5311                                               << 
5312         /* rescuer needs to respect wq cpumas << 
5313         if (ctx->wq->rescuer)                 << 
5314                 set_cpus_allowed_ptr(ctx->wq- << 
5315                                      unbound_ << 
5316                                                  4041 
5317         mutex_unlock(&ctx->wq->mutex);           4042         mutex_unlock(&ctx->wq->mutex);
5318 }                                                4043 }
5319                                                  4044 
                                                   >> 4045 static void apply_wqattrs_lock(void)
                                                   >> 4046 {
                                                   >> 4047         /* CPUs should stay stable across pwq creations and installations */
                                                   >> 4048         cpus_read_lock();
                                                   >> 4049         mutex_lock(&wq_pool_mutex);
                                                   >> 4050 }
                                                   >> 4051 
                                                   >> 4052 static void apply_wqattrs_unlock(void)
                                                   >> 4053 {
                                                   >> 4054         mutex_unlock(&wq_pool_mutex);
                                                   >> 4055         cpus_read_unlock();
                                                   >> 4056 }
                                                   >> 4057 
5320 static int apply_workqueue_attrs_locked(struc    4058 static int apply_workqueue_attrs_locked(struct workqueue_struct *wq,
5321                                         const    4059                                         const struct workqueue_attrs *attrs)
5322 {                                                4060 {
5323         struct apply_wqattrs_ctx *ctx;           4061         struct apply_wqattrs_ctx *ctx;
5324                                                  4062 
5325         /* only unbound workqueues can change    4063         /* only unbound workqueues can change attributes */
5326         if (WARN_ON(!(wq->flags & WQ_UNBOUND)    4064         if (WARN_ON(!(wq->flags & WQ_UNBOUND)))
5327                 return -EINVAL;                  4065                 return -EINVAL;
5328                                                  4066 
5329         ctx = apply_wqattrs_prepare(wq, attrs !! 4067         /* creating multiple pwqs breaks ordering guarantee */
5330         if (IS_ERR(ctx))                      !! 4068         if (!list_empty(&wq->pwqs)) {
5331                 return PTR_ERR(ctx);          !! 4069                 if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
                                                   >> 4070                         return -EINVAL;
                                                   >> 4071 
                                                   >> 4072                 wq->flags &= ~__WQ_ORDERED;
                                                   >> 4073         }
                                                   >> 4074 
                                                   >> 4075         ctx = apply_wqattrs_prepare(wq, attrs);
                                                   >> 4076         if (!ctx)
                                                   >> 4077                 return -ENOMEM;
5332                                                  4078 
5333         /* the ctx has been prepared successf    4079         /* the ctx has been prepared successfully, let's commit it */
5334         apply_wqattrs_commit(ctx);               4080         apply_wqattrs_commit(ctx);
5335         apply_wqattrs_cleanup(ctx);              4081         apply_wqattrs_cleanup(ctx);
5336                                                  4082 
5337         return 0;                                4083         return 0;
5338 }                                                4084 }
5339                                                  4085 
5340 /**                                              4086 /**
5341  * apply_workqueue_attrs - apply new workqueu    4087  * apply_workqueue_attrs - apply new workqueue_attrs to an unbound workqueue
5342  * @wq: the target workqueue                     4088  * @wq: the target workqueue
5343  * @attrs: the workqueue_attrs to apply, allo    4089  * @attrs: the workqueue_attrs to apply, allocated with alloc_workqueue_attrs()
5344  *                                               4090  *
5345  * Apply @attrs to an unbound workqueue @wq.  !! 4091  * Apply @attrs to an unbound workqueue @wq.  Unless disabled, on NUMA
5346  * a separate pwq to each CPU pod with possib !! 4092  * machines, this function maps a separate pwq to each NUMA node with
5347  * work items are affine to the pod it was is !! 4093  * possible CPUs in @attrs->cpumask so that work items are affine to the
5348  * in-flight work items finish. Note that a w !! 4094  * NUMA node it was issued on.  Older pwqs are released as in-flight work
5349  * itself back-to-back will stay on its curre !! 4095  * items finish.  Note that a work item which repeatedly requeues itself
                                                   >> 4096  * back-to-back will stay on its current pwq.
5350  *                                               4097  *
5351  * Performs GFP_KERNEL allocations.              4098  * Performs GFP_KERNEL allocations.
5352  *                                               4099  *
                                                   >> 4100  * Assumes caller has CPU hotplug read exclusion, i.e. cpus_read_lock().
                                                   >> 4101  *
5353  * Return: 0 on success and -errno on failure    4102  * Return: 0 on success and -errno on failure.
5354  */                                              4103  */
5355 int apply_workqueue_attrs(struct workqueue_st    4104 int apply_workqueue_attrs(struct workqueue_struct *wq,
5356                           const struct workqu    4105                           const struct workqueue_attrs *attrs)
5357 {                                                4106 {
5358         int ret;                                 4107         int ret;
5359                                                  4108 
                                                   >> 4109         lockdep_assert_cpus_held();
                                                   >> 4110 
5360         mutex_lock(&wq_pool_mutex);              4111         mutex_lock(&wq_pool_mutex);
5361         ret = apply_workqueue_attrs_locked(wq    4112         ret = apply_workqueue_attrs_locked(wq, attrs);
5362         mutex_unlock(&wq_pool_mutex);            4113         mutex_unlock(&wq_pool_mutex);
5363                                                  4114 
5364         return ret;                              4115         return ret;
5365 }                                                4116 }
5366                                                  4117 
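/*
 * [Editor's sketch -- hedged usage illustration, not taken from this file]
 * A typical in-kernel caller of apply_workqueue_attrs() allocates attrs,
 * edits the cpumask, applies, and frees.  The example_* names below are
 * hypothetical.  Note the older side above additionally expects the caller
 * to hold cpus_read_lock(); the newer side handles locking itself.
 */
#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/workqueue.h>

/* Restrict an unbound workqueue to the CPUs in @mask. */
static int example_pin_unbound_wq(struct workqueue_struct *wq,
				  const struct cpumask *mask)
{
	struct workqueue_attrs *attrs;
	int ret;

	attrs = alloc_workqueue_attrs();		/* GFP_KERNEL allocation */
	if (!attrs)
		return -ENOMEM;

	cpumask_copy(attrs->cpumask, mask);		/* requested affinity */
	ret = apply_workqueue_attrs(wq, attrs);		/* install new pwqs */

	free_workqueue_attrs(attrs);
	return ret;
}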
5367 /**                                              4118 /**
5368  * unbound_wq_update_pwq - update a pwq slot  !! 4119  * wq_update_unbound_numa - update NUMA affinity of a wq for CPU hot[un]plug
5369  * @wq: the target workqueue                     4120  * @wq: the target workqueue
5370  * @cpu: the CPU to update the pwq slot for   !! 4121  * @cpu: the CPU coming up or going down
                                                   >> 4122  * @online: whether @cpu is coming up or going down
5371  *                                               4123  *
5372  * This function is to be called from %CPU_DO    4124  * This function is to be called from %CPU_DOWN_PREPARE, %CPU_ONLINE and
5373  * %CPU_DOWN_FAILED.  @cpu is in the same pod !! 4125  * %CPU_DOWN_FAILED.  @cpu is being hot[un]plugged, update NUMA affinity of
5374  *                                            !! 4126  * @wq accordingly.
5375  *                                               4127  *
5376  * If pod affinity can't be adjusted due to m !! 4128  * If NUMA affinity can't be adjusted due to memory allocation failure, it
5377  * back to @wq->dfl_pwq which may not be opti !! 4129  * falls back to @wq->dfl_pwq which may not be optimal but is always
5378  *                                            !! 4130  * correct.
5379  * Note that when the last allowed CPU of a p !! 4131  *
5380  * with a cpumask spanning multiple pods, the !! 4132  * Note that when the last allowed CPU of a NUMA node goes offline for a
5381  * executing the work items for the workqueue !! 4133  * workqueue with a cpumask spanning multiple nodes, the workers which were
5382  * may execute on any CPU. This is similar to !! 4134  * already executing the work items for the workqueue will lose their CPU
5383  * CPU_DOWN. If a workqueue user wants strict !! 4135  * affinity and may execute on any CPU.  This is similar to how per-cpu
5384  * responsibility to flush the work item from !! 4136  * workqueues behave on CPU_DOWN.  If a workqueue user wants strict
                                                   >> 4137  * affinity, it's the user's responsibility to flush the work item from
                                                   >> 4138  * CPU_DOWN_PREPARE.
5385  */                                              4139  */
5386 static void unbound_wq_update_pwq(struct work !! 4140 static void wq_update_unbound_numa(struct workqueue_struct *wq, int cpu,
                                                   >> 4141                                    bool online)
5387 {                                                4142 {
                                                   >> 4143         int node = cpu_to_node(cpu);
                                                   >> 4144         int cpu_off = online ? -1 : cpu;
5388         struct pool_workqueue *old_pwq = NULL    4145         struct pool_workqueue *old_pwq = NULL, *pwq;
5389         struct workqueue_attrs *target_attrs;    4146         struct workqueue_attrs *target_attrs;
                                                   >> 4147         cpumask_t *cpumask;
5390                                                  4148 
5391         lockdep_assert_held(&wq_pool_mutex);     4149         lockdep_assert_held(&wq_pool_mutex);
5392                                                  4150 
5393         if (!(wq->flags & WQ_UNBOUND) || wq-> !! 4151         if (!wq_numa_enabled || !(wq->flags & WQ_UNBOUND) ||
                                                   >> 4152             wq->unbound_attrs->no_numa)
5394                 return;                          4153                 return;
5395                                                  4154 
5396         /*                                       4155         /*
5397          * We don't wanna alloc/free wq_attrs    4156          * We don't wanna alloc/free wq_attrs for each wq for each CPU.
5398          * Let's use a preallocated one.  The    4157          * Let's use a preallocated one.  The following buf is protected by
5399          * CPU hotplug exclusion.                4158          * CPU hotplug exclusion.
5400          */                                      4159          */
5401         target_attrs = unbound_wq_update_pwq_ !! 4160         target_attrs = wq_update_unbound_numa_attrs_buf;
                                                   >> 4161         cpumask = target_attrs->cpumask;
5402                                                  4162 
5403         copy_workqueue_attrs(target_attrs, wq    4163         copy_workqueue_attrs(target_attrs, wq->unbound_attrs);
5404         wqattrs_actualize_cpumask(target_attr !! 4164         pwq = unbound_pwq_by_node(wq, node);
5405                                                  4165 
5406         /* nothing to do if the target cpumas !! 4166         /*
5407         wq_calc_pod_cpumask(target_attrs, cpu !! 4167          * Let's determine what needs to be done.  If the target cpumask is
5408         if (wqattrs_equal(target_attrs, unbou !! 4168          * different from the default pwq's, we need to compare it to @pwq's
5409                 return;                       !! 4169          * and create a new one if they don't match.  If the target cpumask
                                                   >> 4170          * equals the default pwq's, the default pwq should be used.
                                                   >> 4171          */
                                                   >> 4172         if (wq_calc_node_cpumask(wq->dfl_pwq->pool->attrs, node, cpu_off, cpumask)) {
                                                   >> 4173                 if (cpumask_equal(cpumask, pwq->pool->attrs->cpumask))
                                                   >> 4174                         return;
                                                   >> 4175         } else {
                                                   >> 4176                 goto use_dfl_pwq;
                                                   >> 4177         }
5410                                                  4178 
5411         /* create a new pwq */                   4179         /* create a new pwq */
5412         pwq = alloc_unbound_pwq(wq, target_at    4180         pwq = alloc_unbound_pwq(wq, target_attrs);
5413         if (!pwq) {                              4181         if (!pwq) {
5414                 pr_warn("workqueue: allocatio !! 4182                 pr_warn("workqueue: allocation failed while updating NUMA affinity of \"%s\"\n",
5415                         wq->name);               4183                         wq->name);
5416                 goto use_dfl_pwq;                4184                 goto use_dfl_pwq;
5417         }                                        4185         }
5418                                                  4186 
5419         /* Install the new pwq. */               4187         /* Install the new pwq. */
5420         mutex_lock(&wq->mutex);                  4188         mutex_lock(&wq->mutex);
5421         old_pwq = install_unbound_pwq(wq, cpu !! 4189         old_pwq = numa_pwq_tbl_install(wq, node, pwq);
5422         goto out_unlock;                         4190         goto out_unlock;
5423                                                  4191 
5424 use_dfl_pwq:                                     4192 use_dfl_pwq:
5425         mutex_lock(&wq->mutex);                  4193         mutex_lock(&wq->mutex);
5426         pwq = unbound_pwq(wq, -1);            !! 4194         raw_spin_lock_irq(&wq->dfl_pwq->pool->lock);
5427         raw_spin_lock_irq(&pwq->pool->lock);  !! 4195         get_pwq(wq->dfl_pwq);
5428         get_pwq(pwq);                         !! 4196         raw_spin_unlock_irq(&wq->dfl_pwq->pool->lock);
5429         raw_spin_unlock_irq(&pwq->pool->lock) !! 4197         old_pwq = numa_pwq_tbl_install(wq, node, wq->dfl_pwq);
5430         old_pwq = install_unbound_pwq(wq, cpu << 
5431 out_unlock:                                      4198 out_unlock:
5432         mutex_unlock(&wq->mutex);                4199         mutex_unlock(&wq->mutex);
5433         put_pwq_unlocked(old_pwq);               4200         put_pwq_unlocked(old_pwq);
5434 }                                                4201 }
5435                                                  4202 
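/*
 * [Editor's sketch -- hedged, not part of this file]
 * The kernel-doc above leaves strict affinity across CPU offline to the
 * workqueue user.  One way a driver can honour that is to flush its work
 * item from its own hotplug teardown callback.  example_* names are
 * hypothetical, and the exact ordering of a dynamic hotplug state relative
 * to the workqueue hotplug callbacks is not guaranteed by this sketch.
 */
#include <linux/cpuhotplug.h>
#include <linux/workqueue.h>

extern struct work_struct example_work;		/* assumed defined elsewhere */

static int example_cpu_offline(unsigned int cpu)
{
	/* drain the work item before @cpu finishes going down */
	flush_work(&example_work);
	return 0;
}

static int example_register_offline_flush(void)
{
	/* dynamic state; only the teardown (offline) callback is needed */
	int ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
				    "example/wq:offline", NULL,
				    example_cpu_offline);
	return ret < 0 ? ret : 0;
}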
5436 static int alloc_and_link_pwqs(struct workque    4203 static int alloc_and_link_pwqs(struct workqueue_struct *wq)
5437 {                                                4204 {
5438         bool highpri = wq->flags & WQ_HIGHPRI    4205         bool highpri = wq->flags & WQ_HIGHPRI;
5439         int cpu, ret;                            4206         int cpu, ret;
5440                                                  4207 
5441         lockdep_assert_held(&wq_pool_mutex);  << 
5442                                               << 
5443         wq->cpu_pwq = alloc_percpu(struct poo << 
5444         if (!wq->cpu_pwq)                     << 
5445                 goto enomem;                  << 
5446                                               << 
5447         if (!(wq->flags & WQ_UNBOUND)) {         4208         if (!(wq->flags & WQ_UNBOUND)) {
5448                 struct worker_pool __percpu * !! 4209                 wq->cpu_pwqs = alloc_percpu(struct pool_workqueue);
5449                                               !! 4210                 if (!wq->cpu_pwqs)
5450                 if (wq->flags & WQ_BH)        !! 4211                         return -ENOMEM;
5451                         pools = bh_worker_poo << 
5452                 else                          << 
5453                         pools = cpu_worker_po << 
5454                                                  4212 
5455                 for_each_possible_cpu(cpu) {     4213                 for_each_possible_cpu(cpu) {
5456                         struct pool_workqueue !! 4214                         struct pool_workqueue *pwq =
5457                         struct worker_pool *p !! 4215                                 per_cpu_ptr(wq->cpu_pwqs, cpu);
5458                                               !! 4216                         struct worker_pool *cpu_pools =
5459                         pool = &(per_cpu_ptr( !! 4217                                 per_cpu(cpu_worker_pools, cpu);
5460                         pwq_p = per_cpu_ptr(w << 
5461                                                  4218 
5462                         *pwq_p = kmem_cache_a !! 4219                         init_pwq(pwq, wq, &cpu_pools[highpri]);
5463                                               << 
5464                         if (!*pwq_p)          << 
5465                                 goto enomem;  << 
5466                                               << 
5467                         init_pwq(*pwq_p, wq,  << 
5468                                                  4220 
5469                         mutex_lock(&wq->mutex    4221                         mutex_lock(&wq->mutex);
5470                         link_pwq(*pwq_p);     !! 4222                         link_pwq(pwq);
5471                         mutex_unlock(&wq->mut    4223                         mutex_unlock(&wq->mutex);
5472                 }                                4224                 }
5473                 return 0;                        4225                 return 0;
5474         }                                        4226         }
5475                                                  4227 
                                                   >> 4228         cpus_read_lock();
5476         if (wq->flags & __WQ_ORDERED) {          4229         if (wq->flags & __WQ_ORDERED) {
5477                 struct pool_workqueue *dfl_pw !! 4230                 ret = apply_workqueue_attrs(wq, ordered_wq_attrs[highpri]);
5478                                               << 
5479                 ret = apply_workqueue_attrs_l << 
5480                 /* there should only be singl    4231                 /* there should only be single pwq for ordering guarantee */
5481                 dfl_pwq = rcu_access_pointer( !! 4232                 WARN(!ret && (wq->pwqs.next != &wq->dfl_pwq->pwqs_node ||
5482                 WARN(!ret && (wq->pwqs.next ! !! 4233                               wq->pwqs.prev != &wq->dfl_pwq->pwqs_node),
5483                               wq->pwqs.prev ! << 
5484                      "ordering guarantee brok    4234                      "ordering guarantee broken for workqueue %s\n", wq->name);
5485         } else {                                 4235         } else {
5486                 ret = apply_workqueue_attrs_l !! 4236                 ret = apply_workqueue_attrs(wq, unbound_std_wq_attrs[highpri]);
5487         }                                        4237         }
                                                   >> 4238         cpus_read_unlock();
5488                                                  4239 
5489         return ret;                              4240         return ret;
5490                                               << 
5491 enomem:                                       << 
5492         if (wq->cpu_pwq) {                    << 
5493                 for_each_possible_cpu(cpu) {  << 
5494                         struct pool_workqueue << 
5495                                               << 
5496                         if (pwq)              << 
5497                                 kmem_cache_fr << 
5498                 }                             << 
5499                 free_percpu(wq->cpu_pwq);     << 
5500                 wq->cpu_pwq = NULL;           << 
5501         }                                     << 
5502         return -ENOMEM;                       << 
5503 }                                                4241 }
5504                                                  4242 
5505 static int wq_clamp_max_active(int max_active    4243 static int wq_clamp_max_active(int max_active, unsigned int flags,
5506                                const char *na    4244                                const char *name)
5507 {                                                4245 {
5508         if (max_active < 1 || max_active > WQ !! 4246         int lim = flags & WQ_UNBOUND ? WQ_UNBOUND_MAX_ACTIVE : WQ_MAX_ACTIVE;
                                                   >> 4247 
                                                   >> 4248         if (max_active < 1 || max_active > lim)
5509                 pr_warn("workqueue: max_activ    4249                 pr_warn("workqueue: max_active %d requested for %s is out of range, clamping between %d and %d\n",
5510                         max_active, name, 1,  !! 4250                         max_active, name, 1, lim);
5511                                                  4251 
5512         return clamp_val(max_active, 1, WQ_MA !! 4252         return clamp_val(max_active, 1, lim);
5513 }                                                4253 }
5514                                                  4254 
5515 /*                                               4255 /*
5516  * Workqueues which may be used during memory    4256  * Workqueues which may be used during memory reclaim should have a rescuer
5517  * to guarantee forward progress.                4257  * to guarantee forward progress.
5518  */                                              4258  */
5519 static int init_rescuer(struct workqueue_stru    4259 static int init_rescuer(struct workqueue_struct *wq)
5520 {                                                4260 {
5521         struct worker *rescuer;                  4261         struct worker *rescuer;
5522         char id_buf[WORKER_ID_LEN];           << 
5523         int ret;                                 4262         int ret;
5524                                                  4263 
5525         lockdep_assert_held(&wq_pool_mutex);  << 
5526                                               << 
5527         if (!(wq->flags & WQ_MEM_RECLAIM))       4264         if (!(wq->flags & WQ_MEM_RECLAIM))
5528                 return 0;                        4265                 return 0;
5529                                                  4266 
5530         rescuer = alloc_worker(NUMA_NO_NODE);    4267         rescuer = alloc_worker(NUMA_NO_NODE);
5531         if (!rescuer) {                       !! 4268         if (!rescuer)
5532                 pr_err("workqueue: Failed to  << 
5533                        wq->name);             << 
5534                 return -ENOMEM;                  4269                 return -ENOMEM;
5535         }                                     << 
5536                                                  4270 
5537         rescuer->rescue_wq = wq;                 4271         rescuer->rescue_wq = wq;
5538         format_worker_id(id_buf, sizeof(id_bu !! 4272         rescuer->task = kthread_create(rescuer_thread, rescuer, "%s", wq->name);
5539                                               << 
5540         rescuer->task = kthread_create(rescue << 
5541         if (IS_ERR(rescuer->task)) {             4273         if (IS_ERR(rescuer->task)) {
5542                 ret = PTR_ERR(rescuer->task);    4274                 ret = PTR_ERR(rescuer->task);
5543                 pr_err("workqueue: Failed to  << 
5544                        wq->name, ERR_PTR(ret) << 
5545                 kfree(rescuer);                  4275                 kfree(rescuer);
5546                 return ret;                      4276                 return ret;
5547         }                                        4277         }
5548                                                  4278 
5549         wq->rescuer = rescuer;                   4279         wq->rescuer = rescuer;
5550         if (wq->flags & WQ_UNBOUND)           !! 4280         kthread_bind_mask(rescuer->task, cpu_possible_mask);
5551                 kthread_bind_mask(rescuer->ta << 
5552         else                                  << 
5553                 kthread_bind_mask(rescuer->ta << 
5554         wake_up_process(rescuer->task);          4281         wake_up_process(rescuer->task);
5555                                                  4282 
5556         return 0;                                4283         return 0;
5557 }                                                4284 }
5558                                                  4285 
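/*
 * [Editor's note -- illustrative, not part of this file]
 * Whether init_rescuer() above creates a rescuer is decided purely by
 * WQ_MEM_RECLAIM at workqueue creation time.  The example_* names below are
 * hypothetical.
 */
#include <linux/errno.h>
#include <linux/workqueue.h>

static struct workqueue_struct *example_reclaim_wq;

static int example_reclaim_wq_create(void)
{
	/*
	 * WQ_MEM_RECLAIM makes a rescuer thread be spawned, so items queued
	 * here keep making forward progress even when new workers cannot be
	 * created under memory pressure.
	 */
	example_reclaim_wq = alloc_workqueue("example_reclaim",
					     WQ_MEM_RECLAIM, 0);
	return example_reclaim_wq ? 0 : -ENOMEM;
}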
5559 /**                                           << 
5560  * wq_adjust_max_active - update a wq's max_a << 
5561  * @wq: target workqueue                      << 
5562  *                                            << 
5563  * If @wq isn't freezing, set @wq->max_active << 
5564  * activate inactive work items accordingly.  << 
5565  * @wq->max_active to zero.                   << 
5566  */                                           << 
5567 static void wq_adjust_max_active(struct workq << 
5568 {                                             << 
5569         bool activated;                       << 
5570         int new_max, new_min;                 << 
5571                                               << 
5572         lockdep_assert_held(&wq->mutex);      << 
5573                                               << 
5574         if ((wq->flags & WQ_FREEZABLE) && wor << 
5575                 new_max = 0;                  << 
5576                 new_min = 0;                  << 
5577         } else {                              << 
5578                 new_max = wq->saved_max_activ << 
5579                 new_min = wq->saved_min_activ << 
5580         }                                     << 
5581                                               << 
5582         if (wq->max_active == new_max && wq-> << 
5583                 return;                       << 
5584                                               << 
5585         /*                                    << 
5586          * Update @wq->max/min_active and the << 
5587          * active work items are allowed. Thi << 
5588          * because new work items are always  << 
5589          * work items if there are any.       << 
5590          */                                   << 
5591         WRITE_ONCE(wq->max_active, new_max);  << 
5592         WRITE_ONCE(wq->min_active, new_min);  << 
5593                                               << 
5594         if (wq->flags & WQ_UNBOUND)           << 
5595                 wq_update_node_max_active(wq, << 
5596                                               << 
5597         if (new_max == 0)                     << 
5598                 return;                       << 
5599                                               << 
5600         /*                                    << 
5601          * Round-robin through pwq's activati << 
5602          * until max_active is filled.        << 
5603          */                                   << 
5604         do {                                  << 
5605                 struct pool_workqueue *pwq;   << 
5606                                               << 
5607                 activated = false;            << 
5608                 for_each_pwq(pwq, wq) {       << 
5609                         unsigned long irq_fla << 
5610                                               << 
5611                         /* can be called duri << 
5612                         raw_spin_lock_irqsave << 
5613                         if (pwq_activate_firs << 
5614                                 activated = t << 
5615                                 kick_pool(pwq << 
5616                         }                     << 
5617                         raw_spin_unlock_irqre << 
5618                 }                             << 
5619         } while (activated);                  << 
5620 }                                             << 
5621                                               << 
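/*
 * [Editor's sketch -- hedged, not part of this file]
 * In the newer version, the non-freezing limits used above come from
 * wq->saved_max_active, which in-kernel users adjust through
 * workqueue_set_max_active().  example_tune_wq() is hypothetical.
 */
#include <linux/workqueue.h>

static void example_tune_wq(struct workqueue_struct *wq)
{
	/* updates saved_max_active; the live limits are refreshed from it */
	workqueue_set_max_active(wq, 16);
}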
5622 __printf(1, 4)                                   4286 __printf(1, 4)
5623 struct workqueue_struct *alloc_workqueue(cons    4287 struct workqueue_struct *alloc_workqueue(const char *fmt,
5624                                          unsi    4288                                          unsigned int flags,
5625                                          int     4289                                          int max_active, ...)
5626 {                                                4290 {
                                                   >> 4291         size_t tbl_size = 0;
5627         va_list args;                            4292         va_list args;
5628         struct workqueue_struct *wq;             4293         struct workqueue_struct *wq;
5629         size_t wq_size;                       !! 4294         struct pool_workqueue *pwq;
5630         int name_len;                         << 
5631                                                  4295 
5632         if (flags & WQ_BH) {                  !! 4296         /*
5633                 if (WARN_ON_ONCE(flags & ~__W !! 4297          * Unbound && max_active == 1 used to imply ordered, which is no
5634                         return NULL;          !! 4298          * longer the case on NUMA machines due to per-node pools.  While
5635                 if (WARN_ON_ONCE(max_active)) !! 4299          * alloc_ordered_workqueue() is the right way to create an ordered
5636                         return NULL;          !! 4300          * workqueue, keep the previous behavior to avoid subtle breakages
5637         }                                     !! 4301          * on NUMA.
                                                   >> 4302          */
                                                   >> 4303         if ((flags & WQ_UNBOUND) && max_active == 1)
                                                   >> 4304                 flags |= __WQ_ORDERED;
5638                                                  4305 
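	/*
	 * [Editor's aside -- illustrative, not part of this function]
	 * As the comment kept on the older side above notes, callers that
	 * rely on strict one-at-a-time execution should request it
	 * explicitly, e.g.
	 *
	 *	wq = alloc_ordered_workqueue("example_ordered", WQ_MEM_RECLAIM);
	 *
	 * rather than depending on WQ_UNBOUND + max_active == 1.
	 */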
5639         /* see the comment above the definiti    4306         /* see the comment above the definition of WQ_POWER_EFFICIENT */
5640         if ((flags & WQ_POWER_EFFICIENT) && w    4307         if ((flags & WQ_POWER_EFFICIENT) && wq_power_efficient)
5641                 flags |= WQ_UNBOUND;             4308                 flags |= WQ_UNBOUND;
5642                                                  4309 
5643         /* allocate wq and format name */        4310         /* allocate wq and format name */
5644         if (flags & WQ_UNBOUND)                  4311         if (flags & WQ_UNBOUND)
5645                 wq_size = struct_size(wq, nod !! 4312                 tbl_size = nr_node_ids * sizeof(wq->numa_pwq_tbl[0]);
5646         else                                  << 
5647                 wq_size = sizeof(*wq);        << 
5648                                                  4313 
5649         wq = kzalloc(wq_size, GFP_KERNEL);    !! 4314         wq = kzalloc(sizeof(*wq) + tbl_size, GFP_KERNEL);
5650         if (!wq)                                 4315         if (!wq)
5651                 return NULL;                     4316                 return NULL;
5652                                                  4317 
5653         if (flags & WQ_UNBOUND) {                4318         if (flags & WQ_UNBOUND) {
5654                 wq->unbound_attrs = alloc_wor    4319                 wq->unbound_attrs = alloc_workqueue_attrs();
5655                 if (!wq->unbound_attrs)          4320                 if (!wq->unbound_attrs)
5656                         goto err_free_wq;        4321                         goto err_free_wq;
5657         }                                        4322         }
5658                                                  4323 
5659         va_start(args, max_active);              4324         va_start(args, max_active);
5660         name_len = vsnprintf(wq->name, sizeof !! 4325         vsnprintf(wq->name, sizeof(wq->name), fmt, args);
5661         va_end(args);                            4326         va_end(args);
5662                                                  4327 
5663         if (name_len >= WQ_NAME_LEN)          !! 4328         max_active = max_active ?: WQ_DFL_ACTIVE;
5664                 pr_warn_once("workqueue: name !! 4329         max_active = wq_clamp_max_active(max_active, flags, wq->name);
5665                              wq->name);       << 
5666                                               << 
5667         if (flags & WQ_BH) {                  << 
5668                 /*                            << 
5669                  * BH workqueues always share << 
5670                  * and don't impose any max_a << 
5671                  */                           << 
5672                 max_active = INT_MAX;         << 
5673         } else {                              << 
5674                 max_active = max_active ?: WQ << 
5675                 max_active = wq_clamp_max_act << 
5676         }                                     << 
5677                                                  4330 
5678         /* init wq */                            4331         /* init wq */
5679         wq->flags = flags;                       4332         wq->flags = flags;
5680         wq->max_active = max_active;          !! 4333         wq->saved_max_active = max_active;
5681         wq->min_active = min(max_active, WQ_D << 
5682         wq->saved_max_active = wq->max_active << 
5683         wq->saved_min_active = wq->min_active << 
5684         mutex_init(&wq->mutex);                  4334         mutex_init(&wq->mutex);
5685         atomic_set(&wq->nr_pwqs_to_flush, 0);    4335         atomic_set(&wq->nr_pwqs_to_flush, 0);
5686         INIT_LIST_HEAD(&wq->pwqs);               4336         INIT_LIST_HEAD(&wq->pwqs);
5687         INIT_LIST_HEAD(&wq->flusher_queue);      4337         INIT_LIST_HEAD(&wq->flusher_queue);
5688         INIT_LIST_HEAD(&wq->flusher_overflow)    4338         INIT_LIST_HEAD(&wq->flusher_overflow);
5689         INIT_LIST_HEAD(&wq->maydays);            4339         INIT_LIST_HEAD(&wq->maydays);
5690                                                  4340 
5691         wq_init_lockdep(wq);                     4341         wq_init_lockdep(wq);
5692         INIT_LIST_HEAD(&wq->list);               4342         INIT_LIST_HEAD(&wq->list);
5693                                                  4343 
5694         if (flags & WQ_UNBOUND) {             !! 4344         if (alloc_and_link_pwqs(wq) < 0)
5695                 if (alloc_node_nr_active(wq-> !! 4345                 goto err_unreg_lockdep;
5696                         goto err_unreg_lockde !! 4346 
5697         }                                     !! 4347         if (wq_online && init_rescuer(wq) < 0)
                                                   >> 4348                 goto err_destroy;
                                                   >> 4349 
                                                   >> 4350         if ((wq->flags & WQ_SYSFS) && workqueue_sysfs_register(wq))
                                                   >> 4351                 goto err_destroy;
5698                                                  4352 
5699         /*                                       4353         /*
5700          * wq_pool_mutex protects the workque !! 4354          * wq_pool_mutex protects global freeze state and workqueues list.
5701          * and the global freeze state.       !! 4355          * Grab it, adjust max_active and add the new @wq to workqueues
                                                   >> 4356          * list.
5702          */                                      4357          */
5703         apply_wqattrs_lock();                 !! 4358         mutex_lock(&wq_pool_mutex);
5704                                               << 
5705         if (alloc_and_link_pwqs(wq) < 0)      << 
5706                 goto err_unlock_free_node_nr_ << 
5707                                                  4359 
5708         mutex_lock(&wq->mutex);                  4360         mutex_lock(&wq->mutex);
5709         wq_adjust_max_active(wq);             !! 4361         for_each_pwq(pwq, wq)
                                                   >> 4362                 pwq_adjust_max_active(pwq);
5710         mutex_unlock(&wq->mutex);                4363         mutex_unlock(&wq->mutex);
5711                                                  4364 
5712         list_add_tail_rcu(&wq->list, &workque    4365         list_add_tail_rcu(&wq->list, &workqueues);
5713                                                  4366 
5714         if (wq_online && init_rescuer(wq) < 0 !! 4367         mutex_unlock(&wq_pool_mutex);
5715                 goto err_unlock_destroy;      << 
5716                                               << 
5717         apply_wqattrs_unlock();               << 
5718                                               << 
5719         if ((wq->flags & WQ_SYSFS) && workque << 
5720                 goto err_destroy;             << 
5721                                                  4368 
5722         return wq;                               4369         return wq;
5723                                                  4370 
5724 err_unlock_free_node_nr_active:               << 
5725         apply_wqattrs_unlock();               << 
5726         /*                                    << 
5727          * Failed alloc_and_link_pwqs() may l << 
5728          * flushing the pwq_release_worker en << 
5729          * completes before calling kfree(wq) << 
5730          */                                   << 
5731         if (wq->flags & WQ_UNBOUND) {         << 
5732                 kthread_flush_worker(pwq_rele << 
5733                 free_node_nr_active(wq->node_ << 
5734         }                                     << 
5735 err_unreg_lockdep:                               4371 err_unreg_lockdep:
5736         wq_unregister_lockdep(wq);               4372         wq_unregister_lockdep(wq);
5737         wq_free_lockdep(wq);                     4373         wq_free_lockdep(wq);
5738 err_free_wq:                                     4374 err_free_wq:
5739         free_workqueue_attrs(wq->unbound_attr    4375         free_workqueue_attrs(wq->unbound_attrs);
5740         kfree(wq);                               4376         kfree(wq);
5741         return NULL;                             4377         return NULL;
5742 err_unlock_destroy:                           << 
5743         apply_wqattrs_unlock();               << 
5744 err_destroy:                                     4378 err_destroy:
5745         destroy_workqueue(wq);                   4379         destroy_workqueue(wq);
5746         return NULL;                             4380         return NULL;
5747 }                                                4381 }
5748 EXPORT_SYMBOL_GPL(alloc_workqueue);              4382 EXPORT_SYMBOL_GPL(alloc_workqueue);
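[Editor's usage sketch - not part of the cross-referenced source. The module, the queue name "example_wq" and the work item below are hypothetical; alloc_workqueue(), DECLARE_WORK() and queue_work() are the real APIs, and passing 0 for max_active lets the WQ_DFL_ACTIVE fallback above apply.]

    #include <linux/module.h>
    #include <linux/workqueue.h>

    static struct workqueue_struct *example_wq;     /* hypothetical name */

    static void example_fn(struct work_struct *work)
    {
            pr_info("example work ran\n");
    }
    static DECLARE_WORK(example_work, example_fn);

    static int __init example_init(void)
    {
            /* unbound, reclaim-safe queue; max_active 0 means WQ_DFL_ACTIVE */
            example_wq = alloc_workqueue("example_wq",
                                         WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
            if (!example_wq)
                    return -ENOMEM;

            queue_work(example_wq, &example_work);
            return 0;
    }
    module_init(example_init);
    MODULE_LICENSE("GPL");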
5749                                                  4383 
5750 static bool pwq_busy(struct pool_workqueue *p    4384 static bool pwq_busy(struct pool_workqueue *pwq)
5751 {                                                4385 {
5752         int i;                                   4386         int i;
5753                                                  4387 
5754         for (i = 0; i < WORK_NR_COLORS; i++)     4388         for (i = 0; i < WORK_NR_COLORS; i++)
5755                 if (pwq->nr_in_flight[i])        4389                 if (pwq->nr_in_flight[i])
5756                         return true;             4390                         return true;
5757                                                  4391 
5758         if ((pwq != rcu_access_pointer(pwq->w !! 4392         if ((pwq != pwq->wq->dfl_pwq) && (pwq->refcnt > 1))
5759                 return true;                     4393                 return true;
5760         if (!pwq_is_empty(pwq))               !! 4394         if (pwq->nr_active || !list_empty(&pwq->inactive_works))
5761                 return true;                     4395                 return true;
5762                                                  4396 
5763         return false;                            4397         return false;
5764 }                                                4398 }
5765                                                  4399 
5766 /**                                              4400 /**
5767  * destroy_workqueue - safely terminate a wor    4401  * destroy_workqueue - safely terminate a workqueue
5768  * @wq: target workqueue                         4402  * @wq: target workqueue
5769  *                                               4403  *
5770  * Safely destroy a workqueue. All work curre    4404  * Safely destroy a workqueue. All work currently pending will be done first.
5771  */                                              4405  */
5772 void destroy_workqueue(struct workqueue_struc    4406 void destroy_workqueue(struct workqueue_struct *wq)
5773 {                                                4407 {
5774         struct pool_workqueue *pwq;              4408         struct pool_workqueue *pwq;
5775         int cpu;                              !! 4409         int node;
5776                                                  4410 
5777         /*                                       4411         /*
5778          * Remove it from sysfs first so that    4412          * Remove it from sysfs first so that sanity check failure doesn't
5779          * lead to sysfs name conflicts.         4413          * lead to sysfs name conflicts.
5780          */                                      4414          */
5781         workqueue_sysfs_unregister(wq);          4415         workqueue_sysfs_unregister(wq);
5782                                                  4416 
5783         /* mark the workqueue destruction is  << 
5784         mutex_lock(&wq->mutex);               << 
5785         wq->flags |= __WQ_DESTROYING;         << 
5786         mutex_unlock(&wq->mutex);             << 
5787                                               << 
5788         /* drain it before proceeding with de    4417         /* drain it before proceeding with destruction */
5789         drain_workqueue(wq);                     4418         drain_workqueue(wq);
5790                                                  4419 
5791         /* kill rescuer, if sanity checks fai    4420         /* kill rescuer, if sanity checks fail, leave it w/o rescuer */
5792         if (wq->rescuer) {                       4421         if (wq->rescuer) {
5793                 struct worker *rescuer = wq->    4422                 struct worker *rescuer = wq->rescuer;
5794                                                  4423 
5795                 /* this prevents new queueing    4424                 /* this prevents new queueing */
5796                 raw_spin_lock_irq(&wq_mayday_    4425                 raw_spin_lock_irq(&wq_mayday_lock);
5797                 wq->rescuer = NULL;              4426                 wq->rescuer = NULL;
5798                 raw_spin_unlock_irq(&wq_mayda    4427                 raw_spin_unlock_irq(&wq_mayday_lock);
5799                                                  4428 
5800                 /* rescuer will empty maydays    4429                 /* rescuer will empty maydays list before exiting */
5801                 kthread_stop(rescuer->task);     4430                 kthread_stop(rescuer->task);
5802                 kfree(rescuer);                  4431                 kfree(rescuer);
5803         }                                        4432         }
5804                                                  4433 
5805         /*                                       4434         /*
5806          * Sanity checks - grab all the locks    4435          * Sanity checks - grab all the locks so that we wait for all
5807          * in-flight operations which may do     4436          * in-flight operations which may do put_pwq().
5808          */                                      4437          */
5809         mutex_lock(&wq_pool_mutex);              4438         mutex_lock(&wq_pool_mutex);
5810         mutex_lock(&wq->mutex);                  4439         mutex_lock(&wq->mutex);
5811         for_each_pwq(pwq, wq) {                  4440         for_each_pwq(pwq, wq) {
5812                 raw_spin_lock_irq(&pwq->pool-    4441                 raw_spin_lock_irq(&pwq->pool->lock);
5813                 if (WARN_ON(pwq_busy(pwq))) {    4442                 if (WARN_ON(pwq_busy(pwq))) {
5814                         pr_warn("%s: %s has t    4443                         pr_warn("%s: %s has the following busy pwq\n",
5815                                 __func__, wq-    4444                                 __func__, wq->name);
5816                         show_pwq(pwq);           4445                         show_pwq(pwq);
5817                         raw_spin_unlock_irq(&    4446                         raw_spin_unlock_irq(&pwq->pool->lock);
5818                         mutex_unlock(&wq->mut    4447                         mutex_unlock(&wq->mutex);
5819                         mutex_unlock(&wq_pool    4448                         mutex_unlock(&wq_pool_mutex);
5820                         show_one_workqueue(wq    4449                         show_one_workqueue(wq);
5821                         return;                  4450                         return;
5822                 }                                4451                 }
5823                 raw_spin_unlock_irq(&pwq->poo    4452                 raw_spin_unlock_irq(&pwq->pool->lock);
5824         }                                        4453         }
5825         mutex_unlock(&wq->mutex);                4454         mutex_unlock(&wq->mutex);
5826                                                  4455 
5827         /*                                       4456         /*
5828          * wq list is used to freeze wq, remo    4457          * wq list is used to freeze wq, remove from list after
5829          * flushing is complete in case freez    4458          * flushing is complete in case freeze races us.
5830          */                                      4459          */
5831         list_del_rcu(&wq->list);                 4460         list_del_rcu(&wq->list);
5832         mutex_unlock(&wq_pool_mutex);            4461         mutex_unlock(&wq_pool_mutex);
5833                                                  4462 
5834         /*                                    !! 4463         if (!(wq->flags & WQ_UNBOUND)) {
5835          * We're the sole accessor of @wq. Di !! 4464                 wq_unregister_lockdep(wq);
5836          * to put the base refs. @wq will be  !! 4465                 /*
5837          * pwq_put. RCU read lock prevents @w !! 4466                  * The base ref is never dropped on per-cpu pwqs.  Directly
5838          */                                   !! 4467                  * schedule RCU free.
5839         rcu_read_lock();                      !! 4468                  */
                                                   >> 4469                 call_rcu(&wq->rcu, rcu_free_wq);
                                                   >> 4470         } else {
                                                   >> 4471                 /*
                                                   >> 4472                  * We're the sole accessor of @wq at this point.  Directly
                                                   >> 4473                  * access numa_pwq_tbl[] and dfl_pwq to put the base refs.
                                                   >> 4474                  * @wq will be freed when the last pwq is released.
                                                   >> 4475                  */
                                                   >> 4476                 for_each_node(node) {
                                                   >> 4477                         pwq = rcu_access_pointer(wq->numa_pwq_tbl[node]);
                                                   >> 4478                         RCU_INIT_POINTER(wq->numa_pwq_tbl[node], NULL);
                                                   >> 4479                         put_pwq_unlocked(pwq);
                                                   >> 4480                 }
5840                                                  4481 
5841         for_each_possible_cpu(cpu) {          !! 4482                 /*
5842                 put_pwq_unlocked(unbound_pwq( !! 4483                  * Put dfl_pwq.  @wq may be freed any time after dfl_pwq is
5843                 RCU_INIT_POINTER(*unbound_pwq !! 4484                  * put.  Don't access it afterwards.
                                                   >> 4485                  */
                                                   >> 4486                 pwq = wq->dfl_pwq;
                                                   >> 4487                 wq->dfl_pwq = NULL;
                                                   >> 4488                 put_pwq_unlocked(pwq);
5844         }                                        4489         }
5845                                               << 
5846         put_pwq_unlocked(unbound_pwq(wq, -1)) << 
5847         RCU_INIT_POINTER(*unbound_pwq_slot(wq << 
5848                                               << 
5849         rcu_read_unlock();                    << 
5850 }                                                4490 }
5851 EXPORT_SYMBOL_GPL(destroy_workqueue);            4491 EXPORT_SYMBOL_GPL(destroy_workqueue);
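[Editor's teardown sketch, continuing the hypothetical module from the alloc_workqueue() example: stop our own queueing paths first, then let destroy_workqueue() drain and release the queue.]

    static void __exit example_exit(void)
    {
            /*
             * Make sure none of our own paths can queue new work, then
             * tear down; destroy_workqueue() drains whatever is pending.
             */
            cancel_work_sync(&example_work);
            destroy_workqueue(example_wq);
    }
    module_exit(example_exit);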
5852                                                  4492 
5853 /**                                              4493 /**
5854  * workqueue_set_max_active - adjust max_acti    4494  * workqueue_set_max_active - adjust max_active of a workqueue
5855  * @wq: target workqueue                         4495  * @wq: target workqueue
5856  * @max_active: new max_active value.            4496  * @max_active: new max_active value.
5857  *                                               4497  *
5858  * Set max_active of @wq to @max_active. See  !! 4498  * Set max_active of @wq to @max_active.
5859  * comment.                                   << 
5860  *                                               4499  *
5861  * CONTEXT:                                      4500  * CONTEXT:
5862  * Don't call from IRQ context.                  4501  * Don't call from IRQ context.
5863  */                                              4502  */
5864 void workqueue_set_max_active(struct workqueu    4503 void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
5865 {                                                4504 {
5866         /* max_active doesn't mean anything f !! 4505         struct pool_workqueue *pwq;
5867         if (WARN_ON(wq->flags & WQ_BH))       !! 4506 
5868                 return;                       << 
5869         /* disallow meddling with max_active     4507         /* disallow meddling with max_active for ordered workqueues */
5870         if (WARN_ON(wq->flags & __WQ_ORDERED) !! 4508         if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
5871                 return;                          4509                 return;
5872                                                  4510 
5873         max_active = wq_clamp_max_active(max_    4511         max_active = wq_clamp_max_active(max_active, wq->flags, wq->name);
5874                                                  4512 
5875         mutex_lock(&wq->mutex);                  4513         mutex_lock(&wq->mutex);
5876                                                  4514 
                                                   >> 4515         wq->flags &= ~__WQ_ORDERED;
5877         wq->saved_max_active = max_active;       4516         wq->saved_max_active = max_active;
5878         if (wq->flags & WQ_UNBOUND)           << 
5879                 wq->saved_min_active = min(wq << 
5880                                                  4517 
5881         wq_adjust_max_active(wq);             !! 4518         for_each_pwq(pwq, wq)
                                                   >> 4519                 pwq_adjust_max_active(pwq);
5882                                                  4520 
5883         mutex_unlock(&wq->mutex);                4521         mutex_unlock(&wq->mutex);
5884 }                                                4522 }
5885 EXPORT_SYMBOL_GPL(workqueue_set_max_active);     4523 EXPORT_SYMBOL_GPL(workqueue_set_max_active);
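[Editor's call-site sketch for the helper above; the wrapper name example_tune() is invented, workqueue_set_max_active() is the real exported API.]

    static void example_tune(struct workqueue_struct *wq, int limit)
    {
            /* not allowed for BH or ordered workqueues - see the WARN_ONs above */
            workqueue_set_max_active(wq, limit);
    }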
5886                                                  4524 
5887 /**                                              4525 /**
5888  * workqueue_set_min_active - adjust min_acti << 
5889  * @wq: target unbound workqueue              << 
5890  * @min_active: new min_active value          << 
5891  *                                            << 
5892  * Set min_active of an unbound workqueue. Un << 
5893  * unbound workqueue is not guaranteed to be  << 
5894  * interdependent work items. Instead, an unb << 
5895  * able to process min_active number of inter << 
5896  * %WQ_DFL_MIN_ACTIVE by default.             << 
5897  *                                            << 
5898  * Use this function to adjust the min_active << 
5899  * max_active.                                << 
5900  */                                           << 
5901 void workqueue_set_min_active(struct workqueu << 
5902 {                                             << 
5903         /* min_active is only meaningful for  << 
5904         if (WARN_ON((wq->flags & (WQ_BH | WQ_ << 
5905                     WQ_UNBOUND))              << 
5906                 return;                       << 
5907                                               << 
5908         mutex_lock(&wq->mutex);               << 
5909         wq->saved_min_active = clamp(min_acti << 
5910         wq_adjust_max_active(wq);             << 
5911         mutex_unlock(&wq->mutex);             << 
5912 }                                             << 
5913                                               << 
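[Editor's sketch of how built-in code might use the helper above - it carries no EXPORT_SYMBOL here, so modules cannot call it; the wrapper and the unbound workqueue argument are hypothetical.]

    static void example_tune_min(struct workqueue_struct *unbound_wq)
    {
            /* only meaningful for unbound workqueues; the value is clamped
             * against the workqueue's saved max_active */
            workqueue_set_min_active(unbound_wq, 2);
    }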
5914 /**                                           << 
5915  * current_work - retrieve %current task's wo    4526  * current_work - retrieve %current task's work struct
5916  *                                               4527  *
5917  * Determine if %current task is a workqueue     4528  * Determine if %current task is a workqueue worker and what it's working on.
5918  * Useful to find out the context that the %c    4529  * Useful to find out the context that the %current task is running in.
5919  *                                               4530  *
5920  * Return: work struct if %current task is a     4531  * Return: work struct if %current task is a workqueue worker, %NULL otherwise.
5921  */                                              4532  */
5922 struct work_struct *current_work(void)           4533 struct work_struct *current_work(void)
5923 {                                                4534 {
5924         struct worker *worker = current_wq_wo    4535         struct worker *worker = current_wq_worker();
5925                                                  4536 
5926         return worker ? worker->current_work     4537         return worker ? worker->current_work : NULL;
5927 }                                                4538 }
5928 EXPORT_SYMBOL(current_work);                     4539 EXPORT_SYMBOL(current_work);
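[Editor's sketch of the usual reason to call current_work(): avoiding a self-flush deadlock. The work item is the hypothetical one from the earlier alloc_workqueue() example.]

    static void example_sync(void)
    {
            /* flushing our own work item from inside itself would deadlock */
            if (current_work() == &example_work)
                    return;

            flush_work(&example_work);
    }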
5929                                                  4540 
5930 /**                                              4541 /**
5931  * current_is_workqueue_rescuer - is %current    4542  * current_is_workqueue_rescuer - is %current workqueue rescuer?
5932  *                                               4543  *
5933  * Determine whether %current is a workqueue     4544  * Determine whether %current is a workqueue rescuer.  Can be used from
5934  * work functions to determine whether it's b    4545  * work functions to determine whether it's being run off the rescuer task.
5935  *                                               4546  *
5936  * Return: %true if %current is a workqueue r    4547  * Return: %true if %current is a workqueue rescuer. %false otherwise.
5937  */                                              4548  */
5938 bool current_is_workqueue_rescuer(void)          4549 bool current_is_workqueue_rescuer(void)
5939 {                                                4550 {
5940         struct worker *worker = current_wq_wo    4551         struct worker *worker = current_wq_worker();
5941                                                  4552 
5942         return worker && worker->rescue_wq;      4553         return worker && worker->rescue_wq;
5943 }                                                4554 }
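[Editor's sketch of a work function using the rescuer test above to behave more conservatively when run off the rescuer, i.e. under memory pressure; the function name and batch sizes are invented.]

    static void example_reclaim_fn(struct work_struct *work)
    {
            /* on the rescuer we are likely under memory pressure - stay lean */
            unsigned int batch = current_is_workqueue_rescuer() ? 1 : 16;

            pr_info("processing batch of %u items\n", batch);
    }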
5944                                                  4555 
5945 /**                                              4556 /**
5946  * workqueue_congested - test whether a workq    4557  * workqueue_congested - test whether a workqueue is congested
5947  * @cpu: CPU in question                         4558  * @cpu: CPU in question
5948  * @wq: target workqueue                         4559  * @wq: target workqueue
5949  *                                               4560  *
5950  * Test whether @wq's cpu workqueue for @cpu     4561  * Test whether @wq's cpu workqueue for @cpu is congested.  There is
5951  * no synchronization around this function an    4562  * no synchronization around this function and the test result is
5952  * unreliable and only useful as advisory hin    4563  * unreliable and only useful as advisory hints or for debugging.
5953  *                                               4564  *
5954  * If @cpu is WORK_CPU_UNBOUND, the test is p    4565  * If @cpu is WORK_CPU_UNBOUND, the test is performed on the local CPU.
5955  *                                            !! 4566  * Note that both per-cpu and unbound workqueues may be associated with
5956  * With the exception of ordered workqueues,  !! 4567  * multiple pool_workqueues which have separate congested states.  A
5957  * pool_workqueues, each with its own congest !! 4568  * workqueue being congested on one CPU doesn't mean the workqueue is also
5958  * congested on one CPU doesn't mean that the !! 4569  * contested on other CPUs / NUMA nodes.
5959  * other CPUs.                                << 
5960  *                                               4570  *
5961  * Return:                                       4571  * Return:
5962  * %true if congested, %false otherwise.         4572  * %true if congested, %false otherwise.
5963  */                                              4573  */
5964 bool workqueue_congested(int cpu, struct work    4574 bool workqueue_congested(int cpu, struct workqueue_struct *wq)
5965 {                                                4575 {
5966         struct pool_workqueue *pwq;              4576         struct pool_workqueue *pwq;
5967         bool ret;                                4577         bool ret;
5968                                                  4578 
5969         rcu_read_lock();                         4579         rcu_read_lock();
5970         preempt_disable();                       4580         preempt_disable();
5971                                                  4581 
5972         if (cpu == WORK_CPU_UNBOUND)             4582         if (cpu == WORK_CPU_UNBOUND)
5973                 cpu = smp_processor_id();        4583                 cpu = smp_processor_id();
5974                                                  4584 
5975         pwq = *per_cpu_ptr(wq->cpu_pwq, cpu); !! 4585         if (!(wq->flags & WQ_UNBOUND))
5976         ret = !list_empty(&pwq->inactive_work !! 4586                 pwq = per_cpu_ptr(wq->cpu_pwqs, cpu);
                                                   >> 4587         else
                                                   >> 4588                 pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu));
5977                                                  4589 
                                                   >> 4590         ret = !list_empty(&pwq->inactive_works);
5978         preempt_enable();                        4591         preempt_enable();
5979         rcu_read_unlock();                       4592         rcu_read_unlock();
5980                                                  4593 
5981         return ret;                              4594         return ret;
5982 }                                                4595 }
5983 EXPORT_SYMBOL_GPL(workqueue_congested);          4596 EXPORT_SYMBOL_GPL(workqueue_congested);
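[Editor's sketch using the congestion test above as the advisory hint the comment describes; example_wq is the hypothetical workqueue from the earlier sketch.]

    static bool example_should_defer(void)
    {
            /* purely advisory - the result may be stale by the time it is used */
            return workqueue_congested(WORK_CPU_UNBOUND, example_wq);
    }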
5984                                                  4597 
5985 /**                                              4598 /**
5986  * work_busy - test whether a work is current    4599  * work_busy - test whether a work is currently pending or running
5987  * @work: the work to be tested                  4600  * @work: the work to be tested
5988  *                                               4601  *
5989  * Test whether @work is currently pending or    4602  * Test whether @work is currently pending or running.  There is no
5990  * synchronization around this function and t    4603  * synchronization around this function and the test result is
5991  * unreliable and only useful as advisory hin    4604  * unreliable and only useful as advisory hints or for debugging.
5992  *                                               4605  *
5993  * Return:                                       4606  * Return:
5994  * OR'd bitmask of WORK_BUSY_* bits.             4607  * OR'd bitmask of WORK_BUSY_* bits.
5995  */                                              4608  */
5996 unsigned int work_busy(struct work_struct *wo    4609 unsigned int work_busy(struct work_struct *work)
5997 {                                                4610 {
5998         struct worker_pool *pool;                4611         struct worker_pool *pool;
5999         unsigned long irq_flags;              !! 4612         unsigned long flags;
6000         unsigned int ret = 0;                    4613         unsigned int ret = 0;
6001                                                  4614 
6002         if (work_pending(work))                  4615         if (work_pending(work))
6003                 ret |= WORK_BUSY_PENDING;        4616                 ret |= WORK_BUSY_PENDING;
6004                                                  4617 
6005         rcu_read_lock();                         4618         rcu_read_lock();
6006         pool = get_work_pool(work);              4619         pool = get_work_pool(work);
6007         if (pool) {                              4620         if (pool) {
6008                 raw_spin_lock_irqsave(&pool-> !! 4621                 raw_spin_lock_irqsave(&pool->lock, flags);
6009                 if (find_worker_executing_wor    4622                 if (find_worker_executing_work(pool, work))
6010                         ret |= WORK_BUSY_RUNN    4623                         ret |= WORK_BUSY_RUNNING;
6011                 raw_spin_unlock_irqrestore(&p !! 4624                 raw_spin_unlock_irqrestore(&pool->lock, flags);
6012         }                                        4625         }
6013         rcu_read_unlock();                       4626         rcu_read_unlock();
6014                                                  4627 
6015         return ret;                              4628         return ret;
6016 }                                                4629 }
6017 EXPORT_SYMBOL_GPL(work_busy);                    4630 EXPORT_SYMBOL_GPL(work_busy);
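[Editor's sketch decoding the WORK_BUSY_* bits returned above for the hypothetical example_work item; as the comment says, the result is only an advisory snapshot.]

    static void example_report(void)
    {
            unsigned int busy = work_busy(&example_work);

            if (busy & WORK_BUSY_PENDING)
                    pr_debug("example_work is queued\n");
            if (busy & WORK_BUSY_RUNNING)
                    pr_debug("example_work is running\n");
    }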
6018                                                  4631 
6019 /**                                              4632 /**
6020  * set_worker_desc - set description for the     4633  * set_worker_desc - set description for the current work item
6021  * @fmt: printf-style format string              4634  * @fmt: printf-style format string
6022  * @...: arguments for the format string         4635  * @...: arguments for the format string
6023  *                                               4636  *
6024  * This function can be called by a running w    4637  * This function can be called by a running work function to describe what
6025  * the work item is about.  If the worker tas    4638  * the work item is about.  If the worker task gets dumped, this
6026  * information will be printed out together t    4639  * information will be printed out together to help debugging.  The
6027  * description can be at most WORKER_DESC_LEN    4640  * description can be at most WORKER_DESC_LEN including the trailing '\0'.
6028  */                                              4641  */
6029 void set_worker_desc(const char *fmt, ...)       4642 void set_worker_desc(const char *fmt, ...)
6030 {                                                4643 {
6031         struct worker *worker = current_wq_wo    4644         struct worker *worker = current_wq_worker();
6032         va_list args;                            4645         va_list args;
6033                                                  4646 
6034         if (worker) {                            4647         if (worker) {
6035                 va_start(args, fmt);             4648                 va_start(args, fmt);
6036                 vsnprintf(worker->desc, sizeo    4649                 vsnprintf(worker->desc, sizeof(worker->desc), fmt, args);
6037                 va_end(args);                    4650                 va_end(args);
6038         }                                        4651         }
6039 }                                                4652 }
6040 EXPORT_SYMBOL_GPL(set_worker_desc);              4653 EXPORT_SYMBOL_GPL(set_worker_desc);
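[Editor's sketch of a work function tagging itself with set_worker_desc() so that print_worker_info() below can show more than the workqueue name; the description text and device number are invented.]

    static void example_flush_fn(struct work_struct *work)
    {
            /* printed by print_worker_info() if this worker task gets dumped */
            set_worker_desc("example-flush dev%d", 0);

            pr_info("flushing\n");
    }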
6041                                                  4654 
6042 /**                                              4655 /**
6043  * print_worker_info - print out worker infor    4656  * print_worker_info - print out worker information and description
6044  * @log_lvl: the log level to use when printi    4657  * @log_lvl: the log level to use when printing
6045  * @task: target task                            4658  * @task: target task
6046  *                                               4659  *
6047  * If @task is a worker and currently executi    4660  * If @task is a worker and currently executing a work item, print out the
6048  * name of the workqueue being serviced and w    4661  * name of the workqueue being serviced and worker description set with
6049  * set_worker_desc() by the currently executi    4662  * set_worker_desc() by the currently executing work item.
6050  *                                               4663  *
6051  * This function can be safely called on any     4664  * This function can be safely called on any task as long as the
6052  * task_struct itself is accessible.  While s    4665  * task_struct itself is accessible.  While safe, this function isn't
6053  * synchronized and may print out mixups or g    4666  * synchronized and may print out mixups or garbages of limited length.
6054  */                                              4667  */
6055 void print_worker_info(const char *log_lvl, s    4668 void print_worker_info(const char *log_lvl, struct task_struct *task)
6056 {                                                4669 {
6057         work_func_t *fn = NULL;                  4670         work_func_t *fn = NULL;
6058         char name[WQ_NAME_LEN] = { };            4671         char name[WQ_NAME_LEN] = { };
6059         char desc[WORKER_DESC_LEN] = { };        4672         char desc[WORKER_DESC_LEN] = { };
6060         struct pool_workqueue *pwq = NULL;       4673         struct pool_workqueue *pwq = NULL;
6061         struct workqueue_struct *wq = NULL;      4674         struct workqueue_struct *wq = NULL;
6062         struct worker *worker;                   4675         struct worker *worker;
6063                                                  4676 
6064         if (!(task->flags & PF_WQ_WORKER))       4677         if (!(task->flags & PF_WQ_WORKER))
6065                 return;                          4678                 return;
6066                                                  4679 
6067         /*                                       4680         /*
6068          * This function is called without an    4681          * This function is called without any synchronization and @task
6069          * could be in any state.  Be careful    4682          * could be in any state.  Be careful with dereferences.
6070          */                                      4683          */
6071         worker = kthread_probe_data(task);       4684         worker = kthread_probe_data(task);
6072                                                  4685 
6073         /*                                       4686         /*
6074          * Carefully copy the associated work    4687          * Carefully copy the associated workqueue's workfn, name and desc.
6075          * Keep the original last '\0' in cas    4688          * Keep the original last '\0' in case the original is garbage.
6076          */                                      4689          */
6077         copy_from_kernel_nofault(&fn, &worker    4690         copy_from_kernel_nofault(&fn, &worker->current_func, sizeof(fn));
6078         copy_from_kernel_nofault(&pwq, &worke    4691         copy_from_kernel_nofault(&pwq, &worker->current_pwq, sizeof(pwq));
6079         copy_from_kernel_nofault(&wq, &pwq->w    4692         copy_from_kernel_nofault(&wq, &pwq->wq, sizeof(wq));
6080         copy_from_kernel_nofault(name, wq->na    4693         copy_from_kernel_nofault(name, wq->name, sizeof(name) - 1);
6081         copy_from_kernel_nofault(desc, worker    4694         copy_from_kernel_nofault(desc, worker->desc, sizeof(desc) - 1);
6082                                                  4695 
6083         if (fn || name[0] || desc[0]) {          4696         if (fn || name[0] || desc[0]) {
6084                 printk("%sWorkqueue: %s %ps",    4697                 printk("%sWorkqueue: %s %ps", log_lvl, name, fn);
6085                 if (strcmp(name, desc))          4698                 if (strcmp(name, desc))
6086                         pr_cont(" (%s)", desc    4699                         pr_cont(" (%s)", desc);
6087                 pr_cont("\n");                   4700                 pr_cont("\n");
6088         }                                        4701         }
6089 }                                                4702 }
6090                                                  4703 
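[Editor's sketch of a built-in debugging path calling print_worker_info(), which is declared in workqueue.h but not exported; the wrapper name is hypothetical.]

    static void example_dump_task(struct task_struct *p)
    {
            /* prints "Workqueue: <name> <func> (<desc>)" if @p is a busy worker */
            print_worker_info(KERN_INFO, p);
    }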
6091 static void pr_cont_pool_info(struct worker_p    4704 static void pr_cont_pool_info(struct worker_pool *pool)
6092 {                                                4705 {
6093         pr_cont(" cpus=%*pbl", nr_cpumask_bit    4706         pr_cont(" cpus=%*pbl", nr_cpumask_bits, pool->attrs->cpumask);
6094         if (pool->node != NUMA_NO_NODE)          4707         if (pool->node != NUMA_NO_NODE)
6095                 pr_cont(" node=%d", pool->nod    4708                 pr_cont(" node=%d", pool->node);
6096         pr_cont(" flags=0x%x", pool->flags);  !! 4709         pr_cont(" flags=0x%x nice=%d", pool->flags, pool->attrs->nice);
6097         if (pool->flags & POOL_BH)            << 
6098                 pr_cont(" bh%s",              << 
6099                         pool->attrs->nice ==  << 
6100         else                                  << 
6101                 pr_cont(" nice=%d", pool->att << 
6102 }                                             << 
6103                                               << 
6104 static void pr_cont_worker_id(struct worker * << 
6105 {                                             << 
6106         struct worker_pool *pool = worker->po << 
6107                                               << 
6108         if (pool->flags & WQ_BH)              << 
6109                 pr_cont("bh%s",               << 
6110                         pool->attrs->nice ==  << 
6111         else                                  << 
6112                 pr_cont("%d%s", task_pid_nr(w << 
6113                         worker->rescue_wq ? " << 
6114 }                                             << 
6115                                               << 
6116 struct pr_cont_work_struct {                  << 
6117         bool comma;                           << 
6118         work_func_t func;                     << 
6119         long ctr;                             << 
6120 };                                            << 
6121                                               << 
6122 static void pr_cont_work_flush(bool comma, wo << 
6123 {                                             << 
6124         if (!pcwsp->ctr)                      << 
6125                 goto out_record;              << 
6126         if (func == pcwsp->func) {            << 
6127                 pcwsp->ctr++;                 << 
6128                 return;                       << 
6129         }                                     << 
6130         if (pcwsp->ctr == 1)                  << 
6131                 pr_cont("%s %ps", pcwsp->comm << 
6132         else                                  << 
6133                 pr_cont("%s %ld*%ps", pcwsp-> << 
6134         pcwsp->ctr = 0;                       << 
6135 out_record:                                   << 
6136         if ((long)func == -1L)                << 
6137                 return;                       << 
6138         pcwsp->comma = comma;                 << 
6139         pcwsp->func = func;                   << 
6140         pcwsp->ctr = 1;                       << 
6141 }                                                4710 }
6142                                                  4711 
6143 static void pr_cont_work(bool comma, struct w !! 4712 static void pr_cont_work(bool comma, struct work_struct *work)
6144 {                                                4713 {
6145         if (work->func == wq_barrier_func) {     4714         if (work->func == wq_barrier_func) {
6146                 struct wq_barrier *barr;         4715                 struct wq_barrier *barr;
6147                                                  4716 
6148                 barr = container_of(work, str    4717                 barr = container_of(work, struct wq_barrier, work);
6149                                                  4718 
6150                 pr_cont_work_flush(comma, (wo << 
6151                 pr_cont("%s BAR(%d)", comma ?    4719                 pr_cont("%s BAR(%d)", comma ? "," : "",
6152                         task_pid_nr(barr->tas    4720                         task_pid_nr(barr->task));
6153         } else {                                 4721         } else {
6154                 if (!comma)                   !! 4722                 pr_cont("%s %ps", comma ? "," : "", work->func);
6155                         pr_cont_work_flush(co << 
6156                 pr_cont_work_flush(comma, wor << 
6157         }                                        4723         }
6158 }                                                4724 }
6159                                                  4725 
6160 static void show_pwq(struct pool_workqueue *p    4726 static void show_pwq(struct pool_workqueue *pwq)
6161 {                                                4727 {
6162         struct pr_cont_work_struct pcws = { . << 
6163         struct worker_pool *pool = pwq->pool;    4728         struct worker_pool *pool = pwq->pool;
6164         struct work_struct *work;                4729         struct work_struct *work;
6165         struct worker *worker;                   4730         struct worker *worker;
6166         bool has_in_flight = false, has_pendi    4731         bool has_in_flight = false, has_pending = false;
6167         int bkt;                                 4732         int bkt;
6168                                                  4733 
6169         pr_info("  pwq %d:", pool->id);          4734         pr_info("  pwq %d:", pool->id);
6170         pr_cont_pool_info(pool);                 4735         pr_cont_pool_info(pool);
6171                                                  4736 
6172         pr_cont(" active=%d refcnt=%d%s\n",   !! 4737         pr_cont(" active=%d/%d refcnt=%d%s\n",
6173                 pwq->nr_active, pwq->refcnt,  !! 4738                 pwq->nr_active, pwq->max_active, pwq->refcnt,
6174                 !list_empty(&pwq->mayday_node    4739                 !list_empty(&pwq->mayday_node) ? " MAYDAY" : "");
6175                                                  4740 
6176         hash_for_each(pool->busy_hash, bkt, w    4741         hash_for_each(pool->busy_hash, bkt, worker, hentry) {
6177                 if (worker->current_pwq == pw    4742                 if (worker->current_pwq == pwq) {
6178                         has_in_flight = true;    4743                         has_in_flight = true;
6179                         break;                   4744                         break;
6180                 }                                4745                 }
6181         }                                        4746         }
6182         if (has_in_flight) {                     4747         if (has_in_flight) {
6183                 bool comma = false;              4748                 bool comma = false;
6184                                                  4749 
6185                 pr_info("    in-flight:");       4750                 pr_info("    in-flight:");
6186                 hash_for_each(pool->busy_hash    4751                 hash_for_each(pool->busy_hash, bkt, worker, hentry) {
6187                         if (worker->current_p    4752                         if (worker->current_pwq != pwq)
6188                                 continue;        4753                                 continue;
6189                                                  4754 
6190                         pr_cont(" %s", comma  !! 4755                         pr_cont("%s %d%s:%ps", comma ? "," : "",
6191                         pr_cont_worker_id(wor !! 4756                                 task_pid_nr(worker->task),
6192                         pr_cont(":%ps", worke !! 4757                                 worker->rescue_wq ? "(RESCUER)" : "",
                                                   >> 4758                                 worker->current_func);
6193                         list_for_each_entry(w    4759                         list_for_each_entry(work, &worker->scheduled, entry)
6194                                 pr_cont_work( !! 4760                                 pr_cont_work(false, work);
6195                         pr_cont_work_flush(co << 
6196                         comma = true;            4761                         comma = true;
6197                 }                                4762                 }
6198                 pr_cont("\n");                   4763                 pr_cont("\n");
6199         }                                        4764         }
6200                                                  4765 
6201         list_for_each_entry(work, &pool->work    4766         list_for_each_entry(work, &pool->worklist, entry) {
6202                 if (get_work_pwq(work) == pwq    4767                 if (get_work_pwq(work) == pwq) {
6203                         has_pending = true;      4768                         has_pending = true;
6204                         break;                   4769                         break;
6205                 }                                4770                 }
6206         }                                        4771         }
6207         if (has_pending) {                       4772         if (has_pending) {
6208                 bool comma = false;              4773                 bool comma = false;
6209                                                  4774 
6210                 pr_info("    pending:");         4775                 pr_info("    pending:");
6211                 list_for_each_entry(work, &po    4776                 list_for_each_entry(work, &pool->worklist, entry) {
6212                         if (get_work_pwq(work    4777                         if (get_work_pwq(work) != pwq)
6213                                 continue;        4778                                 continue;
6214                                                  4779 
6215                         pr_cont_work(comma, w !! 4780                         pr_cont_work(comma, work);
6216                         comma = !(*work_data_    4781                         comma = !(*work_data_bits(work) & WORK_STRUCT_LINKED);
6217                 }                                4782                 }
6218                 pr_cont_work_flush(comma, (wo << 
6219                 pr_cont("\n");                   4783                 pr_cont("\n");
6220         }                                        4784         }
6221                                                  4785 
6222         if (!list_empty(&pwq->inactive_works)    4786         if (!list_empty(&pwq->inactive_works)) {
6223                 bool comma = false;              4787                 bool comma = false;
6224                                                  4788 
6225                 pr_info("    inactive:");        4789                 pr_info("    inactive:");
6226                 list_for_each_entry(work, &pw    4790                 list_for_each_entry(work, &pwq->inactive_works, entry) {
6227                         pr_cont_work(comma, w !! 4791                         pr_cont_work(comma, work);
6228                         comma = !(*work_data_    4792                         comma = !(*work_data_bits(work) & WORK_STRUCT_LINKED);
6229                 }                                4793                 }
6230                 pr_cont_work_flush(comma, (wo << 
6231                 pr_cont("\n");                   4794                 pr_cont("\n");
6232         }                                        4795         }
6233 }                                                4796 }
6234                                                  4797 
6235 /**                                              4798 /**
6236  * show_one_workqueue - dump state of specifi    4799  * show_one_workqueue - dump state of specified workqueue
6237  * @wq: workqueue whose state will be printed    4800  * @wq: workqueue whose state will be printed
6238  */                                              4801  */
6239 void show_one_workqueue(struct workqueue_stru    4802 void show_one_workqueue(struct workqueue_struct *wq)
6240 {                                                4803 {
6241         struct pool_workqueue *pwq;              4804         struct pool_workqueue *pwq;
6242         bool idle = true;                        4805         bool idle = true;
6243         unsigned long irq_flags;              !! 4806         unsigned long flags;
6244                                                  4807 
6245         for_each_pwq(pwq, wq) {                  4808         for_each_pwq(pwq, wq) {
6246                 if (!pwq_is_empty(pwq)) {     !! 4809                 if (pwq->nr_active || !list_empty(&pwq->inactive_works)) {
6247                         idle = false;            4810                         idle = false;
6248                         break;                   4811                         break;
6249                 }                                4812                 }
6250         }                                        4813         }
6251         if (idle) /* Nothing to print for idl    4814         if (idle) /* Nothing to print for idle workqueue */
6252                 return;                          4815                 return;
6253                                                  4816 
6254         pr_info("workqueue %s: flags=0x%x\n",    4817         pr_info("workqueue %s: flags=0x%x\n", wq->name, wq->flags);
6255                                                  4818 
6256         for_each_pwq(pwq, wq) {                  4819         for_each_pwq(pwq, wq) {
6257                 raw_spin_lock_irqsave(&pwq->p !! 4820                 raw_spin_lock_irqsave(&pwq->pool->lock, flags);
6258                 if (!pwq_is_empty(pwq)) {     !! 4821                 if (pwq->nr_active || !list_empty(&pwq->inactive_works)) {
6259                         /*                       4822                         /*
6260                          * Defer printing to     4823                          * Defer printing to avoid deadlocks in console
6261                          * drivers that queue    4824                          * drivers that queue work while holding locks
6262                          * also taken in thei    4825                          * also taken in their write paths.
6263                          */                      4826                          */
6264                         printk_deferred_enter    4827                         printk_deferred_enter();
6265                         show_pwq(pwq);           4828                         show_pwq(pwq);
6266                         printk_deferred_exit(    4829                         printk_deferred_exit();
6267                 }                                4830                 }
6268                 raw_spin_unlock_irqrestore(&p !! 4831                 raw_spin_unlock_irqrestore(&pwq->pool->lock, flags);
6269                 /*                               4832                 /*
6270                  * We could be printing a lot    4833                  * We could be printing a lot from atomic context, e.g.
6271                  * sysrq-t -> show_all_workqu    4834                  * sysrq-t -> show_all_workqueues(). Avoid triggering
6272                  * hard lockup.                  4835                  * hard lockup.
6273                  */                              4836                  */
6274                 touch_nmi_watchdog();            4837                 touch_nmi_watchdog();
6275         }                                        4838         }
6276                                                  4839 
6277 }                                                4840 }
6278                                                  4841 
6279 /**                                              4842 /**
6280  * show_one_worker_pool - dump state of speci    4843  * show_one_worker_pool - dump state of specified worker pool
6281  * @pool: worker pool whose state will be pri    4844  * @pool: worker pool whose state will be printed
6282  */                                              4845  */
6283 static void show_one_worker_pool(struct worke    4846 static void show_one_worker_pool(struct worker_pool *pool)
6284 {                                                4847 {
6285         struct worker *worker;                   4848         struct worker *worker;
6286         bool first = true;                       4849         bool first = true;
6287         unsigned long irq_flags;              !! 4850         unsigned long flags;
6288         unsigned long hung = 0;               << 
6289                                                  4851 
6290         raw_spin_lock_irqsave(&pool->lock, ir !! 4852         raw_spin_lock_irqsave(&pool->lock, flags);
6291         if (pool->nr_workers == pool->nr_idle    4853         if (pool->nr_workers == pool->nr_idle)
6292                 goto next_pool;                  4854                 goto next_pool;
6293                                               << 
6294         /* How long the first pending work is << 
6295         if (!list_empty(&pool->worklist))     << 
6296                 hung = jiffies_to_msecs(jiffi << 
6297                                               << 
6298         /*                                       4855         /*
6299          * Defer printing to avoid deadlocks     4856          * Defer printing to avoid deadlocks in console drivers that
6300          * queue work while holding locks als    4857          * queue work while holding locks also taken in their write
6301          * paths.                                4858          * paths.
6302          */                                      4859          */
6303         printk_deferred_enter();                 4860         printk_deferred_enter();
6304         pr_info("pool %d:", pool->id);           4861         pr_info("pool %d:", pool->id);
6305         pr_cont_pool_info(pool);                 4862         pr_cont_pool_info(pool);
6306         pr_cont(" hung=%lus workers=%d", hung !! 4863         pr_cont(" hung=%us workers=%d",
                                                   >> 4864                 jiffies_to_msecs(jiffies - pool->watchdog_ts) / 1000,
                                                   >> 4865                 pool->nr_workers);
6307         if (pool->manager)                       4866         if (pool->manager)
6308                 pr_cont(" manager: %d",          4867                 pr_cont(" manager: %d",
6309                         task_pid_nr(pool->man    4868                         task_pid_nr(pool->manager->task));
6310         list_for_each_entry(worker, &pool->id    4869         list_for_each_entry(worker, &pool->idle_list, entry) {
6311                 pr_cont(" %s", first ? "idle: !! 4870                 pr_cont(" %s%d", first ? "idle: " : "",
6312                 pr_cont_worker_id(worker);    !! 4871                         task_pid_nr(worker->task));
6313                 first = false;                   4872                 first = false;
6314         }                                        4873         }
6315         pr_cont("\n");                           4874         pr_cont("\n");
6316         printk_deferred_exit();                  4875         printk_deferred_exit();
6317 next_pool:                                       4876 next_pool:
6318         raw_spin_unlock_irqrestore(&pool->loc !! 4877         raw_spin_unlock_irqrestore(&pool->lock, flags);
6319         /*                                       4878         /*
6320          * We could be printing a lot from at    4879          * We could be printing a lot from atomic context, e.g.
6321          * sysrq-t -> show_all_workqueues().     4880          * sysrq-t -> show_all_workqueues(). Avoid triggering
6322          * hard lockup.                          4881          * hard lockup.
6323          */                                      4882          */
6324         touch_nmi_watchdog();                    4883         touch_nmi_watchdog();
6325                                                  4884 
6326 }                                                4885 }
6327                                                  4886 
6328 /**                                              4887 /**
6329  * show_all_workqueues - dump workqueue state    4888  * show_all_workqueues - dump workqueue state
6330  *                                               4889  *
6331  * Called from a sysrq handler and prints out !! 4890  * Called from a sysrq handler or try_to_freeze_tasks() and prints out
                                                   >> 4891  * all busy workqueues and pools.
6332  */                                              4892  */
6333 void show_all_workqueues(void)                   4893 void show_all_workqueues(void)
6334 {                                                4894 {
6335         struct workqueue_struct *wq;             4895         struct workqueue_struct *wq;
6336         struct worker_pool *pool;                4896         struct worker_pool *pool;
6337         int pi;                                  4897         int pi;
6338                                                  4898 
6339         rcu_read_lock();                         4899         rcu_read_lock();
6340                                                  4900 
6341         pr_info("Showing busy workqueues and     4901         pr_info("Showing busy workqueues and worker pools:\n");
6342                                                  4902 
6343         list_for_each_entry_rcu(wq, &workqueu    4903         list_for_each_entry_rcu(wq, &workqueues, list)
6344                 show_one_workqueue(wq);          4904                 show_one_workqueue(wq);
6345                                                  4905 
6346         for_each_pool(pool, pi)                  4906         for_each_pool(pool, pi)
6347                 show_one_worker_pool(pool);      4907                 show_one_worker_pool(pool);
6348                                                  4908 
6349         rcu_read_unlock();                       4909         rcu_read_unlock();
6350 }                                                4910 }
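
As noted in the "sysrq-t -> show_all_workqueues()" comment a few lines earlier, this dump is normally reached from the sysrq handler. As a rough illustration only, and assuming sysrq is enabled and /proc/sysrq-trigger is available, a hypothetical userspace helper (not part of workqueue.c) can request the dump; the output appears in the kernel log:

/* Hypothetical userspace helper: poke sysrq 't', which per the comment above
 * can end up in show_all_workqueues(); read the result with dmesg. */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sysrq-trigger", "w");

	if (!f) {
		perror("/proc/sysrq-trigger");
		return 1;
	}
	if (fputc('t', f) == EOF) {
		fclose(f);
		return 1;
	}
	return fclose(f) ? 1 : 0;
}
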
6351                                                  4911 
6352 /**                                           << 
6353  * show_freezable_workqueues - dump freezable << 
6354  *                                            << 
6355  * Called from try_to_freeze_tasks() and prin << 
6356  * still busy.                                << 
6357  */                                           << 
6358 void show_freezable_workqueues(void)          << 
6359 {                                             << 
6360         struct workqueue_struct *wq;          << 
6361                                               << 
6362         rcu_read_lock();                      << 
6363                                               << 
6364         pr_info("Showing freezable workqueues << 
6365                                               << 
6366         list_for_each_entry_rcu(wq, &workqueu << 
6367                 if (!(wq->flags & WQ_FREEZABL << 
6368                         continue;             << 
6369                 show_one_workqueue(wq);       << 
6370         }                                     << 
6371                                               << 
6372         rcu_read_unlock();                    << 
6373 }                                             << 
6374                                               << 
6375 /* used to show worker information through /p    4912 /* used to show worker information through /proc/PID/{comm,stat,status} */
6376 void wq_worker_comm(char *buf, size_t size, s    4913 void wq_worker_comm(char *buf, size_t size, struct task_struct *task)
6377 {                                                4914 {
                                                   >> 4915         int off;
                                                   >> 4916 
                                                   >> 4917         /* always show the actual comm */
                                                   >> 4918         off = strscpy(buf, task->comm, size);
                                                   >> 4919         if (off < 0)
                                                   >> 4920                 return;
                                                   >> 4921 
6378         /* stabilize PF_WQ_WORKER and worker     4922         /* stabilize PF_WQ_WORKER and worker pool association */
6379         mutex_lock(&wq_pool_attach_mutex);       4923         mutex_lock(&wq_pool_attach_mutex);
6380                                                  4924 
6381         if (task->flags & PF_WQ_WORKER) {        4925         if (task->flags & PF_WQ_WORKER) {
6382                 struct worker *worker = kthre    4926                 struct worker *worker = kthread_data(task);
6383                 struct worker_pool *pool = wo    4927                 struct worker_pool *pool = worker->pool;
6384                 int off;                      << 
6385                                               << 
6386                 off = format_worker_id(buf, s << 
6387                                                  4928 
6388                 if (pool) {                      4929                 if (pool) {
6389                         raw_spin_lock_irq(&po    4930                         raw_spin_lock_irq(&pool->lock);
6390                         /*                       4931                         /*
6391                          * ->desc tracks info    4932                          * ->desc tracks information (wq name or
6392                          * set_worker_desc())    4933                          * set_worker_desc()) for the latest execution.  If
6393                          * current, prepend '    4934                          * current, prepend '+', otherwise '-'.
6394                          */                      4935                          */
6395                         if (worker->desc[0] !    4936                         if (worker->desc[0] != '\0') {
6396                                 if (worker->c    4937                                 if (worker->current_work)
6397                                         scnpr    4938                                         scnprintf(buf + off, size - off, "+%s",
6398                                                  4939                                                   worker->desc);
6399                                 else             4940                                 else
6400                                         scnpr    4941                                         scnprintf(buf + off, size - off, "-%s",
6401                                                  4942                                                   worker->desc);
6402                         }                        4943                         }
6403                         raw_spin_unlock_irq(&    4944                         raw_spin_unlock_irq(&pool->lock);
6404                 }                                4945                 }
6405         } else {                              << 
6406                 strscpy(buf, task->comm, size << 
6407         }                                        4946         }
6408                                                  4947 
6409         mutex_unlock(&wq_pool_attach_mutex);     4948         mutex_unlock(&wq_pool_attach_mutex);
6410 }                                                4949 }
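
The '+'/'-' prefixed description that wq_worker_comm() appends comes from set_worker_desc(), which a work item may call while it is executing. A minimal, hypothetical work handler (the function and description names are invented for illustration):

#include <linux/workqueue.h>

static void mydrv_work_fn(struct work_struct *work)
{
	/*
	 * Label the executing kworker; while this item runs, reading
	 * /proc/<pid>/comm for that worker shows "...+mydrv-sync", and
	 * "-mydrv-sync" afterwards, per wq_worker_comm() above.
	 */
	set_worker_desc("mydrv-sync");

	/* ... the actual work would go here ... */
}
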
6411                                                  4950 
6412 #ifdef CONFIG_SMP                                4951 #ifdef CONFIG_SMP
6413                                                  4952 
6414 /*                                               4953 /*
6415  * CPU hotplug.                                  4954  * CPU hotplug.
6416  *                                               4955  *
6417  * There are two challenges in supporting CPU    4956  * There are two challenges in supporting CPU hotplug.  Firstly, there
6418  * are a lot of assumptions on strong associa    4957  * are a lot of assumptions on strong associations among work, pwq and
6419  * pool which make migrating pending and sche    4958  * pool which make migrating pending and scheduled works very
6420  * difficult to implement without impacting h    4959  * difficult to implement without impacting hot paths.  Secondly,
6421  * worker pools serve a mix of short, long and   4960  * worker pools serve a mix of short, long and very long running works making
6422  * blocked draining impractical.                 4961  * blocked draining impractical.
6423  *                                               4962  *
6424  * This is solved by allowing the pools to be    4963  * This is solved by allowing the pools to be disassociated from the CPU
6425  * running as an unbound one and allowing it     4964  * running as an unbound one and allowing it to be reattached later if the
6426  * cpu comes back online.                        4965  * cpu comes back online.
6427  */                                              4966  */
6428                                                  4967 
6429 static void unbind_workers(int cpu)              4968 static void unbind_workers(int cpu)
6430 {                                                4969 {
6431         struct worker_pool *pool;                4970         struct worker_pool *pool;
6432         struct worker *worker;                   4971         struct worker *worker;
6433                                                  4972 
6434         for_each_cpu_worker_pool(pool, cpu) {    4973         for_each_cpu_worker_pool(pool, cpu) {
6435                 mutex_lock(&wq_pool_attach_mu    4974                 mutex_lock(&wq_pool_attach_mutex);
6436                 raw_spin_lock_irq(&pool->lock    4975                 raw_spin_lock_irq(&pool->lock);
6437                                                  4976 
6438                 /*                               4977                 /*
6439                  * We've blocked all attach/d    4978                  * We've blocked all attach/detach operations. Make all workers
6440                  * unbound and set DISASSOCIA    4979                  * unbound and set DISASSOCIATED.  Before this, all workers
6441                  * must be on the cpu.  After    4980                  * must be on the cpu.  After this, they may become diasporas.
6442                  * And the preemption disable    4981                  * And the preemption disabled section in their sched callbacks
6443                  * are guaranteed to see WORK    4982                  * are guaranteed to see WORKER_UNBOUND since the code here
6444                  * is on the same cpu.           4983                  * is on the same cpu.
6445                  */                              4984                  */
6446                 for_each_pool_worker(worker,     4985                 for_each_pool_worker(worker, pool)
6447                         worker->flags |= WORK    4986                         worker->flags |= WORKER_UNBOUND;
6448                                                  4987 
6449                 pool->flags |= POOL_DISASSOCI    4988                 pool->flags |= POOL_DISASSOCIATED;
6450                                                  4989 
6451                 /*                               4990                 /*
6452                  * The handling of nr_running    4991                  * The handling of nr_running in sched callbacks is disabled
6453                  * now.  Zap nr_running.  Aft    4992                  * now.  Zap nr_running.  After this, nr_running stays zero and
6454                  * need_more_worker() and kee    4993                  * need_more_worker() and keep_working() are always true as
6455                  * long as the worklist is no    4994                  * long as the worklist is not empty.  This pool now behaves as
6456                  * an unbound (in terms of co    4995                  * an unbound (in terms of concurrency management) pool which
6457                  * is served by workers tied     4996                  * is served by workers tied to the pool.
6458                  */                              4997                  */
6459                 pool->nr_running = 0;            4998                 pool->nr_running = 0;
6460                                                  4999 
6461                 /*                               5000                 /*
6462                  * With concurrency managemen    5001                  * With concurrency management just turned off, a busy
6463                  * worker blocking could lead    5002                  * worker blocking could lead to lengthy stalls.  Kick off
6464                  * unbound chain execution of    5003                  * unbound chain execution of currently pending work items.
6465                  */                              5004                  */
6466                 kick_pool(pool);              !! 5005                 wake_up_worker(pool);
6467                                                  5006 
6468                 raw_spin_unlock_irq(&pool->lo    5007                 raw_spin_unlock_irq(&pool->lock);
6469                                                  5008 
6470                 for_each_pool_worker(worker,  !! 5009                 for_each_pool_worker(worker, pool) {
6471                         unbind_worker(worker) !! 5010                         kthread_set_per_cpu(worker->task, -1);
                                                   >> 5011                         if (cpumask_intersects(wq_unbound_cpumask, cpu_active_mask))
                                                   >> 5012                                 WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, wq_unbound_cpumask) < 0);
                                                   >> 5013                         else
                                                   >> 5014                                 WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, cpu_possible_mask) < 0);
                                                   >> 5015                 }
6472                                                  5016 
6473                 mutex_unlock(&wq_pool_attach_    5017                 mutex_unlock(&wq_pool_attach_mutex);
6474         }                                        5018         }
6475 }                                                5019 }
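
On the 6.11.5 side the per-worker unbinding is factored out into unbind_worker(), whose call is truncated in the left column; the 6.0.19 column still shows the logic inline. A sketch of what that helper amounts to, reconstructed from the inline code on the right (an approximation, not the exact 6.11.5 body):

static void unbind_worker_sketch(struct worker *worker)
{
	lockdep_assert_held(&wq_pool_attach_mutex);

	/* Drop the kthread's strict per-CPU binding ... */
	kthread_set_per_cpu(worker->task, -1);

	/*
	 * ... and let it run on the unbound cpumask, or on any possible
	 * CPU if that mask currently has no active CPU.
	 */
	if (cpumask_intersects(wq_unbound_cpumask, cpu_active_mask))
		WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, wq_unbound_cpumask) < 0);
	else
		WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, cpu_possible_mask) < 0);
}
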
6476                                                  5020 
6477 /**                                              5021 /**
6478  * rebind_workers - rebind all workers of a p    5022  * rebind_workers - rebind all workers of a pool to the associated CPU
6479  * @pool: pool of interest                       5023  * @pool: pool of interest
6480  *                                               5024  *
6481  * @pool->cpu is coming online.  Rebind all w    5025  * @pool->cpu is coming online.  Rebind all workers to the CPU.
6482  */                                              5026  */
6483 static void rebind_workers(struct worker_pool    5027 static void rebind_workers(struct worker_pool *pool)
6484 {                                                5028 {
6485         struct worker *worker;                   5029         struct worker *worker;
6486                                                  5030 
6487         lockdep_assert_held(&wq_pool_attach_m    5031         lockdep_assert_held(&wq_pool_attach_mutex);
6488                                                  5032 
6489         /*                                       5033         /*
6490          * Restore CPU affinity of all worker    5034          * Restore CPU affinity of all workers.  As all idle workers should
6491          * be on the run-queue of the associa    5035          * be on the run-queue of the associated CPU before any local
6492          * wake-ups for concurrency managemen    5036          * wake-ups for concurrency management happen, restore CPU affinity
6493          * of all workers first and then clea    5037          * of all workers first and then clear UNBOUND.  As we're called
6494          * from CPU_ONLINE, the following sho    5038          * from CPU_ONLINE, the following shouldn't fail.
6495          */                                      5039          */
6496         for_each_pool_worker(worker, pool) {     5040         for_each_pool_worker(worker, pool) {
6497                 kthread_set_per_cpu(worker->t    5041                 kthread_set_per_cpu(worker->task, pool->cpu);
6498                 WARN_ON_ONCE(set_cpus_allowed    5042                 WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task,
6499                                               !! 5043                                                   pool->attrs->cpumask) < 0);
6500         }                                        5044         }
6501                                                  5045 
6502         raw_spin_lock_irq(&pool->lock);          5046         raw_spin_lock_irq(&pool->lock);
6503                                                  5047 
6504         pool->flags &= ~POOL_DISASSOCIATED;      5048         pool->flags &= ~POOL_DISASSOCIATED;
6505                                                  5049 
6506         for_each_pool_worker(worker, pool) {     5050         for_each_pool_worker(worker, pool) {
6507                 unsigned int worker_flags = w    5051                 unsigned int worker_flags = worker->flags;
6508                                                  5052 
6509                 /*                               5053                 /*
6510                  * We want to clear UNBOUND b    5054                  * We want to clear UNBOUND but can't directly call
6511                  * worker_clr_flags() or adju    5055                  * worker_clr_flags() or adjust nr_running.  Atomically
6512                  * replace UNBOUND with anoth    5056                  * replace UNBOUND with another NOT_RUNNING flag REBOUND.
6513                  * @worker will clear REBOUND    5057                  * @worker will clear REBOUND using worker_clr_flags() when
6514                  * it initiates the next exec    5058                  * it initiates the next execution cycle thus restoring
6515                  * concurrency management.  N    5059                  * concurrency management.  Note that when or whether
6516                  * @worker clears REBOUND doe    5060                  * @worker clears REBOUND doesn't affect correctness.
6517                  *                               5061                  *
6518                  * WRITE_ONCE() is necessary     5062                  * WRITE_ONCE() is necessary because @worker->flags may be
6519                  * tested without holding any    5063                  * tested without holding any lock in
6520                  * wq_worker_running().  With    5064                  * wq_worker_running().  Without it, NOT_RUNNING test may
6521                  * fail incorrectly leading t    5065                  * fail incorrectly leading to premature concurrency
6522                  * management operations.        5066                  * management operations.
6523                  */                              5067                  */
6524                 WARN_ON_ONCE(!(worker_flags &    5068                 WARN_ON_ONCE(!(worker_flags & WORKER_UNBOUND));
6525                 worker_flags |= WORKER_REBOUN    5069                 worker_flags |= WORKER_REBOUND;
6526                 worker_flags &= ~WORKER_UNBOU    5070                 worker_flags &= ~WORKER_UNBOUND;
6527                 WRITE_ONCE(worker->flags, wor    5071                 WRITE_ONCE(worker->flags, worker_flags);
6528         }                                        5072         }
6529                                                  5073 
6530         raw_spin_unlock_irq(&pool->lock);        5074         raw_spin_unlock_irq(&pool->lock);
6531 }                                                5075 }
6532                                                  5076 
6533 /**                                              5077 /**
6534  * restore_unbound_workers_cpumask - restore     5078  * restore_unbound_workers_cpumask - restore cpumask of unbound workers
6535  * @pool: unbound pool of interest               5079  * @pool: unbound pool of interest
6536  * @cpu: the CPU which is coming up              5080  * @cpu: the CPU which is coming up
6537  *                                               5081  *
6538  * An unbound pool may end up with a cpumask     5082  * An unbound pool may end up with a cpumask which doesn't have any online
6539  * CPUs.  When a worker of such a pool gets sc    5083  * CPUs.  When a worker of such a pool gets scheduled, the scheduler resets
6540  * its cpus_allowed.  If @cpu is in @pool's c    5084  * its cpus_allowed.  If @cpu is in @pool's cpumask which didn't have any
6541  * online CPU before, cpus_allowed of all its    5085  * online CPU before, cpus_allowed of all its workers should be restored.
6542  */                                              5086  */
6543 static void restore_unbound_workers_cpumask(s    5087 static void restore_unbound_workers_cpumask(struct worker_pool *pool, int cpu)
6544 {                                                5088 {
6545         static cpumask_t cpumask;                5089         static cpumask_t cpumask;
6546         struct worker *worker;                   5090         struct worker *worker;
6547                                                  5091 
6548         lockdep_assert_held(&wq_pool_attach_m    5092         lockdep_assert_held(&wq_pool_attach_mutex);
6549                                                  5093 
6550         /* is @cpu allowed for @pool? */         5094         /* is @cpu allowed for @pool? */
6551         if (!cpumask_test_cpu(cpu, pool->attr    5095         if (!cpumask_test_cpu(cpu, pool->attrs->cpumask))
6552                 return;                          5096                 return;
6553                                                  5097 
6554         cpumask_and(&cpumask, pool->attrs->cp    5098         cpumask_and(&cpumask, pool->attrs->cpumask, cpu_online_mask);
6555                                                  5099 
6556         /* as we're called from CPU_ONLINE, t    5100         /* as we're called from CPU_ONLINE, the following shouldn't fail */
6557         for_each_pool_worker(worker, pool)       5101         for_each_pool_worker(worker, pool)
6558                 WARN_ON_ONCE(set_cpus_allowed    5102                 WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, &cpumask) < 0);
6559 }                                                5103 }
6560                                                  5104 
6561 int workqueue_prepare_cpu(unsigned int cpu)      5105 int workqueue_prepare_cpu(unsigned int cpu)
6562 {                                                5106 {
6563         struct worker_pool *pool;                5107         struct worker_pool *pool;
6564                                                  5108 
6565         for_each_cpu_worker_pool(pool, cpu) {    5109         for_each_cpu_worker_pool(pool, cpu) {
6566                 if (pool->nr_workers)            5110                 if (pool->nr_workers)
6567                         continue;                5111                         continue;
6568                 if (!create_worker(pool))        5112                 if (!create_worker(pool))
6569                         return -ENOMEM;          5113                         return -ENOMEM;
6570         }                                        5114         }
6571         return 0;                                5115         return 0;
6572 }                                                5116 }
6573                                                  5117 
6574 int workqueue_online_cpu(unsigned int cpu)       5118 int workqueue_online_cpu(unsigned int cpu)
6575 {                                                5119 {
6576         struct worker_pool *pool;                5120         struct worker_pool *pool;
6577         struct workqueue_struct *wq;             5121         struct workqueue_struct *wq;
6578         int pi;                                  5122         int pi;
6579                                                  5123 
6580         mutex_lock(&wq_pool_mutex);              5124         mutex_lock(&wq_pool_mutex);
6581                                                  5125 
6582         cpumask_set_cpu(cpu, wq_online_cpumas << 
6583                                               << 
6584         for_each_pool(pool, pi) {                5126         for_each_pool(pool, pi) {
6585                 /* BH pools aren't affected b << 
6586                 if (pool->flags & POOL_BH)    << 
6587                         continue;             << 
6588                                               << 
6589                 mutex_lock(&wq_pool_attach_mu    5127                 mutex_lock(&wq_pool_attach_mutex);
                                                   >> 5128 
6590                 if (pool->cpu == cpu)            5129                 if (pool->cpu == cpu)
6591                         rebind_workers(pool);    5130                         rebind_workers(pool);
6592                 else if (pool->cpu < 0)          5131                 else if (pool->cpu < 0)
6593                         restore_unbound_worke    5132                         restore_unbound_workers_cpumask(pool, cpu);
                                                   >> 5133 
6594                 mutex_unlock(&wq_pool_attach_    5134                 mutex_unlock(&wq_pool_attach_mutex);
6595         }                                        5135         }
6596                                                  5136 
6597         /* update pod affinity of unbound wor !! 5137         /* update NUMA affinity of unbound workqueues */
6598         list_for_each_entry(wq, &workqueues,  !! 5138         list_for_each_entry(wq, &workqueues, list)
6599                 struct workqueue_attrs *attrs !! 5139                 wq_update_unbound_numa(wq, cpu, true);
6600                                               << 
6601                 if (attrs) {                  << 
6602                         const struct wq_pod_t << 
6603                         int tcpu;             << 
6604                                               << 
6605                         for_each_cpu(tcpu, pt << 
6606                                 unbound_wq_up << 
6607                                               << 
6608                         mutex_lock(&wq->mutex << 
6609                         wq_update_node_max_ac << 
6610                         mutex_unlock(&wq->mut << 
6611                 }                             << 
6612         }                                     << 
6613                                                  5140 
6614         mutex_unlock(&wq_pool_mutex);            5141         mutex_unlock(&wq_pool_mutex);
6615         return 0;                                5142         return 0;
6616 }                                                5143 }
6617                                                  5144 
6618 int workqueue_offline_cpu(unsigned int cpu)      5145 int workqueue_offline_cpu(unsigned int cpu)
6619 {                                                5146 {
6620         struct workqueue_struct *wq;             5147         struct workqueue_struct *wq;
6621                                                  5148 
6622         /* unbinding per-cpu workers should h    5149         /* unbinding per-cpu workers should happen on the local CPU */
6623         if (WARN_ON(cpu != smp_processor_id()    5150         if (WARN_ON(cpu != smp_processor_id()))
6624                 return -1;                       5151                 return -1;
6625                                                  5152 
6626         unbind_workers(cpu);                     5153         unbind_workers(cpu);
6627                                                  5154 
6628         /* update pod affinity of unbound wor !! 5155         /* update NUMA affinity of unbound workqueues */
6629         mutex_lock(&wq_pool_mutex);              5156         mutex_lock(&wq_pool_mutex);
6630                                               !! 5157         list_for_each_entry(wq, &workqueues, list)
6631         cpumask_clear_cpu(cpu, wq_online_cpum !! 5158                 wq_update_unbound_numa(wq, cpu, false);
6632                                               << 
6633         list_for_each_entry(wq, &workqueues,  << 
6634                 struct workqueue_attrs *attrs << 
6635                                               << 
6636                 if (attrs) {                  << 
6637                         const struct wq_pod_t << 
6638                         int tcpu;             << 
6639                                               << 
6640                         for_each_cpu(tcpu, pt << 
6641                                 unbound_wq_up << 
6642                                               << 
6643                         mutex_lock(&wq->mutex << 
6644                         wq_update_node_max_ac << 
6645                         mutex_unlock(&wq->mut << 
6646                 }                             << 
6647         }                                     << 
6648         mutex_unlock(&wq_pool_mutex);            5159         mutex_unlock(&wq_pool_mutex);
6649                                                  5160 
6650         return 0;                                5161         return 0;
6651 }                                                5162 }
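
workqueue_prepare_cpu(), workqueue_online_cpu() and workqueue_offline_cpu() are invoked by the CPU hotplug state machine rather than called directly. As a hedged illustration of the same callback pattern, a hypothetical driver (all names invented) could register analogous online/offline handlers through the dynamic hotplug state API:

#include <linux/cpuhotplug.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/printk.h>

static enum cpuhp_state mydrv_hp_state;

static int mydrv_cpu_online(unsigned int cpu)
{
	pr_info("mydrv: cpu%u coming online\n", cpu);
	return 0;
}

static int mydrv_cpu_offline(unsigned int cpu)
{
	pr_info("mydrv: cpu%u going offline\n", cpu);
	return 0;
}

static int __init mydrv_init(void)
{
	int ret;

	/*
	 * CPUHP_AP_ONLINE_DYN allocates a dynamic state and invokes the
	 * online callback on every CPU that is already up.
	 */
	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "mydrv:online",
				mydrv_cpu_online, mydrv_cpu_offline);
	if (ret < 0)
		return ret;
	mydrv_hp_state = ret;
	return 0;
}

static void __exit mydrv_exit(void)
{
	cpuhp_remove_state(mydrv_hp_state);
}

module_init(mydrv_init);
module_exit(mydrv_exit);
MODULE_LICENSE("GPL");
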
6652                                                  5163 
6653 struct work_for_cpu {                            5164 struct work_for_cpu {
6654         struct work_struct work;                 5165         struct work_struct work;
6655         long (*fn)(void *);                      5166         long (*fn)(void *);
6656         void *arg;                               5167         void *arg;
6657         long ret;                                5168         long ret;
6658 };                                               5169 };
6659                                                  5170 
6660 static void work_for_cpu_fn(struct work_struc    5171 static void work_for_cpu_fn(struct work_struct *work)
6661 {                                                5172 {
6662         struct work_for_cpu *wfc = container_    5173         struct work_for_cpu *wfc = container_of(work, struct work_for_cpu, work);
6663                                                  5174 
6664         wfc->ret = wfc->fn(wfc->arg);            5175         wfc->ret = wfc->fn(wfc->arg);
6665 }                                                5176 }
6666                                                  5177 
6667 /**                                              5178 /**
6668  * work_on_cpu_key - run a function in thread !! 5179  * work_on_cpu - run a function in thread context on a particular cpu
6669  * @cpu: the cpu to run on                       5180  * @cpu: the cpu to run on
6670  * @fn: the function to run                      5181  * @fn: the function to run
6671  * @arg: the function arg                        5182  * @arg: the function arg
6672  * @key: The lock class key for lock debuggin << 
6673  *                                               5183  *
6674  * It is up to the caller to ensure that the     5184  * It is up to the caller to ensure that the cpu doesn't go offline.
6675  * The caller must not hold any locks which w    5185  * The caller must not hold any locks which would prevent @fn from completing.
6676  *                                               5186  *
6677  * Return: The value @fn returns.                5187  * Return: The value @fn returns.
6678  */                                              5188  */
6679 long work_on_cpu_key(int cpu, long (*fn)(void !! 5189 long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
6680                      void *arg, struct lock_c << 
6681 {                                                5190 {
6682         struct work_for_cpu wfc = { .fn = fn,    5191         struct work_for_cpu wfc = { .fn = fn, .arg = arg };
6683                                                  5192 
6684         INIT_WORK_ONSTACK_KEY(&wfc.work, work !! 5193         INIT_WORK_ONSTACK(&wfc.work, work_for_cpu_fn);
6685         schedule_work_on(cpu, &wfc.work);        5194         schedule_work_on(cpu, &wfc.work);
6686         flush_work(&wfc.work);                   5195         flush_work(&wfc.work);
6687         destroy_work_on_stack(&wfc.work);        5196         destroy_work_on_stack(&wfc.work);
6688         return wfc.ret;                          5197         return wfc.ret;
6689 }                                                5198 }
6690 EXPORT_SYMBOL_GPL(work_on_cpu_key);           !! 5199 EXPORT_SYMBOL_GPL(work_on_cpu);
6691                                                  5200 
6692 /**                                              5201 /**
6693  * work_on_cpu_safe_key - run a function in t !! 5202  * work_on_cpu_safe - run a function in thread context on a particular cpu
6694  * @cpu: the cpu to run on                       5203  * @cpu: the cpu to run on
6695  * @fn:  the function to run                     5204  * @fn:  the function to run
6696  * @arg: the function argument                   5205  * @arg: the function argument
6697  * @key: The lock class key for lock debuggin << 
6698  *                                               5206  *
6699  * Disables CPU hotplug and calls work_on_cpu    5207  * Disables CPU hotplug and calls work_on_cpu(). The caller must not hold
6700  * any locks which would prevent @fn from com    5208  * any locks which would prevent @fn from completing.
6701  *                                               5209  *
6702  * Return: The value @fn returns.                5210  * Return: The value @fn returns.
6703  */                                              5211  */
6704 long work_on_cpu_safe_key(int cpu, long (*fn) !! 5212 long work_on_cpu_safe(int cpu, long (*fn)(void *), void *arg)
6705                           void *arg, struct l << 
6706 {                                                5213 {
6707         long ret = -ENODEV;                      5214         long ret = -ENODEV;
6708                                                  5215 
6709         cpus_read_lock();                        5216         cpus_read_lock();
6710         if (cpu_online(cpu))                     5217         if (cpu_online(cpu))
6711                 ret = work_on_cpu_key(cpu, fn !! 5218                 ret = work_on_cpu(cpu, fn, arg);
6712         cpus_read_unlock();                      5219         cpus_read_unlock();
6713         return ret;                              5220         return ret;
6714 }                                                5221 }
6715 EXPORT_SYMBOL_GPL(work_on_cpu_safe_key);      !! 5222 EXPORT_SYMBOL_GPL(work_on_cpu_safe);
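
On the 6.11.5 side these are the lockdep-keyed variants reached through the work_on_cpu()/work_on_cpu_safe() wrappers declared in <linux/workqueue.h>; on 6.0.19 those names are the functions themselves. Either way a caller uses them the same way. A minimal, hypothetical in-kernel sketch (function names invented):

#include <linux/workqueue.h>
#include <linux/smp.h>
#include <linux/printk.h>

/* Runs in a kworker bound to the requested CPU, in process context. */
static long mydrv_probe_cpu(void *arg)
{
	return (long)raw_smp_processor_id();
}

static void mydrv_example(void)
{
	long ret;

	/*
	 * work_on_cpu_safe() additionally takes cpus_read_lock() and
	 * returns -ENODEV if CPU 2 is not online.
	 */
	ret = work_on_cpu_safe(2, mydrv_probe_cpu, NULL);
	if (ret >= 0)
		pr_info("probe ran on cpu %ld\n", ret);
}
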
6716 #endif /* CONFIG_SMP */                          5223 #endif /* CONFIG_SMP */
6717                                                  5224 
6718 #ifdef CONFIG_FREEZER                            5225 #ifdef CONFIG_FREEZER
6719                                                  5226 
6720 /**                                              5227 /**
6721  * freeze_workqueues_begin - begin freezing w    5228  * freeze_workqueues_begin - begin freezing workqueues
6722  *                                               5229  *
6723  * Start freezing workqueues.  After this fun    5230  * Start freezing workqueues.  After this function returns, all freezable
6724  * workqueues will queue new works to their i    5231  * workqueues will queue new works to their inactive_works list instead of
6725  * pool->worklist.                               5232  * pool->worklist.
6726  *                                               5233  *
6727  * CONTEXT:                                      5234  * CONTEXT:
6728  * Grabs and releases wq_pool_mutex, wq->mute    5235  * Grabs and releases wq_pool_mutex, wq->mutex and pool->lock's.
6729  */                                              5236  */
6730 void freeze_workqueues_begin(void)               5237 void freeze_workqueues_begin(void)
6731 {                                                5238 {
6732         struct workqueue_struct *wq;             5239         struct workqueue_struct *wq;
                                                   >> 5240         struct pool_workqueue *pwq;
6733                                                  5241 
6734         mutex_lock(&wq_pool_mutex);              5242         mutex_lock(&wq_pool_mutex);
6735                                                  5243 
6736         WARN_ON_ONCE(workqueue_freezing);        5244         WARN_ON_ONCE(workqueue_freezing);
6737         workqueue_freezing = true;               5245         workqueue_freezing = true;
6738                                                  5246 
6739         list_for_each_entry(wq, &workqueues,     5247         list_for_each_entry(wq, &workqueues, list) {
6740                 mutex_lock(&wq->mutex);          5248                 mutex_lock(&wq->mutex);
6741                 wq_adjust_max_active(wq);     !! 5249                 for_each_pwq(pwq, wq)
                                                   >> 5250                         pwq_adjust_max_active(pwq);
6742                 mutex_unlock(&wq->mutex);        5251                 mutex_unlock(&wq->mutex);
6743         }                                        5252         }
6744                                                  5253 
6745         mutex_unlock(&wq_pool_mutex);            5254         mutex_unlock(&wq_pool_mutex);
6746 }                                                5255 }
6747                                                  5256 
6748 /**                                              5257 /**
6749  * freeze_workqueues_busy - are freezable wor    5258  * freeze_workqueues_busy - are freezable workqueues still busy?
6750  *                                               5259  *
6751  * Check whether freezing is complete.  This     5260  * Check whether freezing is complete.  This function must be called
6752  * between freeze_workqueues_begin() and thaw    5261  * between freeze_workqueues_begin() and thaw_workqueues().
6753  *                                               5262  *
6754  * CONTEXT:                                      5263  * CONTEXT:
6755  * Grabs and releases wq_pool_mutex.             5264  * Grabs and releases wq_pool_mutex.
6756  *                                               5265  *
6757  * Return:                                       5266  * Return:
6758  * %true if some freezable workqueues are sti    5267  * %true if some freezable workqueues are still busy.  %false if freezing
6759  * is complete.                                  5268  * is complete.
6760  */                                              5269  */
6761 bool freeze_workqueues_busy(void)                5270 bool freeze_workqueues_busy(void)
6762 {                                                5271 {
6763         bool busy = false;                       5272         bool busy = false;
6764         struct workqueue_struct *wq;             5273         struct workqueue_struct *wq;
6765         struct pool_workqueue *pwq;              5274         struct pool_workqueue *pwq;
6766                                                  5275 
6767         mutex_lock(&wq_pool_mutex);              5276         mutex_lock(&wq_pool_mutex);
6768                                                  5277 
6769         WARN_ON_ONCE(!workqueue_freezing);       5278         WARN_ON_ONCE(!workqueue_freezing);
6770                                                  5279 
6771         list_for_each_entry(wq, &workqueues,     5280         list_for_each_entry(wq, &workqueues, list) {
6772                 if (!(wq->flags & WQ_FREEZABL    5281                 if (!(wq->flags & WQ_FREEZABLE))
6773                         continue;                5282                         continue;
6774                 /*                               5283                 /*
6775                  * nr_active is monotonically    5284                  * nr_active is monotonically decreasing.  It's safe
6776                  * to peek without lock.         5285                  * to peek without lock.
6777                  */                              5286                  */
6778                 rcu_read_lock();                 5287                 rcu_read_lock();
6779                 for_each_pwq(pwq, wq) {          5288                 for_each_pwq(pwq, wq) {
6780                         WARN_ON_ONCE(pwq->nr_    5289                         WARN_ON_ONCE(pwq->nr_active < 0);
6781                         if (pwq->nr_active) {    5290                         if (pwq->nr_active) {
6782                                 busy = true;     5291                                 busy = true;
6783                                 rcu_read_unlo    5292                                 rcu_read_unlock();
6784                                 goto out_unlo    5293                                 goto out_unlock;
6785                         }                        5294                         }
6786                 }                                5295                 }
6787                 rcu_read_unlock();               5296                 rcu_read_unlock();
6788         }                                        5297         }
6789 out_unlock:                                      5298 out_unlock:
6790         mutex_unlock(&wq_pool_mutex);            5299         mutex_unlock(&wq_pool_mutex);
6791         return busy;                             5300         return busy;
6792 }                                                5301 }
6793                                                  5302 
6794 /**                                              5303 /**
6795  * thaw_workqueues - thaw workqueues             5304  * thaw_workqueues - thaw workqueues
6796  *                                               5305  *
6797  * Thaw workqueues.  Normal queueing is resto    5306  * Thaw workqueues.  Normal queueing is restored and all collected
6798  * frozen works are transferred to their resp    5307  * frozen works are transferred to their respective pool worklists.
6799  *                                               5308  *
6800  * CONTEXT:                                      5309  * CONTEXT:
6801  * Grabs and releases wq_pool_mutex, wq->mute    5310  * Grabs and releases wq_pool_mutex, wq->mutex and pool->lock's.
6802  */                                              5311  */
6803 void thaw_workqueues(void)                       5312 void thaw_workqueues(void)
6804 {                                                5313 {
6805         struct workqueue_struct *wq;             5314         struct workqueue_struct *wq;
                                                   >> 5315         struct pool_workqueue *pwq;
6806                                                  5316 
6807         mutex_lock(&wq_pool_mutex);              5317         mutex_lock(&wq_pool_mutex);
6808                                                  5318 
6809         if (!workqueue_freezing)                 5319         if (!workqueue_freezing)
6810                 goto out_unlock;                 5320                 goto out_unlock;
6811                                                  5321 
6812         workqueue_freezing = false;              5322         workqueue_freezing = false;
6813                                                  5323 
6814         /* restore max_active and repopulate     5324         /* restore max_active and repopulate worklist */
6815         list_for_each_entry(wq, &workqueues,     5325         list_for_each_entry(wq, &workqueues, list) {
6816                 mutex_lock(&wq->mutex);          5326                 mutex_lock(&wq->mutex);
6817                 wq_adjust_max_active(wq);     !! 5327                 for_each_pwq(pwq, wq)
                                                   >> 5328                         pwq_adjust_max_active(pwq);
6818                 mutex_unlock(&wq->mutex);        5329                 mutex_unlock(&wq->mutex);
6819         }                                        5330         }
6820                                                  5331 
6821 out_unlock:                                      5332 out_unlock:
6822         mutex_unlock(&wq_pool_mutex);            5333         mutex_unlock(&wq_pool_mutex);
6823 }                                                5334 }
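
Taken together, the three freezer entry points above form a simple begin / poll-until-idle / thaw protocol driven by the PM core. A compressed sketch of that call order (simplified relative to what kernel/power/process.c actually does, with invented helper names):

#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/errno.h>

static int example_freeze_wqs(void)
{
	int tries = 100;

	/* New work on WQ_FREEZABLE workqueues now lands on inactive_works. */
	freeze_workqueues_begin();

	/* Wait for already-queued items on freezable workqueues to drain. */
	while (freeze_workqueues_busy()) {
		if (!--tries)
			return -EBUSY;
		msleep(10);
	}
	return 0;
}

static void example_thaw_wqs(void)
{
	/* Restore max_active and requeue anything collected while frozen. */
	thaw_workqueues();
}
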
6824 #endif /* CONFIG_FREEZER */                      5335 #endif /* CONFIG_FREEZER */
6825                                                  5336 
6826 static int workqueue_apply_unbound_cpumask(co !! 5337 static int workqueue_apply_unbound_cpumask(void)
6827 {                                                5338 {
6828         LIST_HEAD(ctxs);                         5339         LIST_HEAD(ctxs);
6829         int ret = 0;                             5340         int ret = 0;
6830         struct workqueue_struct *wq;             5341         struct workqueue_struct *wq;
6831         struct apply_wqattrs_ctx *ctx, *n;       5342         struct apply_wqattrs_ctx *ctx, *n;
6832                                                  5343 
6833         lockdep_assert_held(&wq_pool_mutex);     5344         lockdep_assert_held(&wq_pool_mutex);
6834                                                  5345 
6835         list_for_each_entry(wq, &workqueues,     5346         list_for_each_entry(wq, &workqueues, list) {
6836                 if (!(wq->flags & WQ_UNBOUND) !! 5347                 if (!(wq->flags & WQ_UNBOUND))
                                                   >> 5348                         continue;
                                                   >> 5349                 /* creating multiple pwqs breaks ordering guarantee */
                                                   >> 5350                 if (wq->flags & __WQ_ORDERED)
6837                         continue;                5351                         continue;
6838                                                  5352 
6839                 ctx = apply_wqattrs_prepare(w !! 5353                 ctx = apply_wqattrs_prepare(wq, wq->unbound_attrs);
6840                 if (IS_ERR(ctx)) {            !! 5354                 if (!ctx) {
6841                         ret = PTR_ERR(ctx);   !! 5355                         ret = -ENOMEM;
6842                         break;                   5356                         break;
6843                 }                                5357                 }
6844                                                  5358 
6845                 list_add_tail(&ctx->list, &ct    5359                 list_add_tail(&ctx->list, &ctxs);
6846         }                                        5360         }
6847                                                  5361 
6848         list_for_each_entry_safe(ctx, n, &ctx    5362         list_for_each_entry_safe(ctx, n, &ctxs, list) {
6849                 if (!ret)                        5363                 if (!ret)
6850                         apply_wqattrs_commit(    5364                         apply_wqattrs_commit(ctx);
6851                 apply_wqattrs_cleanup(ctx);      5365                 apply_wqattrs_cleanup(ctx);
6852         }                                        5366         }
6853                                                  5367 
6854         if (!ret) {                           << 
6855                 mutex_lock(&wq_pool_attach_mu << 
6856                 cpumask_copy(wq_unbound_cpuma << 
6857                 mutex_unlock(&wq_pool_attach_ << 
6858         }                                     << 
6859         return ret;                              5368         return ret;
6860 }                                                5369 }
6861                                                  5370 
6862 /**                                              5371 /**
6863  * workqueue_unbound_exclude_cpumask - Exclud !! 5372  *  workqueue_set_unbound_cpumask - Set the low-level unbound cpumask
6864  * @exclude_cpumask: the cpumask to be exclud !! 5373  *  @cpumask: the cpumask to set
6865  *                                               5374  *
6866  * This function can be called from cpuset co !! 5375  *  The low-level workqueues cpumask is a global cpumask that limits
6867  * CPUs that should be excluded from wq_unbou !! 5376  *  the affinity of all unbound workqueues.  This function check the @cpumask
                                                   >> 5377  *  and apply it to all unbound workqueues and updates all pwqs of them.
                                                   >> 5378  *
                                                   >> 5379  *  Return:     0       - Success
                                                   >> 5380  *              -EINVAL - Invalid @cpumask
                                                   >> 5381  *              -ENOMEM - Failed to allocate memory for attrs or pwqs.
6868  */                                              5382  */
6869 int workqueue_unbound_exclude_cpumask(cpumask !! 5383 int workqueue_set_unbound_cpumask(cpumask_var_t cpumask)
6870 {                                                5384 {
6871         cpumask_var_t cpumask;                !! 5385         int ret = -EINVAL;
6872         int ret = 0;                          !! 5386         cpumask_var_t saved_cpumask;
6873                                               << 
6874         if (!zalloc_cpumask_var(&cpumask, GFP << 
6875                 return -ENOMEM;               << 
6876                                               << 
6877         mutex_lock(&wq_pool_mutex);           << 
6878                                                  5387 
6879         /*                                       5388         /*
6880          * If the operation fails, it will fa !! 5389          * Not excluding isolated cpus on purpose.
6881          * wq_requested_unbound_cpumask which !! 5390          * If the user wishes to include them, we allow that.
6882          * (HK_TYPE_WQ ∩ HK_TYPE_DOMAIN) ho !! 5391          */
6883          * by any subsequent write to workque !! 5392         cpumask_and(cpumask, cpumask, cpu_possible_mask);
6884          */                                   !! 5393         if (!cpumask_empty(cpumask)) {
6885         if (!cpumask_andnot(cpumask, wq_reque !! 5394                 apply_wqattrs_lock();
6886                 cpumask_copy(cpumask, wq_requ !! 5395                 if (cpumask_equal(cpumask, wq_unbound_cpumask)) {
6887         if (!cpumask_equal(cpumask, wq_unboun !! 5396                         ret = 0;
6888                 ret = workqueue_apply_unbound !! 5397                         goto out_unlock;
6889                                               !! 5398                 }
6890         /* Save the current isolated cpumask  << 
6891         if (!ret)                             << 
6892                 cpumask_copy(wq_isolated_cpum << 
6893                                               << 
6894         mutex_unlock(&wq_pool_mutex);         << 
6895         free_cpumask_var(cpumask);            << 
6896         return ret;                           << 
6897 }                                             << 
6898                                               << 
6899 static int parse_affn_scope(const char *val)  << 
6900 {                                             << 
6901         int i;                                << 
6902                                               << 
6903         for (i = 0; i < ARRAY_SIZE(wq_affn_na << 
6904                 if (!strncasecmp(val, wq_affn << 
6905                         return i;             << 
6906         }                                     << 
6907         return -EINVAL;                       << 
6908 }                                             << 
6909                                                  5399 
6910 static int wq_affn_dfl_set(const char *val, c !! 5400                 if (!zalloc_cpumask_var(&saved_cpumask, GFP_KERNEL)) {
6911 {                                             !! 5401                         ret = -ENOMEM;
6912         struct workqueue_struct *wq;          !! 5402                         goto out_unlock;
6913         int affn, cpu;                        !! 5403                 }
6914                                                  5404 
6915         affn = parse_affn_scope(val);         !! 5405                 /* save the old wq_unbound_cpumask. */
6916         if (affn < 0)                         !! 5406                 cpumask_copy(saved_cpumask, wq_unbound_cpumask);
6917                 return affn;                  << 
6918         if (affn == WQ_AFFN_DFL)              << 
6919                 return -EINVAL;               << 
6920                                                  5407 
6921         cpus_read_lock();                     !! 5408                 /* update wq_unbound_cpumask at first and apply it to wqs. */
6922         mutex_lock(&wq_pool_mutex);           !! 5409                 cpumask_copy(wq_unbound_cpumask, cpumask);
                                                   >> 5410                 ret = workqueue_apply_unbound_cpumask();
6923                                                  5411 
6924         wq_affn_dfl = affn;                   !! 5412                 /* restore the wq_unbound_cpumask when failed. */
                                                   >> 5413                 if (ret < 0)
                                                   >> 5414                         cpumask_copy(wq_unbound_cpumask, saved_cpumask);
6925                                                  5415 
6926         list_for_each_entry(wq, &workqueues,  !! 5416                 free_cpumask_var(saved_cpumask);
6927                 for_each_online_cpu(cpu)      !! 5417 out_unlock:
6928                         unbound_wq_update_pwq !! 5418                 apply_wqattrs_unlock();
6929         }                                        5419         }
6930                                                  5420 
6931         mutex_unlock(&wq_pool_mutex);         !! 5421         return ret;
6932         cpus_read_unlock();                   << 
6933                                               << 
6934         return 0;                             << 
6935 }                                             << 
6936                                               << 
6937 static int wq_affn_dfl_get(char *buffer, cons << 
6938 {                                             << 
6939         return scnprintf(buffer, PAGE_SIZE, " << 
6940 }                                                5422 }
6941                                                  5423 
6942 static const struct kernel_param_ops wq_affn_ << 
6943         .set    = wq_affn_dfl_set,            << 
6944         .get    = wq_affn_dfl_get,            << 
6945 };                                            << 
6946                                               << 
6947 module_param_cb(default_affinity_scope, &wq_a << 
6948                                               << 
6949 #ifdef CONFIG_SYSFS                              5424 #ifdef CONFIG_SYSFS
6950 /*                                               5425 /*
6951  * Workqueues with the WQ_SYSFS flag set are v    5426  * Workqueues with the WQ_SYSFS flag set are visible to userland via
6952  * /sys/bus/workqueue/devices/WQ_NAME.  All v    5427  * /sys/bus/workqueue/devices/WQ_NAME.  All visible workqueues have the
6953  * following attributes.                         5428  * following attributes.
6954  *                                               5429  *
6955  *  per_cpu             RO bool : whether the !! 5430  *  per_cpu     RO bool : whether the workqueue is per-cpu or unbound
6956  *  max_active          RW int  : maximum num !! 5431  *  max_active  RW int  : maximum number of in-flight work items
6957  *                                               5432  *
6958  * Unbound workqueues have the following extr    5433  * Unbound workqueues have the following extra attributes.
6959  *                                               5434  *
6960  *  nice                RW int  : nice value  !! 5435  *  pool_ids    RO int  : the associated pool IDs for each node
6961  *  cpumask             RW mask : bitmask of  !! 5436  *  nice        RW int  : nice value of the workers
6962  *  affinity_scope      RW str  : worker CPU  !! 5437  *  cpumask     RW mask : bitmask of allowed CPUs for the workers
6963  *  affinity_strict     RW bool : worker CPU  !! 5438  *  numa        RW bool : whether enable NUMA affinity
6964  */                                              5439  */
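
The attribute list above maps onto files under /sys/bus/workqueue/devices/<name>/ for workqueues created with WQ_SYSFS. A hypothetical userspace reader is sketched below; "writeback" is only an example of a workqueue that is typically WQ_SYSFS, and a missing file simply means the attribute does not apply (per-cpu workqueues lack the unbound-only attributes):

#include <stdio.h>

static void show(const char *wq, const char *attr)
{
	char path[256], val[128];
	FILE *f;

	snprintf(path, sizeof(path), "/sys/bus/workqueue/devices/%s/%s", wq, attr);
	f = fopen(path, "r");
	if (!f)
		return;		/* attribute not present for this workqueue */
	if (fgets(val, sizeof(val), f))
		printf("%-16s %s", attr, val);
	fclose(f);
}

int main(void)
{
	const char *wq = "writeback";	/* any WQ_SYSFS workqueue name */

	show(wq, "per_cpu");
	show(wq, "max_active");
	show(wq, "nice");
	show(wq, "cpumask");
	show(wq, "affinity_scope");
	show(wq, "affinity_strict");
	return 0;
}
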
6965 struct wq_device {                               5440 struct wq_device {
6966         struct workqueue_struct         *wq;     5441         struct workqueue_struct         *wq;
6967         struct device                   dev;     5442         struct device                   dev;
6968 };                                               5443 };
6969                                                  5444 
6970 static struct workqueue_struct *dev_to_wq(str    5445 static struct workqueue_struct *dev_to_wq(struct device *dev)
6971 {                                                5446 {
6972         struct wq_device *wq_dev = container_    5447         struct wq_device *wq_dev = container_of(dev, struct wq_device, dev);
6973                                                  5448 
6974         return wq_dev->wq;                       5449         return wq_dev->wq;
6975 }                                                5450 }
6976                                                  5451 
6977 static ssize_t per_cpu_show(struct device *de    5452 static ssize_t per_cpu_show(struct device *dev, struct device_attribute *attr,
6978                             char *buf)           5453                             char *buf)
6979 {                                                5454 {
6980         struct workqueue_struct *wq = dev_to_    5455         struct workqueue_struct *wq = dev_to_wq(dev);
6981                                                  5456 
6982         return scnprintf(buf, PAGE_SIZE, "%d\    5457         return scnprintf(buf, PAGE_SIZE, "%d\n", (bool)!(wq->flags & WQ_UNBOUND));
6983 }                                                5458 }
6984 static DEVICE_ATTR_RO(per_cpu);                  5459 static DEVICE_ATTR_RO(per_cpu);
6985                                                  5460 
6986 static ssize_t max_active_show(struct device     5461 static ssize_t max_active_show(struct device *dev,
6987                                struct device_    5462                                struct device_attribute *attr, char *buf)
6988 {                                                5463 {
6989         struct workqueue_struct *wq = dev_to_    5464         struct workqueue_struct *wq = dev_to_wq(dev);
6990                                                  5465 
6991         return scnprintf(buf, PAGE_SIZE, "%d\    5466         return scnprintf(buf, PAGE_SIZE, "%d\n", wq->saved_max_active);
6992 }                                                5467 }
6993                                                  5468 
6994 static ssize_t max_active_store(struct device    5469 static ssize_t max_active_store(struct device *dev,
6995                                 struct device    5470                                 struct device_attribute *attr, const char *buf,
6996                                 size_t count)    5471                                 size_t count)
6997 {                                                5472 {
6998         struct workqueue_struct *wq = dev_to_    5473         struct workqueue_struct *wq = dev_to_wq(dev);
6999         int val;                                 5474         int val;
7000                                                  5475 
7001         if (sscanf(buf, "%d", &val) != 1 || v    5476         if (sscanf(buf, "%d", &val) != 1 || val <= 0)
7002                 return -EINVAL;                  5477                 return -EINVAL;
7003                                                  5478 
7004         workqueue_set_max_active(wq, val);       5479         workqueue_set_max_active(wq, val);
7005         return count;                            5480         return count;
7006 }                                                5481 }
7007 static DEVICE_ATTR_RW(max_active);               5482 static DEVICE_ATTR_RW(max_active);
7008                                                  5483 
7009 static struct attribute *wq_sysfs_attrs[] = {    5484 static struct attribute *wq_sysfs_attrs[] = {
7010         &dev_attr_per_cpu.attr,                  5485         &dev_attr_per_cpu.attr,
7011         &dev_attr_max_active.attr,               5486         &dev_attr_max_active.attr,
7012         NULL,                                    5487         NULL,
7013 };                                               5488 };
7014 ATTRIBUTE_GROUPS(wq_sysfs);                      5489 ATTRIBUTE_GROUPS(wq_sysfs);
7015                                                  5490 
                                                   >> 5491 static ssize_t wq_pool_ids_show(struct device *dev,
                                                   >> 5492                                 struct device_attribute *attr, char *buf)
                                                   >> 5493 {
                                                   >> 5494         struct workqueue_struct *wq = dev_to_wq(dev);
                                                   >> 5495         const char *delim = "";
                                                   >> 5496         int node, written = 0;
                                                   >> 5497 
                                                   >> 5498         cpus_read_lock();
                                                   >> 5499         rcu_read_lock();
                                                   >> 5500         for_each_node(node) {
                                                   >> 5501                 written += scnprintf(buf + written, PAGE_SIZE - written,
                                                   >> 5502                                      "%s%d:%d", delim, node,
                                                   >> 5503                                      unbound_pwq_by_node(wq, node)->pool->id);
                                                   >> 5504                 delim = " ";
                                                   >> 5505         }
                                                   >> 5506         written += scnprintf(buf + written, PAGE_SIZE - written, "\n");
                                                   >> 5507         rcu_read_unlock();
                                                   >> 5508         cpus_read_unlock();
                                                   >> 5509 
                                                   >> 5510         return written;
                                                   >> 5511 }
                                                   >> 5512 
7016 static ssize_t wq_nice_show(struct device *de    5513 static ssize_t wq_nice_show(struct device *dev, struct device_attribute *attr,
7017                             char *buf)           5514                             char *buf)
7018 {                                                5515 {
7019         struct workqueue_struct *wq = dev_to_    5516         struct workqueue_struct *wq = dev_to_wq(dev);
7020         int written;                             5517         int written;
7021                                                  5518 
7022         mutex_lock(&wq->mutex);                  5519         mutex_lock(&wq->mutex);
7023         written = scnprintf(buf, PAGE_SIZE, "    5520         written = scnprintf(buf, PAGE_SIZE, "%d\n", wq->unbound_attrs->nice);
7024         mutex_unlock(&wq->mutex);                5521         mutex_unlock(&wq->mutex);
7025                                                  5522 
7026         return written;                          5523         return written;
7027 }                                                5524 }
7028                                                  5525 
7029 /* prepare workqueue_attrs for sysfs store op    5526 /* prepare workqueue_attrs for sysfs store operations */
7030 static struct workqueue_attrs *wq_sysfs_prep_    5527 static struct workqueue_attrs *wq_sysfs_prep_attrs(struct workqueue_struct *wq)
7031 {                                                5528 {
7032         struct workqueue_attrs *attrs;           5529         struct workqueue_attrs *attrs;
7033                                                  5530 
7034         lockdep_assert_held(&wq_pool_mutex);     5531         lockdep_assert_held(&wq_pool_mutex);
7035                                                  5532 
7036         attrs = alloc_workqueue_attrs();         5533         attrs = alloc_workqueue_attrs();
7037         if (!attrs)                              5534         if (!attrs)
7038                 return NULL;                     5535                 return NULL;
7039                                                  5536 
7040         copy_workqueue_attrs(attrs, wq->unbou    5537         copy_workqueue_attrs(attrs, wq->unbound_attrs);
7041         return attrs;                            5538         return attrs;
7042 }                                                5539 }
7043                                                  5540 
7044 static ssize_t wq_nice_store(struct device *d    5541 static ssize_t wq_nice_store(struct device *dev, struct device_attribute *attr,
7045                              const char *buf,    5542                              const char *buf, size_t count)
7046 {                                                5543 {
7047         struct workqueue_struct *wq = dev_to_    5544         struct workqueue_struct *wq = dev_to_wq(dev);
7048         struct workqueue_attrs *attrs;           5545         struct workqueue_attrs *attrs;
7049         int ret = -ENOMEM;                       5546         int ret = -ENOMEM;
7050                                                  5547 
7051         apply_wqattrs_lock();                    5548         apply_wqattrs_lock();
7052                                                  5549 
7053         attrs = wq_sysfs_prep_attrs(wq);         5550         attrs = wq_sysfs_prep_attrs(wq);
7054         if (!attrs)                              5551         if (!attrs)
7055                 goto out_unlock;                 5552                 goto out_unlock;
7056                                                  5553 
7057         if (sscanf(buf, "%d", &attrs->nice) =    5554         if (sscanf(buf, "%d", &attrs->nice) == 1 &&
7058             attrs->nice >= MIN_NICE && attrs-    5555             attrs->nice >= MIN_NICE && attrs->nice <= MAX_NICE)
7059                 ret = apply_workqueue_attrs_l    5556                 ret = apply_workqueue_attrs_locked(wq, attrs);
7060         else                                     5557         else
7061                 ret = -EINVAL;                   5558                 ret = -EINVAL;
7062                                                  5559 
7063 out_unlock:                                      5560 out_unlock:
7064         apply_wqattrs_unlock();                  5561         apply_wqattrs_unlock();
7065         free_workqueue_attrs(attrs);             5562         free_workqueue_attrs(attrs);
7066         return ret ?: count;                     5563         return ret ?: count;
7067 }                                                5564 }
7068                                                  5565 
7069 static ssize_t wq_cpumask_show(struct device     5566 static ssize_t wq_cpumask_show(struct device *dev,
7070                                struct device_    5567                                struct device_attribute *attr, char *buf)
7071 {                                                5568 {
7072         struct workqueue_struct *wq = dev_to_    5569         struct workqueue_struct *wq = dev_to_wq(dev);
7073         int written;                             5570         int written;
7074                                                  5571 
7075         mutex_lock(&wq->mutex);                  5572         mutex_lock(&wq->mutex);
7076         written = scnprintf(buf, PAGE_SIZE, "    5573         written = scnprintf(buf, PAGE_SIZE, "%*pb\n",
7077                             cpumask_pr_args(w    5574                             cpumask_pr_args(wq->unbound_attrs->cpumask));
7078         mutex_unlock(&wq->mutex);                5575         mutex_unlock(&wq->mutex);
7079         return written;                          5576         return written;
7080 }                                                5577 }
7081                                                  5578 
7082 static ssize_t wq_cpumask_store(struct device    5579 static ssize_t wq_cpumask_store(struct device *dev,
7083                                 struct device    5580                                 struct device_attribute *attr,
7084                                 const char *b    5581                                 const char *buf, size_t count)
7085 {                                                5582 {
7086         struct workqueue_struct *wq = dev_to_    5583         struct workqueue_struct *wq = dev_to_wq(dev);
7087         struct workqueue_attrs *attrs;           5584         struct workqueue_attrs *attrs;
7088         int ret = -ENOMEM;                       5585         int ret = -ENOMEM;
7089                                                  5586 
7090         apply_wqattrs_lock();                    5587         apply_wqattrs_lock();
7091                                                  5588 
7092         attrs = wq_sysfs_prep_attrs(wq);         5589         attrs = wq_sysfs_prep_attrs(wq);
7093         if (!attrs)                              5590         if (!attrs)
7094                 goto out_unlock;                 5591                 goto out_unlock;
7095                                                  5592 
7096         ret = cpumask_parse(buf, attrs->cpuma    5593         ret = cpumask_parse(buf, attrs->cpumask);
7097         if (!ret)                                5594         if (!ret)
7098                 ret = apply_workqueue_attrs_l    5595                 ret = apply_workqueue_attrs_locked(wq, attrs);
7099                                                  5596 
7100 out_unlock:                                      5597 out_unlock:
7101         apply_wqattrs_unlock();                  5598         apply_wqattrs_unlock();
7102         free_workqueue_attrs(attrs);             5599         free_workqueue_attrs(attrs);
7103         return ret ?: count;                     5600         return ret ?: count;
7104 }                                                5601 }
7105                                                  5602 
7106 static ssize_t wq_affn_scope_show(struct devi !! 5603 static ssize_t wq_numa_show(struct device *dev, struct device_attribute *attr,
7107                                   struct devi !! 5604                             char *buf)
7108 {                                                5605 {
7109         struct workqueue_struct *wq = dev_to_    5606         struct workqueue_struct *wq = dev_to_wq(dev);
7110         int written;                             5607         int written;
7111                                                  5608 
7112         mutex_lock(&wq->mutex);                  5609         mutex_lock(&wq->mutex);
7113         if (wq->unbound_attrs->affn_scope ==  !! 5610         written = scnprintf(buf, PAGE_SIZE, "%d\n",
7114                 written = scnprintf(buf, PAGE !! 5611                             !wq->unbound_attrs->no_numa);
7115                                     wq_affn_n << 
7116                                     wq_affn_n << 
7117         else                                  << 
7118                 written = scnprintf(buf, PAGE << 
7119                                     wq_affn_n << 
7120         mutex_unlock(&wq->mutex);                5612         mutex_unlock(&wq->mutex);
7121                                                  5613 
7122         return written;                          5614         return written;
7123 }                                                5615 }
7124                                                  5616 
7125 static ssize_t wq_affn_scope_store(struct dev !! 5617 static ssize_t wq_numa_store(struct device *dev, struct device_attribute *attr,
7126                                    struct dev !! 5618                              const char *buf, size_t count)
7127                                    const char << 
7128 {                                             << 
7129         struct workqueue_struct *wq = dev_to_ << 
7130         struct workqueue_attrs *attrs;        << 
7131         int affn, ret = -ENOMEM;              << 
7132                                               << 
7133         affn = parse_affn_scope(buf);         << 
7134         if (affn < 0)                         << 
7135                 return affn;                  << 
7136                                               << 
7137         apply_wqattrs_lock();                 << 
7138         attrs = wq_sysfs_prep_attrs(wq);      << 
7139         if (attrs) {                          << 
7140                 attrs->affn_scope = affn;     << 
7141                 ret = apply_workqueue_attrs_l << 
7142         }                                     << 
7143         apply_wqattrs_unlock();               << 
7144         free_workqueue_attrs(attrs);          << 
7145         return ret ?: count;                  << 
7146 }                                             << 
7147                                               << 
7148 static ssize_t wq_affinity_strict_show(struct << 
7149                                        struct << 
7150 {                                             << 
7151         struct workqueue_struct *wq = dev_to_ << 
7152                                               << 
7153         return scnprintf(buf, PAGE_SIZE, "%d\ << 
7154                          wq->unbound_attrs->a << 
7155 }                                             << 
7156                                               << 
7157 static ssize_t wq_affinity_strict_store(struc << 
7158                                         struc << 
7159                                         const << 
7160 {                                                5619 {
7161         struct workqueue_struct *wq = dev_to_    5620         struct workqueue_struct *wq = dev_to_wq(dev);
7162         struct workqueue_attrs *attrs;           5621         struct workqueue_attrs *attrs;
7163         int v, ret = -ENOMEM;                    5622         int v, ret = -ENOMEM;
7164                                                  5623 
7165         if (sscanf(buf, "%d", &v) != 1)       << 
7166                 return -EINVAL;               << 
7167                                               << 
7168         apply_wqattrs_lock();                    5624         apply_wqattrs_lock();
                                                   >> 5625 
7169         attrs = wq_sysfs_prep_attrs(wq);         5626         attrs = wq_sysfs_prep_attrs(wq);
7170         if (attrs) {                          !! 5627         if (!attrs)
7171                 attrs->affn_strict = (bool)v; !! 5628                 goto out_unlock;
                                                   >> 5629 
                                                   >> 5630         ret = -EINVAL;
                                                   >> 5631         if (sscanf(buf, "%d", &v) == 1) {
                                                   >> 5632                 attrs->no_numa = !v;
7172                 ret = apply_workqueue_attrs_l    5633                 ret = apply_workqueue_attrs_locked(wq, attrs);
7173         }                                        5634         }
                                                   >> 5635 
                                                   >> 5636 out_unlock:
7174         apply_wqattrs_unlock();                  5637         apply_wqattrs_unlock();
7175         free_workqueue_attrs(attrs);             5638         free_workqueue_attrs(attrs);
7176         return ret ?: count;                     5639         return ret ?: count;
7177 }                                                5640 }
7178                                                  5641 
7179 static struct device_attribute wq_sysfs_unbou    5642 static struct device_attribute wq_sysfs_unbound_attrs[] = {
                                                   >> 5643         __ATTR(pool_ids, 0444, wq_pool_ids_show, NULL),
7180         __ATTR(nice, 0644, wq_nice_show, wq_n    5644         __ATTR(nice, 0644, wq_nice_show, wq_nice_store),
7181         __ATTR(cpumask, 0644, wq_cpumask_show    5645         __ATTR(cpumask, 0644, wq_cpumask_show, wq_cpumask_store),
7182         __ATTR(affinity_scope, 0644, wq_affn_scope_show, wq_affn_scope_store),             !! 5646         __ATTR(numa, 0644, wq_numa_show, wq_numa_store),
7183         __ATTR(affinity_strict, 0644, wq_affinity_strict_show, wq_affinity_strict_store),  << 
7184         __ATTR_NULL,                             5647         __ATTR_NULL,
7185 };                                               5648 };
7186                                                  5649 
7187 static const struct bus_type wq_subsys = {    !! 5650 static struct bus_type wq_subsys = {
7188         .name                           = "wo    5651         .name                           = "workqueue",
7189         .dev_groups                     = wq_    5652         .dev_groups                     = wq_sysfs_groups,
7190 };                                               5653 };
7191                                                  5654 
7192 /**                                           !! 5655 static ssize_t wq_unbound_cpumask_show(struct device *dev,
7193  *  workqueue_set_unbound_cpumask - Set the l !! 5656                 struct device_attribute *attr, char *buf)
7194  *  @cpumask: the cpumask to set              << 
7195  *                                            << 
7196  *  The low-level workqueues cpumask is a global cpumask that limits           << 
7197  *  the affinity of all unbound workqueues.  This function checks @cpumask     << 
7198  *  and applies it to all unbound workqueues and updates all pwqs of them.     << 
7199  *                                            << 
7200  *  Return:     0       - Success             << 
7201  *              -EINVAL - Invalid @cpumask    << 
7202  *              -ENOMEM - Failed to allocate memory for attrs or pwqs.          << 
7203  */                                           << 
7204 static int workqueue_set_unbound_cpumask(cpum << 
7205 {                                             << 
7206         int ret = -EINVAL;                    << 
7207                                               << 
7208         /*                                    << 
7209          * Not excluding isolated cpus on purpose.                              << 
7210          * If the user wishes to include them, we allow that.                   << 
7211          */                                   << 
7212         cpumask_and(cpumask, cpumask, cpu_pos << 
7213         if (!cpumask_empty(cpumask)) {        << 
7214                 ret = 0;                      << 
7215                 apply_wqattrs_lock();         << 
7216                 if (!cpumask_equal(cpumask, w << 
7217                         ret = workqueue_apply << 
7218                 if (!ret)                     << 
7219                         cpumask_copy(wq_reque << 
7220                 apply_wqattrs_unlock();       << 
7221         }                                     << 
7222                                               << 
7223         return ret;                           << 
7224 }                                             << 
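workqueue_set_unbound_cpumask() is what a write to the subsystem-wide cpumask attribute (defined below) ends up calling. A hedged userspace sketch, distinct from the per-workqueue cpumask shown earlier; the path matches the documented sysfs location for this knob and the mask value is an arbitrary example:

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		/* cpumask_parse() expects a hex bitmask, not a CPU list:
		 * 0xf confines all unbound workqueues to CPUs 0-3 */
		int fd = open("/sys/devices/virtual/workqueue/cpumask", O_WRONLY);

		if (fd < 0) {
			perror("open");
			return 1;
		}
		if (write(fd, "f\n", 2) != 2)
			perror("write");
		close(fd);
		return 0;
	}
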
7225                                               << 
7226 static ssize_t __wq_cpumask_show(struct devic << 
7227                 struct device_attribute *attr << 
7228 {                                                5657 {
7229         int written;                             5658         int written;
7230                                                  5659 
7231         mutex_lock(&wq_pool_mutex);              5660         mutex_lock(&wq_pool_mutex);
7232         written = scnprintf(buf, PAGE_SIZE, " !! 5661         written = scnprintf(buf, PAGE_SIZE, "%*pb\n",
                                                   >> 5662                             cpumask_pr_args(wq_unbound_cpumask));
7233         mutex_unlock(&wq_pool_mutex);            5663         mutex_unlock(&wq_pool_mutex);
7234                                                  5664 
7235         return written;                          5665         return written;
7236 }                                                5666 }
7237                                                  5667 
7238 static ssize_t cpumask_requested_show(struct  !! 5668 static ssize_t wq_unbound_cpumask_store(struct device *dev,
7239                 struct device_attribute *attr << 
7240 {                                             << 
7241         return __wq_cpumask_show(dev, attr, b << 
7242 }                                             << 
7243 static DEVICE_ATTR_RO(cpumask_requested);     << 
7244                                               << 
7245 static ssize_t cpumask_isolated_show(struct d << 
7246                 struct device_attribute *attr << 
7247 {                                             << 
7248         return __wq_cpumask_show(dev, attr, b << 
7249 }                                             << 
7250 static DEVICE_ATTR_RO(cpumask_isolated);      << 
7251                                               << 
7252 static ssize_t cpumask_show(struct device *de << 
7253                 struct device_attribute *attr << 
7254 {                                             << 
7255         return __wq_cpumask_show(dev, attr, b << 
7256 }                                             << 
7257                                               << 
7258 static ssize_t cpumask_store(struct device *d << 
7259                 struct device_attribute *attr    5669                 struct device_attribute *attr, const char *buf, size_t count)
7260 {                                                5670 {
7261         cpumask_var_t cpumask;                   5671         cpumask_var_t cpumask;
7262         int ret;                                 5672         int ret;
7263                                                  5673 
7264         if (!zalloc_cpumask_var(&cpumask, GFP    5674         if (!zalloc_cpumask_var(&cpumask, GFP_KERNEL))
7265                 return -ENOMEM;                  5675                 return -ENOMEM;
7266                                                  5676 
7267         ret = cpumask_parse(buf, cpumask);       5677         ret = cpumask_parse(buf, cpumask);
7268         if (!ret)                                5678         if (!ret)
7269                 ret = workqueue_set_unbound_c    5679                 ret = workqueue_set_unbound_cpumask(cpumask);
7270                                                  5680 
7271         free_cpumask_var(cpumask);               5681         free_cpumask_var(cpumask);
7272         return ret ? ret : count;                5682         return ret ? ret : count;
7273 }                                                5683 }
7274 static DEVICE_ATTR_RW(cpumask);               << 
7275                                                  5684 
7276 static struct attribute *wq_sysfs_cpumask_att !! 5685 static struct device_attribute wq_sysfs_cpumask_attr =
7277         &dev_attr_cpumask.attr,               !! 5686         __ATTR(cpumask, 0644, wq_unbound_cpumask_show,
7278         &dev_attr_cpumask_requested.attr,     !! 5687                wq_unbound_cpumask_store);
7279         &dev_attr_cpumask_isolated.attr,      << 
7280         NULL,                                 << 
7281 };                                            << 
7282 ATTRIBUTE_GROUPS(wq_sysfs_cpumask);           << 
7283                                                  5688 
7284 static int __init wq_sysfs_init(void)            5689 static int __init wq_sysfs_init(void)
7285 {                                                5690 {
7286         return subsys_virtual_register(&wq_su !! 5691         int err;
                                                   >> 5692 
                                                   >> 5693         err = subsys_virtual_register(&wq_subsys, NULL);
                                                   >> 5694         if (err)
                                                   >> 5695                 return err;
                                                   >> 5696 
                                                   >> 5697         return device_create_file(wq_subsys.dev_root, &wq_sysfs_cpumask_attr);
7287 }                                                5698 }
7288 core_initcall(wq_sysfs_init);                    5699 core_initcall(wq_sysfs_init);
7289                                                  5700 
7290 static void wq_device_release(struct device *    5701 static void wq_device_release(struct device *dev)
7291 {                                                5702 {
7292         struct wq_device *wq_dev = container_    5703         struct wq_device *wq_dev = container_of(dev, struct wq_device, dev);
7293                                                  5704 
7294         kfree(wq_dev);                           5705         kfree(wq_dev);
7295 }                                                5706 }
7296                                                  5707 
7297 /**                                              5708 /**
7298  * workqueue_sysfs_register - make a workqueu    5709  * workqueue_sysfs_register - make a workqueue visible in sysfs
7299  * @wq: the workqueue to register                5710  * @wq: the workqueue to register
7300  *                                               5711  *
7301  * Expose @wq in sysfs under /sys/bus/workque    5712  * Expose @wq in sysfs under /sys/bus/workqueue/devices.
7302  * alloc_workqueue*() automatically calls thi    5713  * alloc_workqueue*() automatically calls this function if WQ_SYSFS is set
7303  * which is the preferred method.                5714  * which is the preferred method.
7304  *                                               5715  *
7305  * Workqueue user should use this function di    5716  * Workqueue user should use this function directly iff it wants to apply
7306  * workqueue_attrs before making the workqueu    5717  * workqueue_attrs before making the workqueue visible in sysfs; otherwise,
7307  * apply_workqueue_attrs() may race against u    5718  * apply_workqueue_attrs() may race against userland updating the
7308  * attributes.                                   5719  * attributes.
7309  *                                               5720  *
7310  * Return: 0 on success, -errno on failure.      5721  * Return: 0 on success, -errno on failure.
7311  */                                              5722  */
7312 int workqueue_sysfs_register(struct workqueue    5723 int workqueue_sysfs_register(struct workqueue_struct *wq)
7313 {                                                5724 {
7314         struct wq_device *wq_dev;                5725         struct wq_device *wq_dev;
7315         int ret;                                 5726         int ret;
7316                                                  5727 
7317         /*                                       5728         /*
7318          * Adjusting max_active breaks ordering guarantee.  Disallow exposing  !! 5729          * Adjusting max_active or creating new pwqs by applying
7319          * ordered workqueues.                !! 5730          * attributes breaks ordering guarantee.  Disallow exposing ordered
                                                   >> 5731          * workqueues.
7320          */                                      5732          */
7321         if (WARN_ON(wq->flags & __WQ_ORDERED) !! 5733         if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
7322                 return -EINVAL;                  5734                 return -EINVAL;
7323                                                  5735 
7324         wq->wq_dev = wq_dev = kzalloc(sizeof(    5736         wq->wq_dev = wq_dev = kzalloc(sizeof(*wq_dev), GFP_KERNEL);
7325         if (!wq_dev)                             5737         if (!wq_dev)
7326                 return -ENOMEM;                  5738                 return -ENOMEM;
7327                                                  5739 
7328         wq_dev->wq = wq;                         5740         wq_dev->wq = wq;
7329         wq_dev->dev.bus = &wq_subsys;            5741         wq_dev->dev.bus = &wq_subsys;
7330         wq_dev->dev.release = wq_device_relea    5742         wq_dev->dev.release = wq_device_release;
7331         dev_set_name(&wq_dev->dev, "%s", wq->    5743         dev_set_name(&wq_dev->dev, "%s", wq->name);
7332                                                  5744 
7333         /*                                       5745         /*
7334          * unbound_attrs are created separate    5746          * unbound_attrs are created separately.  Suppress uevent until
7335          * everything is ready.                  5747          * everything is ready.
7336          */                                      5748          */
7337         dev_set_uevent_suppress(&wq_dev->dev,    5749         dev_set_uevent_suppress(&wq_dev->dev, true);
7338                                                  5750 
7339         ret = device_register(&wq_dev->dev);     5751         ret = device_register(&wq_dev->dev);
7340         if (ret) {                               5752         if (ret) {
7341                 put_device(&wq_dev->dev);        5753                 put_device(&wq_dev->dev);
7342                 wq->wq_dev = NULL;               5754                 wq->wq_dev = NULL;
7343                 return ret;                      5755                 return ret;
7344         }                                        5756         }
7345                                                  5757 
7346         if (wq->flags & WQ_UNBOUND) {            5758         if (wq->flags & WQ_UNBOUND) {
7347                 struct device_attribute *attr    5759                 struct device_attribute *attr;
7348                                                  5760 
7349                 for (attr = wq_sysfs_unbound_    5761                 for (attr = wq_sysfs_unbound_attrs; attr->attr.name; attr++) {
7350                         ret = device_create_f    5762                         ret = device_create_file(&wq_dev->dev, attr);
7351                         if (ret) {               5763                         if (ret) {
7352                                 device_unregi    5764                                 device_unregister(&wq_dev->dev);
7353                                 wq->wq_dev =     5765                                 wq->wq_dev = NULL;
7354                                 return ret;      5766                                 return ret;
7355                         }                        5767                         }
7356                 }                                5768                 }
7357         }                                        5769         }
7358                                                  5770 
7359         dev_set_uevent_suppress(&wq_dev->dev,    5771         dev_set_uevent_suppress(&wq_dev->dev, false);
7360         kobject_uevent(&wq_dev->dev.kobj, KOB    5772         kobject_uevent(&wq_dev->dev.kobj, KOBJ_ADD);
7361         return 0;                                5773         return 0;
7362 }                                                5774 }
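A minimal sketch of the ordering the comment above asks for: configure the attributes first, only then expose the workqueue. Everything here (the workqueue name, the nice value, the initcall level) is illustrative, and it assumes built-in code since not all of these helpers are exported to modules:

	#include <linux/init.h>
	#include <linux/workqueue.h>

	static struct workqueue_struct *example_wq;

	static int __init example_wq_init(void)
	{
		struct workqueue_attrs *attrs;
		int ret = -ENOMEM;

		/* no WQ_SYSFS: keep the workqueue invisible while we set it up */
		example_wq = alloc_workqueue("example_unbound", WQ_UNBOUND, 0);
		if (!example_wq)
			return -ENOMEM;

		attrs = alloc_workqueue_attrs();
		if (!attrs)
			goto err;
		attrs->nice = -10;
		ret = apply_workqueue_attrs(example_wq, attrs);
		free_workqueue_attrs(attrs);
		if (ret)
			goto err;

		/* only now let userland see (and race against) the attributes */
		ret = workqueue_sysfs_register(example_wq);
		if (ret)
			goto err;
		return 0;
	err:
		destroy_workqueue(example_wq);
		return ret;
	}
	late_initcall(example_wq_init);
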
7363                                                  5775 
7364 /**                                              5776 /**
7365  * workqueue_sysfs_unregister - undo workqueu    5777  * workqueue_sysfs_unregister - undo workqueue_sysfs_register()
7366  * @wq: the workqueue to unregister              5778  * @wq: the workqueue to unregister
7367  *                                               5779  *
7368  * If @wq is registered to sysfs by workqueue    5780  * If @wq is registered to sysfs by workqueue_sysfs_register(), unregister.
7369  */                                              5781  */
7370 static void workqueue_sysfs_unregister(struct    5782 static void workqueue_sysfs_unregister(struct workqueue_struct *wq)
7371 {                                                5783 {
7372         struct wq_device *wq_dev = wq->wq_dev    5784         struct wq_device *wq_dev = wq->wq_dev;
7373                                                  5785 
7374         if (!wq->wq_dev)                         5786         if (!wq->wq_dev)
7375                 return;                          5787                 return;
7376                                                  5788 
7377         wq->wq_dev = NULL;                       5789         wq->wq_dev = NULL;
7378         device_unregister(&wq_dev->dev);         5790         device_unregister(&wq_dev->dev);
7379 }                                                5791 }
7380 #else   /* CONFIG_SYSFS */                       5792 #else   /* CONFIG_SYSFS */
7381 static void workqueue_sysfs_unregister(struct    5793 static void workqueue_sysfs_unregister(struct workqueue_struct *wq)     { }
7382 #endif  /* CONFIG_SYSFS */                       5794 #endif  /* CONFIG_SYSFS */
7383                                                  5795 
7384 /*                                               5796 /*
7385  * Workqueue watchdog.                           5797  * Workqueue watchdog.
7386  *                                               5798  *
7387  * Stall may be caused by various bugs - miss    5799  * Stall may be caused by various bugs - missing WQ_MEM_RECLAIM, illegal
7388  * flush dependency, a concurrency managed wo    5800  * flush dependency, a concurrency managed work item which stays RUNNING
7389  * indefinitely.  Workqueue stalls can be ver    5801  * indefinitely.  Workqueue stalls can be very difficult to debug as the
7390  * usual warning mechanisms don't trigger and    5802  * usual warning mechanisms don't trigger and internal workqueue state is
7391  * largely opaque.                               5803  * largely opaque.
7392  *                                               5804  *
7393  * Workqueue watchdog monitors all worker poo    5805  * Workqueue watchdog monitors all worker pools periodically and dumps
7394  * state if some pools failed to make forward    5806  * state if some pools failed to make forward progress for a while where
7395  * forward progress is defined as the first i    5807  * forward progress is defined as the first item on ->worklist changing.
7396  *                                               5808  *
7397  * This mechanism is controlled through the k    5809  * This mechanism is controlled through the kernel parameter
7398  * "workqueue.watchdog_thresh" which can be u    5810  * "workqueue.watchdog_thresh" which can be updated at runtime through the
7399  * corresponding sysfs parameter file.           5811  * corresponding sysfs parameter file.
7400  */                                              5812  */
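Since workqueue.c is built in, the threshold is also reachable under /sys/module/workqueue/parameters/. A hedged userspace sketch that reads the current value and relaxes it; the unit is seconds and writing 0 disables the watchdog, per wq_watchdog_set_thresh() below:

	#include <stdio.h>

	int main(void)
	{
		const char *path = "/sys/module/workqueue/parameters/watchdog_thresh";
		char buf[32];
		FILE *f;

		f = fopen(path, "r");
		if (f && fgets(buf, sizeof(buf), f))
			printf("current threshold (s): %s", buf);
		if (f)
			fclose(f);

		f = fopen(path, "w");
		if (!f) {
			perror("fopen");
			return 1;
		}
		fputs("60\n", f);	/* only complain after 60s without progress */
		fclose(f);
		return 0;
	}
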
7401 #ifdef CONFIG_WQ_WATCHDOG                        5813 #ifdef CONFIG_WQ_WATCHDOG
7402                                                  5814 
7403 static unsigned long wq_watchdog_thresh = 30;    5815 static unsigned long wq_watchdog_thresh = 30;
7404 static struct timer_list wq_watchdog_timer;      5816 static struct timer_list wq_watchdog_timer;
7405                                                  5817 
7406 static unsigned long wq_watchdog_touched = IN    5818 static unsigned long wq_watchdog_touched = INITIAL_JIFFIES;
7407 static DEFINE_PER_CPU(unsigned long, wq_watch    5819 static DEFINE_PER_CPU(unsigned long, wq_watchdog_touched_cpu) = INITIAL_JIFFIES;
7408                                                  5820 
7409 /*                                            << 
7410  * Show workers that might prevent the processing of pending work items.       << 
7411  * The only candidates are CPU-bound workers in the running state.             << 
7412  * Pending work items should be handled by another idle worker                 << 
7413  * in all other situations.                   << 
7414  */                                           << 
7415 static void show_cpu_pool_hog(struct worker_p << 
7416 {                                             << 
7417         struct worker *worker;                << 
7418         unsigned long irq_flags;              << 
7419         int bkt;                              << 
7420                                               << 
7421         raw_spin_lock_irqsave(&pool->lock, ir << 
7422                                               << 
7423         hash_for_each(pool->busy_hash, bkt, w << 
7424                 if (task_is_running(worker->t << 
7425                         /*                    << 
7426                          * Defer printing to  << 
7427                          * drivers that queue << 
7428                          * also taken in thei << 
7429                          */                   << 
7430                         printk_deferred_enter << 
7431                                               << 
7432                         pr_info("pool %d:\n", << 
7433                         sched_show_task(worke << 
7434                                               << 
7435                         printk_deferred_exit( << 
7436                 }                             << 
7437         }                                     << 
7438                                               << 
7439         raw_spin_unlock_irqrestore(&pool->loc << 
7440 }                                             << 
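For context, a hedged sketch of the kind of work item this report is aimed at: a concurrency-managed worker that never sleeps, so nothing queued behind it on the same per-CPU pool can run (all identifiers here are made up):

	#include <linux/processor.h>
	#include <linux/workqueue.h>

	static void cpu_hog_fn(struct work_struct *work)
	{
		/* stays RUNNING and never sleeps, so concurrency management
		 * never wakes another worker for this pool */
		for (;;)
			cpu_relax();
	}
	static DECLARE_WORK(cpu_hog_work, cpu_hog_fn);

	/*
	 * queue_work(system_wq, &cpu_hog_work) would eventually trigger the
	 * "BUG: workqueue lockup" report, and show_cpu_pool_hog() above would
	 * dump this worker's backtrace.
	 */
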
7441                                               << 
7442 static void show_cpu_pools_hogs(void)         << 
7443 {                                             << 
7444         struct worker_pool *pool;             << 
7445         int pi;                               << 
7446                                               << 
7447         pr_info("Showing backtraces of runnin << 
7448                                               << 
7449         rcu_read_lock();                      << 
7450                                               << 
7451         for_each_pool(pool, pi) {             << 
7452                 if (pool->cpu_stall)          << 
7453                         show_cpu_pool_hog(poo << 
7454                                               << 
7455         }                                     << 
7456                                               << 
7457         rcu_read_unlock();                    << 
7458 }                                             << 
7459                                               << 
7460 static void wq_watchdog_reset_touched(void)      5821 static void wq_watchdog_reset_touched(void)
7461 {                                                5822 {
7462         int cpu;                                 5823         int cpu;
7463                                                  5824 
7464         wq_watchdog_touched = jiffies;           5825         wq_watchdog_touched = jiffies;
7465         for_each_possible_cpu(cpu)               5826         for_each_possible_cpu(cpu)
7466                 per_cpu(wq_watchdog_touched_c    5827                 per_cpu(wq_watchdog_touched_cpu, cpu) = jiffies;
7467 }                                                5828 }
7468                                                  5829 
7469 static void wq_watchdog_timer_fn(struct timer    5830 static void wq_watchdog_timer_fn(struct timer_list *unused)
7470 {                                                5831 {
7471         unsigned long thresh = READ_ONCE(wq_w    5832         unsigned long thresh = READ_ONCE(wq_watchdog_thresh) * HZ;
7472         bool lockup_detected = false;            5833         bool lockup_detected = false;
7473         bool cpu_pool_stall = false;          << 
7474         unsigned long now = jiffies;             5834         unsigned long now = jiffies;
7475         struct worker_pool *pool;                5835         struct worker_pool *pool;
7476         int pi;                                  5836         int pi;
7477                                                  5837 
7478         if (!thresh)                             5838         if (!thresh)
7479                 return;                          5839                 return;
7480                                                  5840 
7481         rcu_read_lock();                         5841         rcu_read_lock();
7482                                                  5842 
7483         for_each_pool(pool, pi) {                5843         for_each_pool(pool, pi) {
7484                 unsigned long pool_ts, touche    5844                 unsigned long pool_ts, touched, ts;
7485                                                  5845 
7486                 pool->cpu_stall = false;      << 
7487                 if (list_empty(&pool->worklis    5846                 if (list_empty(&pool->worklist))
7488                         continue;                5847                         continue;
7489                                                  5848 
7490                 /*                               5849                 /*
7491                  * If a virtual machine is st    5850                  * If a virtual machine is stopped by the host it can look to
7492                  * the watchdog like a stall.    5851                  * the watchdog like a stall.
7493                  */                              5852                  */
7494                 kvm_check_and_clear_guest_pau    5853                 kvm_check_and_clear_guest_paused();
7495                                                  5854 
7496                 /* get the latest of pool and    5855                 /* get the latest of pool and touched timestamps */
7497                 if (pool->cpu >= 0)              5856                 if (pool->cpu >= 0)
7498                         touched = READ_ONCE(p    5857                         touched = READ_ONCE(per_cpu(wq_watchdog_touched_cpu, pool->cpu));
7499                 else                             5858                 else
7500                         touched = READ_ONCE(w    5859                         touched = READ_ONCE(wq_watchdog_touched);
7501                 pool_ts = READ_ONCE(pool->wat    5860                 pool_ts = READ_ONCE(pool->watchdog_ts);
7502                                                  5861 
7503                 if (time_after(pool_ts, touch    5862                 if (time_after(pool_ts, touched))
7504                         ts = pool_ts;            5863                         ts = pool_ts;
7505                 else                             5864                 else
7506                         ts = touched;            5865                         ts = touched;
7507                                                  5866 
7508                 /* did we stall? */              5867                 /* did we stall? */
7509                 if (time_after(now, ts + thre    5868                 if (time_after(now, ts + thresh)) {
7510                         lockup_detected = tru    5869                         lockup_detected = true;
7511                         if (pool->cpu >= 0 && << 
7512                                 pool->cpu_sta << 
7513                                 cpu_pool_stal << 
7514                         }                     << 
7515                         pr_emerg("BUG: workqu    5870                         pr_emerg("BUG: workqueue lockup - pool");
7516                         pr_cont_pool_info(poo    5871                         pr_cont_pool_info(pool);
7517                         pr_cont(" stuck for %    5872                         pr_cont(" stuck for %us!\n",
7518                                 jiffies_to_ms    5873                                 jiffies_to_msecs(now - pool_ts) / 1000);
7519                 }                                5874                 }
7520                                               << 
7521                                               << 
7522         }                                        5875         }
7523                                                  5876 
7524         rcu_read_unlock();                       5877         rcu_read_unlock();
7525                                                  5878 
7526         if (lockup_detected)                     5879         if (lockup_detected)
7527                 show_all_workqueues();           5880                 show_all_workqueues();
7528                                                  5881 
7529         if (cpu_pool_stall)                   << 
7530                 show_cpu_pools_hogs();        << 
7531                                               << 
7532         wq_watchdog_reset_touched();             5882         wq_watchdog_reset_touched();
7533         mod_timer(&wq_watchdog_timer, jiffies    5883         mod_timer(&wq_watchdog_timer, jiffies + thresh);
7534 }                                                5884 }
7535                                                  5885 
7536 notrace void wq_watchdog_touch(int cpu)          5886 notrace void wq_watchdog_touch(int cpu)
7537 {                                                5887 {
7538         unsigned long thresh = READ_ONCE(wq_w << 
7539         unsigned long touch_ts = READ_ONCE(wq << 
7540         unsigned long now = jiffies;          << 
7541                                               << 
7542         if (cpu >= 0)                            5888         if (cpu >= 0)
7543                 per_cpu(wq_watchdog_touched_c !! 5889                 per_cpu(wq_watchdog_touched_cpu, cpu) = jiffies;
7544         else                                  << 
7545                 WARN_ONCE(1, "%s should be ca << 
7546                                                  5890 
7547         /* Don't unnecessarily store to globa !! 5891         wq_watchdog_touched = jiffies;
7548         if (time_after(now, touch_ts + thresh << 
7549                 WRITE_ONCE(wq_watchdog_touche << 
7550 }                                                5892 }
7551                                                  5893 
7552 static void wq_watchdog_set_thresh(unsigned l    5894 static void wq_watchdog_set_thresh(unsigned long thresh)
7553 {                                                5895 {
7554         wq_watchdog_thresh = 0;                  5896         wq_watchdog_thresh = 0;
7555         del_timer_sync(&wq_watchdog_timer);      5897         del_timer_sync(&wq_watchdog_timer);
7556                                                  5898 
7557         if (thresh) {                            5899         if (thresh) {
7558                 wq_watchdog_thresh = thresh;     5900                 wq_watchdog_thresh = thresh;
7559                 wq_watchdog_reset_touched();     5901                 wq_watchdog_reset_touched();
7560                 mod_timer(&wq_watchdog_timer,    5902                 mod_timer(&wq_watchdog_timer, jiffies + thresh * HZ);
7561         }                                        5903         }
7562 }                                                5904 }
7563                                                  5905 
7564 static int wq_watchdog_param_set_thresh(const    5906 static int wq_watchdog_param_set_thresh(const char *val,
7565                                         const    5907                                         const struct kernel_param *kp)
7566 {                                                5908 {
7567         unsigned long thresh;                    5909         unsigned long thresh;
7568         int ret;                                 5910         int ret;
7569                                                  5911 
7570         ret = kstrtoul(val, 0, &thresh);         5912         ret = kstrtoul(val, 0, &thresh);
7571         if (ret)                                 5913         if (ret)
7572                 return ret;                      5914                 return ret;
7573                                                  5915 
7574         if (system_wq)                           5916         if (system_wq)
7575                 wq_watchdog_set_thresh(thresh    5917                 wq_watchdog_set_thresh(thresh);
7576         else                                     5918         else
7577                 wq_watchdog_thresh = thresh;     5919                 wq_watchdog_thresh = thresh;
7578                                                  5920 
7579         return 0;                                5921         return 0;
7580 }                                                5922 }
7581                                                  5923 
7582 static const struct kernel_param_ops wq_watch    5924 static const struct kernel_param_ops wq_watchdog_thresh_ops = {
7583         .set    = wq_watchdog_param_set_thres    5925         .set    = wq_watchdog_param_set_thresh,
7584         .get    = param_get_ulong,               5926         .get    = param_get_ulong,
7585 };                                               5927 };
7586                                                  5928 
7587 module_param_cb(watchdog_thresh, &wq_watchdog    5929 module_param_cb(watchdog_thresh, &wq_watchdog_thresh_ops, &wq_watchdog_thresh,
7588                 0644);                           5930                 0644);
7589                                                  5931 
7590 static void wq_watchdog_init(void)               5932 static void wq_watchdog_init(void)
7591 {                                                5933 {
7592         timer_setup(&wq_watchdog_timer, wq_wa    5934         timer_setup(&wq_watchdog_timer, wq_watchdog_timer_fn, TIMER_DEFERRABLE);
7593         wq_watchdog_set_thresh(wq_watchdog_th    5935         wq_watchdog_set_thresh(wq_watchdog_thresh);
7594 }                                                5936 }
7595                                                  5937 
7596 #else   /* CONFIG_WQ_WATCHDOG */                 5938 #else   /* CONFIG_WQ_WATCHDOG */
7597                                                  5939 
7598 static inline void wq_watchdog_init(void) { }    5940 static inline void wq_watchdog_init(void) { }
7599                                                  5941 
7600 #endif  /* CONFIG_WQ_WATCHDOG */                 5942 #endif  /* CONFIG_WQ_WATCHDOG */
7601                                                  5943 
7602 static void bh_pool_kick_normal(struct irq_wo !! 5944 static void __init wq_numa_init(void)
7603 {                                                5945 {
7604         raise_softirq_irqoff(TASKLET_SOFTIRQ) !! 5946         cpumask_var_t *tbl;
7605 }                                             !! 5947         int node, cpu;
7606                                                  5948 
7607 static void bh_pool_kick_highpri(struct irq_w !! 5949         if (num_possible_nodes() <= 1)
7608 {                                             !! 5950                 return;
7609         raise_softirq_irqoff(HI_SOFTIRQ);     << 
7610 }                                             << 
7611                                                  5951 
7612 static void __init restrict_unbound_cpumask(c !! 5952         if (wq_disable_numa) {
7613 {                                             !! 5953                 pr_info("workqueue: NUMA affinity support disabled\n");
7614         if (!cpumask_intersects(wq_unbound_cp << 
7615                 pr_warn("workqueue: Restricti << 
7616                         cpumask_pr_args(wq_un << 
7617                 return;                          5954                 return;
7618         }                                        5955         }
7619                                                  5956 
7620         cpumask_and(wq_unbound_cpumask, wq_un !! 5957         for_each_possible_cpu(cpu) {
7621 }                                             !! 5958                 if (WARN_ON(cpu_to_node(cpu) == NUMA_NO_NODE)) {
                                                   >> 5959                         pr_warn("workqueue: NUMA node mapping not available for cpu%d, disabling NUMA support\n", cpu);
                                                   >> 5960                         return;
                                                   >> 5961                 }
                                                   >> 5962         }
7622                                                  5963 
7623 static void __init init_cpu_worker_pool(struc !! 5964         wq_update_unbound_numa_attrs_buf = alloc_workqueue_attrs();
7624 {                                             !! 5965         BUG_ON(!wq_update_unbound_numa_attrs_buf);
7625         BUG_ON(init_worker_pool(pool));       << 
7626         pool->cpu = cpu;                      << 
7627         cpumask_copy(pool->attrs->cpumask, cp << 
7628         cpumask_copy(pool->attrs->__pod_cpuma << 
7629         pool->attrs->nice = nice;             << 
7630         pool->attrs->affn_strict = true;      << 
7631         pool->node = cpu_to_node(cpu);        << 
7632                                                  5966 
7633         /* alloc pool ID */                   !! 5967         /*
7634         mutex_lock(&wq_pool_mutex);           !! 5968          * We want masks of possible CPUs of each node which isn't readily
7635         BUG_ON(worker_pool_assign_id(pool));  !! 5969          * available.  Build one from cpu_to_node() which should have been
7636         mutex_unlock(&wq_pool_mutex);         !! 5970          * fully initialized by now.
                                                   >> 5971          */
                                                   >> 5972         tbl = kcalloc(nr_node_ids, sizeof(tbl[0]), GFP_KERNEL);
                                                   >> 5973         BUG_ON(!tbl);
                                                   >> 5974 
                                                   >> 5975         for_each_node(node)
                                                   >> 5976                 BUG_ON(!zalloc_cpumask_var_node(&tbl[node], GFP_KERNEL,
                                                   >> 5977                                 node_online(node) ? node : NUMA_NO_NODE));
                                                   >> 5978 
                                                   >> 5979         for_each_possible_cpu(cpu) {
                                                   >> 5980                 node = cpu_to_node(cpu);
                                                   >> 5981                 cpumask_set_cpu(cpu, tbl[node]);
                                                   >> 5982         }
                                                   >> 5983 
                                                   >> 5984         wq_numa_possible_cpumask = tbl;
                                                   >> 5985         wq_numa_enabled = true;
7637 }                                                5986 }
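
The bh_pool_kick_normal()/bh_pool_kick_highpri() callbacks above simply raise the matching softirq; workqueue_init_early() below registers them as per-pool irq_work entries so a BH pool can be kicked safely from hard-irq context. A minimal sketch of that irq_work pattern, using hypothetical demo_* names instead of the per-pool wrappers in the real code:

	#include <linux/interrupt.h>
	#include <linux/irq_work.h>

	/* hypothetical stand-in for the irq_work embedded next to a BH worker_pool */
	static struct irq_work demo_kick;

	/* irq_work callback: runs with interrupts off and raises the draining softirq */
	static void demo_kick_fn(struct irq_work *work)
	{
		raise_softirq_irqoff(TASKLET_SOFTIRQ);
	}

	static void demo_kick_setup(void)
	{
		init_irq_work(&demo_kick, demo_kick_fn);
	}

	/* called when a work item lands on the BH pool and the pool needs kicking */
	static void demo_kick_pool(void)
	{
		irq_work_queue(&demo_kick);
	}
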
7638                                                  5987 
7639 /**                                              5988 /**
7640  * workqueue_init_early - early init for work    5989  * workqueue_init_early - early init for workqueue subsystem
7641  *                                               5990  *
7642  * This is the first step of three-staged wor !! 5991  * This is the first half of two-staged workqueue subsystem initialization
7643  * invoked as soon as the bare basics - memor !! 5992  * and invoked as soon as the bare basics - memory allocation, cpumasks and
7644  * up. It sets up all the data structures and !! 5993  * idr are up.  It sets up all the data structures and system workqueues
7645  * boot code to create workqueues and queue/c !! 5994  * and allows early boot code to create workqueues and queue/cancel work
7646  * execution starts only after kthreads can b !! 5995  * items.  Actual work item execution starts only after kthreads can be
7647  * before early initcalls.                    !! 5996  * created and scheduled right before early initcalls.
7648  */                                              5997  */
7649 void __init workqueue_init_early(void)           5998 void __init workqueue_init_early(void)
7650 {                                                5999 {
7651         struct wq_pod_type *pt = &wq_pod_type << 
7652         int std_nice[NR_STD_WORKER_POOLS] = {    6000         int std_nice[NR_STD_WORKER_POOLS] = { 0, HIGHPRI_NICE_LEVEL };
7653         void (*irq_work_fns[2])(struct irq_wo << 
7654                                               << 
7655         int i, cpu;                              6001         int i, cpu;
7656                                                  6002 
7657         BUILD_BUG_ON(__alignof__(struct pool_    6003         BUILD_BUG_ON(__alignof__(struct pool_workqueue) < __alignof__(long long));
7658                                                  6004 
7659         BUG_ON(!alloc_cpumask_var(&wq_online_ << 
7660         BUG_ON(!alloc_cpumask_var(&wq_unbound    6005         BUG_ON(!alloc_cpumask_var(&wq_unbound_cpumask, GFP_KERNEL));
7661         BUG_ON(!alloc_cpumask_var(&wq_request !! 6006         cpumask_copy(wq_unbound_cpumask, housekeeping_cpumask(HK_TYPE_WQ));
7662         BUG_ON(!zalloc_cpumask_var(&wq_isolat !! 6007         cpumask_and(wq_unbound_cpumask, wq_unbound_cpumask, housekeeping_cpumask(HK_TYPE_DOMAIN));
7663                                               << 
7664         cpumask_copy(wq_online_cpumask, cpu_o << 
7665         cpumask_copy(wq_unbound_cpumask, cpu_ << 
7666         restrict_unbound_cpumask("HK_TYPE_WQ" << 
7667         restrict_unbound_cpumask("HK_TYPE_DOM << 
7668         if (!cpumask_empty(&wq_cmdline_cpumas << 
7669                 restrict_unbound_cpumask("wor << 
7670                                               << 
7671         cpumask_copy(wq_requested_unbound_cpu << 
7672                                                  6008 
7673         pwq_cache = KMEM_CACHE(pool_workqueue    6009         pwq_cache = KMEM_CACHE(pool_workqueue, SLAB_PANIC);
7674                                                  6010 
7675         unbound_wq_update_pwq_attrs_buf = all !! 6011         /* initialize CPU pools */
7676         BUG_ON(!unbound_wq_update_pwq_attrs_b << 
7677                                               << 
7678         /*                                    << 
7679          * If nohz_full is enabled, set power << 
7680          * This allows workqueue items to be  << 
7681          */                                   << 
7682         if (housekeeping_enabled(HK_TYPE_TICK << 
7683                 wq_power_efficient = true;    << 
7684                                               << 
7685         /* initialize WQ_AFFN_SYSTEM pods */  << 
7686         pt->pod_cpus = kcalloc(1, sizeof(pt-> << 
7687         pt->pod_node = kcalloc(1, sizeof(pt-> << 
7688         pt->cpu_pod = kcalloc(nr_cpu_ids, siz << 
7689         BUG_ON(!pt->pod_cpus || !pt->pod_node << 
7690                                               << 
7691         BUG_ON(!zalloc_cpumask_var_node(&pt-> << 
7692                                               << 
7693         pt->nr_pods = 1;                      << 
7694         cpumask_copy(pt->pod_cpus[0], cpu_pos << 
7695         pt->pod_node[0] = NUMA_NO_NODE;       << 
7696         pt->cpu_pod[0] = 0;                   << 
7697                                               << 
7698         /* initialize BH and CPU pools */     << 
7699         for_each_possible_cpu(cpu) {             6012         for_each_possible_cpu(cpu) {
7700                 struct worker_pool *pool;        6013                 struct worker_pool *pool;
7701                                                  6014 
7702                 i = 0;                           6015                 i = 0;
7703                 for_each_bh_worker_pool(pool, !! 6016                 for_each_cpu_worker_pool(pool, cpu) {
7704                         init_cpu_worker_pool( !! 6017                         BUG_ON(init_worker_pool(pool));
7705                         pool->flags |= POOL_B !! 6018                         pool->cpu = cpu;
7706                         init_irq_work(bh_pool !! 6019                         cpumask_copy(pool->attrs->cpumask, cpumask_of(cpu));
7707                         i++;                  !! 6020                         pool->attrs->nice = std_nice[i++];
7708                 }                             !! 6021                         pool->node = cpu_to_node(cpu);
7709                                                  6022 
7710                 i = 0;                        !! 6023                         /* alloc pool ID */
7711                 for_each_cpu_worker_pool(pool !! 6024                         mutex_lock(&wq_pool_mutex);
7712                         init_cpu_worker_pool( !! 6025                         BUG_ON(worker_pool_assign_id(pool));
                                                   >> 6026                         mutex_unlock(&wq_pool_mutex);
                                                   >> 6027                 }
7713         }                                        6028         }
7714                                                  6029 
7715         /* create default unbound and ordered    6030         /* create default unbound and ordered wq attrs */
7716         for (i = 0; i < NR_STD_WORKER_POOLS;     6031         for (i = 0; i < NR_STD_WORKER_POOLS; i++) {
7717                 struct workqueue_attrs *attrs    6032                 struct workqueue_attrs *attrs;
7718                                                  6033 
7719                 BUG_ON(!(attrs = alloc_workqu    6034                 BUG_ON(!(attrs = alloc_workqueue_attrs()));
7720                 attrs->nice = std_nice[i];       6035                 attrs->nice = std_nice[i];
7721                 unbound_std_wq_attrs[i] = att    6036                 unbound_std_wq_attrs[i] = attrs;
7722                                                  6037 
7723                 /*                               6038                 /*
7724                  * An ordered wq should have     6039                  * An ordered wq should have only one pwq as ordering is
7725                  * guaranteed by max_active w    6040                  * guaranteed by max_active which is enforced by pwqs.
                                                   >> 6041                  * Turn off NUMA so that dfl_pwq is used for all nodes.
7726                  */                              6042                  */
7727                 BUG_ON(!(attrs = alloc_workqu    6043                 BUG_ON(!(attrs = alloc_workqueue_attrs()));
7728                 attrs->nice = std_nice[i];       6044                 attrs->nice = std_nice[i];
7729                 attrs->ordered = true;        !! 6045                 attrs->no_numa = true;
7730                 ordered_wq_attrs[i] = attrs;     6046                 ordered_wq_attrs[i] = attrs;
7731         }                                        6047         }
7732                                                  6048 
7733         system_wq = alloc_workqueue("events",    6049         system_wq = alloc_workqueue("events", 0, 0);
7734         system_highpri_wq = alloc_workqueue("    6050         system_highpri_wq = alloc_workqueue("events_highpri", WQ_HIGHPRI, 0);
7735         system_long_wq = alloc_workqueue("eve    6051         system_long_wq = alloc_workqueue("events_long", 0, 0);
7736         system_unbound_wq = alloc_workqueue("    6052         system_unbound_wq = alloc_workqueue("events_unbound", WQ_UNBOUND,
7737                                             W !! 6053                                             WQ_UNBOUND_MAX_ACTIVE);
7738         system_freezable_wq = alloc_workqueue    6054         system_freezable_wq = alloc_workqueue("events_freezable",
7739                                                  6055                                               WQ_FREEZABLE, 0);
7740         system_power_efficient_wq = alloc_wor    6056         system_power_efficient_wq = alloc_workqueue("events_power_efficient",
7741                                                  6057                                               WQ_POWER_EFFICIENT, 0);
7742         system_freezable_power_efficient_wq = !! 6058         system_freezable_power_efficient_wq = alloc_workqueue("events_freezable_power_efficient",
7743                                                  6059                                               WQ_FREEZABLE | WQ_POWER_EFFICIENT,
7744                                                  6060                                               0);
7745         system_bh_wq = alloc_workqueue("event << 
7746         system_bh_highpri_wq = alloc_workqueu << 
7747                                               << 
7748         BUG_ON(!system_wq || !system_highpri_    6061         BUG_ON(!system_wq || !system_highpri_wq || !system_long_wq ||
7749                !system_unbound_wq || !system_    6062                !system_unbound_wq || !system_freezable_wq ||
7750                !system_power_efficient_wq ||     6063                !system_power_efficient_wq ||
7751                !system_freezable_power_effici !! 6064                !system_freezable_power_efficient_wq);
7752                !system_bh_wq || !system_bh_hi << 
7753 }                                             << 
7754                                               << 
7755 static void __init wq_cpu_intensive_thresh_in << 
7756 {                                             << 
7757         unsigned long thresh;                 << 
7758         unsigned long bogo;                   << 
7759                                               << 
7760         pwq_release_worker = kthread_create_w << 
7761         BUG_ON(IS_ERR(pwq_release_worker));   << 
7762                                               << 
7763         /* if the user set it to a specific v << 
7764         if (wq_cpu_intensive_thresh_us != ULO << 
7765                 return;                       << 
7766                                               << 
7767         /*                                    << 
7768          * The default of 10ms is derived fro << 
7769          * 2023) processors can do a lot in 1 << 
7770          * most consider human-perceivable. H << 
7771          * lot slower CPUs including microcon << 
7772          * too low.                           << 
7773          *                                    << 
7774          * Let's scale up the threshold upto  << 
7775          * This is by no means accurate but i << 
7776          * is still useful even when the thre << 
7777          * the reports would usually be appli << 
7778          * operating on longer thresholds won << 
7779          * usefulness.                        << 
7780          */                                   << 
7781         thresh = 10 * USEC_PER_MSEC;          << 
7782                                               << 
7783         /* see init/calibrate.c for lpj -> Bo << 
7784         bogo = max_t(unsigned long, loops_per << 
7785         if (bogo < 4000)                      << 
7786                 thresh = min_t(unsigned long, << 
7787                                               << 
7788         pr_debug("wq_cpu_intensive_thresh: lp << 
7789                  loops_per_jiffy, bogo, thres << 
7790                                               << 
7791         wq_cpu_intensive_thresh_us = thresh;  << 
7792 }                                                6065 }
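
The truncated scaling logic in wq_cpu_intensive_thresh_init() starts from a 10ms default and, on machines reporting fewer than about 4000 BogoMIPS, stretches the threshold proportionally up to a 1s cap. A small userspace sketch of just that arithmetic (constants taken from the visible lines; deriving bogo from loops_per_jiffy and the pwq_release_worker setup are left out):

	#include <stdio.h>

	#define USEC_PER_MSEC	1000UL
	#define USEC_PER_SEC	1000000UL

	/* mirrors the default-threshold scaling; bogomips must be non-zero */
	static unsigned long scale_thresh(unsigned long bogomips)
	{
		unsigned long thresh = 10 * USEC_PER_MSEC;	/* 10ms default */

		if (bogomips < 4000) {
			thresh = thresh * 4000 / bogomips;
			if (thresh > USEC_PER_SEC)		/* never beyond 1s */
				thresh = USEC_PER_SEC;
		}
		return thresh;
	}

	int main(void)
	{
		printf("%lu us\n", scale_thresh(4000));	/* 10000: fast CPU keeps 10ms */
		printf("%lu us\n", scale_thresh(1000));	/* 40000: slower CPU gets 40ms */
		printf("%lu us\n", scale_thresh(20));	/* 1000000: capped at 1s */
		return 0;
	}
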
7793                                                  6066 
7794 /**                                              6067 /**
7795  * workqueue_init - bring workqueue subsystem    6068  * workqueue_init - bring workqueue subsystem fully online
7796  *                                               6069  *
7797  * This is the second step of three-staged wo !! 6070  * This is the latter half of two-staged workqueue subsystem initialization
7798  * and invoked as soon as kthreads can be cre !! 6071  * and invoked as soon as kthreads can be created and scheduled.
7799  * been created and work items queued on them !! 6072  * Workqueues have been created and work items queued on them, but there
7800  * executing the work items yet. Populate the !! 6073  * are no kworkers executing the work items yet.  Populate the worker pools
7801  * workers and enable future kworker creation !! 6074  * with the initial workers and enable future kworker creations.
7802  */                                              6075  */
7803 void __init workqueue_init(void)                 6076 void __init workqueue_init(void)
7804 {                                                6077 {
7805         struct workqueue_struct *wq;             6078         struct workqueue_struct *wq;
7806         struct worker_pool *pool;                6079         struct worker_pool *pool;
7807         int cpu, bkt;                            6080         int cpu, bkt;
7808                                                  6081 
7809         wq_cpu_intensive_thresh_init();       !! 6082         /*
                                                   >> 6083          * It'd be simpler to initialize NUMA in workqueue_init_early() but
                                                   >> 6084          * CPU to node mapping may not be available that early on some
                                                   >> 6085          * archs such as power and arm64.  As per-cpu pools created
                                                   >> 6086          * previously could be missing node hint and unbound pools NUMA
                                                   >> 6087          * affinity, fix them up.
                                                   >> 6088          *
                                                   >> 6089          * Also, while iterating workqueues, create rescuers if requested.
                                                   >> 6090          */
                                                   >> 6091         wq_numa_init();
7810                                                  6092 
7811         mutex_lock(&wq_pool_mutex);              6093         mutex_lock(&wq_pool_mutex);
7812                                                  6094 
7813         /*                                    << 
7814          * Per-cpu pools created earlier coul << 
7815          * up. Also, create a rescuer for wor << 
7816          */                                   << 
7817         for_each_possible_cpu(cpu) {             6095         for_each_possible_cpu(cpu) {
7818                 for_each_bh_worker_pool(pool, !! 6096                 for_each_cpu_worker_pool(pool, cpu) {
7819                         pool->node = cpu_to_n << 
7820                 for_each_cpu_worker_pool(pool << 
7821                         pool->node = cpu_to_n    6097                         pool->node = cpu_to_node(cpu);
                                                   >> 6098                 }
7822         }                                        6099         }
7823                                                  6100 
7824         list_for_each_entry(wq, &workqueues,     6101         list_for_each_entry(wq, &workqueues, list) {
                                                   >> 6102                 wq_update_unbound_numa(wq, smp_processor_id(), true);
7825                 WARN(init_rescuer(wq),           6103                 WARN(init_rescuer(wq),
7826                      "workqueue: failed to cr    6104                      "workqueue: failed to create early rescuer for %s",
7827                      wq->name);                  6105                      wq->name);
7828         }                                        6106         }
7829                                                  6107 
7830         mutex_unlock(&wq_pool_mutex);            6108         mutex_unlock(&wq_pool_mutex);
7831                                                  6109 
7832         /*                                    !! 6110         /* create the initial workers */
7833          * Create the initial workers. A BH p << 
7834          * represents the shared BH execution << 
7835          * affected by hotplug events. Create << 
7836          * possible CPUs here.                << 
7837          */                                   << 
7838         for_each_possible_cpu(cpu)            << 
7839                 for_each_bh_worker_pool(pool, << 
7840                         BUG_ON(!create_worker << 
7841                                               << 
7842         for_each_online_cpu(cpu) {               6111         for_each_online_cpu(cpu) {
7843                 for_each_cpu_worker_pool(pool    6112                 for_each_cpu_worker_pool(pool, cpu) {
7844                         pool->flags &= ~POOL_    6113                         pool->flags &= ~POOL_DISASSOCIATED;
7845                         BUG_ON(!create_worker    6114                         BUG_ON(!create_worker(pool));
7846                 }                                6115                 }
7847         }                                        6116         }
7848                                                  6117 
7849         hash_for_each(unbound_pool_hash, bkt,    6118         hash_for_each(unbound_pool_hash, bkt, pool, hash_node)
7850                 BUG_ON(!create_worker(pool));    6119                 BUG_ON(!create_worker(pool));
7851                                                  6120 
7852         wq_online = true;                        6121         wq_online = true;
7853         wq_watchdog_init();                      6122         wq_watchdog_init();
7854 }                                                6123 }
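
Besides the per-CPU and unbound pools, the early-init path above also allocated system_bh_wq and system_bh_highpri_wq, and workqueue_init() just created one worker per possible CPU for the BH pools, since they represent the shared BH execution context and ignore hotplug. A minimal module-style sketch (hypothetical demo_* names) of queueing onto the shared BH workqueue, the intended tasklet replacement; treat it as an illustration rather than a canonical conversion recipe:

	#include <linux/module.h>
	#include <linux/workqueue.h>

	/* executes in softirq (BH) context rather than in a kworker task */
	static void demo_bh_fn(struct work_struct *work)
	{
		pr_info("demo: ran in BH context\n");
	}

	static DECLARE_WORK(demo_bh_work, demo_bh_fn);

	static int __init demo_init(void)
	{
		/* like tasklet_schedule(), this is safe from atomic context */
		queue_work(system_bh_wq, &demo_bh_work);
		return 0;
	}

	static void __exit demo_exit(void)
	{
		cancel_work_sync(&demo_bh_work);
	}

	module_init(demo_init);
	module_exit(demo_exit);
	MODULE_LICENSE("GPL");
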
7855                                                  6124 
7856 /*                                               6125 /*
7857  * Initialize @pt by first initializing @pt-> !! 6126  * Despite the naming, this is a no-op function which is here only for avoiding
7858  * @cpu_shares_pod(). Each subset of CPUs tha !! 6127  * link error. Since compile-time warning may fail to catch, we will need to
7859  * and consecutive pod ID. The rest of @pt is !! 6128  * emit run-time warning from __flush_workqueue().
7860  */                                           << 
7861 static void __init init_pod_type(struct wq_po << 
7862                                  bool (*cpus_ << 
7863 {                                             << 
7864         int cur, pre, cpu, pod;               << 
7865                                               << 
7866         pt->nr_pods = 0;                      << 
7867                                               << 
7868         /* init @pt->cpu_pod[] according to @ << 
7869         pt->cpu_pod = kcalloc(nr_cpu_ids, siz << 
7870         BUG_ON(!pt->cpu_pod);                 << 
7871                                               << 
7872         for_each_possible_cpu(cur) {          << 
7873                 for_each_possible_cpu(pre) {  << 
7874                         if (pre >= cur) {     << 
7875                                 pt->cpu_pod[c << 
7876                                 break;        << 
7877                         }                     << 
7878                         if (cpus_share_pod(cu << 
7879                                 pt->cpu_pod[c << 
7880                                 break;        << 
7881                         }                     << 
7882                 }                             << 
7883         }                                     << 
7884                                               << 
7885         /* init the rest to match @pt->cpu_po << 
7886         pt->pod_cpus = kcalloc(pt->nr_pods, s << 
7887         pt->pod_node = kcalloc(pt->nr_pods, s << 
7888         BUG_ON(!pt->pod_cpus || !pt->pod_node << 
7889                                               << 
7890         for (pod = 0; pod < pt->nr_pods; pod+ << 
7891                 BUG_ON(!zalloc_cpumask_var(&p << 
7892                                               << 
7893         for_each_possible_cpu(cpu) {          << 
7894                 cpumask_set_cpu(cpu, pt->pod_ << 
7895                 pt->pod_node[pt->cpu_pod[cpu] << 
7896         }                                     << 
7897 }                                             << 
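
init_pod_type() assigns each CPU to a pod by scanning lower-numbered CPUs with the caller-supplied predicate: the first match reuses that CPU's pod ID, otherwise a new consecutive ID is opened. A standalone toy with a hypothetical cpu_node[] topology reproduces the same scan:

	#include <stdio.h>

	#define NR_DEMO_CPUS 8

	/* hypothetical topology: the NUMA node of each CPU */
	static const int cpu_node[NR_DEMO_CPUS] = { 0, 0, 1, 1, 0, 0, 1, 1 };

	static int cpus_share_pod(int cpu0, int cpu1)
	{
		return cpu_node[cpu0] == cpu_node[cpu1];
	}

	int main(void)
	{
		int cpu_pod[NR_DEMO_CPUS];
		int nr_pods = 0;

		/* same double scan as init_pod_type(): reuse an earlier pod or open a new one */
		for (int cur = 0; cur < NR_DEMO_CPUS; cur++) {
			for (int pre = 0; pre < NR_DEMO_CPUS; pre++) {
				if (pre >= cur) {
					cpu_pod[cur] = nr_pods++;
					break;
				}
				if (cpus_share_pod(cur, pre)) {
					cpu_pod[cur] = cpu_pod[pre];
					break;
				}
			}
		}

		/* prints pods 0 0 1 1 0 0 1 1, i.e. two pods for the two nodes */
		for (int cpu = 0; cpu < NR_DEMO_CPUS; cpu++)
			printf("cpu%d -> pod %d\n", cpu, cpu_pod[cpu]);
		return 0;
	}
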
7898                                               << 
7899 static bool __init cpus_dont_share(int cpu0,  << 
7900 {                                             << 
7901         return false;                         << 
7902 }                                             << 
7903                                               << 
7904 static bool __init cpus_share_smt(int cpu0, i << 
7905 {                                             << 
7906 #ifdef CONFIG_SCHED_SMT                       << 
7907         return cpumask_test_cpu(cpu0, cpu_smt << 
7908 #else                                         << 
7909         return false;                         << 
7910 #endif                                        << 
7911 }                                             << 
7912                                               << 
7913 static bool __init cpus_share_numa(int cpu0,  << 
7914 {                                             << 
7915         return cpu_to_node(cpu0) == cpu_to_no << 
7916 }                                             << 
7917                                               << 
7918 /**                                           << 
7919  * workqueue_init_topology - initialize CPU p << 
7920  *                                            << 
7921  * This is the third step of three-staged wor << 
7922  * invoked after SMP and topology information << 
7923  * initializes the unbound CPU pods according << 
7924  */                                              6129  */
7925 void __init workqueue_init_topology(void)     !! 6130 void __warn_flushing_systemwide_wq(void) { }
7926 {                                             << 
7927         struct workqueue_struct *wq;          << 
7928         int cpu;                              << 
7929                                               << 
7930         init_pod_type(&wq_pod_types[WQ_AFFN_C << 
7931         init_pod_type(&wq_pod_types[WQ_AFFN_S << 
7932         init_pod_type(&wq_pod_types[WQ_AFFN_C << 
7933         init_pod_type(&wq_pod_types[WQ_AFFN_N << 
7934                                               << 
7935         wq_topo_initialized = true;           << 
7936                                               << 
7937         mutex_lock(&wq_pool_mutex);           << 
7938                                               << 
7939         /*                                    << 
7940          * Workqueues allocated earlier would << 
7941          * worker pool. Explicitly call unbou << 
7942          * and CPU combinations to apply per- << 
7943          */                                   << 
7944         list_for_each_entry(wq, &workqueues,  << 
7945                 for_each_online_cpu(cpu)      << 
7946                         unbound_wq_update_pwq << 
7947                 if (wq->flags & WQ_UNBOUND) { << 
7948                         mutex_lock(&wq->mutex << 
7949                         wq_update_node_max_ac << 
7950                         mutex_unlock(&wq->mut << 
7951                 }                             << 
7952         }                                     << 
7953                                               << 
7954         mutex_unlock(&wq_pool_mutex);         << 
7955 }                                             << 
7956                                               << 
7957 void __warn_flushing_systemwide_wq(void)      << 
7958 {                                             << 
7959         pr_warn("WARNING: Flushing system-wid << 
7960         dump_stack();                         << 
7961 }                                             << 
7962 EXPORT_SYMBOL(__warn_flushing_systemwide_wq);    6131 EXPORT_SYMBOL(__warn_flushing_systemwide_wq);
7963                                               << 
7964 static int __init workqueue_unbound_cpus_setu << 
7965 {                                             << 
7966         if (cpulist_parse(str, &wq_cmdline_cp << 
7967                 cpumask_clear(&wq_cmdline_cpu << 
7968                 pr_warn("workqueue.unbound_cp << 
7969         }                                     << 
7970                                               << 
7971         return 1;                             << 
7972 }                                             << 
7973 __setup("workqueue.unbound_cpus=", workqueue_ << 
7974                                                  6132 
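
The __setup() hook above wires the workqueue.unbound_cpus= boot parameter into wq_cmdline_cpumask, which workqueue_init_early() then uses to further restrict wq_unbound_cpumask. For example, a boot command line such as the following (an illustrative cpulist) confines unbound workqueue workers to CPUs 0-3:

	workqueue.unbound_cpus=0-3
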
