
TOMOYO Linux Cross Reference
Linux/kernel/workqueue.c


Diff markup

Differences between /kernel/workqueue.c (Version linux-6.11.5) and /kernel/workqueue.c (Version linux-6.6.58)
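
The newer side of this diff (linux-6.11.5) adds BH worker pools and the system_bh_wq / system_bh_highpri_wq workqueues, whose work items run in softirq (BH) context rather than in a kworker task. As a minimal, hedged sketch of how such a workqueue might be used from driver code (hypothetical handler and work-item names, assuming a kernel recent enough to provide system_bh_wq):

    #include <linux/workqueue.h>

    /* Hypothetical handler: runs in softirq (BH) context on the queueing CPU,
     * so it must not sleep and should stay short. */
    static void my_bh_handler(struct work_struct *work)
    {
            /* bottom-half processing goes here */
    }

    static DECLARE_WORK(my_bh_work, my_bh_handler);

    /* From a hot path such as an interrupt handler: */
    queue_work(system_bh_wq, &my_bh_work);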


  1 // SPDX-License-Identifier: GPL-2.0-only            1 // SPDX-License-Identifier: GPL-2.0-only
  2 /*                                                  2 /*
  3  * kernel/workqueue.c - generic async executio      3  * kernel/workqueue.c - generic async execution with shared worker pool
  4  *                                                  4  *
  5  * Copyright (C) 2002           Ingo Molnar         5  * Copyright (C) 2002           Ingo Molnar
  6  *                                                  6  *
  7  *   Derived from the taskqueue/keventd code b      7  *   Derived from the taskqueue/keventd code by:
  8  *     David Woodhouse <dwmw2@infradead.org>        8  *     David Woodhouse <dwmw2@infradead.org>
  9  *     Andrew Morton                                9  *     Andrew Morton
 10  *     Kai Petzke <wpp@marie.physik.tu-berlin.     10  *     Kai Petzke <wpp@marie.physik.tu-berlin.de>
 11  *     Theodore Ts'o <tytso@mit.edu>               11  *     Theodore Ts'o <tytso@mit.edu>
 12  *                                                 12  *
 13  * Made to use alloc_percpu by Christoph Lamet     13  * Made to use alloc_percpu by Christoph Lameter.
 14  *                                                 14  *
 15  * Copyright (C) 2010           SUSE Linux Pro     15  * Copyright (C) 2010           SUSE Linux Products GmbH
 16  * Copyright (C) 2010           Tejun Heo <tj@     16  * Copyright (C) 2010           Tejun Heo <tj@kernel.org>
 17  *                                                 17  *
 18  * This is the generic async execution mechani     18  * This is the generic async execution mechanism.  Work items as are
 19  * executed in process context.  The worker po     19  * executed in process context.  The worker pool is shared and
 20  * automatically managed.  There are two worke     20  * automatically managed.  There are two worker pools for each CPU (one for
 21  * normal work items and the other for high pr     21  * normal work items and the other for high priority ones) and some extra
 22  * pools for workqueues which are not bound to     22  * pools for workqueues which are not bound to any specific CPU - the
 23  * number of these backing pools is dynamic.       23  * number of these backing pools is dynamic.
 24  *                                                 24  *
 25  * Please read Documentation/core-api/workqueu     25  * Please read Documentation/core-api/workqueue.rst for details.
 26  */                                                26  */
 27                                                    27 
 28 #include <linux/export.h>                          28 #include <linux/export.h>
 29 #include <linux/kernel.h>                          29 #include <linux/kernel.h>
 30 #include <linux/sched.h>                           30 #include <linux/sched.h>
 31 #include <linux/init.h>                            31 #include <linux/init.h>
 32 #include <linux/interrupt.h>                   << 
 33 #include <linux/signal.h>                          32 #include <linux/signal.h>
 34 #include <linux/completion.h>                      33 #include <linux/completion.h>
 35 #include <linux/workqueue.h>                       34 #include <linux/workqueue.h>
 36 #include <linux/slab.h>                            35 #include <linux/slab.h>
 37 #include <linux/cpu.h>                             36 #include <linux/cpu.h>
 38 #include <linux/notifier.h>                        37 #include <linux/notifier.h>
 39 #include <linux/kthread.h>                         38 #include <linux/kthread.h>
 40 #include <linux/hardirq.h>                         39 #include <linux/hardirq.h>
 41 #include <linux/mempolicy.h>                       40 #include <linux/mempolicy.h>
 42 #include <linux/freezer.h>                         41 #include <linux/freezer.h>
 43 #include <linux/debug_locks.h>                     42 #include <linux/debug_locks.h>
 44 #include <linux/lockdep.h>                         43 #include <linux/lockdep.h>
 45 #include <linux/idr.h>                             44 #include <linux/idr.h>
 46 #include <linux/jhash.h>                           45 #include <linux/jhash.h>
 47 #include <linux/hashtable.h>                       46 #include <linux/hashtable.h>
 48 #include <linux/rculist.h>                         47 #include <linux/rculist.h>
 49 #include <linux/nodemask.h>                        48 #include <linux/nodemask.h>
 50 #include <linux/moduleparam.h>                     49 #include <linux/moduleparam.h>
 51 #include <linux/uaccess.h>                         50 #include <linux/uaccess.h>
 52 #include <linux/sched/isolation.h>                 51 #include <linux/sched/isolation.h>
 53 #include <linux/sched/debug.h>                     52 #include <linux/sched/debug.h>
 54 #include <linux/nmi.h>                             53 #include <linux/nmi.h>
 55 #include <linux/kvm_para.h>                        54 #include <linux/kvm_para.h>
 56 #include <linux/delay.h>                           55 #include <linux/delay.h>
 57 #include <linux/irq_work.h>                    << 
 58                                                    56 
 59 #include "workqueue_internal.h"                    57 #include "workqueue_internal.h"
 60                                                    58 
 61 enum worker_pool_flags {                       !!  59 enum {
 62         /*                                         60         /*
 63          * worker_pool flags                       61          * worker_pool flags
 64          *                                         62          *
 65          * A bound pool is either associated o     63          * A bound pool is either associated or disassociated with its CPU.
 66          * While associated (!DISASSOCIATED),      64          * While associated (!DISASSOCIATED), all workers are bound to the
 67          * CPU and none has %WORKER_UNBOUND se     65          * CPU and none has %WORKER_UNBOUND set and concurrency management
 68          * is in effect.                           66          * is in effect.
 69          *                                         67          *
 70          * While DISASSOCIATED, the cpu may be     68          * While DISASSOCIATED, the cpu may be offline and all workers have
 71          * %WORKER_UNBOUND set and concurrency     69          * %WORKER_UNBOUND set and concurrency management disabled, and may
 72          * be executing on any CPU.  The pool      70          * be executing on any CPU.  The pool behaves as an unbound one.
 73          *                                         71          *
 74          * Note that DISASSOCIATED should be f     72          * Note that DISASSOCIATED should be flipped only while holding
 75          * wq_pool_attach_mutex to avoid chang     73          * wq_pool_attach_mutex to avoid changing binding state while
 76          * worker_attach_to_pool() is in progr     74          * worker_attach_to_pool() is in progress.
 77          *                                     << 
 78          * As there can only be one concurrent << 
 79          * BH pool is per-CPU and always DISAS << 
 80          */                                        75          */
 81         POOL_BH                 = 1 << 0,      !!  76         POOL_MANAGER_ACTIVE     = 1 << 0,       /* being managed */
 82         POOL_MANAGER_ACTIVE     = 1 << 1,      << 
 83         POOL_DISASSOCIATED      = 1 << 2,          77         POOL_DISASSOCIATED      = 1 << 2,       /* cpu can't serve workers */
 84         POOL_BH_DRAINING        = 1 << 3,      << 
 85 };                                             << 
 86                                                    78 
 87 enum worker_flags {                            << 
 88         /* worker flags */                         79         /* worker flags */
 89         WORKER_DIE              = 1 << 1,          80         WORKER_DIE              = 1 << 1,       /* die die die */
 90         WORKER_IDLE             = 1 << 2,          81         WORKER_IDLE             = 1 << 2,       /* is idle */
 91         WORKER_PREP             = 1 << 3,          82         WORKER_PREP             = 1 << 3,       /* preparing to run works */
 92         WORKER_CPU_INTENSIVE    = 1 << 6,          83         WORKER_CPU_INTENSIVE    = 1 << 6,       /* cpu intensive */
 93         WORKER_UNBOUND          = 1 << 7,          84         WORKER_UNBOUND          = 1 << 7,       /* worker is unbound */
 94         WORKER_REBOUND          = 1 << 8,          85         WORKER_REBOUND          = 1 << 8,       /* worker was rebound */
 95                                                    86 
 96         WORKER_NOT_RUNNING      = WORKER_PREP      87         WORKER_NOT_RUNNING      = WORKER_PREP | WORKER_CPU_INTENSIVE |
 97                                   WORKER_UNBOU     88                                   WORKER_UNBOUND | WORKER_REBOUND,
 98 };                                             << 
 99                                                << 
100 enum work_cancel_flags {                       << 
101         WORK_CANCEL_DELAYED     = 1 << 0,      << 
102         WORK_CANCEL_DISABLE     = 1 << 1,      << 
103 };                                             << 
104                                                    89 
105 enum wq_internal_consts {                      << 
106         NR_STD_WORKER_POOLS     = 2,               90         NR_STD_WORKER_POOLS     = 2,            /* # standard pools per cpu */
107                                                    91 
108         UNBOUND_POOL_HASH_ORDER = 6,               92         UNBOUND_POOL_HASH_ORDER = 6,            /* hashed by pool->attrs */
109         BUSY_WORKER_HASH_ORDER  = 6,               93         BUSY_WORKER_HASH_ORDER  = 6,            /* 64 pointers */
110                                                    94 
111         MAX_IDLE_WORKERS_RATIO  = 4,               95         MAX_IDLE_WORKERS_RATIO  = 4,            /* 1/4 of busy can be idle */
112         IDLE_WORKER_TIMEOUT     = 300 * HZ,        96         IDLE_WORKER_TIMEOUT     = 300 * HZ,     /* keep idle ones for 5 mins */
113                                                    97 
114         MAYDAY_INITIAL_TIMEOUT  = HZ / 100 >=      98         MAYDAY_INITIAL_TIMEOUT  = HZ / 100 >= 2 ? HZ / 100 : 2,
115                                                    99                                                 /* call for help after 10ms
116                                                   100                                                    (min two ticks) */
117         MAYDAY_INTERVAL         = HZ / 10,        101         MAYDAY_INTERVAL         = HZ / 10,      /* and then every 100ms */
118         CREATE_COOLDOWN         = HZ,             102         CREATE_COOLDOWN         = HZ,           /* time to breath after fail */
119                                                   103 
120         /*                                        104         /*
121          * Rescue workers are used only on eme    105          * Rescue workers are used only on emergencies and shared by
122          * all cpus.  Give MIN_NICE.              106          * all cpus.  Give MIN_NICE.
123          */                                       107          */
124         RESCUER_NICE_LEVEL      = MIN_NICE,       108         RESCUER_NICE_LEVEL      = MIN_NICE,
125         HIGHPRI_NICE_LEVEL      = MIN_NICE,       109         HIGHPRI_NICE_LEVEL      = MIN_NICE,
126                                                   110 
127         WQ_NAME_LEN             = 32,          !! 111         WQ_NAME_LEN             = 24,
128         WORKER_ID_LEN           = 10 + WQ_NAME << 
129 };                                                112 };
130                                                   113 
131 /*                                                114 /*
132  * We don't want to trap softirq for too long. << 
133  * MAX_SOFTIRQ_RESTART in kernel/softirq.c. Th << 
134  * msecs_to_jiffies() can't be an initializer. << 
135  */                                            << 
136 #define BH_WORKER_JIFFIES       msecs_to_jiffi << 
137 #define BH_WORKER_RESTARTS      10             << 
138                                                << 
139 /*                                             << 
140  * Structure fields follow one of the followin    115  * Structure fields follow one of the following exclusion rules.
141  *                                                116  *
142  * I: Modifiable by initialization/destruction    117  * I: Modifiable by initialization/destruction paths and read-only for
143  *    everyone else.                              118  *    everyone else.
144  *                                                119  *
145  * P: Preemption protected.  Disabling preempt    120  * P: Preemption protected.  Disabling preemption is enough and should
146  *    only be modified and accessed from the l    121  *    only be modified and accessed from the local cpu.
147  *                                                122  *
148  * L: pool->lock protected.  Access with pool-    123  * L: pool->lock protected.  Access with pool->lock held.
149  *                                                124  *
150  * LN: pool->lock and wq_node_nr_active->lock  << 
151  *     reads.                                  << 
152  *                                             << 
153  * K: Only modified by worker while holding po    125  * K: Only modified by worker while holding pool->lock. Can be safely read by
154  *    self, while holding pool->lock or from I    126  *    self, while holding pool->lock or from IRQ context if %current is the
155  *    kworker.                                    127  *    kworker.
156  *                                                128  *
157  * S: Only modified by worker self.               129  * S: Only modified by worker self.
158  *                                                130  *
159  * A: wq_pool_attach_mutex protected.             131  * A: wq_pool_attach_mutex protected.
160  *                                                132  *
161  * PL: wq_pool_mutex protected.                   133  * PL: wq_pool_mutex protected.
162  *                                                134  *
163  * PR: wq_pool_mutex protected for writes.  RC    135  * PR: wq_pool_mutex protected for writes.  RCU protected for reads.
164  *                                                136  *
165  * PW: wq_pool_mutex and wq->mutex protected f    137  * PW: wq_pool_mutex and wq->mutex protected for writes.  Either for reads.
166  *                                                138  *
167  * PWR: wq_pool_mutex and wq->mutex protected     139  * PWR: wq_pool_mutex and wq->mutex protected for writes.  Either or
168  *      RCU for reads.                            140  *      RCU for reads.
169  *                                                141  *
170  * WQ: wq->mutex protected.                       142  * WQ: wq->mutex protected.
171  *                                                143  *
172  * WR: wq->mutex protected for writes.  RCU pr    144  * WR: wq->mutex protected for writes.  RCU protected for reads.
173  *                                                145  *
174  * WO: wq->mutex protected for writes. Updated << 
175  *     with READ_ONCE() without locking.       << 
176  *                                             << 
177  * MD: wq_mayday_lock protected.                  146  * MD: wq_mayday_lock protected.
178  *                                                147  *
179  * WD: Used internally by the watchdog.           148  * WD: Used internally by the watchdog.
180  */                                               149  */
181                                                   150 
182 /* struct worker is defined in workqueue_inter    151 /* struct worker is defined in workqueue_internal.h */
183                                                   152 
184 struct worker_pool {                              153 struct worker_pool {
185         raw_spinlock_t          lock;             154         raw_spinlock_t          lock;           /* the pool lock */
186         int                     cpu;              155         int                     cpu;            /* I: the associated cpu */
187         int                     node;             156         int                     node;           /* I: the associated node ID */
188         int                     id;               157         int                     id;             /* I: pool ID */
189         unsigned int            flags;            158         unsigned int            flags;          /* L: flags */
190                                                   159 
191         unsigned long           watchdog_ts;      160         unsigned long           watchdog_ts;    /* L: watchdog timestamp */
192         bool                    cpu_stall;        161         bool                    cpu_stall;      /* WD: stalled cpu bound pool */
193                                                   162 
194         /*                                        163         /*
195          * The counter is incremented in a pro    164          * The counter is incremented in a process context on the associated CPU
196          * w/ preemption disabled, and decreme    165          * w/ preemption disabled, and decremented or reset in the same context
197          * but w/ pool->lock held. The readers    166          * but w/ pool->lock held. The readers grab pool->lock and are
198          * guaranteed to see if the counter re    167          * guaranteed to see if the counter reached zero.
199          */                                       168          */
200         int                     nr_running;       169         int                     nr_running;
201                                                   170 
202         struct list_head        worklist;         171         struct list_head        worklist;       /* L: list of pending works */
203                                                   172 
204         int                     nr_workers;       173         int                     nr_workers;     /* L: total number of workers */
205         int                     nr_idle;          174         int                     nr_idle;        /* L: currently idle workers */
206                                                   175 
207         struct list_head        idle_list;        176         struct list_head        idle_list;      /* L: list of idle workers */
208         struct timer_list       idle_timer;       177         struct timer_list       idle_timer;     /* L: worker idle timeout */
209         struct work_struct      idle_cull_work    178         struct work_struct      idle_cull_work; /* L: worker idle cleanup */
210                                                   179 
211         struct timer_list       mayday_timer;     180         struct timer_list       mayday_timer;     /* L: SOS timer for workers */
212                                                   181 
213         /* a workers is either on busy_hash or    182         /* a workers is either on busy_hash or idle_list, or the manager */
214         DECLARE_HASHTABLE(busy_hash, BUSY_WORK    183         DECLARE_HASHTABLE(busy_hash, BUSY_WORKER_HASH_ORDER);
215                                                   184                                                 /* L: hash of busy workers */
216                                                   185 
217         struct worker           *manager;         186         struct worker           *manager;       /* L: purely informational */
218         struct list_head        workers;          187         struct list_head        workers;        /* A: attached workers */
                                                   >> 188         struct list_head        dying_workers;  /* A: workers about to die */
                                                   >> 189         struct completion       *detach_completion; /* all workers detached */
219                                                   190 
220         struct ida              worker_ida;       191         struct ida              worker_ida;     /* worker IDs for task name */
221                                                   192 
222         struct workqueue_attrs  *attrs;           193         struct workqueue_attrs  *attrs;         /* I: worker attributes */
223         struct hlist_node       hash_node;        194         struct hlist_node       hash_node;      /* PL: unbound_pool_hash node */
224         int                     refcnt;           195         int                     refcnt;         /* PL: refcnt for unbound pools */
225                                                   196 
226         /*                                        197         /*
227          * Destruction of pool is RCU protecte    198          * Destruction of pool is RCU protected to allow dereferences
228          * from get_work_pool().                  199          * from get_work_pool().
229          */                                       200          */
230         struct rcu_head         rcu;              201         struct rcu_head         rcu;
231 };                                                202 };
232                                                   203 
233 /*                                                204 /*
234  * Per-pool_workqueue statistics. These can be    205  * Per-pool_workqueue statistics. These can be monitored using
235  * tools/workqueue/wq_monitor.py.                 206  * tools/workqueue/wq_monitor.py.
236  */                                               207  */
237 enum pool_workqueue_stats {                       208 enum pool_workqueue_stats {
238         PWQ_STAT_STARTED,       /* work items     209         PWQ_STAT_STARTED,       /* work items started execution */
239         PWQ_STAT_COMPLETED,     /* work items     210         PWQ_STAT_COMPLETED,     /* work items completed execution */
240         PWQ_STAT_CPU_TIME,      /* total CPU t    211         PWQ_STAT_CPU_TIME,      /* total CPU time consumed */
241         PWQ_STAT_CPU_INTENSIVE, /* wq_cpu_inte    212         PWQ_STAT_CPU_INTENSIVE, /* wq_cpu_intensive_thresh_us violations */
242         PWQ_STAT_CM_WAKEUP,     /* concurrency    213         PWQ_STAT_CM_WAKEUP,     /* concurrency-management worker wakeups */
243         PWQ_STAT_REPATRIATED,   /* unbound wor    214         PWQ_STAT_REPATRIATED,   /* unbound workers brought back into scope */
244         PWQ_STAT_MAYDAY,        /* maydays to     215         PWQ_STAT_MAYDAY,        /* maydays to rescuer */
245         PWQ_STAT_RESCUED,       /* linked work    216         PWQ_STAT_RESCUED,       /* linked work items executed by rescuer */
246                                                   217 
247         PWQ_NR_STATS,                             218         PWQ_NR_STATS,
248 };                                                219 };
249                                                   220 
250 /*                                                221 /*
251  * The per-pool workqueue.  While queued, bits !! 222  * The per-pool workqueue.  While queued, the lower WORK_STRUCT_FLAG_BITS
252  * of work_struct->data are used for flags and    223  * of work_struct->data are used for flags and the remaining high bits
253  * point to the pwq; thus, pwqs need to be ali    224  * point to the pwq; thus, pwqs need to be aligned at two's power of the
254  * number of flag bits.                           225  * number of flag bits.
255  */                                               226  */
256 struct pool_workqueue {                           227 struct pool_workqueue {
257         struct worker_pool      *pool;            228         struct worker_pool      *pool;          /* I: the associated pool */
258         struct workqueue_struct *wq;              229         struct workqueue_struct *wq;            /* I: the owning workqueue */
259         int                     work_color;       230         int                     work_color;     /* L: current color */
260         int                     flush_color;      231         int                     flush_color;    /* L: flushing color */
261         int                     refcnt;           232         int                     refcnt;         /* L: reference count */
262         int                     nr_in_flight[W    233         int                     nr_in_flight[WORK_NR_COLORS];
263                                                   234                                                 /* L: nr of in_flight works */
264         bool                    plugged;       << 
265                                                   235 
266         /*                                        236         /*
267          * nr_active management and WORK_STRUC    237          * nr_active management and WORK_STRUCT_INACTIVE:
268          *                                        238          *
269          * When pwq->nr_active >= max_active,     239          * When pwq->nr_active >= max_active, new work item is queued to
270          * pwq->inactive_works instead of pool    240          * pwq->inactive_works instead of pool->worklist and marked with
271          * WORK_STRUCT_INACTIVE.                  241          * WORK_STRUCT_INACTIVE.
272          *                                        242          *
273          * All work items marked with WORK_STR !! 243          * All work items marked with WORK_STRUCT_INACTIVE do not participate
274          * nr_active and all work items in pwq !! 244          * in pwq->nr_active and all work items in pwq->inactive_works are
275          * WORK_STRUCT_INACTIVE. But not all W !! 245          * marked with WORK_STRUCT_INACTIVE.  But not all WORK_STRUCT_INACTIVE
276          * in pwq->inactive_works. Some of the !! 246          * work items are in pwq->inactive_works.  Some of them are ready to
277          * pool->worklist or worker->scheduled !! 247          * run in pool->worklist or worker->scheduled.  Those work itmes are
278          * wq_barrier which is used for flush_ !! 248          * only struct wq_barrier which is used for flush_work() and should
279          * in nr_active. For non-barrier work  !! 249          * not participate in pwq->nr_active.  For non-barrier work item, it
280          * WORK_STRUCT_INACTIVE iff it is in p !! 250          * is marked with WORK_STRUCT_INACTIVE iff it is in pwq->inactive_works.
281          */                                       251          */
282         int                     nr_active;        252         int                     nr_active;      /* L: nr of active works */
                                                   >> 253         int                     max_active;     /* L: max active works */
283         struct list_head        inactive_works    254         struct list_head        inactive_works; /* L: inactive works */
284         struct list_head        pending_node;  << 
285         struct list_head        pwqs_node;        255         struct list_head        pwqs_node;      /* WR: node on wq->pwqs */
286         struct list_head        mayday_node;      256         struct list_head        mayday_node;    /* MD: node on wq->maydays */
287                                                   257 
288         u64                     stats[PWQ_NR_S    258         u64                     stats[PWQ_NR_STATS];
289                                                   259 
290         /*                                        260         /*
291          * Release of unbound pwq is punted to    261          * Release of unbound pwq is punted to a kthread_worker. See put_pwq()
292          * and pwq_release_workfn() for detail    262          * and pwq_release_workfn() for details. pool_workqueue itself is also
293          * RCU protected so that the first pwq    263          * RCU protected so that the first pwq can be determined without
294          * grabbing wq->mutex.                    264          * grabbing wq->mutex.
295          */                                       265          */
296         struct kthread_work     release_work;     266         struct kthread_work     release_work;
297         struct rcu_head         rcu;              267         struct rcu_head         rcu;
298 } __aligned(1 << WORK_STRUCT_PWQ_SHIFT);       !! 268 } __aligned(1 << WORK_STRUCT_FLAG_BITS);
299                                                   269 
300 /*                                                270 /*
301  * Structure used to wait for workqueue flush.    271  * Structure used to wait for workqueue flush.
302  */                                               272  */
303 struct wq_flusher {                               273 struct wq_flusher {
304         struct list_head        list;             274         struct list_head        list;           /* WQ: list of flushers */
305         int                     flush_color;      275         int                     flush_color;    /* WQ: flush color waiting for */
306         struct completion       done;             276         struct completion       done;           /* flush completion */
307 };                                                277 };
308                                                   278 
309 struct wq_device;                                 279 struct wq_device;
310                                                   280 
311 /*                                                281 /*
312  * Unlike in a per-cpu workqueue where max_act << 
313  * on each CPU, in an unbound workqueue, max_a << 
314  * As sharing a single nr_active across multip << 
315  * the counting and enforcement is per NUMA no << 
316  *                                             << 
317  * The following struct is used to enforce per << 
318  * to start executing a work item, it should i << 
319  * tryinc_node_nr_active(). If acquisition fai << 
320  * ->max, the pwq is queued on ->pending_pwqs. << 
321  * and decrement ->nr, node_activate_pending_p << 
322  * round-robin order.                          << 
323  */                                            << 
324 struct wq_node_nr_active {                     << 
325         int                     max;           << 
326         atomic_t                nr;            << 
327         raw_spinlock_t          lock;          << 
328         struct list_head        pending_pwqs;  << 
329 };                                             << 
330                                                << 
331 /*                                             << 
332  * The externally visible workqueue.  It relay    282  * The externally visible workqueue.  It relays the issued work items to
333  * the appropriate worker_pool through its poo    283  * the appropriate worker_pool through its pool_workqueues.
334  */                                               284  */
335 struct workqueue_struct {                         285 struct workqueue_struct {
336         struct list_head        pwqs;             286         struct list_head        pwqs;           /* WR: all pwqs of this wq */
337         struct list_head        list;             287         struct list_head        list;           /* PR: list of all workqueues */
338                                                   288 
339         struct mutex            mutex;            289         struct mutex            mutex;          /* protects this wq */
340         int                     work_color;       290         int                     work_color;     /* WQ: current work color */
341         int                     flush_color;      291         int                     flush_color;    /* WQ: current flush color */
342         atomic_t                nr_pwqs_to_flu    292         atomic_t                nr_pwqs_to_flush; /* flush in progress */
343         struct wq_flusher       *first_flusher    293         struct wq_flusher       *first_flusher; /* WQ: first flusher */
344         struct list_head        flusher_queue;    294         struct list_head        flusher_queue;  /* WQ: flush waiters */
345         struct list_head        flusher_overfl    295         struct list_head        flusher_overflow; /* WQ: flush overflow list */
346                                                   296 
347         struct list_head        maydays;          297         struct list_head        maydays;        /* MD: pwqs requesting rescue */
348         struct worker           *rescuer;         298         struct worker           *rescuer;       /* MD: rescue worker */
349                                                   299 
350         int                     nr_drainers;      300         int                     nr_drainers;    /* WQ: drain in progress */
351                                                !! 301         int                     saved_max_active; /* WQ: saved pwq max_active */
352         /* See alloc_workqueue() function comm << 
353         int                     max_active;    << 
354         int                     min_active;    << 
355         int                     saved_max_acti << 
356         int                     saved_min_acti << 
357                                                   302 
358         struct workqueue_attrs  *unbound_attrs    303         struct workqueue_attrs  *unbound_attrs; /* PW: only for unbound wqs */
359         struct pool_workqueue __rcu *dfl_pwq;  !! 304         struct pool_workqueue   *dfl_pwq;       /* PW: only for unbound wqs */
360                                                   305 
361 #ifdef CONFIG_SYSFS                               306 #ifdef CONFIG_SYSFS
362         struct wq_device        *wq_dev;          307         struct wq_device        *wq_dev;        /* I: for sysfs interface */
363 #endif                                            308 #endif
364 #ifdef CONFIG_LOCKDEP                             309 #ifdef CONFIG_LOCKDEP
365         char                    *lock_name;       310         char                    *lock_name;
366         struct lock_class_key   key;              311         struct lock_class_key   key;
367         struct lockdep_map      lockdep_map;      312         struct lockdep_map      lockdep_map;
368 #endif                                            313 #endif
369         char                    name[WQ_NAME_L    314         char                    name[WQ_NAME_LEN]; /* I: workqueue name */
370                                                   315 
371         /*                                        316         /*
372          * Destruction of workqueue_struct is     317          * Destruction of workqueue_struct is RCU protected to allow walking
373          * the workqueues list without grabbin    318          * the workqueues list without grabbing wq_pool_mutex.
374          * This is used to dump all workqueues    319          * This is used to dump all workqueues from sysrq.
375          */                                       320          */
376         struct rcu_head         rcu;              321         struct rcu_head         rcu;
377                                                   322 
378         /* hot fields used during command issu    323         /* hot fields used during command issue, aligned to cacheline */
379         unsigned int            flags ____cach    324         unsigned int            flags ____cacheline_aligned; /* WQ: WQ_* flags */
380         struct pool_workqueue __rcu * __percpu !! 325         struct pool_workqueue __percpu __rcu **cpu_pwq; /* I: per-cpu pwqs */
381         struct wq_node_nr_active *node_nr_acti << 
382 };                                                326 };
383                                                   327 
                                                   >> 328 static struct kmem_cache *pwq_cache;
                                                   >> 329 
384 /*                                                330 /*
385  * Each pod type describes how CPUs should be     331  * Each pod type describes how CPUs should be grouped for unbound workqueues.
386  * See the comment above workqueue_attrs->affn    332  * See the comment above workqueue_attrs->affn_scope.
387  */                                               333  */
388 struct wq_pod_type {                              334 struct wq_pod_type {
389         int                     nr_pods;          335         int                     nr_pods;        /* number of pods */
390         cpumask_var_t           *pod_cpus;        336         cpumask_var_t           *pod_cpus;      /* pod -> cpus */
391         int                     *pod_node;        337         int                     *pod_node;      /* pod -> node */
392         int                     *cpu_pod;         338         int                     *cpu_pod;       /* cpu -> pod */
393 };                                                339 };
394                                                   340 
395 struct work_offq_data {                        !! 341 static struct wq_pod_type wq_pod_types[WQ_AFFN_NR_TYPES];
396         u32                     pool_id;       !! 342 static enum wq_affn_scope wq_affn_dfl = WQ_AFFN_CACHE;
397         u32                     disable;       << 
398         u32                     flags;         << 
399 };                                             << 
400                                                   343 
401 static const char *wq_affn_names[WQ_AFFN_NR_TY    344 static const char *wq_affn_names[WQ_AFFN_NR_TYPES] = {
402         [WQ_AFFN_DFL]           = "default",   !! 345         [WQ_AFFN_DFL]                   = "default",
403         [WQ_AFFN_CPU]           = "cpu",       !! 346         [WQ_AFFN_CPU]                   = "cpu",
404         [WQ_AFFN_SMT]           = "smt",       !! 347         [WQ_AFFN_SMT]                   = "smt",
405         [WQ_AFFN_CACHE]         = "cache",     !! 348         [WQ_AFFN_CACHE]                 = "cache",
406         [WQ_AFFN_NUMA]          = "numa",      !! 349         [WQ_AFFN_NUMA]                  = "numa",
407         [WQ_AFFN_SYSTEM]        = "system",    !! 350         [WQ_AFFN_SYSTEM]                = "system",
408 };                                                351 };
409                                                   352 
410 /*                                                353 /*
411  * Per-cpu work items which run for longer tha    354  * Per-cpu work items which run for longer than the following threshold are
412  * automatically considered CPU intensive and     355  * automatically considered CPU intensive and excluded from concurrency
413  * management to prevent them from noticeably     356  * management to prevent them from noticeably delaying other per-cpu work items.
414  * ULONG_MAX indicates that the user hasn't ov    357  * ULONG_MAX indicates that the user hasn't overridden it with a boot parameter.
415  * The actual value is initialized in wq_cpu_i    358  * The actual value is initialized in wq_cpu_intensive_thresh_init().
416  */                                               359  */
417 static unsigned long wq_cpu_intensive_thresh_u    360 static unsigned long wq_cpu_intensive_thresh_us = ULONG_MAX;
418 module_param_named(cpu_intensive_thresh_us, wq    361 module_param_named(cpu_intensive_thresh_us, wq_cpu_intensive_thresh_us, ulong, 0644);
419 #ifdef CONFIG_WQ_CPU_INTENSIVE_REPORT          << 
420 static unsigned int wq_cpu_intensive_warning_t << 
421 module_param_named(cpu_intensive_warning_thres << 
422 #endif                                         << 
423                                                   362 
424 /* see the comment above the definition of WQ_    363 /* see the comment above the definition of WQ_POWER_EFFICIENT */
425 static bool wq_power_efficient = IS_ENABLED(CO    364 static bool wq_power_efficient = IS_ENABLED(CONFIG_WQ_POWER_EFFICIENT_DEFAULT);
426 module_param_named(power_efficient, wq_power_e    365 module_param_named(power_efficient, wq_power_efficient, bool, 0444);
427                                                   366 
428 static bool wq_online;                  /* can    367 static bool wq_online;                  /* can kworkers be created yet? */
429 static bool wq_topo_initialized __read_mostly  << 
430                                                << 
431 static struct kmem_cache *pwq_cache;           << 
432                                                << 
433 static struct wq_pod_type wq_pod_types[WQ_AFFN << 
434 static enum wq_affn_scope wq_affn_dfl = WQ_AFF << 
435                                                   368 
436 /* buf for wq_update_unbound_pod_attrs(), prot    369 /* buf for wq_update_unbound_pod_attrs(), protected by CPU hotplug exclusion */
437 static struct workqueue_attrs *unbound_wq_upda !! 370 static struct workqueue_attrs *wq_update_pod_attrs_buf;
438                                                   371 
439 static DEFINE_MUTEX(wq_pool_mutex);     /* pro    372 static DEFINE_MUTEX(wq_pool_mutex);     /* protects pools and workqueues list */
440 static DEFINE_MUTEX(wq_pool_attach_mutex); /*     373 static DEFINE_MUTEX(wq_pool_attach_mutex); /* protects worker attach/detach */
441 static DEFINE_RAW_SPINLOCK(wq_mayday_lock);       374 static DEFINE_RAW_SPINLOCK(wq_mayday_lock);     /* protects wq->maydays list */
442 /* wait for manager to go away */                 375 /* wait for manager to go away */
443 static struct rcuwait manager_wait = __RCUWAIT    376 static struct rcuwait manager_wait = __RCUWAIT_INITIALIZER(manager_wait);
444                                                   377 
445 static LIST_HEAD(workqueues);           /* PR:    378 static LIST_HEAD(workqueues);           /* PR: list of all workqueues */
446 static bool workqueue_freezing;         /* PL:    379 static bool workqueue_freezing;         /* PL: have wqs started freezing? */
447                                                   380 
448 /* PL: mirror the cpu_online_mask excluding th << 
449 static cpumask_var_t wq_online_cpumask;        << 
450                                                << 
451 /* PL&A: allowable cpus for unbound wqs and wo    381 /* PL&A: allowable cpus for unbound wqs and work items */
452 static cpumask_var_t wq_unbound_cpumask;          382 static cpumask_var_t wq_unbound_cpumask;
453                                                   383 
454 /* PL: user requested unbound cpumask via sysf << 
455 static cpumask_var_t wq_requested_unbound_cpum << 
456                                                << 
457 /* PL: isolated cpumask to be excluded from un << 
458 static cpumask_var_t wq_isolated_cpumask;      << 
459                                                << 
460 /* for further constrain wq_unbound_cpumask by    384 /* for further constrain wq_unbound_cpumask by cmdline parameter*/
461 static struct cpumask wq_cmdline_cpumask __ini    385 static struct cpumask wq_cmdline_cpumask __initdata;
462                                                   386 
463 /* CPU where unbound work was last round robin    387 /* CPU where unbound work was last round robin scheduled from this CPU */
464 static DEFINE_PER_CPU(int, wq_rr_cpu_last);       388 static DEFINE_PER_CPU(int, wq_rr_cpu_last);
465                                                   389 
466 /*                                                390 /*
467  * Local execution of unbound work items is no    391  * Local execution of unbound work items is no longer guaranteed.  The
468  * following always forces round-robin CPU sel    392  * following always forces round-robin CPU selection on unbound work items
469  * to uncover usages which depend on it.          393  * to uncover usages which depend on it.
470  */                                               394  */
471 #ifdef CONFIG_DEBUG_WQ_FORCE_RR_CPU               395 #ifdef CONFIG_DEBUG_WQ_FORCE_RR_CPU
472 static bool wq_debug_force_rr_cpu = true;         396 static bool wq_debug_force_rr_cpu = true;
473 #else                                             397 #else
474 static bool wq_debug_force_rr_cpu = false;        398 static bool wq_debug_force_rr_cpu = false;
475 #endif                                            399 #endif
476 module_param_named(debug_force_rr_cpu, wq_debu    400 module_param_named(debug_force_rr_cpu, wq_debug_force_rr_cpu, bool, 0644);
477                                                   401 
478 /* to raise softirq for the BH worker pools on << 
479 static DEFINE_PER_CPU_SHARED_ALIGNED(struct ir << 
480                                      bh_pool_i << 
481                                                << 
482 /* the BH worker pools */                      << 
483 static DEFINE_PER_CPU_SHARED_ALIGNED(struct wo << 
484                                      bh_worker << 
485                                                << 
486 /* the per-cpu worker pools */                    402 /* the per-cpu worker pools */
487 static DEFINE_PER_CPU_SHARED_ALIGNED(struct wo !! 403 static DEFINE_PER_CPU_SHARED_ALIGNED(struct worker_pool [NR_STD_WORKER_POOLS], cpu_worker_pools);
488                                      cpu_worke << 
489                                                   404 
490 static DEFINE_IDR(worker_pool_idr);     /* PR:    405 static DEFINE_IDR(worker_pool_idr);     /* PR: idr of all pools */
491                                                   406 
492 /* PL: hash of all unbound pools keyed by pool    407 /* PL: hash of all unbound pools keyed by pool->attrs */
493 static DEFINE_HASHTABLE(unbound_pool_hash, UNB    408 static DEFINE_HASHTABLE(unbound_pool_hash, UNBOUND_POOL_HASH_ORDER);
494                                                   409 
495 /* I: attributes used when instantiating stand    410 /* I: attributes used when instantiating standard unbound pools on demand */
496 static struct workqueue_attrs *unbound_std_wq_    411 static struct workqueue_attrs *unbound_std_wq_attrs[NR_STD_WORKER_POOLS];
497                                                   412 
498 /* I: attributes used when instantiating order    413 /* I: attributes used when instantiating ordered pools on demand */
499 static struct workqueue_attrs *ordered_wq_attr    414 static struct workqueue_attrs *ordered_wq_attrs[NR_STD_WORKER_POOLS];
500                                                   415 
501 /*                                                416 /*
502  * I: kthread_worker to release pwq's. pwq rel    417  * I: kthread_worker to release pwq's. pwq release needs to be bounced to a
503  * process context while holding a pool lock.     418  * process context while holding a pool lock. Bounce to a dedicated kthread
504  * worker to avoid A-A deadlocks.                 419  * worker to avoid A-A deadlocks.
505  */                                               420  */
506 static struct kthread_worker *pwq_release_work !! 421 static struct kthread_worker *pwq_release_worker;
507                                                   422 
508 struct workqueue_struct *system_wq __ro_after_ !! 423 struct workqueue_struct *system_wq __read_mostly;
509 EXPORT_SYMBOL(system_wq);                         424 EXPORT_SYMBOL(system_wq);
510 struct workqueue_struct *system_highpri_wq __r !! 425 struct workqueue_struct *system_highpri_wq __read_mostly;
511 EXPORT_SYMBOL_GPL(system_highpri_wq);             426 EXPORT_SYMBOL_GPL(system_highpri_wq);
512 struct workqueue_struct *system_long_wq __ro_a !! 427 struct workqueue_struct *system_long_wq __read_mostly;
513 EXPORT_SYMBOL_GPL(system_long_wq);                428 EXPORT_SYMBOL_GPL(system_long_wq);
514 struct workqueue_struct *system_unbound_wq __r !! 429 struct workqueue_struct *system_unbound_wq __read_mostly;
515 EXPORT_SYMBOL_GPL(system_unbound_wq);             430 EXPORT_SYMBOL_GPL(system_unbound_wq);
516 struct workqueue_struct *system_freezable_wq _ !! 431 struct workqueue_struct *system_freezable_wq __read_mostly;
517 EXPORT_SYMBOL_GPL(system_freezable_wq);           432 EXPORT_SYMBOL_GPL(system_freezable_wq);
518 struct workqueue_struct *system_power_efficien !! 433 struct workqueue_struct *system_power_efficient_wq __read_mostly;
519 EXPORT_SYMBOL_GPL(system_power_efficient_wq);     434 EXPORT_SYMBOL_GPL(system_power_efficient_wq);
520 struct workqueue_struct *system_freezable_powe !! 435 struct workqueue_struct *system_freezable_power_efficient_wq __read_mostly;
521 EXPORT_SYMBOL_GPL(system_freezable_power_effic    436 EXPORT_SYMBOL_GPL(system_freezable_power_efficient_wq);
522 struct workqueue_struct *system_bh_wq;         << 
523 EXPORT_SYMBOL_GPL(system_bh_wq);               << 
524 struct workqueue_struct *system_bh_highpri_wq; << 
525 EXPORT_SYMBOL_GPL(system_bh_highpri_wq);       << 
526                                                   437 
527 static int worker_thread(void *__worker);         438 static int worker_thread(void *__worker);
528 static void workqueue_sysfs_unregister(struct     439 static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
529 static void show_pwq(struct pool_workqueue *pw    440 static void show_pwq(struct pool_workqueue *pwq);
530 static void show_one_worker_pool(struct worker    441 static void show_one_worker_pool(struct worker_pool *pool);
531                                                   442 
532 #define CREATE_TRACE_POINTS                       443 #define CREATE_TRACE_POINTS
533 #include <trace/events/workqueue.h>               444 #include <trace/events/workqueue.h>
534                                                   445 
535 #define assert_rcu_or_pool_mutex()                446 #define assert_rcu_or_pool_mutex()                                      \
536         RCU_LOCKDEP_WARN(!rcu_read_lock_any_he !! 447         RCU_LOCKDEP_WARN(!rcu_read_lock_held() &&                       \
537                          !lockdep_is_held(&wq_    448                          !lockdep_is_held(&wq_pool_mutex),              \
538                          "RCU or wq_pool_mutex    449                          "RCU or wq_pool_mutex should be held")
539                                                   450 
540 #define assert_rcu_or_wq_mutex_or_pool_mutex(w    451 #define assert_rcu_or_wq_mutex_or_pool_mutex(wq)                        \
541         RCU_LOCKDEP_WARN(!rcu_read_lock_any_he !! 452         RCU_LOCKDEP_WARN(!rcu_read_lock_held() &&                       \
542                          !lockdep_is_held(&wq-    453                          !lockdep_is_held(&wq->mutex) &&                \
543                          !lockdep_is_held(&wq_    454                          !lockdep_is_held(&wq_pool_mutex),              \
544                          "RCU, wq->mutex or wq    455                          "RCU, wq->mutex or wq_pool_mutex should be held")
545                                                   456 
546 #define for_each_bh_worker_pool(pool, cpu)     << 
547         for ((pool) = &per_cpu(bh_worker_pools << 
548              (pool) < &per_cpu(bh_worker_pools << 
549              (pool)++)                         << 
550                                                << 
551 #define for_each_cpu_worker_pool(pool, cpu)       457 #define for_each_cpu_worker_pool(pool, cpu)                             \
552         for ((pool) = &per_cpu(cpu_worker_pool    458         for ((pool) = &per_cpu(cpu_worker_pools, cpu)[0];               \
553              (pool) < &per_cpu(cpu_worker_pool    459              (pool) < &per_cpu(cpu_worker_pools, cpu)[NR_STD_WORKER_POOLS]; \
554              (pool)++)                            460              (pool)++)
555                                                   461 
556 /**                                               462 /**
557  * for_each_pool - iterate through all worker_    463  * for_each_pool - iterate through all worker_pools in the system
558  * @pool: iteration cursor                        464  * @pool: iteration cursor
559  * @pi: integer used for iteration                465  * @pi: integer used for iteration
560  *                                                466  *
561  * This must be called either with wq_pool_mut    467  * This must be called either with wq_pool_mutex held or RCU read
562  * locked.  If the pool needs to be used beyon    468  * locked.  If the pool needs to be used beyond the locking in effect, the
563  * caller is responsible for guaranteeing that    469  * caller is responsible for guaranteeing that the pool stays online.
564  *                                                470  *
565  * The if/else clause exists only for the lock    471  * The if/else clause exists only for the lockdep assertion and can be
566  * ignored.                                       472  * ignored.
567  */                                               473  */
568 #define for_each_pool(pool, pi)                   474 #define for_each_pool(pool, pi)                                         \
569         idr_for_each_entry(&worker_pool_idr, p    475         idr_for_each_entry(&worker_pool_idr, pool, pi)                  \
570                 if (({ assert_rcu_or_pool_mute    476                 if (({ assert_rcu_or_pool_mutex(); false; })) { }       \
571                 else                              477                 else
572                                                   478 
573 /**                                               479 /**
574  * for_each_pool_worker - iterate through all     480  * for_each_pool_worker - iterate through all workers of a worker_pool
575  * @worker: iteration cursor                      481  * @worker: iteration cursor
576  * @pool: worker_pool to iterate workers of       482  * @pool: worker_pool to iterate workers of
577  *                                                483  *
578  * This must be called with wq_pool_attach_mut    484  * This must be called with wq_pool_attach_mutex.
579  *                                                485  *
580  * The if/else clause exists only for the lock    486  * The if/else clause exists only for the lockdep assertion and can be
581  * ignored.                                       487  * ignored.
582  */                                               488  */
583 #define for_each_pool_worker(worker, pool)        489 #define for_each_pool_worker(worker, pool)                              \
584         list_for_each_entry((worker), &(pool)-    490         list_for_each_entry((worker), &(pool)->workers, node)           \
585                 if (({ lockdep_assert_held(&wq    491                 if (({ lockdep_assert_held(&wq_pool_attach_mutex); false; })) { } \
586                 else                              492                 else
587                                                   493 
588 /**                                               494 /**
589  * for_each_pwq - iterate through all pool_wor    495  * for_each_pwq - iterate through all pool_workqueues of the specified workqueue
590  * @pwq: iteration cursor                         496  * @pwq: iteration cursor
591  * @wq: the target workqueue                      497  * @wq: the target workqueue
592  *                                                498  *
593  * This must be called either with wq->mutex h    499  * This must be called either with wq->mutex held or RCU read locked.
594  * If the pwq needs to be used beyond the lock    500  * If the pwq needs to be used beyond the locking in effect, the caller is
595  * responsible for guaranteeing that the pwq s    501  * responsible for guaranteeing that the pwq stays online.
596  *                                                502  *
597  * The if/else clause exists only for the lock    503  * The if/else clause exists only for the lockdep assertion and can be
598  * ignored.                                       504  * ignored.
599  */                                               505  */
600 #define for_each_pwq(pwq, wq)                     506 #define for_each_pwq(pwq, wq)                                           \
601         list_for_each_entry_rcu((pwq), &(wq)->    507         list_for_each_entry_rcu((pwq), &(wq)->pwqs, pwqs_node,          \
602                                  lockdep_is_he    508                                  lockdep_is_held(&(wq->mutex)))
603                                                   509 
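/*
 * Illustrative sketch (not from workqueue.c): how a for_each_pwq()
 * caller satisfies the locking rule documented above, written as if it
 * lived in this file.  Either hold wq->mutex or stay inside an RCU
 * read-side critical section for the whole walk.  demo_show_pwqs() is a
 * hypothetical helper.
 */
static void demo_show_pwqs(struct workqueue_struct *wq)
{
        struct pool_workqueue *pwq;

        rcu_read_lock();
        for_each_pwq(pwq, wq) {
                /* @pwq may only be dereferenced inside this RCU section */
                pr_info("pwq %p refcnt %d\n", pwq, pwq->refcnt);
        }
        rcu_read_unlock();
}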
604 #ifdef CONFIG_DEBUG_OBJECTS_WORK                  510 #ifdef CONFIG_DEBUG_OBJECTS_WORK
605                                                   511 
606 static const struct debug_obj_descr work_debug    512 static const struct debug_obj_descr work_debug_descr;
607                                                   513 
608 static void *work_debug_hint(void *addr)          514 static void *work_debug_hint(void *addr)
609 {                                                 515 {
610         return ((struct work_struct *) addr)->    516         return ((struct work_struct *) addr)->func;
611 }                                                 517 }
612                                                   518 
613 static bool work_is_static_object(void *addr)     519 static bool work_is_static_object(void *addr)
614 {                                                 520 {
615         struct work_struct *work = addr;          521         struct work_struct *work = addr;
616                                                   522 
617         return test_bit(WORK_STRUCT_STATIC_BIT    523         return test_bit(WORK_STRUCT_STATIC_BIT, work_data_bits(work));
618 }                                                 524 }
619                                                   525 
620 /*                                                526 /*
621  * fixup_init is called when:                     527  * fixup_init is called when:
622  * - an active object is initialized              528  * - an active object is initialized
623  */                                               529  */
624 static bool work_fixup_init(void *addr, enum d    530 static bool work_fixup_init(void *addr, enum debug_obj_state state)
625 {                                                 531 {
626         struct work_struct *work = addr;          532         struct work_struct *work = addr;
627                                                   533 
628         switch (state) {                          534         switch (state) {
629         case ODEBUG_STATE_ACTIVE:                 535         case ODEBUG_STATE_ACTIVE:
630                 cancel_work_sync(work);           536                 cancel_work_sync(work);
631                 debug_object_init(work, &work_    537                 debug_object_init(work, &work_debug_descr);
632                 return true;                      538                 return true;
633         default:                                  539         default:
634                 return false;                     540                 return false;
635         }                                         541         }
636 }                                                 542 }
637                                                   543 
638 /*                                                544 /*
639  * fixup_free is called when:                     545  * fixup_free is called when:
640  * - an active object is freed                    546  * - an active object is freed
641  */                                               547  */
642 static bool work_fixup_free(void *addr, enum d    548 static bool work_fixup_free(void *addr, enum debug_obj_state state)
643 {                                                 549 {
644         struct work_struct *work = addr;          550         struct work_struct *work = addr;
645                                                   551 
646         switch (state) {                          552         switch (state) {
647         case ODEBUG_STATE_ACTIVE:                 553         case ODEBUG_STATE_ACTIVE:
648                 cancel_work_sync(work);           554                 cancel_work_sync(work);
649                 debug_object_free(work, &work_    555                 debug_object_free(work, &work_debug_descr);
650                 return true;                      556                 return true;
651         default:                                  557         default:
652                 return false;                     558                 return false;
653         }                                         559         }
654 }                                                 560 }
655                                                   561 
656 static const struct debug_obj_descr work_debug    562 static const struct debug_obj_descr work_debug_descr = {
657         .name           = "work_struct",          563         .name           = "work_struct",
658         .debug_hint     = work_debug_hint,        564         .debug_hint     = work_debug_hint,
659         .is_static_object = work_is_static_obj    565         .is_static_object = work_is_static_object,
660         .fixup_init     = work_fixup_init,        566         .fixup_init     = work_fixup_init,
661         .fixup_free     = work_fixup_free,        567         .fixup_free     = work_fixup_free,
662 };                                                568 };
663                                                   569 
664 static inline void debug_work_activate(struct     570 static inline void debug_work_activate(struct work_struct *work)
665 {                                                 571 {
666         debug_object_activate(work, &work_debu    572         debug_object_activate(work, &work_debug_descr);
667 }                                                 573 }
668                                                   574 
669 static inline void debug_work_deactivate(struc    575 static inline void debug_work_deactivate(struct work_struct *work)
670 {                                                 576 {
671         debug_object_deactivate(work, &work_de    577         debug_object_deactivate(work, &work_debug_descr);
672 }                                                 578 }
673                                                   579 
674 void __init_work(struct work_struct *work, int    580 void __init_work(struct work_struct *work, int onstack)
675 {                                                 581 {
676         if (onstack)                              582         if (onstack)
677                 debug_object_init_on_stack(wor    583                 debug_object_init_on_stack(work, &work_debug_descr);
678         else                                      584         else
679                 debug_object_init(work, &work_    585                 debug_object_init(work, &work_debug_descr);
680 }                                                 586 }
681 EXPORT_SYMBOL_GPL(__init_work);                   587 EXPORT_SYMBOL_GPL(__init_work);
682                                                   588 
683 void destroy_work_on_stack(struct work_struct     589 void destroy_work_on_stack(struct work_struct *work)
684 {                                                 590 {
685         debug_object_free(work, &work_debug_de    591         debug_object_free(work, &work_debug_descr);
686 }                                                 592 }
687 EXPORT_SYMBOL_GPL(destroy_work_on_stack);         593 EXPORT_SYMBOL_GPL(destroy_work_on_stack);
688                                                   594 
689 void destroy_delayed_work_on_stack(struct dela    595 void destroy_delayed_work_on_stack(struct delayed_work *work)
690 {                                                 596 {
691         destroy_timer_on_stack(&work->timer);     597         destroy_timer_on_stack(&work->timer);
692         debug_object_free(&work->work, &work_d    598         debug_object_free(&work->work, &work_debug_descr);
693 }                                                 599 }
694 EXPORT_SYMBOL_GPL(destroy_delayed_work_on_stac    600 EXPORT_SYMBOL_GPL(destroy_delayed_work_on_stack);
695                                                   601 
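/*
 * Illustrative sketch (not from workqueue.c): typical pairing for the
 * on-stack helpers above.  INIT_WORK_ONSTACK() routes through
 * __init_work(work, 1) so debugobjects knows the item lives on the
 * stack, and destroy_work_on_stack() drops the tracking entry once the
 * work is guaranteed to be finished.  demo_fn() and demo_run_onstack()
 * are hypothetical.
 */
#include <linux/workqueue.h>
#include <linux/printk.h>

static void demo_fn(struct work_struct *work)
{
        pr_info("on-stack work ran\n");
}

static void demo_run_onstack(void)
{
        struct work_struct w;

        INIT_WORK_ONSTACK(&w, demo_fn);
        schedule_work(&w);
        flush_work(&w);                 /* must finish before &w goes away */
        destroy_work_on_stack(&w);
}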
696 #else                                             602 #else
697 static inline void debug_work_activate(struct     603 static inline void debug_work_activate(struct work_struct *work) { }
698 static inline void debug_work_deactivate(struc    604 static inline void debug_work_deactivate(struct work_struct *work) { }
699 #endif                                            605 #endif
700                                                   606 
701 /**                                               607 /**
702  * worker_pool_assign_id - allocate ID and ass    608  * worker_pool_assign_id - allocate ID and assign it to @pool
703  * @pool: the pool pointer of interest            609  * @pool: the pool pointer of interest
704  *                                                610  *
705  * Returns 0 if an ID in [0, WORK_OFFQ_POOL_NON    611  * Returns 0 if an ID in [0, WORK_OFFQ_POOL_NONE) is allocated and assigned
706  * successfully, -errno on failure.               612  * successfully, -errno on failure.
707  */                                               613  */
708 static int worker_pool_assign_id(struct worker    614 static int worker_pool_assign_id(struct worker_pool *pool)
709 {                                                 615 {
710         int ret;                                  616         int ret;
711                                                   617 
712         lockdep_assert_held(&wq_pool_mutex);      618         lockdep_assert_held(&wq_pool_mutex);
713                                                   619 
714         ret = idr_alloc(&worker_pool_idr, pool    620         ret = idr_alloc(&worker_pool_idr, pool, 0, WORK_OFFQ_POOL_NONE,
715                         GFP_KERNEL);              621                         GFP_KERNEL);
716         if (ret >= 0) {                           622         if (ret >= 0) {
717                 pool->id = ret;                   623                 pool->id = ret;
718                 return 0;                         624                 return 0;
719         }                                         625         }
720         return ret;                               626         return ret;
721 }                                                 627 }
722                                                   628 
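/*
 * Illustrative sketch (not from workqueue.c): idr_alloc() as used by
 * worker_pool_assign_id() above.  It returns the lowest free ID in
 * [start, end) or a negative errno; capping 'end' at WORK_OFFQ_POOL_NONE
 * is what guarantees a pool ID always fits in the off-queue pool-ID bits
 * of work->data.  demo_idr and demo_register() are hypothetical.
 */
#include <linux/idr.h>
#include <linux/printk.h>

static DEFINE_IDR(demo_idr);

static int demo_register(void *object)
{
        int id = idr_alloc(&demo_idr, object, 0, 1024, GFP_KERNEL);

        if (id < 0)
                return id;              /* -ENOMEM or -ENOSPC */
        pr_info("allocated id %d\n", id);
        return 0;
}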
723 static struct pool_workqueue __rcu **          << 
724 unbound_pwq_slot(struct workqueue_struct *wq,  << 
725 {                                              << 
726        if (cpu >= 0)                           << 
727                return per_cpu_ptr(wq->cpu_pwq, << 
728        else                                    << 
729                return &wq->dfl_pwq;            << 
730 }                                              << 
731                                                << 
732 /* @cpu < 0 for dfl_pwq */                     << 
733 static struct pool_workqueue *unbound_pwq(stru << 
734 {                                              << 
735         return rcu_dereference_check(*unbound_ << 
736                                      lockdep_i << 
737                                      lockdep_i << 
738 }                                              << 
739                                                << 
740 /**                                            << 
741  * unbound_effective_cpumask - effective cpuma << 
742  * @wq: workqueue of interest                  << 
743  *                                             << 
744  * @wq->unbound_attrs->cpumask contains the cp << 
745  * is masked with wq_unbound_cpumask to determ << 
746  * default pwq is always mapped to the pool wi << 
747  */                                            << 
748 static struct cpumask *unbound_effective_cpuma << 
749 {                                              << 
750         return unbound_pwq(wq, -1)->pool->attr << 
751 }                                              << 
752                                                << 
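/*
 * Illustrative sketch (not from workqueue.c): the pattern behind
 * unbound_pwq() above, an __rcu pointer that may be read either inside
 * an RCU read-side section or with an updater-side mutex held, with the
 * extra lockdep condition passed to rcu_dereference_check() so both
 * contexts pass the checks.  demo_lock, demo_cfg and demo_read_val()
 * are hypothetical.
 */
#include <linux/rcupdate.h>
#include <linux/mutex.h>

struct demo_cfg { int val; };

static DEFINE_MUTEX(demo_lock);
static struct demo_cfg __rcu *demo_cfg_ptr;

static int demo_read_val(void)
{
        struct demo_cfg *cfg;
        int val = -1;

        rcu_read_lock();
        cfg = rcu_dereference_check(demo_cfg_ptr,
                                    lockdep_is_held(&demo_lock));
        if (cfg)
                val = cfg->val;         /* only valid inside the RCU section */
        rcu_read_unlock();
        return val;
}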
753 static unsigned int work_color_to_flags(int co    629 static unsigned int work_color_to_flags(int color)
754 {                                                 630 {
755         return color << WORK_STRUCT_COLOR_SHIF    631         return color << WORK_STRUCT_COLOR_SHIFT;
756 }                                                 632 }
757                                                   633 
758 static int get_work_color(unsigned long work_d    634 static int get_work_color(unsigned long work_data)
759 {                                                 635 {
760         return (work_data >> WORK_STRUCT_COLOR    636         return (work_data >> WORK_STRUCT_COLOR_SHIFT) &
761                 ((1 << WORK_STRUCT_COLOR_BITS)    637                 ((1 << WORK_STRUCT_COLOR_BITS) - 1);
762 }                                                 638 }
763                                                   639 
764 static int work_next_color(int color)             640 static int work_next_color(int color)
765 {                                                 641 {
766         return (color + 1) % WORK_NR_COLORS;      642         return (color + 1) % WORK_NR_COLORS;
767 }                                                 643 }
768                                                   644 
769 static unsigned long pool_offq_flags(struct wo << 
770 {                                              << 
771         return (pool->flags & POOL_BH) ? WORK_ << 
772 }                                              << 
773                                                << 
774 /*                                                645 /*
775  * While queued, %WORK_STRUCT_PWQ is set and n    646  * While queued, %WORK_STRUCT_PWQ is set and non-flag bits of a work's data
776  * contain the pointer to the queued pwq.  Onc    647  * contain the pointer to the queued pwq.  Once execution starts, the flag
777  * is cleared and the high bits contain OFFQ f    648  * is cleared and the high bits contain OFFQ flags and pool ID.
778  *                                                649  *
779  * set_work_pwq(), set_work_pool_and_clear_pen !! 650  * set_work_pwq(), set_work_pool_and_clear_pending(), mark_work_canceling()
780  * can be used to set the pwq, pool or clear w !! 651  * and clear_work_data() can be used to set the pwq, pool or clear
781  * only be called while the work is owned - ie !! 652  * work->data.  These functions should only be called while the work is
                                                   >> 653  * owned - ie. while the PENDING bit is set.
782  *                                                654  *
783  * get_work_pool() and get_work_pwq() can be u    655  * get_work_pool() and get_work_pwq() can be used to obtain the pool or pwq
784  * corresponding to a work.  Pool is available    656  * corresponding to a work.  Pool is available once the work has been
785  * queued anywhere after initialization until     657  * queued anywhere after initialization until it is sync canceled.  pwq is
786  * available only while the work item is queue    658  * available only while the work item is queued.
                                                   >> 659  *
                                                   >> 660  * %WORK_OFFQ_CANCELING is used to mark a work item which is being
                                                   >> 661  * canceled.  While being canceled, a work item may have its PENDING set
                                                   >> 662  * but stay off timer and worklist for arbitrarily long and nobody should
                                                   >> 663  * try to steal the PENDING bit.
787  */                                               664  */
788 static inline void set_work_data(struct work_s !! 665 static inline void set_work_data(struct work_struct *work, unsigned long data,
                                                   >> 666                                  unsigned long flags)
789 {                                                 667 {
790         WARN_ON_ONCE(!work_pending(work));        668         WARN_ON_ONCE(!work_pending(work));
791         atomic_long_set(&work->data, data | wo !! 669         atomic_long_set(&work->data, data | flags | work_static(work));
792 }                                                 670 }
793                                                   671 
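/*
 * Illustrative sketch (not from workqueue.c): the two encodings of
 * work->data described in the comment above, with made-up bit widths:
 *
 *   queued:    [ pool_workqueue pointer ........... | flags | PWQ=1 ]
 *   off queue: [ pool ID | disable cnt | OFFQ flags | flags | PWQ=0 ]
 *
 * Decoding always starts with the WORK_STRUCT_PWQ test, exactly as
 * get_work_pwq()/get_work_pool() below do.  The tagged-word idiom in
 * isolation, with hypothetical DEMO_* names:
 */
#include <linux/atomic.h>

#define DEMO_TAG_PTR    0x1UL           /* low bit: word holds a pointer */

static void *demo_decode_ptr(atomic_long_t *word)
{
        unsigned long v = atomic_long_read(word);

        if (v & DEMO_TAG_PTR)
                return (void *)(v & ~DEMO_TAG_PTR);     /* pointer form */
        return NULL;                                    /* ID/flags form */
}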
794 static void set_work_pwq(struct work_struct *w    672 static void set_work_pwq(struct work_struct *work, struct pool_workqueue *pwq,
795                          unsigned long flags)  !! 673                          unsigned long extra_flags)
796 {                                                 674 {
797         set_work_data(work, (unsigned long)pwq !! 675         set_work_data(work, (unsigned long)pwq,
798                       WORK_STRUCT_PWQ | flags) !! 676                       WORK_STRUCT_PENDING | WORK_STRUCT_PWQ | extra_flags);
799 }                                                 677 }
800                                                   678 
801 static void set_work_pool_and_keep_pending(str    679 static void set_work_pool_and_keep_pending(struct work_struct *work,
802                                            int !! 680                                            int pool_id)
803 {                                                 681 {
804         set_work_data(work, ((unsigned long)po !! 682         set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT,
805                       WORK_STRUCT_PENDING | fl !! 683                       WORK_STRUCT_PENDING);
806 }                                                 684 }
807                                                   685 
808 static void set_work_pool_and_clear_pending(st    686 static void set_work_pool_and_clear_pending(struct work_struct *work,
809                                             in !! 687                                             int pool_id)
810 {                                                 688 {
811         /*                                        689         /*
812          * The following wmb is paired with th    690          * The following wmb is paired with the implied mb in
813          * test_and_set_bit(PENDING) and ensur    691          * test_and_set_bit(PENDING) and ensures all updates to @work made
814          * here are visible to and precede any    692          * here are visible to and precede any updates by the next PENDING
815          * owner.                                 693          * owner.
816          */                                       694          */
817         smp_wmb();                                695         smp_wmb();
818         set_work_data(work, ((unsigned long)po !! 696         set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT, 0);
819                       flags);                  << 
820         /*                                        697         /*
821          * The following mb guarantees that pr    698          * The following mb guarantees that previous clear of a PENDING bit
822          * will not be reordered with any spec    699          * will not be reordered with any speculative LOADS or STORES from
823          * work->current_func, which is execut    700          * work->current_func, which is executed afterwards.  This possible
824          * reordering can lead to a missed exe    701          * reordering can lead to a missed execution on attempt to queue
825          * the same @work.  E.g. consider this    702          * the same @work.  E.g. consider this case:
826          *                                        703          *
827          *   CPU#0                         CPU    704          *   CPU#0                         CPU#1
828          *   ----------------------------  ---    705          *   ----------------------------  --------------------------------
829          *                                        706          *
830          * 1  STORE event_indicated               707          * 1  STORE event_indicated
831          * 2  queue_work_on() {                   708          * 2  queue_work_on() {
832          * 3    test_and_set_bit(PENDING)         709          * 3    test_and_set_bit(PENDING)
833          * 4 }                             set    710          * 4 }                             set_..._and_clear_pending() {
834          * 5                                 s    711          * 5                                 set_work_data() # clear bit
835          * 6                                 s    712          * 6                                 smp_mb()
836          * 7                               wor    713          * 7                               work->current_func() {
837          * 8                                      714          * 8                                  LOAD event_indicated
838          *                                 }      715          *                                 }
839          *                                        716          *
840          * Without an explicit full barrier sp    717          * Without an explicit full barrier speculative LOAD on line 8 can
841          * be executed before CPU#0 does STORE    718          * be executed before CPU#0 does STORE on line 1.  If that happens,
842          * CPU#0 observes the PENDING bit is s    719          * CPU#0 observes the PENDING bit is still set and new execution of
843          * a @work is not queued in the hope t    720          * a @work is not queued in the hope that CPU#1 will eventually
844          * finish the queued @work.  Meanwhile    721          * finish the queued @work.  Meanwhile CPU#1 does not see
845          * event_indicated is set, because spe    722          * event_indicated is set, because speculative LOAD was executed
846          * before actual STORE.                   723          * before actual STORE.
847          */                                       724          */
848         smp_mb();                                 725         smp_mb();
849 }                                                 726 }
850                                                   727 
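/*
 * Illustrative sketch (not from workqueue.c): the barrier pairing from
 * the comment above, reduced to its core.  The queuing side publishes
 * its data before test_and_set_bit(PENDING), which implies a full
 * barrier; the executing side clears PENDING and issues smp_mb() before
 * reading that data, so a queuer that still observes PENDING set may
 * rely on the running work function seeing the data.  All demo_* names
 * are hypothetical.
 */
#include <linux/bitops.h>
#include <linux/compiler.h>
#include <linux/printk.h>

static unsigned long demo_pending;
static int demo_event;

static bool demo_queue(void)                    /* CPU#0 in the diagram */
{
        WRITE_ONCE(demo_event, 1);              /* 1: STORE event_indicated */
        return !test_and_set_bit(0, &demo_pending); /* 3: implies full mb */
}

static void demo_execute(void)                  /* CPU#1 in the diagram */
{
        clear_bit(0, &demo_pending);            /* 5: clear PENDING */
        smp_mb();                               /* 6: pairs with the queuer */
        if (READ_ONCE(demo_event))              /* 8: LOAD event_indicated */
                pr_info("event seen\n");
}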
                                                   >> 728 static void clear_work_data(struct work_struct *work)
                                                   >> 729 {
                                                   >> 730         smp_wmb();      /* see set_work_pool_and_clear_pending() */
                                                   >> 731         set_work_data(work, WORK_STRUCT_NO_POOL, 0);
                                                   >> 732 }
                                                   >> 733 
851 static inline struct pool_workqueue *work_stru    734 static inline struct pool_workqueue *work_struct_pwq(unsigned long data)
852 {                                                 735 {
853         return (struct pool_workqueue *)(data  !! 736         return (struct pool_workqueue *)(data & WORK_STRUCT_WQ_DATA_MASK);
854 }                                                 737 }
855                                                   738 
856 static struct pool_workqueue *get_work_pwq(str    739 static struct pool_workqueue *get_work_pwq(struct work_struct *work)
857 {                                                 740 {
858         unsigned long data = atomic_long_read(    741         unsigned long data = atomic_long_read(&work->data);
859                                                   742 
860         if (data & WORK_STRUCT_PWQ)               743         if (data & WORK_STRUCT_PWQ)
861                 return work_struct_pwq(data);     744                 return work_struct_pwq(data);
862         else                                      745         else
863                 return NULL;                      746                 return NULL;
864 }                                                 747 }
865                                                   748 
866 /**                                               749 /**
867  * get_work_pool - return the worker_pool a gi    750  * get_work_pool - return the worker_pool a given work was associated with
868  * @work: the work item of interest               751  * @work: the work item of interest
869  *                                                752  *
870  * Pools are created and destroyed under wq_po    753  * Pools are created and destroyed under wq_pool_mutex, and allow read
871  * access under RCU read lock.  As such, this     754  * access under RCU read lock.  As such, this function should be
872  * called under wq_pool_mutex or inside of a r    755  * called under wq_pool_mutex or inside of a rcu_read_lock() region.
873  *                                                756  *
874  * All fields of the returned pool are accessi    757  * All fields of the returned pool are accessible as long as the above
875  * mentioned locking is in effect.  If the ret    758  * mentioned locking is in effect.  If the returned pool needs to be used
876  * beyond the critical section, the caller is     759  * beyond the critical section, the caller is responsible for ensuring the
877  * returned pool is and stays online.             760  * returned pool is and stays online.
878  *                                                761  *
879  * Return: The worker_pool @work was last asso    762  * Return: The worker_pool @work was last associated with.  %NULL if none.
880  */                                               763  */
881 static struct worker_pool *get_work_pool(struc    764 static struct worker_pool *get_work_pool(struct work_struct *work)
882 {                                                 765 {
883         unsigned long data = atomic_long_read(    766         unsigned long data = atomic_long_read(&work->data);
884         int pool_id;                              767         int pool_id;
885                                                   768 
886         assert_rcu_or_pool_mutex();               769         assert_rcu_or_pool_mutex();
887                                                   770 
888         if (data & WORK_STRUCT_PWQ)               771         if (data & WORK_STRUCT_PWQ)
889                 return work_struct_pwq(data)->    772                 return work_struct_pwq(data)->pool;
890                                                   773 
891         pool_id = data >> WORK_OFFQ_POOL_SHIFT    774         pool_id = data >> WORK_OFFQ_POOL_SHIFT;
892         if (pool_id == WORK_OFFQ_POOL_NONE)       775         if (pool_id == WORK_OFFQ_POOL_NONE)
893                 return NULL;                      776                 return NULL;
894                                                   777 
895         return idr_find(&worker_pool_idr, pool    778         return idr_find(&worker_pool_idr, pool_id);
896 }                                                 779 }
897                                                   780 
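/*
 * Illustrative sketch (not from workqueue.c): the canonical calling
 * pattern for get_work_pool(), written as if it lived in this file.
 * Take rcu_read_lock() (or hold wq_pool_mutex) and only dereference the
 * returned pool inside that region.  demo_report_pool() is hypothetical.
 */
static void demo_report_pool(struct work_struct *work)
{
        struct worker_pool *pool;

        rcu_read_lock();
        pool = get_work_pool(work);
        if (pool)
                pr_info("work %p last ran on pool %d\n", work, pool->id);
        rcu_read_unlock();
}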
898 static unsigned long shift_and_mask(unsigned l !! 781 /**
                                                   >> 782  * get_work_pool_id - return the worker pool ID a given work is associated with
                                                   >> 783  * @work: the work item of interest
                                                   >> 784  *
                                                   >> 785  * Return: The worker_pool ID @work was last associated with.
                                                   >> 786  * %WORK_OFFQ_POOL_NONE if none.
                                                   >> 787  */
                                                   >> 788 static int get_work_pool_id(struct work_struct *work)
899 {                                                 789 {
900         return (v >> shift) & ((1U << bits) -  !! 790         unsigned long data = atomic_long_read(&work->data);
                                                   >> 791 
                                                   >> 792         if (data & WORK_STRUCT_PWQ)
                                                   >> 793                 return work_struct_pwq(data)->pool->id;
                                                   >> 794 
                                                   >> 795         return data >> WORK_OFFQ_POOL_SHIFT;
901 }                                                 796 }
902                                                   797 
903 static void work_offqd_unpack(struct work_offq !! 798 static void mark_work_canceling(struct work_struct *work)
904 {                                                 799 {
905         WARN_ON_ONCE(data & WORK_STRUCT_PWQ);  !! 800         unsigned long pool_id = get_work_pool_id(work);
906                                                   801 
907         offqd->pool_id = shift_and_mask(data,  !! 802         pool_id <<= WORK_OFFQ_POOL_SHIFT;
908                                         WORK_O !! 803         set_work_data(work, pool_id | WORK_OFFQ_CANCELING, WORK_STRUCT_PENDING);
909         offqd->disable = shift_and_mask(data,  << 
910                                         WORK_O << 
911         offqd->flags = data & WORK_OFFQ_FLAG_M << 
912 }                                                 804 }
913                                                   805 
914 static unsigned long work_offqd_pack_flags(str !! 806 static bool work_is_canceling(struct work_struct *work)
915 {                                                 807 {
916         return ((unsigned long)offqd->disable  !! 808         unsigned long data = atomic_long_read(&work->data);
917                 ((unsigned long)offqd->flags); !! 809 
                                                   >> 810         return !(data & WORK_STRUCT_PWQ) && (data & WORK_OFFQ_CANCELING);
918 }                                                 811 }
919                                                   812 
920 /*                                                813 /*
921  * Policy functions.  These define the policie    814  * Policy functions.  These define the policies on how the global worker
922  * pools are managed.  Unless noted otherwise,    815  * pools are managed.  Unless noted otherwise, these functions assume that
923  * they're being called with pool->lock held.     816  * they're being called with pool->lock held.
924  */                                               817  */
925                                                   818 
926 /*                                                819 /*
927  * Need to wake up a worker?  Called from anyt    820  * Need to wake up a worker?  Called from anything but currently
928  * running workers.                               821  * running workers.
929  *                                                822  *
930  * Note that, because unbound workers never co    823  * Note that, because unbound workers never contribute to nr_running, this
931  * function will always return %true for unbou    824  * function will always return %true for unbound pools as long as the
932  * worklist isn't empty.                          825  * worklist isn't empty.
933  */                                               826  */
934 static bool need_more_worker(struct worker_poo    827 static bool need_more_worker(struct worker_pool *pool)
935 {                                                 828 {
936         return !list_empty(&pool->worklist) &&    829         return !list_empty(&pool->worklist) && !pool->nr_running;
937 }                                                 830 }
938                                                   831 
939 /* Can I start working?  Called from busy but     832 /* Can I start working?  Called from busy but !running workers. */
940 static bool may_start_working(struct worker_po    833 static bool may_start_working(struct worker_pool *pool)
941 {                                                 834 {
942         return pool->nr_idle;                     835         return pool->nr_idle;
943 }                                                 836 }
944                                                   837 
945 /* Do I need to keep working?  Called from cur    838 /* Do I need to keep working?  Called from currently running workers. */
946 static bool keep_working(struct worker_pool *p    839 static bool keep_working(struct worker_pool *pool)
947 {                                                 840 {
948         return !list_empty(&pool->worklist) &&    841         return !list_empty(&pool->worklist) && (pool->nr_running <= 1);
949 }                                                 842 }
950                                                   843 
951 /* Do we need a new worker?  Called from manag    844 /* Do we need a new worker?  Called from manager. */
952 static bool need_to_create_worker(struct worke    845 static bool need_to_create_worker(struct worker_pool *pool)
953 {                                                 846 {
954         return need_more_worker(pool) && !may_    847         return need_more_worker(pool) && !may_start_working(pool);
955 }                                                 848 }
956                                                   849 
957 /* Do we have too many workers and should some    850 /* Do we have too many workers and should some go away? */
958 static bool too_many_workers(struct worker_poo    851 static bool too_many_workers(struct worker_pool *pool)
959 {                                                 852 {
960         bool managing = pool->flags & POOL_MAN    853         bool managing = pool->flags & POOL_MANAGER_ACTIVE;
961         int nr_idle = pool->nr_idle + managing    854         int nr_idle = pool->nr_idle + managing; /* manager is considered idle */
962         int nr_busy = pool->nr_workers - nr_id    855         int nr_busy = pool->nr_workers - nr_idle;
963                                                   856 
964         return nr_idle > 2 && (nr_idle - 2) *     857         return nr_idle > 2 && (nr_idle - 2) * MAX_IDLE_WORKERS_RATIO >= nr_busy;
965 }                                                 858 }
966                                                   859 
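/*
 * Illustrative sketch (not from workqueue.c): the too_many_workers()
 * arithmetic, assuming the idle/busy ratio macro used above is 4 (see
 * MAX_IDLE_WORKERS_RATIO earlier in this file).  With nr_idle = 6 and
 * nr_busy = 16, (6 - 2) * 4 = 16 >= 16, so idle reaping may start; with
 * nr_busy = 17 every idle worker is kept.
 */
static bool demo_too_many(int nr_idle, int nr_busy)
{
        const int ratio = 4;            /* assumed MAX_IDLE_WORKERS_RATIO */

        return nr_idle > 2 && (nr_idle - 2) * ratio >= nr_busy;
}
/* demo_too_many(6, 16) == true, demo_too_many(6, 17) == false */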
967 /**                                               860 /**
968  * worker_set_flags - set worker flags and adj    861  * worker_set_flags - set worker flags and adjust nr_running accordingly
969  * @worker: self                                  862  * @worker: self
970  * @flags: flags to set                           863  * @flags: flags to set
971  *                                                864  *
972  * Set @flags in @worker->flags and adjust nr_    865  * Set @flags in @worker->flags and adjust nr_running accordingly.
973  */                                               866  */
974 static inline void worker_set_flags(struct wor    867 static inline void worker_set_flags(struct worker *worker, unsigned int flags)
975 {                                                 868 {
976         struct worker_pool *pool = worker->poo    869         struct worker_pool *pool = worker->pool;
977                                                   870 
978         lockdep_assert_held(&pool->lock);         871         lockdep_assert_held(&pool->lock);
979                                                   872 
980         /* If transitioning into NOT_RUNNING,     873         /* If transitioning into NOT_RUNNING, adjust nr_running. */
981         if ((flags & WORKER_NOT_RUNNING) &&       874         if ((flags & WORKER_NOT_RUNNING) &&
982             !(worker->flags & WORKER_NOT_RUNNI    875             !(worker->flags & WORKER_NOT_RUNNING)) {
983                 pool->nr_running--;               876                 pool->nr_running--;
984         }                                         877         }
985                                                   878 
986         worker->flags |= flags;                   879         worker->flags |= flags;
987 }                                                 880 }
988                                                   881 
989 /**                                               882 /**
990  * worker_clr_flags - clear worker flags and a    883  * worker_clr_flags - clear worker flags and adjust nr_running accordingly
991  * @worker: self                                  884  * @worker: self
992  * @flags: flags to clear                         885  * @flags: flags to clear
993  *                                                886  *
994  * Clear @flags in @worker->flags and adjust n    887  * Clear @flags in @worker->flags and adjust nr_running accordingly.
995  */                                               888  */
996 static inline void worker_clr_flags(struct wor    889 static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
997 {                                                 890 {
998         struct worker_pool *pool = worker->poo    891         struct worker_pool *pool = worker->pool;
999         unsigned int oflags = worker->flags;      892         unsigned int oflags = worker->flags;
1000                                                  893 
1001         lockdep_assert_held(&pool->lock);        894         lockdep_assert_held(&pool->lock);
1002                                                  895 
1003         worker->flags &= ~flags;                 896         worker->flags &= ~flags;
1004                                                  897 
1005         /*                                       898         /*
1006          * If transitioning out of NOT_RUNNIN    899          * If transitioning out of NOT_RUNNING, increment nr_running.  Note
1007          * that the nested NOT_RUNNING is not    900          * that the nested NOT_RUNNING is not a noop.  NOT_RUNNING is mask
1008          * of multiple flags, not a single fl    901          * of multiple flags, not a single flag.
1009          */                                      902          */
1010         if ((flags & WORKER_NOT_RUNNING) && (    903         if ((flags & WORKER_NOT_RUNNING) && (oflags & WORKER_NOT_RUNNING))
1011                 if (!(worker->flags & WORKER_    904                 if (!(worker->flags & WORKER_NOT_RUNNING))
1012                         pool->nr_running++;      905                         pool->nr_running++;
1013 }                                                906 }
1014                                                  907 
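/*
 * Illustrative sketch (not from workqueue.c): why the "nested
 * NOT_RUNNING" note above matters.  NOT_RUNNING is a mask of several
 * flags, so clearing one of them only re-enables nr_running accounting
 * once the whole mask is clear.  DEMO_* values are made up, not the real
 * WORKER_* flags.
 */
#define DEMO_PREP               0x1
#define DEMO_CPU_INTENSIVE      0x2
#define DEMO_NOT_RUNNING        (DEMO_PREP | DEMO_CPU_INTENSIVE)

static int demo_clr_flags(unsigned int *flags, unsigned int clr, int nr_running)
{
        unsigned int oflags = *flags;

        *flags &= ~clr;
        if ((clr & DEMO_NOT_RUNNING) && (oflags & DEMO_NOT_RUNNING) &&
            !(*flags & DEMO_NOT_RUNNING))
                nr_running++;           /* last NOT_RUNNING flag gone */
        return nr_running;
}
/* flags == PREP|CPU_INTENSIVE: clearing PREP alone leaves nr_running as is */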
1015 /* Return the first idle worker.  Called with    908 /* Return the first idle worker.  Called with pool->lock held. */
1016 static struct worker *first_idle_worker(struc    909 static struct worker *first_idle_worker(struct worker_pool *pool)
1017 {                                                910 {
1018         if (unlikely(list_empty(&pool->idle_l    911         if (unlikely(list_empty(&pool->idle_list)))
1019                 return NULL;                     912                 return NULL;
1020                                                  913 
1021         return list_first_entry(&pool->idle_l    914         return list_first_entry(&pool->idle_list, struct worker, entry);
1022 }                                                915 }
1023                                                  916 
1024 /**                                              917 /**
1025  * worker_enter_idle - enter idle state          918  * worker_enter_idle - enter idle state
1026  * @worker: worker which is entering idle sta    919  * @worker: worker which is entering idle state
1027  *                                               920  *
1028  * @worker is entering idle state.  Update st    921  * @worker is entering idle state.  Update stats and idle timer if
1029  * necessary.                                    922  * necessary.
1030  *                                               923  *
1031  * LOCKING:                                      924  * LOCKING:
1032  * raw_spin_lock_irq(pool->lock).                925  * raw_spin_lock_irq(pool->lock).
1033  */                                              926  */
1034 static void worker_enter_idle(struct worker *    927 static void worker_enter_idle(struct worker *worker)
1035 {                                                928 {
1036         struct worker_pool *pool = worker->po    929         struct worker_pool *pool = worker->pool;
1037                                                  930 
1038         if (WARN_ON_ONCE(worker->flags & WORK    931         if (WARN_ON_ONCE(worker->flags & WORKER_IDLE) ||
1039             WARN_ON_ONCE(!list_empty(&worker-    932             WARN_ON_ONCE(!list_empty(&worker->entry) &&
1040                          (worker->hentry.next    933                          (worker->hentry.next || worker->hentry.pprev)))
1041                 return;                          934                 return;
1042                                                  935 
1043         /* can't use worker_set_flags(), also    936         /* can't use worker_set_flags(), also called from create_worker() */
1044         worker->flags |= WORKER_IDLE;            937         worker->flags |= WORKER_IDLE;
1045         pool->nr_idle++;                         938         pool->nr_idle++;
1046         worker->last_active = jiffies;           939         worker->last_active = jiffies;
1047                                                  940 
1048         /* idle_list is LIFO */                  941         /* idle_list is LIFO */
1049         list_add(&worker->entry, &pool->idle_    942         list_add(&worker->entry, &pool->idle_list);
1050                                                  943 
1051         if (too_many_workers(pool) && !timer_    944         if (too_many_workers(pool) && !timer_pending(&pool->idle_timer))
1052                 mod_timer(&pool->idle_timer,     945                 mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT);
1053                                                  946 
1054         /* Sanity check nr_running. */           947         /* Sanity check nr_running. */
1055         WARN_ON_ONCE(pool->nr_workers == pool    948         WARN_ON_ONCE(pool->nr_workers == pool->nr_idle && pool->nr_running);
1056 }                                                949 }
1057                                                  950 
1058 /**                                              951 /**
1059  * worker_leave_idle - leave idle state          952  * worker_leave_idle - leave idle state
1060  * @worker: worker which is leaving idle stat    953  * @worker: worker which is leaving idle state
1061  *                                               954  *
1062  * @worker is leaving idle state.  Update sta    955  * @worker is leaving idle state.  Update stats.
1063  *                                               956  *
1064  * LOCKING:                                      957  * LOCKING:
1065  * raw_spin_lock_irq(pool->lock).                958  * raw_spin_lock_irq(pool->lock).
1066  */                                              959  */
1067 static void worker_leave_idle(struct worker *    960 static void worker_leave_idle(struct worker *worker)
1068 {                                                961 {
1069         struct worker_pool *pool = worker->po    962         struct worker_pool *pool = worker->pool;
1070                                                  963 
1071         if (WARN_ON_ONCE(!(worker->flags & WO    964         if (WARN_ON_ONCE(!(worker->flags & WORKER_IDLE)))
1072                 return;                          965                 return;
1073         worker_clr_flags(worker, WORKER_IDLE)    966         worker_clr_flags(worker, WORKER_IDLE);
1074         pool->nr_idle--;                         967         pool->nr_idle--;
1075         list_del_init(&worker->entry);           968         list_del_init(&worker->entry);
1076 }                                                969 }
1077                                                  970 
1078 /**                                              971 /**
1079  * find_worker_executing_work - find worker w    972  * find_worker_executing_work - find worker which is executing a work
1080  * @pool: pool of interest                       973  * @pool: pool of interest
1081  * @work: work to find worker for                974  * @work: work to find worker for
1082  *                                               975  *
1083  * Find a worker which is executing @work on     976  * Find a worker which is executing @work on @pool by searching
1084  * @pool->busy_hash which is keyed by the add    977  * @pool->busy_hash which is keyed by the address of @work.  For a worker
1085  * to match, its current execution should mat    978  * to match, its current execution should match the address of @work and
1086  * its work function.  This is to avoid unwan    979  * its work function.  This is to avoid unwanted dependency between
1087  * unrelated work executions through a work i    980  * unrelated work executions through a work item being recycled while still
1088  * being executed.                               981  * being executed.
1089  *                                               982  *
1090  * This is a bit tricky.  A work item may be     983  * This is a bit tricky.  A work item may be freed once its execution
1091  * starts and nothing prevents the freed area    984  * starts and nothing prevents the freed area from being recycled for
1092  * another work item.  If the same work item     985  * another work item.  If the same work item address ends up being reused
1093  * before the original execution finishes, wo    986  * before the original execution finishes, workqueue will identify the
1094  * recycled work item as currently executing     987  * recycled work item as currently executing and make it wait until the
1095  * current execution finishes, introducing an    988  * current execution finishes, introducing an unwanted dependency.
1096  *                                               989  *
1097  * This function checks the work item address    990  * This function checks the work item address and work function to avoid
1098  * false positives.  Note that this isn't com    991  * false positives.  Note that this isn't complete as one may construct a
1099  * work function which can introduce dependen    992  * work function which can introduce dependency onto itself through a
1100  * recycled work item.  Well, if somebody wan    993  * recycled work item.  Well, if somebody wants to shoot oneself in the
1101  * foot that badly, there's only so much we c    994  * foot that badly, there's only so much we can do, and if such deadlock
1102  * actually occurs, it should be easy to loca    995  * actually occurs, it should be easy to locate the culprit work function.
1103  *                                               996  *
1104  * CONTEXT:                                      997  * CONTEXT:
1105  * raw_spin_lock_irq(pool->lock).                998  * raw_spin_lock_irq(pool->lock).
1106  *                                               999  *
1107  * Return:                                       1000  * Return:
1108  * Pointer to worker which is executing @work    1001  * Pointer to worker which is executing @work if found, %NULL
1109  * otherwise.                                    1002  * otherwise.
1110  */                                              1003  */
1111 static struct worker *find_worker_executing_w    1004 static struct worker *find_worker_executing_work(struct worker_pool *pool,
1112                                                  1005                                                  struct work_struct *work)
1113 {                                                1006 {
1114         struct worker *worker;                   1007         struct worker *worker;
1115                                                  1008 
1116         hash_for_each_possible(pool->busy_has    1009         hash_for_each_possible(pool->busy_hash, worker, hentry,
1117                                (unsigned long    1010                                (unsigned long)work)
1118                 if (worker->current_work == w    1011                 if (worker->current_work == work &&
1119                     worker->current_func == w    1012                     worker->current_func == work->func)
1120                         return worker;           1013                         return worker;
1121                                                  1014 
1122         return NULL;                             1015         return NULL;
1123 }                                                1016 }
1124                                                  1017 
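/*
 * Illustrative sketch (not from workqueue.c): the busy_hash lookup above
 * uses the generic helpers from <linux/hashtable.h>.  Matching on both
 * the key and a second field mirrors the address + work-function check
 * that guards against recycled work items.  All demo_* names are
 * hypothetical.
 */
#include <linux/hashtable.h>

struct demo_entry {
        struct hlist_node       hnode;
        void                    *key;
        void                    (*fn)(void *);
};

static DEFINE_HASHTABLE(demo_hash, 6);          /* 2^6 buckets */

static struct demo_entry *demo_find(void *key, void (*fn)(void *))
{
        struct demo_entry *e;

        hash_for_each_possible(demo_hash, e, hnode, (unsigned long)key)
                if (e->key == key && e->fn == fn)
                        return e;
        return NULL;
}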
1125 /**                                              1018 /**
1126  * move_linked_works - move linked works to a    1019  * move_linked_works - move linked works to a list
1127  * @work: start of series of works to be sche    1020  * @work: start of series of works to be scheduled
1128  * @head: target list to append @work to         1021  * @head: target list to append @work to
1129  * @nextp: out parameter for nested worklist     1022  * @nextp: out parameter for nested worklist walking
1130  *                                               1023  *
1131  * Schedule linked works starting from @work     1024  * Schedule linked works starting from @work to @head. Work series to be
1132  * scheduled starts at @work and includes any    1025  * scheduled starts at @work and includes any consecutive work with
1133  * WORK_STRUCT_LINKED set in its predecessor.    1026  * WORK_STRUCT_LINKED set in its predecessor. See assign_work() for details on
1134  * @nextp.                                       1027  * @nextp.
1135  *                                               1028  *
1136  * CONTEXT:                                      1029  * CONTEXT:
1137  * raw_spin_lock_irq(pool->lock).                1030  * raw_spin_lock_irq(pool->lock).
1138  */                                              1031  */
1139 static void move_linked_works(struct work_str    1032 static void move_linked_works(struct work_struct *work, struct list_head *head,
1140                               struct work_str    1033                               struct work_struct **nextp)
1141 {                                                1034 {
1142         struct work_struct *n;                   1035         struct work_struct *n;
1143                                                  1036 
1144         /*                                       1037         /*
1145          * Linked worklist will always end be    1038          * Linked worklist will always end before the end of the list,
1146          * use NULL for list head.               1039          * use NULL for list head.
1147          */                                      1040          */
1148         list_for_each_entry_safe_from(work, n    1041         list_for_each_entry_safe_from(work, n, NULL, entry) {
1149                 list_move_tail(&work->entry,     1042                 list_move_tail(&work->entry, head);
1150                 if (!(*work_data_bits(work) &    1043                 if (!(*work_data_bits(work) & WORK_STRUCT_LINKED))
1151                         break;                   1044                         break;
1152         }                                        1045         }
1153                                                  1046 
1154         /*                                       1047         /*
1155          * If we're already inside safe list     1048          * If we're already inside safe list traversal and have moved
1156          * multiple works to the scheduled qu    1049          * multiple works to the scheduled queue, the next position
1157          * needs to be updated.                  1050          * needs to be updated.
1158          */                                      1051          */
1159         if (nextp)                               1052         if (nextp)
1160                 *nextp = n;                      1053                 *nextp = n;
1161 }                                                1054 }
1162                                                  1055 
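/*
 * Illustrative sketch (not from workqueue.c): the list primitive behind
 * move_linked_works().  list_for_each_entry_safe_from() starts at the
 * given cursor rather than the list head and tolerates moving the
 * current entry to another list; here the walk stops after the first
 * entry whose "linked to next" marker is clear.  (workqueue.c can pass
 * NULL as the head because a LINKED chain always ends before the real
 * head; this sketch uses the source head instead.)  demo_* names are
 * hypothetical.
 */
#include <linux/list.h>

struct demo_work {
        struct list_head        node;
        bool                    linked_to_next;
};

static void demo_move_chain(struct demo_work *first, struct list_head *src,
                            struct list_head *dst)
{
        struct demo_work *pos = first, *n;

        list_for_each_entry_safe_from(pos, n, src, node) {
                list_move_tail(&pos->node, dst);
                if (!pos->linked_to_next)
                        break;          /* end of the linked run */
        }
}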
1163 /**                                              1056 /**
1164  * assign_work - assign a work item and its l    1057  * assign_work - assign a work item and its linked work items to a worker
1165  * @work: work to assign                         1058  * @work: work to assign
1166  * @worker: worker to assign to                  1059  * @worker: worker to assign to
1167  * @nextp: out parameter for nested worklist     1060  * @nextp: out parameter for nested worklist walking
1168  *                                               1061  *
1169  * Assign @work and its linked work items to     1062  * Assign @work and its linked work items to @worker. If @work is already being
1170  * executed by another worker in the same poo    1063  * executed by another worker in the same pool, it'll be punted there.
1171  *                                               1064  *
1172  * If @nextp is not NULL, it's updated to poi    1065  * If @nextp is not NULL, it's updated to point to the next work of the last
1173  * scheduled work. This allows assign_work()     1066  * scheduled work. This allows assign_work() to be nested inside
1174  * list_for_each_entry_safe().                   1067  * list_for_each_entry_safe().
1175  *                                               1068  *
1176  * Returns %true if @work was successfully as    1069  * Returns %true if @work was successfully assigned to @worker. %false if @work
1177  * was punted to another worker already execu    1070  * was punted to another worker already executing it.
1178  */                                              1071  */
1179 static bool assign_work(struct work_struct *w    1072 static bool assign_work(struct work_struct *work, struct worker *worker,
1180                         struct work_struct **    1073                         struct work_struct **nextp)
1181 {                                                1074 {
1182         struct worker_pool *pool = worker->po    1075         struct worker_pool *pool = worker->pool;
1183         struct worker *collision;                1076         struct worker *collision;
1184                                                  1077 
1185         lockdep_assert_held(&pool->lock);        1078         lockdep_assert_held(&pool->lock);
1186                                                  1079 
1187         /*                                       1080         /*
1188          * A single work shouldn't be execute    1081          * A single work shouldn't be executed concurrently by multiple workers.
1189          * __queue_work() ensures that @work     1082          * __queue_work() ensures that @work doesn't jump to a different pool
1190          * while still running in the previou    1083          * while still running in the previous pool. Here, we should ensure that
1191          * @work is not executed concurrently    1084          * @work is not executed concurrently by multiple workers from the same
1192          * pool. Check whether anyone is alre    1085          * pool. Check whether anyone is already processing the work. If so,
1193          * defer the work to the currently ex    1086          * defer the work to the currently executing one.
1194          */                                      1087          */
1195         collision = find_worker_executing_wor    1088         collision = find_worker_executing_work(pool, work);
1196         if (unlikely(collision)) {               1089         if (unlikely(collision)) {
1197                 move_linked_works(work, &coll    1090                 move_linked_works(work, &collision->scheduled, nextp);
1198                 return false;                    1091                 return false;
1199         }                                        1092         }
1200                                                  1093 
1201         move_linked_works(work, &worker->sche    1094         move_linked_works(work, &worker->scheduled, nextp);
1202         return true;                             1095         return true;
1203 }                                                1096 }
1204                                                  1097 
1205 static struct irq_work *bh_pool_irq_work(stru << 
1206 {                                             << 
1207         int high = pool->attrs->nice == HIGHP << 
1208                                               << 
1209         return &per_cpu(bh_pool_irq_works, po << 
1210 }                                             << 
1211                                               << 
1212 static void kick_bh_pool(struct worker_pool * << 
1213 {                                             << 
1214 #ifdef CONFIG_SMP                             << 
1215         /* see drain_dead_softirq_workfn() fo << 
1216         if (unlikely(pool->cpu != smp_process << 
1217                      !(pool->flags & POOL_BH_ << 
1218                 irq_work_queue_on(bh_pool_irq << 
1219                 return;                       << 
1220         }                                     << 
1221 #endif                                        << 
1222         if (pool->attrs->nice == HIGHPRI_NICE << 
1223                 raise_softirq_irqoff(HI_SOFTI << 
1224         else                                  << 
1225                 raise_softirq_irqoff(TASKLET_ << 
1226 }                                             << 
1227                                               << 
1228 /**                                              1098 /**
1229  * kick_pool - wake up an idle worker if nece    1099  * kick_pool - wake up an idle worker if necessary
1230  * @pool: pool to kick                           1100  * @pool: pool to kick
1231  *                                               1101  *
1232  * @pool may have pending work items. Wake up    1102  * @pool may have pending work items. Wake up worker if necessary. Returns
1233  * whether a worker was woken up.                1103  * whether a worker was woken up.
1234  */                                              1104  */
1235 static bool kick_pool(struct worker_pool *poo    1105 static bool kick_pool(struct worker_pool *pool)
1236 {                                                1106 {
1237         struct worker *worker = first_idle_wo    1107         struct worker *worker = first_idle_worker(pool);
1238         struct task_struct *p;                   1108         struct task_struct *p;
1239                                                  1109 
1240         lockdep_assert_held(&pool->lock);        1110         lockdep_assert_held(&pool->lock);
1241                                                  1111 
1242         if (!need_more_worker(pool) || !worke    1112         if (!need_more_worker(pool) || !worker)
1243                 return false;                    1113                 return false;
1244                                                  1114 
1245         if (pool->flags & POOL_BH) {          << 
1246                 kick_bh_pool(pool);           << 
1247                 return true;                  << 
1248         }                                     << 
1249                                               << 
1250         p = worker->task;                        1115         p = worker->task;
1251                                                  1116 
1252 #ifdef CONFIG_SMP                                1117 #ifdef CONFIG_SMP
1253         /*                                       1118         /*
1254          * Idle @worker is about to execute @    1119          * Idle @worker is about to execute @work and waking up provides an
1255          * opportunity to migrate @worker at     1120          * opportunity to migrate @worker at a lower cost by setting the task's
1256          * wake_cpu field. Let's see if we wa    1121          * wake_cpu field. Let's see if we want to move @worker to improve
1257          * execution locality.                   1122          * execution locality.
1258          *                                       1123          *
1259          * We're waking the worker that went     1124          * We're waking the worker that went idle the latest and there's some
1260          * chance that @worker is marked idle    1125          * chance that @worker is marked idle but hasn't gone off CPU yet. If
1261          * so, setting the wake_cpu won't do     1126          * so, setting the wake_cpu won't do anything. As this is a best-effort
1262          * optimization and the race window i    1127          * optimization and the race window is narrow, let's leave as-is for
1263          * now. If this becomes pronounced, w    1128          * now. If this becomes pronounced, we can skip over workers which are
1264          * still on cpu when picking an idle     1129          * still on cpu when picking an idle worker.
1265          *                                       1130          *
1266          * If @pool has non-strict affinity,     1131          * If @pool has non-strict affinity, @worker might have ended up outside
1267          * its affinity scope. Repatriate.       1132          * its affinity scope. Repatriate.
1268          */                                      1133          */
1269         if (!pool->attrs->affn_strict &&         1134         if (!pool->attrs->affn_strict &&
1270             !cpumask_test_cpu(p->wake_cpu, po    1135             !cpumask_test_cpu(p->wake_cpu, pool->attrs->__pod_cpumask)) {
1271                 struct work_struct *work = li    1136                 struct work_struct *work = list_first_entry(&pool->worklist,
1272                                                  1137                                                 struct work_struct, entry);
1273                 int wake_cpu = cpumask_any_an    1138                 int wake_cpu = cpumask_any_and_distribute(pool->attrs->__pod_cpumask,
1274                                                  1139                                                           cpu_online_mask);
1275                 if (wake_cpu < nr_cpu_ids) {     1140                 if (wake_cpu < nr_cpu_ids) {
1276                         p->wake_cpu = wake_cp    1141                         p->wake_cpu = wake_cpu;
1277                         get_work_pwq(work)->s    1142                         get_work_pwq(work)->stats[PWQ_STAT_REPATRIATED]++;
1278                 }                                1143                 }
1279         }                                        1144         }
1280 #endif                                           1145 #endif
1281         wake_up_process(p);                      1146         wake_up_process(p);
1282         return true;                             1147         return true;
1283 }                                                1148 }
1284                                                  1149 
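The CONFIG_SMP block in kick_pool() is a locality hint: when the pool's affinity is non-strict and the chosen idle worker's wake_cpu has drifted outside the pod cpumask, pick an online CPU inside the pod before waking it. The sketch below restates just that decision over plain 64-bit masks (so it assumes at most 64 CPUs); the kernel uses cpumask_t and cpumask_any_and_distribute(), which also distributes the choice rather than always taking the first bit.

#include <stdint.h>

/* returns the wake CPU to use; @cur is kept whenever repatriation is not wanted */
static int toy_pick_wake_cpu(uint64_t pod_mask, uint64_t online_mask,
                             int cur, int affn_strict)
{
        uint64_t candidates = pod_mask & online_mask;

        if (affn_strict || ((pod_mask >> cur) & 1))
                return cur;                     /* strict, or already in the pod */
        if (!candidates)
                return cur;                     /* no online CPU in the pod */
        return __builtin_ctzll(candidates);     /* first online pod CPU (gcc/clang) */
}
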
1285 #ifdef CONFIG_WQ_CPU_INTENSIVE_REPORT            1150 #ifdef CONFIG_WQ_CPU_INTENSIVE_REPORT
1286                                                  1151 
1287 /*                                               1152 /*
1288  * Concurrency-managed per-cpu work items tha    1153  * Concurrency-managed per-cpu work items that hog CPU for longer than
1289  * wq_cpu_intensive_thresh_us trigger the aut    1154  * wq_cpu_intensive_thresh_us trigger the automatic CPU_INTENSIVE mechanism,
1290  * which prevents them from stalling other co    1155  * which prevents them from stalling other concurrency-managed work items. If a
1291  * work function keeps triggering this mechan    1156  * work function keeps triggering this mechanism, it's likely that the work item
1292  * should be using an unbound workqueue inste    1157  * should be using an unbound workqueue instead.
1293  *                                               1158  *
1294  * wq_cpu_intensive_report() tracks work func    1159  * wq_cpu_intensive_report() tracks work functions which trigger such conditions
1296  * and reports them so that they can be exami    1161  * and reports them so that they can be examined and converted to use unbound
1296  * workqueues as appropriate. To avoid floodi    1161  * workqueues as appropriate. To avoid flooding the console, each violating work
1297  * function is tracked and reported with expo    1162  * function is tracked and reported with exponential backoff.
1298  */                                              1163  */
1299 #define WCI_MAX_ENTS 128                         1164 #define WCI_MAX_ENTS 128
1300                                                  1165 
1301 struct wci_ent {                                 1166 struct wci_ent {
1302         work_func_t             func;            1167         work_func_t             func;
1303         atomic64_t              cnt;             1168         atomic64_t              cnt;
1304         struct hlist_node       hash_node;       1169         struct hlist_node       hash_node;
1305 };                                               1170 };
1306                                                  1171 
1307 static struct wci_ent wci_ents[WCI_MAX_ENTS];    1172 static struct wci_ent wci_ents[WCI_MAX_ENTS];
1308 static int wci_nr_ents;                          1173 static int wci_nr_ents;
1309 static DEFINE_RAW_SPINLOCK(wci_lock);            1174 static DEFINE_RAW_SPINLOCK(wci_lock);
1310 static DEFINE_HASHTABLE(wci_hash, ilog2(WCI_M    1175 static DEFINE_HASHTABLE(wci_hash, ilog2(WCI_MAX_ENTS));
1311                                                  1176 
1312 static struct wci_ent *wci_find_ent(work_func    1177 static struct wci_ent *wci_find_ent(work_func_t func)
1313 {                                                1178 {
1314         struct wci_ent *ent;                     1179         struct wci_ent *ent;
1315                                                  1180 
1316         hash_for_each_possible_rcu(wci_hash,     1181         hash_for_each_possible_rcu(wci_hash, ent, hash_node,
1317                                    (unsigned     1182                                    (unsigned long)func) {
1318                 if (ent->func == func)           1183                 if (ent->func == func)
1319                         return ent;              1184                         return ent;
1320         }                                        1185         }
1321         return NULL;                             1186         return NULL;
1322 }                                                1187 }
1323                                                  1188 
1324 static void wq_cpu_intensive_report(work_func    1189 static void wq_cpu_intensive_report(work_func_t func)
1325 {                                                1190 {
1326         struct wci_ent *ent;                     1191         struct wci_ent *ent;
1327                                                  1192 
1328 restart:                                         1193 restart:
1329         ent = wci_find_ent(func);                1194         ent = wci_find_ent(func);
1330         if (ent) {                               1195         if (ent) {
1331                 u64 cnt;                         1196                 u64 cnt;
1332                                                  1197 
1333                 /*                               1198                 /*
1334                  * Start reporting from the w !! 1199                  * Start reporting from the fourth time and back off
1335                  * exponentially.                1200                  * exponentially.
1336                  */                              1201                  */
1337                 cnt = atomic64_inc_return_rel    1202                 cnt = atomic64_inc_return_relaxed(&ent->cnt);
1338                 if (wq_cpu_intensive_warning_ !! 1203                 if (cnt >= 4 && is_power_of_2(cnt))
1339                     cnt >= wq_cpu_intensive_w << 
1340                     is_power_of_2(cnt + 1 - w << 
1341                         printk_deferred(KERN_    1204                         printk_deferred(KERN_WARNING "workqueue: %ps hogged CPU for >%luus %llu times, consider switching to WQ_UNBOUND\n",
1342                                         ent->    1205                                         ent->func, wq_cpu_intensive_thresh_us,
1343                                         atomi    1206                                         atomic64_read(&ent->cnt));
1344                 return;                          1207                 return;
1345         }                                        1208         }
1346                                                  1209 
1347         /*                                       1210         /*
1348          * @func is a new violation. Allocate    1211          * @func is a new violation. Allocate a new entry for it. If wci_ents[]
1349          * is exhausted, something went reall    1212          * is exhausted, something went really wrong and we probably made enough
1350          * noise already.                        1213          * noise already.
1351          */                                      1214          */
1352         if (wci_nr_ents >= WCI_MAX_ENTS)         1215         if (wci_nr_ents >= WCI_MAX_ENTS)
1353                 return;                          1216                 return;
1354                                                  1217 
1355         raw_spin_lock(&wci_lock);                1218         raw_spin_lock(&wci_lock);
1356                                                  1219 
1357         if (wci_nr_ents >= WCI_MAX_ENTS) {       1220         if (wci_nr_ents >= WCI_MAX_ENTS) {
1358                 raw_spin_unlock(&wci_lock);      1221                 raw_spin_unlock(&wci_lock);
1359                 return;                          1222                 return;
1360         }                                        1223         }
1361                                                  1224 
1362         if (wci_find_ent(func)) {                1225         if (wci_find_ent(func)) {
1363                 raw_spin_unlock(&wci_lock);      1226                 raw_spin_unlock(&wci_lock);
1364                 goto restart;                    1227                 goto restart;
1365         }                                        1228         }
1366                                                  1229 
1367         ent = &wci_ents[wci_nr_ents++];          1230         ent = &wci_ents[wci_nr_ents++];
1368         ent->func = func;                        1231         ent->func = func;
1369         atomic64_set(&ent->cnt, 0);           !! 1232         atomic64_set(&ent->cnt, 1);
1370         hash_add_rcu(wci_hash, &ent->hash_nod    1233         hash_add_rcu(wci_hash, &ent->hash_node, (unsigned long)func);
1371                                                  1234 
1372         raw_spin_unlock(&wci_lock);              1235         raw_spin_unlock(&wci_lock);
1373                                               << 
1374         goto restart;                         << 
1375 }                                                1236 }
1376                                                  1237 
1377 #else   /* CONFIG_WQ_CPU_INTENSIVE_REPORT */     1238 #else   /* CONFIG_WQ_CPU_INTENSIVE_REPORT */
1378 static void wq_cpu_intensive_report(work_func    1239 static void wq_cpu_intensive_report(work_func_t func) {}
1379 #endif  /* CONFIG_WQ_CPU_INTENSIVE_REPORT */     1240 #endif  /* CONFIG_WQ_CPU_INTENSIVE_REPORT */
1380                                                  1241 
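The two sides of the diff differ in when wq_cpu_intensive_report() actually prints. On the right (6.6.58) a new entry starts at 1 and a function is reported on its 4th, 8th, 16th, ... violation; on the left (6.11.5) a new entry starts at 0, the function re-enters through the restart label, and reporting starts at wq_cpu_intensive_warning_thresh with power-of-two spacing measured from that threshold. The left column is truncated here, so the 6.11.5 condition below is reconstructed on the assumption that it matches mainline; treat it as a sketch, not a quote.

#include <stdbool.h>

static bool is_pow2(unsigned long long v)
{
        return v && !(v & (v - 1));
}

/* 6.6.58 side: cnt >= 4 && is_power_of_2(cnt) */
static bool report_v6_6(unsigned long long cnt)
{
        return cnt >= 4 && is_pow2(cnt);
}

/* 6.11.5 side (assumed): thresh && cnt >= thresh && is_power_of_2(cnt + 1 - thresh) */
static bool report_v6_11(unsigned long long cnt, unsigned int thresh)
{
        return thresh && cnt >= thresh && is_pow2(cnt + 1 - thresh);
}
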
1381 /**                                              1242 /**
1382  * wq_worker_running - a worker is running ag    1243  * wq_worker_running - a worker is running again
1383  * @task: task waking up                         1244  * @task: task waking up
1384  *                                               1245  *
1385  * This function is called when a worker retu    1246  * This function is called when a worker returns from schedule()
1386  */                                              1247  */
1387 void wq_worker_running(struct task_struct *ta    1248 void wq_worker_running(struct task_struct *task)
1388 {                                                1249 {
1389         struct worker *worker = kthread_data(    1250         struct worker *worker = kthread_data(task);
1390                                                  1251 
1391         if (!READ_ONCE(worker->sleeping))        1252         if (!READ_ONCE(worker->sleeping))
1392                 return;                          1253                 return;
1393                                                  1254 
1394         /*                                       1255         /*
1395          * If preempted by unbind_workers() b    1256          * If preempted by unbind_workers() between the WORKER_NOT_RUNNING check
1396          * and the nr_running increment below    1257          * and the nr_running increment below, we may ruin the nr_running reset
1397          * and leave with an unexpected pool-    1258          * and leave with an unexpected pool->nr_running == 1 on the newly unbound
1398          * pool. Protect against such race.      1259          * pool. Protect against such race.
1399          */                                      1260          */
1400         preempt_disable();                       1261         preempt_disable();
1401         if (!(worker->flags & WORKER_NOT_RUNN    1262         if (!(worker->flags & WORKER_NOT_RUNNING))
1402                 worker->pool->nr_running++;      1263                 worker->pool->nr_running++;
1403         preempt_enable();                        1264         preempt_enable();
1404                                                  1265 
1405         /*                                       1266         /*
1406          * CPU intensive auto-detection cares    1267          * CPU intensive auto-detection cares about how long a work item hogged
1407          * CPU without sleeping. Reset the st    1268          * CPU without sleeping. Reset the starting timestamp on wakeup.
1408          */                                      1269          */
1409         worker->current_at = worker->task->se    1270         worker->current_at = worker->task->se.sum_exec_runtime;
1410                                                  1271 
1411         WRITE_ONCE(worker->sleeping, 0);         1272         WRITE_ONCE(worker->sleeping, 0);
1412 }                                                1273 }
1413                                                  1274 
1414 /**                                              1275 /**
1415  * wq_worker_sleeping - a worker is going to     1276  * wq_worker_sleeping - a worker is going to sleep
1416  * @task: task going to sleep                    1277  * @task: task going to sleep
1417  *                                               1278  *
1418  * This function is called from schedule() wh    1279  * This function is called from schedule() when a busy worker is
1419  * going to sleep.                               1280  * going to sleep.
1420  */                                              1281  */
1421 void wq_worker_sleeping(struct task_struct *t    1282 void wq_worker_sleeping(struct task_struct *task)
1422 {                                                1283 {
1423         struct worker *worker = kthread_data(    1284         struct worker *worker = kthread_data(task);
1424         struct worker_pool *pool;                1285         struct worker_pool *pool;
1425                                                  1286 
1426         /*                                       1287         /*
1427          * Rescuers, which may not have all t    1288          * Rescuers, which may not have all the fields set up like normal
1428          * workers, also reach here, let's no    1289          * workers, also reach here, let's not access anything before
1429          * checking NOT_RUNNING.                 1290          * checking NOT_RUNNING.
1430          */                                      1291          */
1431         if (worker->flags & WORKER_NOT_RUNNIN    1292         if (worker->flags & WORKER_NOT_RUNNING)
1432                 return;                          1293                 return;
1433                                                  1294 
1434         pool = worker->pool;                     1295         pool = worker->pool;
1435                                                  1296 
1436         /* Return if preempted before wq_work    1297         /* Return if preempted before wq_worker_running() was reached */
1437         if (READ_ONCE(worker->sleeping))         1298         if (READ_ONCE(worker->sleeping))
1438                 return;                          1299                 return;
1439                                                  1300 
1440         WRITE_ONCE(worker->sleeping, 1);         1301         WRITE_ONCE(worker->sleeping, 1);
1441         raw_spin_lock_irq(&pool->lock);          1302         raw_spin_lock_irq(&pool->lock);
1442                                                  1303 
1443         /*                                       1304         /*
1444          * Recheck in case unbind_workers() p    1305          * Recheck in case unbind_workers() preempted us. We don't
1445          * want to decrement nr_running after    1306          * want to decrement nr_running after the worker is unbound
1446          * and nr_running has been reset.        1307          * and nr_running has been reset.
1447          */                                      1308          */
1448         if (worker->flags & WORKER_NOT_RUNNIN    1309         if (worker->flags & WORKER_NOT_RUNNING) {
1449                 raw_spin_unlock_irq(&pool->lo    1310                 raw_spin_unlock_irq(&pool->lock);
1450                 return;                          1311                 return;
1451         }                                        1312         }
1452                                                  1313 
1453         pool->nr_running--;                      1314         pool->nr_running--;
1454         if (kick_pool(pool))                     1315         if (kick_pool(pool))
1455                 worker->current_pwq->stats[PW    1316                 worker->current_pwq->stats[PWQ_STAT_CM_WAKEUP]++;
1456                                                  1317 
1457         raw_spin_unlock_irq(&pool->lock);        1318         raw_spin_unlock_irq(&pool->lock);
1458 }                                                1319 }
1459                                                  1320 
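Together, wq_worker_sleeping() and wq_worker_running() keep pool->nr_running equal to the number of concurrency-managed workers that are actually runnable, with worker->sleeping acting as a latch so each transition is counted exactly once. A toy single-threaded model of that handshake, with the locking, preemption handling and the kick_pool() call omitted:

struct toy_pool2   { int nr_running; };
struct toy_kworker { int sleeping; int not_running; struct toy_pool2 *pool; };

static void toy_worker_sleeping(struct toy_kworker *w)
{
        if (w->not_running || w->sleeping)
                return;                 /* rescuer-style worker, or already accounted */
        w->sleeping = 1;
        w->pool->nr_running--;          /* one fewer runnable CM worker */
}

static void toy_worker_running(struct toy_kworker *w)
{
        if (!w->sleeping)
                return;
        if (!w->not_running)
                w->pool->nr_running++;  /* back on the runnable count */
        w->sleeping = 0;
}
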
1460 /**                                              1321 /**
1461  * wq_worker_tick - a scheduler tick occurred    1322  * wq_worker_tick - a scheduler tick occurred while a kworker is running
1462  * @task: task currently running                 1323  * @task: task currently running
1463  *                                               1324  *
1464  * Called from sched_tick(). We're in the IRQ !! 1325  * Called from scheduler_tick(). We're in the IRQ context and the current
1465  * worker's fields which follow the 'K' locki    1326  * worker's fields which follow the 'K' locking rule can be accessed safely.
1466  */                                              1327  */
1467 void wq_worker_tick(struct task_struct *task)    1328 void wq_worker_tick(struct task_struct *task)
1468 {                                                1329 {
1469         struct worker *worker = kthread_data(    1330         struct worker *worker = kthread_data(task);
1470         struct pool_workqueue *pwq = worker->    1331         struct pool_workqueue *pwq = worker->current_pwq;
1471         struct worker_pool *pool = worker->po    1332         struct worker_pool *pool = worker->pool;
1472                                                  1333 
1473         if (!pwq)                                1334         if (!pwq)
1474                 return;                          1335                 return;
1475                                                  1336 
1476         pwq->stats[PWQ_STAT_CPU_TIME] += TICK    1337         pwq->stats[PWQ_STAT_CPU_TIME] += TICK_USEC;
1477                                                  1338 
1478         if (!wq_cpu_intensive_thresh_us)         1339         if (!wq_cpu_intensive_thresh_us)
1479                 return;                          1340                 return;
1480                                                  1341 
1481         /*                                       1342         /*
1482          * If the current worker is concurren    1343          * If the current worker is concurrency managed and hogged the CPU for
1483          * longer than wq_cpu_intensive_thres    1344          * longer than wq_cpu_intensive_thresh_us, it's automatically marked
1484          * CPU_INTENSIVE to avoid stalling ot    1345          * CPU_INTENSIVE to avoid stalling other concurrency-managed work items.
1485          *                                       1346          *
1486          * @worker->sleeping being set means t    1347          * @worker->sleeping being set means that @worker is in the process of
1487          * switching out voluntarily and won'    1348          * switching out voluntarily and won't be contributing to
1488          * @pool->nr_running until it wakes u    1349          * @pool->nr_running until it wakes up. As wq_worker_sleeping() also
1489          * decrements ->nr_running, setting C    1350          * decrements ->nr_running, setting CPU_INTENSIVE here can lead to
1490          * double decrements. The task is rel    1351          * double decrements. The task is releasing the CPU anyway. Let's skip.
1491          * We probably want to make this pret    1352          * We probably want to make this prettier in the future.
1492          */                                      1353          */
1493         if ((worker->flags & WORKER_NOT_RUNNI    1354         if ((worker->flags & WORKER_NOT_RUNNING) || READ_ONCE(worker->sleeping) ||
1494             worker->task->se.sum_exec_runtime    1355             worker->task->se.sum_exec_runtime - worker->current_at <
1495             wq_cpu_intensive_thresh_us * NSEC    1356             wq_cpu_intensive_thresh_us * NSEC_PER_USEC)
1496                 return;                          1357                 return;
1497                                                  1358 
1498         raw_spin_lock(&pool->lock);              1359         raw_spin_lock(&pool->lock);
1499                                                  1360 
1500         worker_set_flags(worker, WORKER_CPU_I    1361         worker_set_flags(worker, WORKER_CPU_INTENSIVE);
1501         wq_cpu_intensive_report(worker->curre    1362         wq_cpu_intensive_report(worker->current_func);
1502         pwq->stats[PWQ_STAT_CPU_INTENSIVE]++;    1363         pwq->stats[PWQ_STAT_CPU_INTENSIVE]++;
1503                                                  1364 
1504         if (kick_pool(pool))                     1365         if (kick_pool(pool))
1505                 pwq->stats[PWQ_STAT_CM_WAKEUP    1366                 pwq->stats[PWQ_STAT_CM_WAKEUP]++;
1506                                                  1367 
1507         raw_spin_unlock(&pool->lock);            1368         raw_spin_unlock(&pool->lock);
1508 }                                                1369 }
1509                                                  1370 
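Stripped of the bookkeeping, the trigger in wq_worker_tick() is a single comparison: has the current work item accumulated more than wq_cpu_intensive_thresh_us of CPU time since worker->current_at was last reset, i.e. without sleeping? A standalone restatement, with runtimes in nanoseconds and the threshold in microseconds as in the kernel:

#include <stdbool.h>
#include <stdint.h>

#define TOY_NSEC_PER_USEC 1000ULL

static bool toy_hogged_cpu(uint64_t sum_exec_runtime_ns, uint64_t current_at_ns,
                           uint64_t thresh_us)
{
        return thresh_us &&             /* 0 disables the mechanism */
               sum_exec_runtime_ns - current_at_ns >= thresh_us * TOY_NSEC_PER_USEC;
}
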
1510 /**                                              1371 /**
1511  * wq_worker_last_func - retrieve worker's la    1372  * wq_worker_last_func - retrieve worker's last work function
1512  * @task: Task to retrieve last work function    1373  * @task: Task to retrieve last work function of.
1513  *                                               1374  *
1514  * Determine the last function a worker execu    1375  * Determine the last function a worker executed. This is called from
1515  * the scheduler to get a worker's last known    1376  * the scheduler to get a worker's last known identity.
1516  *                                               1377  *
1517  * CONTEXT:                                      1378  * CONTEXT:
1518  * raw_spin_lock_irq(rq->lock)                   1379  * raw_spin_lock_irq(rq->lock)
1519  *                                               1380  *
1520  * This function is called during schedule()     1381  * This function is called during schedule() when a kworker is going
1521  * to sleep. It's used by psi to identify agg    1382  * to sleep. It's used by psi to identify aggregation workers during
1522  * dequeuing, to allow periodic aggregation t    1383  * dequeuing, to allow periodic aggregation to shut-off when that
1523  * worker is the last task in the system or c    1384  * worker is the last task in the system or cgroup to go to sleep.
1524  *                                               1385  *
1525  * As this function doesn't involve any workq    1386  * As this function doesn't involve any workqueue-related locking, it
1526  * only returns stable values when called fro    1387  * only returns stable values when called from inside the scheduler's
1527  * queuing and dequeuing paths, when @task, w    1388  * queuing and dequeuing paths, when @task, which must be a kworker,
1528  * is guaranteed to not be processing any wor    1389  * is guaranteed to not be processing any works.
1529  *                                               1390  *
1530  * Return:                                       1391  * Return:
1531  * The last work function %current executed a    1392  * The last work function %current executed as a worker, NULL if it
1532  * hasn't executed any work yet.                 1393  * hasn't executed any work yet.
1533  */                                              1394  */
1534 work_func_t wq_worker_last_func(struct task_s    1395 work_func_t wq_worker_last_func(struct task_struct *task)
1535 {                                                1396 {
1536         struct worker *worker = kthread_data(    1397         struct worker *worker = kthread_data(task);
1537                                                  1398 
1538         return worker->last_func;                1399         return worker->last_func;
1539 }                                                1400 }
1540                                                  1401 
1541 /**                                              1402 /**
1542  * wq_node_nr_active - Determine wq_node_nr_a << 
1543  * @wq: workqueue of interest                 << 
1544  * @node: NUMA node, can be %NUMA_NO_NODE     << 
1545  *                                            << 
1546  * Determine wq_node_nr_active to use for @wq << 
1547  *                                            << 
1548  * - %NULL for per-cpu workqueues as they don << 
1549  *                                            << 
1550  * - node_nr_active[nr_node_ids] if @node is  << 
1551  *                                            << 
1552  * - Otherwise, node_nr_active[@node].        << 
1553  */                                           << 
1554 static struct wq_node_nr_active *wq_node_nr_a << 
1555                                               << 
1556 {                                             << 
1557         if (!(wq->flags & WQ_UNBOUND))        << 
1558                 return NULL;                  << 
1559                                               << 
1560         if (node == NUMA_NO_NODE)             << 
1561                 node = nr_node_ids;           << 
1562                                               << 
1563         return wq->node_nr_active[node];      << 
1564 }                                             << 
1565                                               << 
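The mapping performed by wq_node_nr_active() is small enough to restate directly: per-cpu workqueues have no node counters at all, NUMA_NO_NODE uses the extra slot at index nr_node_ids, and every real node indexes its own slot. A sketch of just that index calculation, with stand-in constants:

#define TOY_NR_NODE_IDS  4
#define TOY_NUMA_NO_NODE (-1)

/* returns the node_nr_active[] index, or -1 for per-cpu workqueues */
static int toy_node_nr_active_idx(int wq_is_unbound, int node)
{
        if (!wq_is_unbound)
                return -1;                      /* no per-node counter */
        if (node == TOY_NUMA_NO_NODE)
                return TOY_NR_NODE_IDS;         /* the catch-all slot */
        return node;
}
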
1566 /**                                           << 
1567  * wq_update_node_max_active - Update per-nod << 
1568  * @wq: workqueue to update                   << 
1569  * @off_cpu: CPU that's going down, -1 if a C << 
1570  *                                            << 
1571  * Update @wq->node_nr_active[]->max. @wq mus << 
1572  * distributed among nodes according to the p << 
1573  * cpus. The result is always between @wq->mi << 
1574  */                                           << 
1575 static void wq_update_node_max_active(struct  << 
1576 {                                             << 
1577         struct cpumask *effective = unbound_e << 
1578         int min_active = READ_ONCE(wq->min_ac << 
1579         int max_active = READ_ONCE(wq->max_ac << 
1580         int total_cpus, node;                 << 
1581                                               << 
1582         lockdep_assert_held(&wq->mutex);      << 
1583                                               << 
1584         if (!wq_topo_initialized)             << 
1585                 return;                       << 
1586                                               << 
1587         if (off_cpu >= 0 && !cpumask_test_cpu << 
1588                 off_cpu = -1;                 << 
1589                                               << 
1590         total_cpus = cpumask_weight_and(effec << 
1591         if (off_cpu >= 0)                     << 
1592                 total_cpus--;                 << 
1593                                               << 
1594         /* If all CPUs of the wq get offline, << 
1595         if (unlikely(!total_cpus)) {          << 
1596                 for_each_node(node)           << 
1597                         wq_node_nr_active(wq, << 
1598                                               << 
1599                 wq_node_nr_active(wq, NUMA_NO << 
1600                 return;                       << 
1601         }                                     << 
1602                                               << 
1603         for_each_node(node) {                 << 
1604                 int node_cpus;                << 
1605                                               << 
1606                 node_cpus = cpumask_weight_an << 
1607                 if (off_cpu >= 0 && cpu_to_no << 
1608                         node_cpus--;          << 
1609                                               << 
1610                 wq_node_nr_active(wq, node)-> << 
1611                         clamp(DIV_ROUND_UP(ma << 
1612                               min_active, max << 
1613         }                                     << 
1614                                               << 
1615         wq_node_nr_active(wq, NUMA_NO_NODE)-> << 
1616 }                                             << 
1617                                               << 
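Per its comment, wq_update_node_max_active() splits max_active across nodes in proportion to each node's share of the workqueue's effective online CPUs, clamping every node to [min_active, max_active]. The arithmetic for one node, assuming total_cpus > 0 (the all-offline fallback and the NUMA_NO_NODE slot are handled separately in the kernel):

static int toy_clamp(int v, int lo, int hi)
{
        return v < lo ? lo : (v > hi ? hi : v);
}

/* DIV_ROUND_UP(max_active * node_cpus, total_cpus), clamped */
static int toy_node_max_active(int max_active, int min_active,
                               int node_cpus, int total_cpus)
{
        int share = (max_active * node_cpus + total_cpus - 1) / total_cpus;

        return toy_clamp(share, min_active, max_active);
}
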
1618 /**                                           << 
1619  * get_pwq - get an extra reference on the sp    1403  * get_pwq - get an extra reference on the specified pool_workqueue
1620  * @pwq: pool_workqueue to get                   1404  * @pwq: pool_workqueue to get
1621  *                                               1405  *
1622  * Obtain an extra reference on @pwq.  The ca    1406  * Obtain an extra reference on @pwq.  The caller should guarantee that
1623  * @pwq has positive refcnt and be holding th    1407  * @pwq has positive refcnt and be holding the matching pool->lock.
1624  */                                              1408  */
1625 static void get_pwq(struct pool_workqueue *pw    1409 static void get_pwq(struct pool_workqueue *pwq)
1626 {                                                1410 {
1627         lockdep_assert_held(&pwq->pool->lock)    1411         lockdep_assert_held(&pwq->pool->lock);
1628         WARN_ON_ONCE(pwq->refcnt <= 0);          1412         WARN_ON_ONCE(pwq->refcnt <= 0);
1629         pwq->refcnt++;                           1413         pwq->refcnt++;
1630 }                                                1414 }
1631                                                  1415 
1632 /**                                              1416 /**
1633  * put_pwq - put a pool_workqueue reference      1417  * put_pwq - put a pool_workqueue reference
1634  * @pwq: pool_workqueue to put                   1418  * @pwq: pool_workqueue to put
1635  *                                               1419  *
1636  * Drop a reference of @pwq.  If its refcnt r    1420  * Drop a reference of @pwq.  If its refcnt reaches zero, schedule its
1637  * destruction.  The caller should be holding    1421  * destruction.  The caller should be holding the matching pool->lock.
1638  */                                              1422  */
1639 static void put_pwq(struct pool_workqueue *pw    1423 static void put_pwq(struct pool_workqueue *pwq)
1640 {                                                1424 {
1641         lockdep_assert_held(&pwq->pool->lock)    1425         lockdep_assert_held(&pwq->pool->lock);
1642         if (likely(--pwq->refcnt))               1426         if (likely(--pwq->refcnt))
1643                 return;                          1427                 return;
1644         /*                                       1428         /*
1645          * @pwq can't be released under pool-    1429          * @pwq can't be released under pool->lock, bounce to a dedicated
1646          * kthread_worker to avoid A-A deadlo    1430          * kthread_worker to avoid A-A deadlocks.
1647          */                                      1431          */
1648         kthread_queue_work(pwq_release_worker    1432         kthread_queue_work(pwq_release_worker, &pwq->release_work);
1649 }                                                1433 }
1650                                                  1434 
1651 /**                                              1435 /**
1652  * put_pwq_unlocked - put_pwq() with surround    1436  * put_pwq_unlocked - put_pwq() with surrounding pool lock/unlock
1653  * @pwq: pool_workqueue to put (can be %NULL)    1437  * @pwq: pool_workqueue to put (can be %NULL)
1654  *                                               1438  *
1655  * put_pwq() with locking.  This function als    1439  * put_pwq() with locking.  This function also allows %NULL @pwq.
1656  */                                              1440  */
1657 static void put_pwq_unlocked(struct pool_work    1441 static void put_pwq_unlocked(struct pool_workqueue *pwq)
1658 {                                                1442 {
1659         if (pwq) {                               1443         if (pwq) {
1660                 /*                               1444                 /*
1661                  * As both pwqs and pools are    1445                  * As both pwqs and pools are RCU protected, the
1662                  * following lock operations     1446                  * following lock operations are safe.
1663                  */                              1447                  */
1664                 raw_spin_lock_irq(&pwq->pool-    1448                 raw_spin_lock_irq(&pwq->pool->lock);
1665                 put_pwq(pwq);                    1449                 put_pwq(pwq);
1666                 raw_spin_unlock_irq(&pwq->poo    1450                 raw_spin_unlock_irq(&pwq->pool->lock);
1667         }                                        1451         }
1668 }                                                1452 }
1669                                                  1453 
1670 static bool pwq_is_empty(struct pool_workqueu !! 1454 static void pwq_activate_inactive_work(struct work_struct *work)
1671 {                                             << 
1672         return !pwq->nr_active && list_empty( << 
1673 }                                             << 
1674                                               << 
1675 static void __pwq_activate_work(struct pool_w << 
1676                                 struct work_s << 
1677 {                                                1455 {
1678         unsigned long *wdb = work_data_bits(w !! 1456         struct pool_workqueue *pwq = get_work_pwq(work);
1679                                                  1457 
1680         WARN_ON_ONCE(!(*wdb & WORK_STRUCT_INA << 
1681         trace_workqueue_activate_work(work);     1458         trace_workqueue_activate_work(work);
1682         if (list_empty(&pwq->pool->worklist))    1459         if (list_empty(&pwq->pool->worklist))
1683                 pwq->pool->watchdog_ts = jiff    1460                 pwq->pool->watchdog_ts = jiffies;
1684         move_linked_works(work, &pwq->pool->w    1461         move_linked_works(work, &pwq->pool->worklist, NULL);
1685         __clear_bit(WORK_STRUCT_INACTIVE_BIT, !! 1462         __clear_bit(WORK_STRUCT_INACTIVE_BIT, work_data_bits(work));
                                                   >> 1463         pwq->nr_active++;
1686 }                                                1464 }
1687                                                  1465 
1688 static bool tryinc_node_nr_active(struct wq_n !! 1466 static void pwq_activate_first_inactive(struct pool_workqueue *pwq)
1689 {                                                1467 {
1690         int max = READ_ONCE(nna->max);        !! 1468         struct work_struct *work = list_first_entry(&pwq->inactive_works,
1691                                               !! 1469                                                     struct work_struct, entry);
1692         while (true) {                        << 
1693                 int old, tmp;                 << 
1694                                               << 
1695                 old = atomic_read(&nna->nr);  << 
1696                 if (old >= max)               << 
1697                         return false;         << 
1698                 tmp = atomic_cmpxchg_relaxed( << 
1699                 if (tmp == old)               << 
1700                         return true;          << 
1701         }                                     << 
1702 }                                             << 
1703                                               << 
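tryinc_node_nr_active() is a bounded lockless increment: take one count only while nr is below max, retrying with compare-and-swap when racing with other CPUs. The same idea expressed with C11 atomics (the kernel operates on an atomic_t with atomic_cmpxchg_relaxed()):

#include <stdatomic.h>
#include <stdbool.h>

static bool toy_tryinc_below(atomic_int *nr, int max)
{
        int old = atomic_load_explicit(nr, memory_order_relaxed);

        while (old < max) {
                /* on failure, old is reloaded with the current value */
                if (atomic_compare_exchange_weak_explicit(nr, &old, old + 1,
                                                          memory_order_relaxed,
                                                          memory_order_relaxed))
                        return true;    /* got a count: nr went old -> old + 1 */
        }
        return false;                   /* already at or above max */
}
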
1704 /**                                           << 
1705  * pwq_tryinc_nr_active - Try to increment nr << 
1706  * @pwq: pool_workqueue of interest           << 
1707  * @fill: max_active may have increased, try  << 
1708  *                                            << 
1709  * Try to increment nr_active for @pwq. Retur << 
1710  * successfully obtained. %false otherwise.   << 
1711  */                                           << 
1712 static bool pwq_tryinc_nr_active(struct pool_ << 
1713 {                                             << 
1714         struct workqueue_struct *wq = pwq->wq << 
1715         struct worker_pool *pool = pwq->pool; << 
1716         struct wq_node_nr_active *nna = wq_no << 
1717         bool obtained = false;                << 
1718                                               << 
1719         lockdep_assert_held(&pool->lock);     << 
1720                                               << 
1721         if (!nna) {                           << 
1722                 /* BH or per-cpu workqueue, p << 
1723                 obtained = pwq->nr_active < R << 
1724                 goto out;                     << 
1725         }                                     << 
1726                                               << 
1727         if (unlikely(pwq->plugged))           << 
1728                 return false;                 << 
1729                                               << 
1730         /*                                    << 
1731          * Unbound workqueue uses per-node sh << 
1732          * already waiting on $nna, pwq_dec_n << 
1733          * concurrency level. Don't jump the  << 
1734          *                                    << 
1735          * We need to ignore the pending test << 
1736          * pwq_dec_nr_active() can only maint << 
1737          * increase it. This is indicated by  << 
1738          */                                   << 
1739         if (!list_empty(&pwq->pending_node) & << 
1740                 goto out;                     << 
1741                                               << 
1742         obtained = tryinc_node_nr_active(nna) << 
1743         if (obtained)                         << 
1744                 goto out;                     << 
1745                                               << 
1746         /*                                    << 
1747          * Lockless acquisition failed. Lock, << 
1748          * and try again. The smp_mb() is pai << 
1749          * of atomic_dec_return() in pwq_dec_ << 
1750          * we see the decremented $nna->nr or << 
1751          * $nna->pending_pwqs.                << 
1752          */                                   << 
1753         raw_spin_lock(&nna->lock);            << 
1754                                               << 
1755         if (list_empty(&pwq->pending_node))   << 
1756                 list_add_tail(&pwq->pending_n << 
1757         else if (likely(!fill))               << 
1758                 goto out_unlock;              << 
1759                                               << 
1760         smp_mb();                             << 
1761                                               << 
1762         obtained = tryinc_node_nr_active(nna) << 
1763                                               << 
1764         /*                                    << 
1765          * If @fill, @pwq might have already  << 
1766          * pending in cold paths doesn't affe << 
1767          */                                   << 
1768         if (obtained && likely(!fill))        << 
1769                 list_del_init(&pwq->pending_n << 
1770                                               << 
1771 out_unlock:                                   << 
1772         raw_spin_unlock(&nna->lock);          << 
1773 out:                                          << 
1774         if (obtained)                         << 
1775                 pwq->nr_active++;             << 
1776         return obtained;                      << 
1777 }                                             << 
1778                                               << 
1779 /**                                           << 
1780  * pwq_activate_first_inactive - Activate the << 
1781  * @pwq: pool_workqueue of interest           << 
1782  * @fill: max_active may have increased, try  << 
1783  *                                            << 
1784  * Activate the first inactive work item of @ << 
1785  * max_active limit.                          << 
1786  *                                            << 
1787  * Returns %true if an inactive work item has << 
1788  * inactive work item is found or max_active  << 
1789  */                                           << 
1790 static bool pwq_activate_first_inactive(struc << 
1791 {                                             << 
1792         struct work_struct *work =            << 
1793                 list_first_entry_or_null(&pwq << 
1794                                          stru << 
1795                                               << 
1796         if (work && pwq_tryinc_nr_active(pwq, << 
1797                 __pwq_activate_work(pwq, work << 
1798                 return true;                  << 
1799         } else {                              << 
1800                 return false;                 << 
1801         }                                     << 
1802 }                                             << 
1803                                               << 
1804 /**                                           << 
1805  * unplug_oldest_pwq - unplug the oldest pool << 
1806  * @wq: workqueue_struct where its oldest pwq << 
1807  *                                            << 
1808  * This function should only be called for or << 
1809  * oldest pwq is unplugged, the others are pl << 
1810  * ensure proper work item ordering::         << 
1811  *                                            << 
1812  *    dfl_pwq --------------+     [P] - plugg << 
1813  *                          |                 << 
1814  *                          v                 << 
1815  *    pwqs -> A -> B [P] -> C [P] (newest)    << 
1816  *            |    |        |                 << 
1817  *            1    3        5                 << 
1818  *            |    |        |                 << 
1819  *            2    4        6                 << 
1820  *                                            << 
1821  * When the oldest pwq is drained and removed << 
1822  * to unplug the next oldest one to start its << 
1823  * pwq's are linked into wq->pwqs with the ol << 
1824  * the list is the oldest.                    << 
1825  */                                           << 
1826 static void unplug_oldest_pwq(struct workqueu << 
1827 {                                             << 
1828         struct pool_workqueue *pwq;           << 
1829                                               << 
1830         lockdep_assert_held(&wq->mutex);      << 
1831                                               << 
1832         /* Caller should make sure that pwqs  << 
1833         pwq = list_first_entry_or_null(&wq->p << 
1834                                        pwqs_n << 
1835         raw_spin_lock_irq(&pwq->pool->lock);  << 
1836         if (pwq->plugged) {                   << 
1837                 pwq->plugged = false;         << 
1838                 if (pwq_activate_first_inacti << 
1839                         kick_pool(pwq->pool); << 
1840         }                                     << 
1841         raw_spin_unlock_irq(&pwq->pool->lock) << 
1842 }                                             << 
1843                                               << 
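For ordered workqueues the rule illustrated by the diagram above is: pwqs sit oldest-first on wq->pwqs, only the head may run, and when the head drains and is released the next oldest is unplugged. A toy restatement of that unplug step (the real function walks the list under wq->mutex, takes the pool lock, and kicks the pool after activating work):

struct toy_pwq { int plugged; };

/* called once the previous oldest pwq has drained and been removed */
static void toy_unplug_oldest(struct toy_pwq **pwqs_oldest_first, int nr_pwqs)
{
        if (nr_pwqs > 0 && pwqs_oldest_first[0]->plugged)
                pwqs_oldest_first[0]->plugged = 0;      /* new head may execute */
}
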
1844 /**                                           << 
1845  * node_activate_pending_pwq - Activate a pen << 
1846  * @nna: wq_node_nr_active to activate a pend << 
1847  * @caller_pool: worker_pool the caller is lo << 
1848  *                                            << 
1849  * Activate a pwq in @nna->pending_pwqs. Call << 
1850  * @caller_pool may be unlocked and relocked  << 
1851  */                                           << 
1852 static void node_activate_pending_pwq(struct  << 
1853                                       struct  << 
1854 {                                             << 
1855         struct worker_pool *locked_pool = cal << 
1856         struct pool_workqueue *pwq;           << 
1857         struct work_struct *work;             << 
1858                                               << 
1859         lockdep_assert_held(&caller_pool->loc << 
1860                                               << 
1861         raw_spin_lock(&nna->lock);            << 
1862 retry:                                        << 
1863         pwq = list_first_entry_or_null(&nna-> << 
1864                                        struct << 
1865         if (!pwq)                             << 
1866                 goto out_unlock;              << 
1867                                               << 
1868         /*                                    << 
1869          * If @pwq is for a different pool th << 
1870          * @pwq->pool->lock. Let's trylock fi << 
1871          * / lock dance. For that, we also ne << 
1872          * nested inside pool locks.          << 
1873          */                                   << 
1874         if (pwq->pool != locked_pool) {       << 
1875                 raw_spin_unlock(&locked_pool- << 
1876                 locked_pool = pwq->pool;      << 
1877                 if (!raw_spin_trylock(&locked << 
1878                         raw_spin_unlock(&nna- << 
1879                         raw_spin_lock(&locked << 
1880                         raw_spin_lock(&nna->l << 
1881                         goto retry;           << 
1882                 }                             << 
1883         }                                     << 
1884                                               << 
1885         /*                                    << 
1886          * $pwq may not have any inactive wor << 
1887          * Drop it from pending_pwqs and see  << 
1888          */                                   << 
1889         work = list_first_entry_or_null(&pwq- << 
1890                                         struc << 
1891         if (!work) {                          << 
1892                 list_del_init(&pwq->pending_n << 
1893                 goto retry;                   << 
1894         }                                     << 
1895                                               << 
1896         /*                                    << 
1897          * Acquire an nr_active count and act << 
1898          * $pwq still has inactive work items << 
1899          * pending_pwqs so that we round-robi << 
1900          * inactive work items are not activa << 
1901          * given that there has never been an << 
1902          */                                   << 
1903         if (likely(tryinc_node_nr_active(nna) << 
1904                 pwq->nr_active++;             << 
1905                 __pwq_activate_work(pwq, work << 
1906                                               << 
1907                 if (list_empty(&pwq->inactive << 
1908                         list_del_init(&pwq->p << 
1909                 else                          << 
1910                         list_move_tail(&pwq-> << 
1911                                               << 
1912                 /* if activating a foreign po << 
1913                 if (pwq->pool != caller_pool) << 
1914                         kick_pool(pwq->pool); << 
1915         }                                     << 
1916                                               << 
1917 out_unlock:                                   << 
1918         raw_spin_unlock(&nna->lock);          << 
1919         if (locked_pool != caller_pool) {     << 
1920                 raw_spin_unlock(&locked_pool- << 
1921                 raw_spin_lock(&caller_pool->l << 
1922         }                                     << 
1923 }                                             << 
1924                                               << 
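The trylock-or-dance in node_activate_pending_pwq() is a standard answer to a lock-ordering constraint: nna->lock nests inside pool locks, so with it held the function may only trylock a foreign pool; on failure it drops the inner lock, takes the pool lock the blocking (correctly ordered) way, retakes the inner lock and re-validates via the retry label. A pthreads sketch of just that pattern; the names are illustrative, not kernel APIs:

#include <pthread.h>

/* @inner is held on entry and on return; caller must re-check state afterwards */
static void toy_lock_outer_with_inner_held(pthread_mutex_t *inner,
                                           pthread_mutex_t *outer)
{
        if (pthread_mutex_trylock(outer) == 0)
                return;                         /* fast path, ordering preserved */
        pthread_mutex_unlock(inner);            /* step out of the inner lock */
        pthread_mutex_lock(outer);              /* take outer in the normal order */
        pthread_mutex_lock(inner);              /* re-nest; state may have changed */
}
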
1925 /**                                           << 
1926  * pwq_dec_nr_active - Retire an active count << 
1927  * @pwq: pool_workqueue of interest           << 
1928  *                                            << 
1929  * Decrement @pwq's nr_active and try to acti << 
1930  * For unbound workqueues, this function may  << 
1931  */                                           << 
1932 static void pwq_dec_nr_active(struct pool_wor << 
1933 {                                             << 
1934         struct worker_pool *pool = pwq->pool; << 
1935         struct wq_node_nr_active *nna = wq_no << 
1936                                               << 
1937         lockdep_assert_held(&pool->lock);     << 
1938                                               << 
1939         /*                                    << 
1940          * @pwq->nr_active should be decremen << 
1941          * workqueues.                        << 
1942          */                                   << 
1943         pwq->nr_active--;                     << 
1944                                               << 
1945         /*                                    << 
1946          * For a percpu workqueue, it's simpl << 
1947          * inactive work item on @pwq itself. << 
1948          */                                   << 
1949         if (!nna) {                           << 
1950                 pwq_activate_first_inactive(p << 
1951                 return;                       << 
1952         }                                     << 
1953                                               << 
1954         /*                                    << 
1955          * If @pwq is for an unbound workqueu << 
1956          * multiple pwqs and pools may be sha << 
1957          * pwq needs to wait for an nr_active << 
1958          * $nna->pending_pwqs. The following  << 
1959          * memory barrier is paired with smp_ << 
1960          * guarantee that either we see non-e << 
1961          * decremented $nna->nr.              << 
1962          *                                    << 
1963          * $nna->max may change as CPUs come  << 
1964          * max_active gets updated. However,  << 
1965          * larger than @pwq->wq->min_active w << 
1966          * This maintains the forward progres << 
1967          */                                   << 
1968         if (atomic_dec_return(&nna->nr) >= RE << 
1969                 return;                       << 
1970                                                  1470 
1971         if (!list_empty(&nna->pending_pwqs))  !! 1471         pwq_activate_inactive_work(work);
1972                 node_activate_pending_pwq(nna << 
1973 }                                                1472 }
1974                                                  1473 
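Reduced to its decision, pwq_dec_nr_active() retires one active count and then asks where the replacement work should come from: a per-cpu pwq refills from its own inactive list, while an unbound pwq releases a node-wide count and only visits nna->pending_pwqs once that count has dropped back under the node's max. A toy version of the unbound branch, without the atomics, barrier or locking:

#include <stdbool.h>

struct toy_nna { int nr; int max; int have_pending; };

/* returns true when a pending pwq on this node should be activated */
static bool toy_dec_nr_active(struct toy_nna *nna)
{
        if (--nna->nr >= nna->max)
                return false;           /* node still at its concurrency limit */
        return nna->have_pending != 0;
}
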
1975 /**                                              1474 /**
1976  * pwq_dec_nr_in_flight - decrement pwq's nr_    1475  * pwq_dec_nr_in_flight - decrement pwq's nr_in_flight
1977  * @pwq: pwq of interest                         1476  * @pwq: pwq of interest
1978  * @work_data: work_data of work which left t    1477  * @work_data: work_data of work which left the queue
1979  *                                               1478  *
1980  * A work either has completed or is removed     1479  * A work either has completed or is removed from pending queue,
1981  * decrement nr_in_flight of its pwq and hand    1480  * decrement nr_in_flight of its pwq and handle workqueue flushing.
1982  *                                               1481  *
1983  * NOTE:                                      << 
1984  * For unbound workqueues, this function may  << 
1985  * and thus should be called after all other  << 
1986  * work item is complete.                     << 
1987  *                                            << 
1988  * CONTEXT:                                      1482  * CONTEXT:
1989  * raw_spin_lock_irq(pool->lock).                1483  * raw_spin_lock_irq(pool->lock).
1990  */                                              1484  */
1991 static void pwq_dec_nr_in_flight(struct pool_    1485 static void pwq_dec_nr_in_flight(struct pool_workqueue *pwq, unsigned long work_data)
1992 {                                                1486 {
1993         int color = get_work_color(work_data)    1487         int color = get_work_color(work_data);
1994                                                  1488 
1995         if (!(work_data & WORK_STRUCT_INACTIV !! 1489         if (!(work_data & WORK_STRUCT_INACTIVE)) {
1996                 pwq_dec_nr_active(pwq);       !! 1490                 pwq->nr_active--;
                                                   >> 1491                 if (!list_empty(&pwq->inactive_works)) {
                                                   >> 1492                         /* one down, submit an inactive one */
                                                   >> 1493                         if (pwq->nr_active < pwq->max_active)
                                                   >> 1494                                 pwq_activate_first_inactive(pwq);
                                                   >> 1495                 }
                                                   >> 1496         }
1997                                                  1497 
1998         pwq->nr_in_flight[color]--;              1498         pwq->nr_in_flight[color]--;
1999                                                  1499 
2000         /* is flush in progress and are we at    1500         /* is flush in progress and are we at the flushing tip? */
2001         if (likely(pwq->flush_color != color)    1501         if (likely(pwq->flush_color != color))
2002                 goto out_put;                    1502                 goto out_put;
2003                                                  1503 
2004         /* are there still in-flight works? *    1504         /* are there still in-flight works? */
2005         if (pwq->nr_in_flight[color])            1505         if (pwq->nr_in_flight[color])
2006                 goto out_put;                    1506                 goto out_put;
2007                                                  1507 
2008         /* this pwq is done, clear flush_colo    1508         /* this pwq is done, clear flush_color */
2009         pwq->flush_color = -1;                   1509         pwq->flush_color = -1;
2010                                                  1510 
2011         /*                                       1511         /*
2012          * If this was the last pwq, wake up     1512          * If this was the last pwq, wake up the first flusher.  It
2013          * will handle the rest.                 1513          * will handle the rest.
2014          */                                      1514          */
2015         if (atomic_dec_and_test(&pwq->wq->nr_    1515         if (atomic_dec_and_test(&pwq->wq->nr_pwqs_to_flush))
2016                 complete(&pwq->wq->first_flus    1516                 complete(&pwq->wq->first_flusher->done);
2017 out_put:                                         1517 out_put:
2018         put_pwq(pwq);                            1518         put_pwq(pwq);
2019 }                                                1519 }
2020                                                  1520 
2021 /**                                              1521 /**
2022  * try_to_grab_pending - steal work item from    1522  * try_to_grab_pending - steal work item from worklist and disable irq
2023  * @work: work item to steal                     1523  * @work: work item to steal
2024  * @cflags: %WORK_CANCEL_ flags               !! 1524  * @is_dwork: @work is a delayed_work
2025  * @irq_flags: place to store irq state       !! 1525  * @flags: place to store irq state
2026  *                                               1526  *
2027  * Try to grab PENDING bit of @work.  This fu    1527  * Try to grab PENDING bit of @work.  This function can handle @work in any
2028  * stable state - idle, on timer or on workli    1528  * stable state - idle, on timer or on worklist.
2029  *                                               1529  *
2030  * Return:                                       1530  * Return:
2031  *                                               1531  *
2032  *  ========    =============================    1532  *  ========    ================================================================
2033  *  1           if @work was pending and we s    1533  *  1           if @work was pending and we successfully stole PENDING
2034  *  0           if @work was idle and we clai    1534  *  0           if @work was idle and we claimed PENDING
2035  *  -EAGAIN     if PENDING couldn't be grabbe    1535  *  -EAGAIN     if PENDING couldn't be grabbed at the moment, safe to busy-retry
                                                   >> 1536  *  -ENOENT     if someone else is canceling @work, this state may persist
                                                   >> 1537  *              for arbitrarily long
2036  *  ========    =============================    1538  *  ========    ================================================================
2037  *                                               1539  *
2038  * Note:                                         1540  * Note:
2039  * On >= 0 return, the caller owns @work's PE    1541  * On >= 0 return, the caller owns @work's PENDING bit.  To avoid getting
2040  * interrupted while holding PENDING and @wor    1542  * interrupted while holding PENDING and @work off queue, irq must be
2041  * disabled on entry.  This, combined with de    1543  * disabled on entry.  This, combined with delayed_work->timer being
2042  * irqsafe, ensures that we return -EAGAIN fo    1544  * irqsafe, ensures that we return -EAGAIN for a finite short period of time.
2043  *                                               1545  *
2044  * On successful return, >= 0, irq is disable    1546  * On successful return, >= 0, irq is disabled and the caller is
2045  * responsible for releasing it using local_i !! 1547  * responsible for releasing it using local_irq_restore(*@flags).
2046  *                                               1548  *
2047  * This function is safe to call from any con    1549  * This function is safe to call from any context including IRQ handler.
2048  */                                              1550  */
2049 static int try_to_grab_pending(struct work_st !! 1551 static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
2050                                unsigned long  !! 1552                                unsigned long *flags)
2051 {                                                1553 {
2052         struct worker_pool *pool;                1554         struct worker_pool *pool;
2053         struct pool_workqueue *pwq;              1555         struct pool_workqueue *pwq;
2054                                                  1556 
2055         local_irq_save(*irq_flags);           !! 1557         local_irq_save(*flags);
2056                                                  1558 
2057         /* try to steal the timer if it exist    1559         /* try to steal the timer if it exists */
2058         if (cflags & WORK_CANCEL_DELAYED) {   !! 1560         if (is_dwork) {
2059                 struct delayed_work *dwork =     1561                 struct delayed_work *dwork = to_delayed_work(work);
2060                                                  1562 
2061                 /*                               1563                 /*
2062                  * dwork->timer is irqsafe.      1564                  * dwork->timer is irqsafe.  If del_timer() fails, it's
2063                  * guaranteed that the timer     1565                  * guaranteed that the timer is not queued anywhere and not
2064                  * running on the local CPU.     1566                  * running on the local CPU.
2065                  */                              1567                  */
2066                 if (likely(del_timer(&dwork->    1568                 if (likely(del_timer(&dwork->timer)))
2067                         return 1;                1569                         return 1;
2068         }                                        1570         }
2069                                                  1571 
2070         /* try to claim PENDING the normal wa    1572         /* try to claim PENDING the normal way */
2071         if (!test_and_set_bit(WORK_STRUCT_PEN    1573         if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
2072                 return 0;                        1574                 return 0;
2073                                                  1575 
2074         rcu_read_lock();                         1576         rcu_read_lock();
2075         /*                                       1577         /*
2076          * The queueing is in progress, or it    1578          * The queueing is in progress, or it is already queued. Try to
2077          * steal it from ->worklist without c    1579          * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
2078          */                                      1580          */
2079         pool = get_work_pool(work);              1581         pool = get_work_pool(work);
2080         if (!pool)                               1582         if (!pool)
2081                 goto fail;                       1583                 goto fail;
2082                                                  1584 
2083         raw_spin_lock(&pool->lock);              1585         raw_spin_lock(&pool->lock);
2084         /*                                       1586         /*
2085          * work->data is guaranteed to point     1587          * work->data is guaranteed to point to pwq only while the work
2086          * item is queued on pwq->wq, and bot    1588          * item is queued on pwq->wq, and both updating work->data to point
2087          * to pwq on queueing and to pool on     1589          * to pwq on queueing and to pool on dequeueing are done under
2088          * pwq->pool->lock.  This in turn gua    1590          * pwq->pool->lock.  This in turn guarantees that, if work->data
2089          * points to pwq which is associated     1591          * points to pwq which is associated with a locked pool, the work
2090          * item is currently queued on that p    1592          * item is currently queued on that pool.
2091          */                                      1593          */
2092         pwq = get_work_pwq(work);                1594         pwq = get_work_pwq(work);
2093         if (pwq && pwq->pool == pool) {          1595         if (pwq && pwq->pool == pool) {
2094                 unsigned long work_data = *wo << 
2095                                               << 
2096                 debug_work_deactivate(work);     1596                 debug_work_deactivate(work);
2097                                                  1597 
2098                 /*                               1598                 /*
2099                  * A cancelable inactive work    1599                  * A cancelable inactive work item must be in the
2100                  * pwq->inactive_works since     1600                  * pwq->inactive_works since a queued barrier can't be
2101                  * canceled (see the comments    1601                  * canceled (see the comments in insert_wq_barrier()).
2102                  *                               1602                  *
2103                  * An inactive work item cann !! 1603                  * An inactive work item cannot be grabbed directly because
2104                  * it might have linked barri    1604                  * it might have linked barrier work items which, if left
2105                  * on the inactive_works list    1605                  * on the inactive_works list, will confuse pwq->nr_active
2106                  * management later on and ca !! 1606                  * management later on and cause stall.  Make sure the work
2107                  * barrier work items to the  !! 1607                  * item is activated before grabbing.
2108                  * item. Also keep WORK_STRUC << 
2109                  * it doesn't participate in  << 
2110                  * pwq_dec_nr_in_flight().    << 
2111                  */                              1608                  */
2112                 if (work_data & WORK_STRUCT_I !! 1609                 if (*work_data_bits(work) & WORK_STRUCT_INACTIVE)
2113                         move_linked_works(wor !! 1610                         pwq_activate_inactive_work(work);
2114                                                  1611 
2115                 list_del_init(&work->entry);     1612                 list_del_init(&work->entry);
                                                   >> 1613                 pwq_dec_nr_in_flight(pwq, *work_data_bits(work));
2116                                                  1614 
2117                 /*                            !! 1615                 /* work->data points to pwq iff queued, point to pool */
2118                  * work->data points to pwq i !! 1616                 set_work_pool_and_keep_pending(work, pool->id);
2119                  * this destroys work->data n << 
2120                  */                           << 
2121                 set_work_pool_and_keep_pendin << 
2122                                               << 
2123                                               << 
2124                 /* must be the last step, see << 
2125                 pwq_dec_nr_in_flight(pwq, wor << 
2126                                                  1617 
2127                 raw_spin_unlock(&pool->lock);    1618                 raw_spin_unlock(&pool->lock);
2128                 rcu_read_unlock();               1619                 rcu_read_unlock();
2129                 return 1;                        1620                 return 1;
2130         }                                        1621         }
2131         raw_spin_unlock(&pool->lock);            1622         raw_spin_unlock(&pool->lock);
2132 fail:                                            1623 fail:
2133         rcu_read_unlock();                       1624         rcu_read_unlock();
2134         local_irq_restore(*irq_flags);        !! 1625         local_irq_restore(*flags);
                                                   >> 1626         if (work_is_canceling(work))
                                                   >> 1627                 return -ENOENT;
                                                   >> 1628         cpu_relax();
2135         return -EAGAIN;                          1629         return -EAGAIN;
2136 }                                                1630 }
2137                                                  1631 
2138 /**                                              1632 /**
2139  * work_grab_pending - steal work item from w << 
2140  * @work: work item to steal                  << 
2141  * @cflags: %WORK_CANCEL_ flags               << 
2142  * @irq_flags: place to store IRQ state       << 
2143  *                                            << 
2144  * Grab PENDING bit of @work. @work can be in << 
2145  * or on worklist.                            << 
2146  *                                            << 
2147  * Can be called from any context. IRQ is dis << 
2148  * stored in *@irq_flags. The caller is respo << 
2149  * local_irq_restore().                       << 
2150  *                                            << 
2151  * Returns %true if @work was pending. %false << 
2152  */                                           << 
2153 static bool work_grab_pending(struct work_str << 
2154                               unsigned long * << 
2155 {                                             << 
2156         int ret;                              << 
2157                                               << 
2158         while (true) {                        << 
2159                 ret = try_to_grab_pending(wor << 
2160                 if (ret >= 0)                 << 
2161                         return ret;           << 
2162                 cpu_relax();                  << 
2163         }                                     << 
2164 }                                             << 
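
A hedged in-file sketch of the calling convention described above: work_grab_pending() returns with local IRQs disabled and the PENDING bit owned, so the caller must requeue @work or otherwise release PENDING before restoring IRQs. The helper name example_grab_and_requeue() is hypothetical and not part of this file.

static bool example_grab_and_requeue(struct work_struct *work)
{
        unsigned long irq_flags;
        bool was_pending;

        /* own PENDING; @work is off any worklist and IRQs are disabled */
        was_pending = work_grab_pending(work, 0, &irq_flags);

        /*
         * ... requeue @work or clear its PENDING bit here; leaving PENDING
         * set would block all future queueing of this work item ...
         */

        local_irq_restore(irq_flags);
        return was_pending;
}
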
2165                                               << 
2166 /**                                           << 
2167  * insert_work - insert a work into a pool       1633  * insert_work - insert a work into a pool
2168  * @pwq: pwq @work belongs to                    1634  * @pwq: pwq @work belongs to
2169  * @work: work to insert                         1635  * @work: work to insert
2170  * @head: insertion point                        1636  * @head: insertion point
2171  * @extra_flags: extra WORK_STRUCT_* flags to    1637  * @extra_flags: extra WORK_STRUCT_* flags to set
2172  *                                               1638  *
2173  * Insert @work which belongs to @pwq after @    1639  * Insert @work which belongs to @pwq after @head.  @extra_flags is or'd to
2174  * work_struct flags.                            1640  * work_struct flags.
2175  *                                               1641  *
2176  * CONTEXT:                                      1642  * CONTEXT:
2177  * raw_spin_lock_irq(pool->lock).                1643  * raw_spin_lock_irq(pool->lock).
2178  */                                              1644  */
2179 static void insert_work(struct pool_workqueue    1645 static void insert_work(struct pool_workqueue *pwq, struct work_struct *work,
2180                         struct list_head *hea    1646                         struct list_head *head, unsigned int extra_flags)
2181 {                                                1647 {
2182         debug_work_activate(work);               1648         debug_work_activate(work);
2183                                                  1649 
2184         /* record the work call stack in orde    1650         /* record the work call stack in order to print it in KASAN reports */
2185         kasan_record_aux_stack_noalloc(work);    1651         kasan_record_aux_stack_noalloc(work);
2186                                                  1652 
2187         /* we own @work, set data and link */    1653         /* we own @work, set data and link */
2188         set_work_pwq(work, pwq, extra_flags);    1654         set_work_pwq(work, pwq, extra_flags);
2189         list_add_tail(&work->entry, head);       1655         list_add_tail(&work->entry, head);
2190         get_pwq(pwq);                            1656         get_pwq(pwq);
2191 }                                                1657 }
2192                                                  1658 
2193 /*                                               1659 /*
2194  * Test whether @work is being queued from an    1660  * Test whether @work is being queued from another work executing on the
2195  * same workqueue.                               1661  * same workqueue.
2196  */                                              1662  */
2197 static bool is_chained_work(struct workqueue_    1663 static bool is_chained_work(struct workqueue_struct *wq)
2198 {                                                1664 {
2199         struct worker *worker;                   1665         struct worker *worker;
2200                                                  1666 
2201         worker = current_wq_worker();            1667         worker = current_wq_worker();
2202         /*                                       1668         /*
2203          * Return %true iff I'm a worker exec    1669          * Return %true iff I'm a worker executing a work item on @wq.  If
2204          * I'm @worker, it's safe to derefere    1670          * I'm @worker, it's safe to dereference it without locking.
2205          */                                      1671          */
2206         return worker && worker->current_pwq-    1672         return worker && worker->current_pwq->wq == wq;
2207 }                                                1673 }
2208                                                  1674 
2209 /*                                               1675 /*
2210  * When queueing an unbound work item to a wq    1676  * When queueing an unbound work item to a wq, prefer local CPU if allowed
2211  * by wq_unbound_cpumask.  Otherwise, round r    1677  * by wq_unbound_cpumask.  Otherwise, round robin among the allowed ones to
2212  * avoid perturbing sensitive tasks.             1678  * avoid perturbing sensitive tasks.
2213  */                                              1679  */
2214 static int wq_select_unbound_cpu(int cpu)        1680 static int wq_select_unbound_cpu(int cpu)
2215 {                                                1681 {
2216         int new_cpu;                             1682         int new_cpu;
2217                                                  1683 
2218         if (likely(!wq_debug_force_rr_cpu)) {    1684         if (likely(!wq_debug_force_rr_cpu)) {
2219                 if (cpumask_test_cpu(cpu, wq_    1685                 if (cpumask_test_cpu(cpu, wq_unbound_cpumask))
2220                         return cpu;              1686                         return cpu;
2221         } else {                                 1687         } else {
2222                 pr_warn_once("workqueue: roun    1688                 pr_warn_once("workqueue: round-robin CPU selection forced, expect performance impact\n");
2223         }                                        1689         }
2224                                                  1690 
2225         new_cpu = __this_cpu_read(wq_rr_cpu_l    1691         new_cpu = __this_cpu_read(wq_rr_cpu_last);
2226         new_cpu = cpumask_next_and(new_cpu, w    1692         new_cpu = cpumask_next_and(new_cpu, wq_unbound_cpumask, cpu_online_mask);
2227         if (unlikely(new_cpu >= nr_cpu_ids))     1693         if (unlikely(new_cpu >= nr_cpu_ids)) {
2228                 new_cpu = cpumask_first_and(w    1694                 new_cpu = cpumask_first_and(wq_unbound_cpumask, cpu_online_mask);
2229                 if (unlikely(new_cpu >= nr_cp    1695                 if (unlikely(new_cpu >= nr_cpu_ids))
2230                         return cpu;              1696                         return cpu;
2231         }                                        1697         }
2232         __this_cpu_write(wq_rr_cpu_last, new_    1698         __this_cpu_write(wq_rr_cpu_last, new_cpu);
2233                                                  1699 
2234         return new_cpu;                          1700         return new_cpu;
2235 }                                                1701 }
2236                                                  1702 
2237 static void __queue_work(int cpu, struct work    1703 static void __queue_work(int cpu, struct workqueue_struct *wq,
2238                          struct work_struct *    1704                          struct work_struct *work)
2239 {                                                1705 {
2240         struct pool_workqueue *pwq;              1706         struct pool_workqueue *pwq;
2241         struct worker_pool *last_pool, *pool;    1707         struct worker_pool *last_pool, *pool;
2242         unsigned int work_flags;                 1708         unsigned int work_flags;
2243         unsigned int req_cpu = cpu;              1709         unsigned int req_cpu = cpu;
2244                                                  1710 
2245         /*                                       1711         /*
2246          * While a work item is PENDING && of    1712          * While a work item is PENDING && off queue, a task trying to
2247          * steal the PENDING will busy-loop w    1713          * steal the PENDING will busy-loop waiting for it to either get
2248          * queued or lose PENDING.  Grabbing     1714          * queued or lose PENDING.  Grabbing PENDING and queueing should
2249          * happen with IRQ disabled.             1715          * happen with IRQ disabled.
2250          */                                      1716          */
2251         lockdep_assert_irqs_disabled();          1717         lockdep_assert_irqs_disabled();
2252                                                  1718 
                                                   >> 1719 
2253         /*                                       1720         /*
2254          * For a draining wq, only works from    1721          * For a draining wq, only works from the same workqueue are
2255          * allowed. The __WQ_DESTROYING helps    1722          * allowed. The __WQ_DESTROYING helps to spot the issue that
2256          * queues a new work item to a wq aft    1723          * queues a new work item to a wq after destroy_workqueue(wq).
2257          */                                      1724          */
2258         if (unlikely(wq->flags & (__WQ_DESTRO    1725         if (unlikely(wq->flags & (__WQ_DESTROYING | __WQ_DRAINING) &&
2259                      WARN_ON_ONCE(!is_chained    1726                      WARN_ON_ONCE(!is_chained_work(wq))))
2260                 return;                          1727                 return;
2261         rcu_read_lock();                         1728         rcu_read_lock();
2262 retry:                                           1729 retry:
2263         /* pwq which will be used unless @wor    1730         /* pwq which will be used unless @work is executing elsewhere */
2264         if (req_cpu == WORK_CPU_UNBOUND) {       1731         if (req_cpu == WORK_CPU_UNBOUND) {
2265                 if (wq->flags & WQ_UNBOUND)      1732                 if (wq->flags & WQ_UNBOUND)
2266                         cpu = wq_select_unbou    1733                         cpu = wq_select_unbound_cpu(raw_smp_processor_id());
2267                 else                             1734                 else
2268                         cpu = raw_smp_process    1735                         cpu = raw_smp_processor_id();
2269         }                                        1736         }
2270                                                  1737 
2271         pwq = rcu_dereference(*per_cpu_ptr(wq    1738         pwq = rcu_dereference(*per_cpu_ptr(wq->cpu_pwq, cpu));
2272         pool = pwq->pool;                        1739         pool = pwq->pool;
2273                                                  1740 
2274         /*                                       1741         /*
2275          * If @work was previously on a diffe    1742          * If @work was previously on a different pool, it might still be
2276          * running there, in which case the w    1743          * running there, in which case the work needs to be queued on that
2277          * pool to guarantee non-reentrancy.     1744          * pool to guarantee non-reentrancy.
2278          *                                    << 
2279          * For ordered workqueue, work items  << 
2280          * for accurate order management.  Gu << 
2281          * non-reentrancy.  See the comments  << 
2282          */                                      1745          */
2283         last_pool = get_work_pool(work);         1746         last_pool = get_work_pool(work);
2284         if (last_pool && last_pool != pool && !! 1747         if (last_pool && last_pool != pool) {
2285                 struct worker *worker;           1748                 struct worker *worker;
2286                                                  1749 
2287                 raw_spin_lock(&last_pool->loc    1750                 raw_spin_lock(&last_pool->lock);
2288                                                  1751 
2289                 worker = find_worker_executin    1752                 worker = find_worker_executing_work(last_pool, work);
2290                                                  1753 
2291                 if (worker && worker->current    1754                 if (worker && worker->current_pwq->wq == wq) {
2292                         pwq = worker->current    1755                         pwq = worker->current_pwq;
2293                         pool = pwq->pool;        1756                         pool = pwq->pool;
2294                         WARN_ON_ONCE(pool !=     1757                         WARN_ON_ONCE(pool != last_pool);
2295                 } else {                         1758                 } else {
2296                         /* meh... not running    1759                         /* meh... not running there, queue here */
2297                         raw_spin_unlock(&last    1760                         raw_spin_unlock(&last_pool->lock);
2298                         raw_spin_lock(&pool->    1761                         raw_spin_lock(&pool->lock);
2299                 }                                1762                 }
2300         } else {                                 1763         } else {
2301                 raw_spin_lock(&pool->lock);      1764                 raw_spin_lock(&pool->lock);
2302         }                                        1765         }
2303                                                  1766 
2304         /*                                       1767         /*
2305          * pwq is determined and locked. For     1768          * pwq is determined and locked. For unbound pools, we could have raced
2306          * with pwq release and it could alre    1769          * with pwq release and it could already be dead. If its refcnt is zero,
2307          * repeat pwq selection. Note that un    1770          * repeat pwq selection. Note that unbound pwqs never die without
2308          * another pwq replacing it in cpu_pw    1771          * another pwq replacing it in cpu_pwq or while work items are executing
2309          * on it, so the retrying is guarante    1772          * on it, so the retrying is guaranteed to make forward-progress.
2310          */                                      1773          */
2311         if (unlikely(!pwq->refcnt)) {            1774         if (unlikely(!pwq->refcnt)) {
2312                 if (wq->flags & WQ_UNBOUND) {    1775                 if (wq->flags & WQ_UNBOUND) {
2313                         raw_spin_unlock(&pool    1776                         raw_spin_unlock(&pool->lock);
2314                         cpu_relax();             1777                         cpu_relax();
2315                         goto retry;              1778                         goto retry;
2316                 }                                1779                 }
2317                 /* oops */                       1780                 /* oops */
2318                 WARN_ONCE(true, "workqueue: p    1781                 WARN_ONCE(true, "workqueue: per-cpu pwq for %s on cpu%d has 0 refcnt",
2319                           wq->name, cpu);        1782                           wq->name, cpu);
2320         }                                        1783         }
2321                                                  1784 
2322         /* pwq determined, queue */              1785         /* pwq determined, queue */
2323         trace_workqueue_queue_work(req_cpu, p    1786         trace_workqueue_queue_work(req_cpu, pwq, work);
2324                                                  1787 
2325         if (WARN_ON(!list_empty(&work->entry)    1788         if (WARN_ON(!list_empty(&work->entry)))
2326                 goto out;                        1789                 goto out;
2327                                                  1790 
2328         pwq->nr_in_flight[pwq->work_color]++;    1791         pwq->nr_in_flight[pwq->work_color]++;
2329         work_flags = work_color_to_flags(pwq-    1792         work_flags = work_color_to_flags(pwq->work_color);
2330                                                  1793 
2331         /*                                    !! 1794         if (likely(pwq->nr_active < pwq->max_active)) {
2332          * Limit the number of concurrently a << 
2333          * @work must also queue behind exist << 
2334          * ordering when max_active changes.  << 
2335          */                                   << 
2336         if (list_empty(&pwq->inactive_works)  << 
2337                 if (list_empty(&pool->worklis    1795                 if (list_empty(&pool->worklist))
2338                         pool->watchdog_ts = j    1796                         pool->watchdog_ts = jiffies;
2339                                                  1797 
2340                 trace_workqueue_activate_work    1798                 trace_workqueue_activate_work(work);
                                                   >> 1799                 pwq->nr_active++;
2341                 insert_work(pwq, work, &pool-    1800                 insert_work(pwq, work, &pool->worklist, work_flags);
2342                 kick_pool(pool);                 1801                 kick_pool(pool);
2343         } else {                                 1802         } else {
2344                 work_flags |= WORK_STRUCT_INA    1803                 work_flags |= WORK_STRUCT_INACTIVE;
2345                 insert_work(pwq, work, &pwq->    1804                 insert_work(pwq, work, &pwq->inactive_works, work_flags);
2346         }                                        1805         }
2347                                                  1806 
2348 out:                                             1807 out:
2349         raw_spin_unlock(&pool->lock);            1808         raw_spin_unlock(&pool->lock);
2350         rcu_read_unlock();                       1809         rcu_read_unlock();
2351 }                                                1810 }
2352                                                  1811 
2353 static bool clear_pending_if_disabled(struct  << 
2354 {                                             << 
2355         unsigned long data = *work_data_bits( << 
2356         struct work_offq_data offqd;          << 
2357                                               << 
2358         if (likely((data & WORK_STRUCT_PWQ) | << 
2359                    !(data & WORK_OFFQ_DISABLE << 
2360                 return false;                 << 
2361                                               << 
2362         work_offqd_unpack(&offqd, data);      << 
2363         set_work_pool_and_clear_pending(work, << 
2364                                         work_ << 
2365         return true;                          << 
2366 }                                             << 
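
For illustration only, a hedged sketch of the other side of the disabled state that clear_pending_if_disabled() consumes, assuming the disable_work()/enable_work() interface that accompanies this mechanism in this kernel generation; the device structure and field names are hypothetical.

#include <linux/workqueue.h>

struct example_dev {
        struct work_struct irq_work;
};

static void example_dev_quiesce(struct example_dev *ed)
{
        /*
         * Mark the work item disabled and wait for any running instance;
         * later queue_work() attempts bail out in
         * clear_pending_if_disabled() above.
         */
        disable_work_sync(&ed->irq_work);
}

static void example_dev_resume(struct example_dev *ed)
{
        /* clear the disabled state so queueing succeeds again */
        enable_work(&ed->irq_work);
}
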
2367                                               << 
2368 /**                                              1812 /**
2369  * queue_work_on - queue work on specific cpu    1813  * queue_work_on - queue work on specific cpu
2370  * @cpu: CPU number to execute work on           1814  * @cpu: CPU number to execute work on
2371  * @wq: workqueue to use                         1815  * @wq: workqueue to use
2372  * @work: work to queue                          1816  * @work: work to queue
2373  *                                               1817  *
2374  * We queue the work to a specific CPU, the c    1818  * We queue the work to a specific CPU, the caller must ensure it
2375  * can't go away.  Callers that fail to ensur    1819  * can't go away.  Callers that fail to ensure that the specified
2376  * CPU cannot go away will execute on a rando    1820  * CPU cannot go away will execute on a randomly chosen CPU.
2377  * But note well that callers specifying a CP    1821  * But note well that callers specifying a CPU that never has been
2378  * online will get a splat.                      1822  * online will get a splat.
2379  *                                               1823  *
2380  * Return: %false if @work was already on a q    1824  * Return: %false if @work was already on a queue, %true otherwise.
2381  */                                              1825  */
2382 bool queue_work_on(int cpu, struct workqueue_    1826 bool queue_work_on(int cpu, struct workqueue_struct *wq,
2383                    struct work_struct *work)     1827                    struct work_struct *work)
2384 {                                                1828 {
2385         bool ret = false;                        1829         bool ret = false;
2386         unsigned long irq_flags;              !! 1830         unsigned long flags;
2387                                                  1831 
2388         local_irq_save(irq_flags);            !! 1832         local_irq_save(flags);
2389                                                  1833 
2390         if (!test_and_set_bit(WORK_STRUCT_PEN !! 1834         if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
2391             !clear_pending_if_disabled(work)) << 
2392                 __queue_work(cpu, wq, work);     1835                 __queue_work(cpu, wq, work);
2393                 ret = true;                      1836                 ret = true;
2394         }                                        1837         }
2395                                                  1838 
2396         local_irq_restore(irq_flags);         !! 1839         local_irq_restore(flags);
2397         return ret;                              1840         return ret;
2398 }                                                1841 }
2399 EXPORT_SYMBOL(queue_work_on);                    1842 EXPORT_SYMBOL(queue_work_on);
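
A minimal usage sketch for queue_work_on(); the work item, handler, and helper names are hypothetical. It pins CPU hotplug while targeting a specific CPU, matching the "caller must ensure it can't go away" requirement in the comment above.

#include <linux/cpu.h>
#include <linux/workqueue.h>

static void example_work_fn(struct work_struct *work)
{
        /* executes in process context on the requested CPU's worker pool */
}

static DECLARE_WORK(example_work, example_work_fn);

static void example_queue_on_cpu(int cpu)
{
        /* hold off hotplug so @cpu cannot go away while we target it */
        cpus_read_lock();
        if (cpu_online(cpu))
                queue_work_on(cpu, system_wq, &example_work);
        cpus_read_unlock();
}
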
2400                                                  1843 
2401 /**                                              1844 /**
2402  * select_numa_node_cpu - Select a CPU based     1845  * select_numa_node_cpu - Select a CPU based on NUMA node
2403  * @node: NUMA node ID that we want to select    1846  * @node: NUMA node ID that we want to select a CPU from
2404  *                                               1847  *
2405  * This function will attempt to find a "rand    1848  * This function will attempt to find a "random" cpu available on a given
2406  * node. If there are no CPUs available on th    1849  * node. If there are no CPUs available on the given node it will return
2407  * WORK_CPU_UNBOUND indicating that we should    1850  * WORK_CPU_UNBOUND indicating that we should just schedule to any
2408  * available CPU if we need to schedule this     1851  * available CPU if we need to schedule this work.
2409  */                                              1852  */
2410 static int select_numa_node_cpu(int node)        1853 static int select_numa_node_cpu(int node)
2411 {                                                1854 {
2412         int cpu;                                 1855         int cpu;
2413                                                  1856 
2414         /* Delay binding to CPU if node is no    1857         /* Delay binding to CPU if node is not valid or online */
2415         if (node < 0 || node >= MAX_NUMNODES     1858         if (node < 0 || node >= MAX_NUMNODES || !node_online(node))
2416                 return WORK_CPU_UNBOUND;         1859                 return WORK_CPU_UNBOUND;
2417                                                  1860 
2418         /* Use local node/cpu if we are alrea    1861         /* Use local node/cpu if we are already there */
2419         cpu = raw_smp_processor_id();            1862         cpu = raw_smp_processor_id();
2420         if (node == cpu_to_node(cpu))            1863         if (node == cpu_to_node(cpu))
2421                 return cpu;                      1864                 return cpu;
2422                                                  1865 
2423         /* Use "random" otherwise known as "f    1866         /* Use "random" otherwise known as "first" online CPU of node */
2424         cpu = cpumask_any_and(cpumask_of_node    1867         cpu = cpumask_any_and(cpumask_of_node(node), cpu_online_mask);
2425                                                  1868 
2426         /* If CPU is valid return that, other    1869         /* If CPU is valid return that, otherwise just defer */
2427         return cpu < nr_cpu_ids ? cpu : WORK_    1870         return cpu < nr_cpu_ids ? cpu : WORK_CPU_UNBOUND;
2428 }                                                1871 }
2429                                                  1872 
2430 /**                                              1873 /**
2431  * queue_work_node - queue work on a "random"    1874  * queue_work_node - queue work on a "random" cpu for a given NUMA node
2432  * @node: NUMA node that we are targeting the    1875  * @node: NUMA node that we are targeting the work for
2433  * @wq: workqueue to use                         1876  * @wq: workqueue to use
2434  * @work: work to queue                          1877  * @work: work to queue
2435  *                                               1878  *
2436  * We queue the work to a "random" CPU within    1879  * We queue the work to a "random" CPU within a given NUMA node. The basic
2437  * idea here is to provide a way to somehow a    1880  * idea here is to provide a way to somehow associate work with a given
2438  * NUMA node.                                    1881  * NUMA node.
2439  *                                               1882  *
2440  * This function will only make a best effort    1883  * This function will only make a best effort attempt at getting this onto
2441  * the right NUMA node. If no node is request    1884  * the right NUMA node. If no node is requested or the requested node is
2442  * offline then we just fall back to standard    1885  * offline then we just fall back to standard queue_work behavior.
2443  *                                               1886  *
2444  * Currently the "random" CPU ends up being t    1887  * Currently the "random" CPU ends up being the first available CPU in the
2445  * intersection of cpu_online_mask and the cp    1888  * intersection of cpu_online_mask and the cpumask of the node, unless we
2446  * are running on the node. In that case we j    1889  * are running on the node. In that case we just use the current CPU.
2447  *                                               1890  *
2448  * Return: %false if @work was already on a q    1891  * Return: %false if @work was already on a queue, %true otherwise.
2449  */                                              1892  */
2450 bool queue_work_node(int node, struct workque    1893 bool queue_work_node(int node, struct workqueue_struct *wq,
2451                      struct work_struct *work    1894                      struct work_struct *work)
2452 {                                                1895 {
2453         unsigned long irq_flags;              !! 1896         unsigned long flags;
2454         bool ret = false;                        1897         bool ret = false;
2455                                                  1898 
2456         /*                                       1899         /*
2457          * This current implementation is spe    1900          * This current implementation is specific to unbound workqueues.
2458          * Specifically we only return the fi    1901          * Specifically we only return the first available CPU for a given
2459          * node instead of cycling through in    1902          * node instead of cycling through individual CPUs within the node.
2460          *                                       1903          *
2461          * If this is used with a per-cpu wor    1904          * If this is used with a per-cpu workqueue then the logic in
2462          * workqueue_select_cpu_near would ne    1905          * workqueue_select_cpu_near would need to be updated to allow for
2463          * some round robin type logic.          1906          * some round robin type logic.
2464          */                                      1907          */
2465         WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND    1908         WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND));
2466                                                  1909 
2467         local_irq_save(irq_flags);            !! 1910         local_irq_save(flags);
2468                                                  1911 
2469         if (!test_and_set_bit(WORK_STRUCT_PEN !! 1912         if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
2470             !clear_pending_if_disabled(work)) << 
2471                 int cpu = select_numa_node_cp    1913                 int cpu = select_numa_node_cpu(node);
2472                                                  1914 
2473                 __queue_work(cpu, wq, work);     1915                 __queue_work(cpu, wq, work);
2474                 ret = true;                      1916                 ret = true;
2475         }                                        1917         }
2476                                                  1918 
2477         local_irq_restore(irq_flags);         !! 1919         local_irq_restore(flags);
2478         return ret;                              1920         return ret;
2479 }                                                1921 }
2480 EXPORT_SYMBOL_GPL(queue_work_node);              1922 EXPORT_SYMBOL_GPL(queue_work_node);
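
A hedged sketch of queue_work_node() usage with an unbound workqueue, as required by the WARN_ON_ONCE above; the workqueue name, work item, and node id are hypothetical.

#include <linux/errno.h>
#include <linux/workqueue.h>

static struct workqueue_struct *example_unbound_wq;

static void example_numa_fn(struct work_struct *work)
{
        /* runs on some CPU of the requested node when one is online */
}

static DECLARE_WORK(example_numa_work, example_numa_fn);

static int example_init(void)
{
        example_unbound_wq = alloc_workqueue("example_unbound", WQ_UNBOUND, 0);
        if (!example_unbound_wq)
                return -ENOMEM;

        /* best effort: falls back to any CPU if node 0 has none online */
        queue_work_node(0, example_unbound_wq, &example_numa_work);
        return 0;
}
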
2481                                                  1923 
2482 void delayed_work_timer_fn(struct timer_list     1924 void delayed_work_timer_fn(struct timer_list *t)
2483 {                                                1925 {
2484         struct delayed_work *dwork = from_tim    1926         struct delayed_work *dwork = from_timer(dwork, t, timer);
2485                                                  1927 
2486         /* should have been called from irqsa    1928         /* should have been called from irqsafe timer with irq already off */
2487         __queue_work(dwork->cpu, dwork->wq, &    1929         __queue_work(dwork->cpu, dwork->wq, &dwork->work);
2488 }                                                1930 }
2489 EXPORT_SYMBOL(delayed_work_timer_fn);            1931 EXPORT_SYMBOL(delayed_work_timer_fn);
2490                                                  1932 
2491 static void __queue_delayed_work(int cpu, str    1933 static void __queue_delayed_work(int cpu, struct workqueue_struct *wq,
2492                                 struct delaye    1934                                 struct delayed_work *dwork, unsigned long delay)
2493 {                                                1935 {
2494         struct timer_list *timer = &dwork->ti    1936         struct timer_list *timer = &dwork->timer;
2495         struct work_struct *work = &dwork->wo    1937         struct work_struct *work = &dwork->work;
2496                                                  1938 
2497         WARN_ON_ONCE(!wq);                       1939         WARN_ON_ONCE(!wq);
2498         WARN_ON_ONCE(timer->function != delay    1940         WARN_ON_ONCE(timer->function != delayed_work_timer_fn);
2499         WARN_ON_ONCE(timer_pending(timer));      1941         WARN_ON_ONCE(timer_pending(timer));
2500         WARN_ON_ONCE(!list_empty(&work->entry    1942         WARN_ON_ONCE(!list_empty(&work->entry));
2501                                                  1943 
2502         /*                                       1944         /*
2503          * If @delay is 0, queue @dwork->work    1945          * If @delay is 0, queue @dwork->work immediately.  This is for
2504          * both optimization and correctness.    1946          * both optimization and correctness.  The earliest @timer can
2505          * expire is on the closest next tick    1947          * expire is on the closest next tick and delayed_work users depend
2506          * on that there's no such delay when    1948          * on that there's no such delay when @delay is 0.
2507          */                                      1949          */
2508         if (!delay) {                            1950         if (!delay) {
2509                 __queue_work(cpu, wq, &dwork-    1951                 __queue_work(cpu, wq, &dwork->work);
2510                 return;                          1952                 return;
2511         }                                        1953         }
2512                                                  1954 
2513         dwork->wq = wq;                          1955         dwork->wq = wq;
2514         dwork->cpu = cpu;                        1956         dwork->cpu = cpu;
2515         timer->expires = jiffies + delay;        1957         timer->expires = jiffies + delay;
2516                                                  1958 
2517         if (housekeeping_enabled(HK_TYPE_TIME !! 1959         if (unlikely(cpu != WORK_CPU_UNBOUND))
2518                 /* If the current cpu is a ho << 
2519                 cpu = smp_processor_id();     << 
2520                 if (!housekeeping_test_cpu(cp << 
2521                         cpu = housekeeping_an << 
2522                 add_timer_on(timer, cpu);        1960                 add_timer_on(timer, cpu);
2523         } else {                              !! 1961         else
2524                 if (likely(cpu == WORK_CPU_UN !! 1962                 add_timer(timer);
2525                         add_timer_global(time << 
2526                 else                          << 
2527                         add_timer_on(timer, c << 
2528         }                                     << 
2529 }                                                1963 }
2530                                                  1964 
2531 /**                                              1965 /**
2532  * queue_delayed_work_on - queue work on spec    1966  * queue_delayed_work_on - queue work on specific CPU after delay
2533  * @cpu: CPU number to execute work on           1967  * @cpu: CPU number to execute work on
2534  * @wq: workqueue to use                         1968  * @wq: workqueue to use
2535  * @dwork: work to queue                         1969  * @dwork: work to queue
2536  * @delay: number of jiffies to wait before q    1970  * @delay: number of jiffies to wait before queueing
2537  *                                               1971  *
2538  * Return: %false if @work was already on a q    1972  * Return: %false if @work was already on a queue, %true otherwise.  If
2539  * @delay is zero and @dwork is idle, it will    1973  * @delay is zero and @dwork is idle, it will be scheduled for immediate
2540  * execution.                                    1974  * execution.
2541  */                                              1975  */
2542 bool queue_delayed_work_on(int cpu, struct wo    1976 bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
2543                            struct delayed_wor    1977                            struct delayed_work *dwork, unsigned long delay)
2544 {                                                1978 {
2545         struct work_struct *work = &dwork->wo    1979         struct work_struct *work = &dwork->work;
2546         bool ret = false;                        1980         bool ret = false;
2547         unsigned long irq_flags;              !! 1981         unsigned long flags;
2548                                                  1982 
2549         /* read the comment in __queue_work()    1983         /* read the comment in __queue_work() */
2550         local_irq_save(irq_flags);            !! 1984         local_irq_save(flags);
2551                                                  1985 
2552         if (!test_and_set_bit(WORK_STRUCT_PEN !! 1986         if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
2553             !clear_pending_if_disabled(work)) << 
2554                 __queue_delayed_work(cpu, wq,    1987                 __queue_delayed_work(cpu, wq, dwork, delay);
2555                 ret = true;                      1988                 ret = true;
2556         }                                        1989         }
2557                                                  1990 
2558         local_irq_restore(irq_flags);         !! 1991         local_irq_restore(flags);
2559         return ret;                              1992         return ret;
2560 }                                                1993 }
2561 EXPORT_SYMBOL(queue_delayed_work_on);            1994 EXPORT_SYMBOL(queue_delayed_work_on);
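
A hedged usage sketch for queue_delayed_work_on() and its re-arming pattern; the polling handler and interval are hypothetical. Note the @delay == 0 special case documented in __queue_delayed_work() above.

#include <linux/jiffies.h>
#include <linux/workqueue.h>

static void example_poll_fn(struct work_struct *work)
{
        struct delayed_work *dwork = to_delayed_work(work);

        /* ... poll the hardware ..., then re-arm for the next interval */
        queue_delayed_work(system_wq, dwork, msecs_to_jiffies(100));
}

static DECLARE_DELAYED_WORK(example_poll, example_poll_fn);

static void example_start_polling(void)
{
        /* first expiry ~100ms from now, on whichever CPU is convenient */
        queue_delayed_work_on(WORK_CPU_UNBOUND, system_wq, &example_poll,
                              msecs_to_jiffies(100));
}
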
2562                                                  1995 
2563 /**                                              1996 /**
2564  * mod_delayed_work_on - modify delay of or q    1997  * mod_delayed_work_on - modify delay of or queue a delayed work on specific CPU
2565  * @cpu: CPU number to execute work on           1998  * @cpu: CPU number to execute work on
2566  * @wq: workqueue to use                         1999  * @wq: workqueue to use
2567  * @dwork: work to queue                         2000  * @dwork: work to queue
2568  * @delay: number of jiffies to wait before q    2001  * @delay: number of jiffies to wait before queueing
2569  *                                               2002  *
2570  * If @dwork is idle, equivalent to queue_del    2003  * If @dwork is idle, equivalent to queue_delayed_work_on(); otherwise,
2571  * modify @dwork's timer so that it expires a    2004  * modify @dwork's timer so that it expires after @delay.  If @delay is
2572  * zero, @work is guaranteed to be scheduled     2005  * zero, @work is guaranteed to be scheduled immediately regardless of its
2573  * current state.                                2006  * current state.
2574  *                                               2007  *
2575  * Return: %false if @dwork was idle and queu    2008  * Return: %false if @dwork was idle and queued, %true if @dwork was
2576  * pending and its timer was modified.           2009  * pending and its timer was modified.
2577  *                                               2010  *
2578  * This function is safe to call from any con    2011  * This function is safe to call from any context including IRQ handler.
2579  * See try_to_grab_pending() for details.        2012  * See try_to_grab_pending() for details.
2580  */                                              2013  */
2581 bool mod_delayed_work_on(int cpu, struct work    2014 bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
2582                          struct delayed_work     2015                          struct delayed_work *dwork, unsigned long delay)
2583 {                                                2016 {
2584         unsigned long irq_flags;              !! 2017         unsigned long flags;
2585         bool ret;                             !! 2018         int ret;
2586                                                  2019 
2587         ret = work_grab_pending(&dwork->work, !! 2020         do {
                                                   >> 2021                 ret = try_to_grab_pending(&dwork->work, true, &flags);
                                                   >> 2022         } while (unlikely(ret == -EAGAIN));
2588                                                  2023 
2589         if (!clear_pending_if_disabled(&dwork !! 2024         if (likely(ret >= 0)) {
2590                 __queue_delayed_work(cpu, wq,    2025                 __queue_delayed_work(cpu, wq, dwork, delay);
                                                   >> 2026                 local_irq_restore(flags);
                                                   >> 2027         }
2591                                                  2028 
2592         local_irq_restore(irq_flags);         !! 2029         /* -ENOENT from try_to_grab_pending() becomes %true */
2593         return ret;                              2030         return ret;
2594 }                                                2031 }
2595 EXPORT_SYMBOL_GPL(mod_delayed_work_on);          2032 EXPORT_SYMBOL_GPL(mod_delayed_work_on);
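
A hedged sketch of the classic debounce/watchdog pattern built on mod_delayed_work(); the handler and timeout are hypothetical. Each call either queues the work or just pushes its timer out, so a single pending instance is maintained.

#include <linux/jiffies.h>
#include <linux/workqueue.h>

static void example_timeout_fn(struct work_struct *work)
{
        /* no activity has been reported for a full second */
}

static DECLARE_DELAYED_WORK(example_timeout, example_timeout_fn);

static void example_saw_activity(void)
{
        /* postpone the timeout by another second on every activity event */
        mod_delayed_work(system_wq, &example_timeout, HZ);
}
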
2596                                                  2033 
2597 static void rcu_work_rcufn(struct rcu_head *r    2034 static void rcu_work_rcufn(struct rcu_head *rcu)
2598 {                                                2035 {
2599         struct rcu_work *rwork = container_of    2036         struct rcu_work *rwork = container_of(rcu, struct rcu_work, rcu);
2600                                                  2037 
2601         /* read the comment in __queue_work()    2038         /* read the comment in __queue_work() */
2602         local_irq_disable();                     2039         local_irq_disable();
2603         __queue_work(WORK_CPU_UNBOUND, rwork-    2040         __queue_work(WORK_CPU_UNBOUND, rwork->wq, &rwork->work);
2604         local_irq_enable();                      2041         local_irq_enable();
2605 }                                                2042 }
2606                                                  2043 
2607 /**                                              2044 /**
2608  * queue_rcu_work - queue work after a RCU gr    2045  * queue_rcu_work - queue work after a RCU grace period
2609  * @wq: workqueue to use                         2046  * @wq: workqueue to use
2610  * @rwork: work to queue                         2047  * @rwork: work to queue
2611  *                                               2048  *
2612  * Return: %false if @rwork was already pendi    2049  * Return: %false if @rwork was already pending, %true otherwise.  Note
2613  * that a full RCU grace period is guaranteed    2050  * that a full RCU grace period is guaranteed only after a %true return.
2614  * While @rwork is guaranteed to be executed     2051  * While @rwork is guaranteed to be executed after a %false return, the
2615  * execution may happen before a full RCU gra    2052  * execution may happen before a full RCU grace period has passed.
2616  */                                              2053  */
2617 bool queue_rcu_work(struct workqueue_struct *    2054 bool queue_rcu_work(struct workqueue_struct *wq, struct rcu_work *rwork)
2618 {                                                2055 {
2619         struct work_struct *work = &rwork->wo    2056         struct work_struct *work = &rwork->work;
2620                                                  2057 
2621         /*                                    !! 2058         if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
2622          * rcu_work can't be canceled or disa << 
2623          * inside @rwork and disabled the inn << 
2624          */                                   << 
2625         if (!test_and_set_bit(WORK_STRUCT_PEN << 
2626             !WARN_ON_ONCE(clear_pending_if_di << 
2627                 rwork->wq = wq;                  2059                 rwork->wq = wq;
2628                 call_rcu_hurry(&rwork->rcu, r    2060                 call_rcu_hurry(&rwork->rcu, rcu_work_rcufn);
2629                 return true;                     2061                 return true;
2630         }                                        2062         }
2631                                                  2063 
2632         return false;                            2064         return false;
2633 }                                                2065 }
2634 EXPORT_SYMBOL(queue_rcu_work);                   2066 EXPORT_SYMBOL(queue_rcu_work);
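
A hedged sketch of the typical caller pattern for the function above: defer freeing
an object until an RCU grace period has elapsed.  struct foo, foo_reclaim() and
foo_release() are illustrative names only.

#include <linux/workqueue.h>
#include <linux/slab.h>

struct foo {
	struct rcu_work rwork;
	/* payload that rcu_read_lock() readers may still be dereferencing */
};

static void foo_reclaim(struct work_struct *work)
{
	struct foo *f = container_of(to_rcu_work(work), struct foo, rwork);

	/*
	 * Process context.  When queue_rcu_work() returned %true, a full
	 * grace period has passed, so no reader can still see @f.
	 */
	kfree(f);
}

static void foo_release(struct foo *f)
{
	INIT_RCU_WORK(&f->rwork, foo_reclaim);
	queue_rcu_work(system_wq, &f->rwork);
}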
2635                                                  2067 
2636 static struct worker *alloc_worker(int node)     2068 static struct worker *alloc_worker(int node)
2637 {                                                2069 {
2638         struct worker *worker;                   2070         struct worker *worker;
2639                                                  2071 
2640         worker = kzalloc_node(sizeof(*worker)    2072         worker = kzalloc_node(sizeof(*worker), GFP_KERNEL, node);
2641         if (worker) {                            2073         if (worker) {
2642                 INIT_LIST_HEAD(&worker->entry    2074                 INIT_LIST_HEAD(&worker->entry);
2643                 INIT_LIST_HEAD(&worker->sched    2075                 INIT_LIST_HEAD(&worker->scheduled);
2644                 INIT_LIST_HEAD(&worker->node)    2076                 INIT_LIST_HEAD(&worker->node);
2645                 /* on creation a worker is in    2077                 /* on creation a worker is in !idle && prep state */
2646                 worker->flags = WORKER_PREP;     2078                 worker->flags = WORKER_PREP;
2647         }                                        2079         }
2648         return worker;                           2080         return worker;
2649 }                                                2081 }
2650                                                  2082 
2651 static cpumask_t *pool_allowed_cpus(struct wo    2083 static cpumask_t *pool_allowed_cpus(struct worker_pool *pool)
2652 {                                                2084 {
2653         if (pool->cpu < 0 && pool->attrs->aff    2085         if (pool->cpu < 0 && pool->attrs->affn_strict)
2654                 return pool->attrs->__pod_cpu    2086                 return pool->attrs->__pod_cpumask;
2655         else                                     2087         else
2656                 return pool->attrs->cpumask;     2088                 return pool->attrs->cpumask;
2657 }                                                2089 }
2658                                                  2090 
2659 /**                                              2091 /**
2660  * worker_attach_to_pool() - attach a worker     2092  * worker_attach_to_pool() - attach a worker to a pool
2661  * @worker: worker to be attached                2093  * @worker: worker to be attached
2662  * @pool: the target pool                        2094  * @pool: the target pool
2663  *                                               2095  *
2664  * Attach @worker to @pool.  Once attached, t    2096  * Attach @worker to @pool.  Once attached, the %WORKER_UNBOUND flag and
2665  * cpu-binding of @worker are kept coordinate    2097  * cpu-binding of @worker are kept coordinated with the pool across
2666  * cpu-[un]hotplugs.                             2098  * cpu-[un]hotplugs.
2667  */                                              2099  */
2668 static void worker_attach_to_pool(struct work    2100 static void worker_attach_to_pool(struct worker *worker,
2669                                   struct work !! 2101                                    struct worker_pool *pool)
2670 {                                                2102 {
2671         mutex_lock(&wq_pool_attach_mutex);       2103         mutex_lock(&wq_pool_attach_mutex);
2672                                                  2104 
2673         /*                                       2105         /*
2674          * The wq_pool_attach_mutex ensures % !! 2106          * The wq_pool_attach_mutex ensures %POOL_DISASSOCIATED remains
2675          * across this function. See the comm !! 2107          * stable across this function.  See the comments above the flag
2676          * details. BH workers are, while per !! 2108          * definition for details.
2677          */                                      2109          */
2678         if (pool->flags & POOL_DISASSOCIATED) !! 2110         if (pool->flags & POOL_DISASSOCIATED)
2679                 worker->flags |= WORKER_UNBOU    2111                 worker->flags |= WORKER_UNBOUND;
2680         } else {                              !! 2112         else
2681                 WARN_ON_ONCE(pool->flags & PO << 
2682                 kthread_set_per_cpu(worker->t    2113                 kthread_set_per_cpu(worker->task, pool->cpu);
2683         }                                     << 
2684                                                  2114 
2685         if (worker->rescue_wq)                   2115         if (worker->rescue_wq)
2686                 set_cpus_allowed_ptr(worker->    2116                 set_cpus_allowed_ptr(worker->task, pool_allowed_cpus(pool));
2687                                                  2117 
2688         list_add_tail(&worker->node, &pool->w    2118         list_add_tail(&worker->node, &pool->workers);
2689         worker->pool = pool;                     2119         worker->pool = pool;
2690                                                  2120 
2691         mutex_unlock(&wq_pool_attach_mutex);     2121         mutex_unlock(&wq_pool_attach_mutex);
2692 }                                                2122 }
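
For orientation, the attach/detach pair is used outside of worker creation as well,
most notably by the rescuer.  A rough, simplified sketch of that lifecycle follows;
rescue_one_pool() is an illustrative name, and mayday-list handling and most locking
from the real rescuer_thread() are elided.

static void rescue_one_pool(struct worker *rescuer, struct pool_workqueue *pwq)
{
	struct worker_pool *pool = pwq->pool;

	/* follow the pool's cpumask and hotplug state while working on it */
	worker_attach_to_pool(rescuer, pool);

	raw_spin_lock_irq(&pool->lock);
	/* move the pwq's stalled work items over and process them */
	raw_spin_unlock_irq(&pool->lock);

	/* drop back off the pool once the emergency is over */
	worker_detach_from_pool(rescuer);
}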
2693                                                  2123 
2694 static void unbind_worker(struct worker *work << 
2695 {                                             << 
2696         lockdep_assert_held(&wq_pool_attach_m << 
2697                                               << 
2698         kthread_set_per_cpu(worker->task, -1) << 
2699         if (cpumask_intersects(wq_unbound_cpu << 
2700                 WARN_ON_ONCE(set_cpus_allowed << 
2701         else                                  << 
2702                 WARN_ON_ONCE(set_cpus_allowed << 
2703 }                                             << 
2704                                               << 
2705                                               << 
2706 static void detach_worker(struct worker *work << 
2707 {                                             << 
2708         lockdep_assert_held(&wq_pool_attach_m << 
2709                                               << 
2710         unbind_worker(worker);                << 
2711         list_del(&worker->node);              << 
2712 }                                             << 
2713                                               << 
2714 /**                                              2124 /**
2715  * worker_detach_from_pool() - detach a worke    2125  * worker_detach_from_pool() - detach a worker from its pool
2716  * @worker: worker which is attached to its p    2126  * @worker: worker which is attached to its pool
2717  *                                               2127  *
2718  * Undo the attaching which had been done in     2128  * Undo the attaching which had been done in worker_attach_to_pool().  The
2719  * caller worker shouldn't access the pool af    2129  * caller worker shouldn't access the pool after detaching unless it holds
2720  * another reference to the pool.                 2130  * another reference to the pool.
2721  */                                              2131  */
2722 static void worker_detach_from_pool(struct wo    2132 static void worker_detach_from_pool(struct worker *worker)
2723 {                                                2133 {
2724         struct worker_pool *pool = worker->po    2134         struct worker_pool *pool = worker->pool;
2725                                               !! 2135         struct completion *detach_completion = NULL;
2726         /* there is one permanent BH worker p << 
2727         WARN_ON_ONCE(pool->flags & POOL_BH);  << 
2728                                                  2136 
2729         mutex_lock(&wq_pool_attach_mutex);       2137         mutex_lock(&wq_pool_attach_mutex);
2730         detach_worker(worker);                !! 2138 
                                                   >> 2139         kthread_set_per_cpu(worker->task, -1);
                                                   >> 2140         list_del(&worker->node);
2731         worker->pool = NULL;                     2141         worker->pool = NULL;
                                                   >> 2142 
                                                   >> 2143         if (list_empty(&pool->workers) && list_empty(&pool->dying_workers))
                                                   >> 2144                 detach_completion = pool->detach_completion;
2732         mutex_unlock(&wq_pool_attach_mutex);     2145         mutex_unlock(&wq_pool_attach_mutex);
2733                                                  2146 
2734         /* clear leftover flags without pool-    2147         /* clear leftover flags without pool->lock after it is detached */
2735         worker->flags &= ~(WORKER_UNBOUND | W    2148         worker->flags &= ~(WORKER_UNBOUND | WORKER_REBOUND);
2736 }                                             << 
2737                                               << 
2738 static int format_worker_id(char *buf, size_t << 
2739                             struct worker_poo << 
2740 {                                             << 
2741         if (worker->rescue_wq)                << 
2742                 return scnprintf(buf, size, " << 
2743                                  worker->resc << 
2744                                                  2149 
2745         if (pool) {                           !! 2150         if (detach_completion)
2746                 if (pool->cpu >= 0)           !! 2151                 complete(detach_completion);
2747                         return scnprintf(buf, << 
2748                                          pool << 
2749                                          pool << 
2750                 else                          << 
2751                         return scnprintf(buf, << 
2752                                          pool << 
2753         } else {                              << 
2754                 return scnprintf(buf, size, " << 
2755         }                                     << 
2756 }                                                2152 }
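
To make the id formats above concrete, the resulting task names look roughly as
follows (exact ids depend on runtime state; the rescuer example name is
illustrative):

/*
 *   kworker/3:1        - worker 1 of the normal-priority pool on CPU 3
 *   kworker/3:1H       - worker 1 of the high-priority pool on CPU 3
 *                        (the "H" suffix comes from attrs->nice < 0)
 *   kworker/u8:2       - worker 2 of unbound pool 8
 *   kworker/R-mm_percpu_wq - a rescuer, named after its workqueue
 *   kworker/dying      - fallback name for a worker with no pool, i.e. one
 *                        that is about to be reaped
 */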
2757                                                  2153 
2758 /**                                              2154 /**
2759  * create_worker - create a new workqueue wor    2155  * create_worker - create a new workqueue worker
2760  * @pool: pool the new worker will belong to     2156  * @pool: pool the new worker will belong to
2761  *                                               2157  *
2762  * Create and start a new worker which is att    2158  * Create and start a new worker which is attached to @pool.
2763  *                                               2159  *
2764  * CONTEXT:                                      2160  * CONTEXT:
2765  * Might sleep.  Does GFP_KERNEL allocations.    2161  * Might sleep.  Does GFP_KERNEL allocations.
2766  *                                               2162  *
2767  * Return:                                       2163  * Return:
2768  * Pointer to the newly created worker.          2164  * Pointer to the newly created worker.
2769  */                                              2165  */
2770 static struct worker *create_worker(struct wo    2166 static struct worker *create_worker(struct worker_pool *pool)
2771 {                                                2167 {
2772         struct worker *worker;                   2168         struct worker *worker;
2773         int id;                                  2169         int id;
                                                   >> 2170         char id_buf[23];
2774                                                  2171 
2775         /* ID is needed to determine kthread     2172         /* ID is needed to determine kthread name */
2776         id = ida_alloc(&pool->worker_ida, GFP    2173         id = ida_alloc(&pool->worker_ida, GFP_KERNEL);
2777         if (id < 0) {                            2174         if (id < 0) {
2778                 pr_err_once("workqueue: Faile    2175                 pr_err_once("workqueue: Failed to allocate a worker ID: %pe\n",
2779                             ERR_PTR(id));        2176                             ERR_PTR(id));
2780                 return NULL;                     2177                 return NULL;
2781         }                                        2178         }
2782                                                  2179 
2783         worker = alloc_worker(pool->node);       2180         worker = alloc_worker(pool->node);
2784         if (!worker) {                           2181         if (!worker) {
2785                 pr_err_once("workqueue: Faile    2182                 pr_err_once("workqueue: Failed to allocate a worker\n");
2786                 goto fail;                       2183                 goto fail;
2787         }                                        2184         }
2788                                                  2185 
2789         worker->id = id;                         2186         worker->id = id;
2790                                                  2187 
2791         if (!(pool->flags & POOL_BH)) {       !! 2188         if (pool->cpu >= 0)
2792                 char id_buf[WORKER_ID_LEN];   !! 2189                 snprintf(id_buf, sizeof(id_buf), "%d:%d%s", pool->cpu, id,
                                                   >> 2190                          pool->attrs->nice < 0  ? "H" : "");
                                                   >> 2191         else
                                                   >> 2192                 snprintf(id_buf, sizeof(id_buf), "u%d:%d", pool->id, id);
2793                                                  2193 
2794                 format_worker_id(id_buf, size !! 2194         worker->task = kthread_create_on_node(worker_thread, worker, pool->node,
2795                 worker->task = kthread_create !! 2195                                               "kworker/%s", id_buf);
2796                                               !! 2196         if (IS_ERR(worker->task)) {
2797                 if (IS_ERR(worker->task)) {   !! 2197                 if (PTR_ERR(worker->task) == -EINTR) {
2798                         if (PTR_ERR(worker->t !! 2198                         pr_err("workqueue: Interrupted when creating a worker thread \"kworker/%s\"\n",
2799                                 pr_err("workq !! 2199                                id_buf);
2800                                        id_buf !! 2200                 } else {
2801                         } else {              !! 2201                         pr_err_once("workqueue: Failed to create a worker thread: %pe",
2802                                 pr_err_once(" !! 2202                                     worker->task);
2803                                             w << 
2804                         }                     << 
2805                         goto fail;            << 
2806                 }                                2203                 }
2807                                               !! 2204                 goto fail;
2808                 set_user_nice(worker->task, p << 
2809                 kthread_bind_mask(worker->tas << 
2810         }                                        2205         }
2811                                                  2206 
                                                   >> 2207         set_user_nice(worker->task, pool->attrs->nice);
                                                   >> 2208         kthread_bind_mask(worker->task, pool_allowed_cpus(pool));
                                                   >> 2209 
2812         /* successful, attach the worker to t    2210         /* successful, attach the worker to the pool */
2813         worker_attach_to_pool(worker, pool);     2211         worker_attach_to_pool(worker, pool);
2814                                                  2212 
2815         /* start the newly created worker */     2213         /* start the newly created worker */
2816         raw_spin_lock_irq(&pool->lock);          2214         raw_spin_lock_irq(&pool->lock);
2817                                                  2215 
2818         worker->pool->nr_workers++;              2216         worker->pool->nr_workers++;
2819         worker_enter_idle(worker);               2217         worker_enter_idle(worker);
                                                   >> 2218         kick_pool(pool);
2820                                                  2219 
2821         /*                                       2220         /*
2822          * @worker is waiting on a completion    2221          * @worker is waiting on a completion in kthread() and will trigger hung
2823          * check if not woken up soon. As kic !! 2222          * check if not woken up soon. As kick_pool() might not have woken it
2824          * wake it up explicitly.             !! 2223          * up, wake it up explicitly once more.
2825          */                                      2224          */
2826         if (worker->task)                     !! 2225         wake_up_process(worker->task);
2827                 wake_up_process(worker->task) << 
2828                                                  2226 
2829         raw_spin_unlock_irq(&pool->lock);        2227         raw_spin_unlock_irq(&pool->lock);
2830                                                  2228 
2831         return worker;                           2229         return worker;
2832                                                  2230 
2833 fail:                                            2231 fail:
2834         ida_free(&pool->worker_ida, id);         2232         ida_free(&pool->worker_ida, id);
2835         kfree(worker);                           2233         kfree(worker);
2836         return NULL;                             2234         return NULL;
2837 }                                                2235 }
2838                                                  2236 
2839 static void detach_dying_workers(struct list_ !! 2237 static void unbind_worker(struct worker *worker)
2840 {                                                2238 {
2841         struct worker *worker;                !! 2239         lockdep_assert_held(&wq_pool_attach_mutex);
2842                                                  2240 
2843         list_for_each_entry(worker, cull_list !! 2241         kthread_set_per_cpu(worker->task, -1);
2844                 detach_worker(worker);        !! 2242         if (cpumask_intersects(wq_unbound_cpumask, cpu_active_mask))
                                                   >> 2243                 WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, wq_unbound_cpumask) < 0);
                                                   >> 2244         else
                                                   >> 2245                 WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, cpu_possible_mask) < 0);
2845 }                                                2246 }
2846                                                  2247 
2847 static void reap_dying_workers(struct list_he !! 2248 static void wake_dying_workers(struct list_head *cull_list)
2848 {                                                2249 {
2849         struct worker *worker, *tmp;             2250         struct worker *worker, *tmp;
2850                                                  2251 
2851         list_for_each_entry_safe(worker, tmp,    2252         list_for_each_entry_safe(worker, tmp, cull_list, entry) {
2852                 list_del_init(&worker->entry)    2253                 list_del_init(&worker->entry);
2853                 kthread_stop_put(worker->task !! 2254                 unbind_worker(worker);
2854                 kfree(worker);                !! 2255                 /*
                                                   >> 2256                  * If the worker was somehow already running, then it had to be
                                                   >> 2257                  * in pool->idle_list when set_worker_dying() happened or we
                                                   >> 2258                  * wouldn't have gotten here.
                                                   >> 2259                  *
                                                   >> 2260                  * Thus, the worker must either have observed the WORKER_DIE
                                                   >> 2261                  * flag, or have set its state to TASK_IDLE. Either way, the
                                                   >> 2262                  * below will be observed by the worker and is safe to do
                                                   >> 2263                  * outside of pool->lock.
                                                   >> 2264                  */
                                                   >> 2265                 wake_up_process(worker->task);
2855         }                                        2266         }
2856 }                                                2267 }
2857                                                  2268 
2858 /**                                              2269 /**
2859  * set_worker_dying - Tag a worker for destru    2270  * set_worker_dying - Tag a worker for destruction
2860  * @worker: worker to be destroyed               2271  * @worker: worker to be destroyed
2861  * @list: transfer worker away from its pool-    2272  * @list: transfer worker away from its pool->idle_list and into list
2862  *                                               2273  *
2863  * Tag @worker for destruction and adjust @po    2274  * Tag @worker for destruction and adjust @pool stats accordingly.  The worker
2864  * should be idle.                               2275  * should be idle.
2865  *                                               2276  *
2866  * CONTEXT:                                      2277  * CONTEXT:
2867  * raw_spin_lock_irq(pool->lock).                2278  * raw_spin_lock_irq(pool->lock).
2868  */                                              2279  */
2869 static void set_worker_dying(struct worker *w    2280 static void set_worker_dying(struct worker *worker, struct list_head *list)
2870 {                                                2281 {
2871         struct worker_pool *pool = worker->po    2282         struct worker_pool *pool = worker->pool;
2872                                                  2283 
2873         lockdep_assert_held(&pool->lock);        2284         lockdep_assert_held(&pool->lock);
2874         lockdep_assert_held(&wq_pool_attach_m    2285         lockdep_assert_held(&wq_pool_attach_mutex);
2875                                                  2286 
2876         /* sanity check frenzy */                2287         /* sanity check frenzy */
2877         if (WARN_ON(worker->current_work) ||     2288         if (WARN_ON(worker->current_work) ||
2878             WARN_ON(!list_empty(&worker->sche    2289             WARN_ON(!list_empty(&worker->scheduled)) ||
2879             WARN_ON(!(worker->flags & WORKER_    2290             WARN_ON(!(worker->flags & WORKER_IDLE)))
2880                 return;                          2291                 return;
2881                                                  2292 
2882         pool->nr_workers--;                      2293         pool->nr_workers--;
2883         pool->nr_idle--;                         2294         pool->nr_idle--;
2884                                                  2295 
2885         worker->flags |= WORKER_DIE;             2296         worker->flags |= WORKER_DIE;
2886                                                  2297 
2887         list_move(&worker->entry, list);         2298         list_move(&worker->entry, list);
2888                                               !! 2299         list_move(&worker->node, &pool->dying_workers);
2889         /* get an extra task struct reference << 
2890         get_task_struct(worker->task);        << 
2891 }                                                2300 }
2892                                                  2301 
2893 /**                                              2302 /**
2894  * idle_worker_timeout - check if some idle w    2303  * idle_worker_timeout - check if some idle workers can now be deleted.
2895  * @t: The pool's idle_timer that just expire    2304  * @t: The pool's idle_timer that just expired
2896  *                                               2305  *
2897  * The timer is armed in worker_enter_idle().    2306  * The timer is armed in worker_enter_idle(). Note that it isn't disarmed in
2898  * worker_leave_idle(), as a worker flicking     2307  * worker_leave_idle(), as a worker flicking between idle and active while its
2899  * pool is at the too_many_workers() tipping     2308  * pool is at the too_many_workers() tipping point would cause too much timer
2900  * housekeeping overhead. Since IDLE_WORKER_T    2309  * housekeeping overhead. Since IDLE_WORKER_TIMEOUT is long enough, we just let
2901  * it expire and re-evaluate things from ther    2310  * it expire and re-evaluate things from there.
2902  */                                              2311  */
2903 static void idle_worker_timeout(struct timer_    2312 static void idle_worker_timeout(struct timer_list *t)
2904 {                                                2313 {
2905         struct worker_pool *pool = from_timer    2314         struct worker_pool *pool = from_timer(pool, t, idle_timer);
2906         bool do_cull = false;                    2315         bool do_cull = false;
2907                                                  2316 
2908         if (work_pending(&pool->idle_cull_wor    2317         if (work_pending(&pool->idle_cull_work))
2909                 return;                          2318                 return;
2910                                                  2319 
2911         raw_spin_lock_irq(&pool->lock);          2320         raw_spin_lock_irq(&pool->lock);
2912                                                  2321 
2913         if (too_many_workers(pool)) {            2322         if (too_many_workers(pool)) {
2914                 struct worker *worker;           2323                 struct worker *worker;
2915                 unsigned long expires;           2324                 unsigned long expires;
2916                                                  2325 
2917                 /* idle_list is kept in LIFO     2326                 /* idle_list is kept in LIFO order, check the last one */
2918                 worker = list_last_entry(&poo !! 2327                 worker = list_entry(pool->idle_list.prev, struct worker, entry);
2919                 expires = worker->last_active    2328                 expires = worker->last_active + IDLE_WORKER_TIMEOUT;
2920                 do_cull = !time_before(jiffie    2329                 do_cull = !time_before(jiffies, expires);
2921                                                  2330 
2922                 if (!do_cull)                    2331                 if (!do_cull)
2923                         mod_timer(&pool->idle    2332                         mod_timer(&pool->idle_timer, expires);
2924         }                                        2333         }
2925         raw_spin_unlock_irq(&pool->lock);        2334         raw_spin_unlock_irq(&pool->lock);
2926                                                  2335 
2927         if (do_cull)                             2336         if (do_cull)
2928                 queue_work(system_unbound_wq,    2337                 queue_work(system_unbound_wq, &pool->idle_cull_work);
2929 }                                                2338 }
2930                                                  2339 
2931 /**                                              2340 /**
2932  * idle_cull_fn - cull workers that have been    2341  * idle_cull_fn - cull workers that have been idle for too long.
2933  * @work: the pool's work for handling these     2342  * @work: the pool's work for handling these idle workers
2934  *                                               2343  *
2935  * This goes through a pool's idle workers an    2344  * This goes through a pool's idle workers and gets rid of those that have been
2936  * idle for at least IDLE_WORKER_TIMEOUT seco    2345  * idle for at least IDLE_WORKER_TIMEOUT seconds.
2937  *                                               2346  *
2938  * We don't want to disturb isolated CPUs bec    2347  * We don't want to disturb isolated CPUs because of a pcpu kworker being
2939  * culled, so this also resets worker affinit    2348  * culled, so this also resets worker affinity. This requires a sleepable
2940  * context, hence the split between timer cal    2349  * context, hence the split between timer callback and work item.
2941  */                                              2350  */
2942 static void idle_cull_fn(struct work_struct *    2351 static void idle_cull_fn(struct work_struct *work)
2943 {                                                2352 {
2944         struct worker_pool *pool = container_    2353         struct worker_pool *pool = container_of(work, struct worker_pool, idle_cull_work);
2945         LIST_HEAD(cull_list);                    2354         LIST_HEAD(cull_list);
2946                                                  2355 
2947         /*                                       2356         /*
2948          * Grabbing wq_pool_attach_mutex here    2357          * Grabbing wq_pool_attach_mutex here ensures an already-running worker
2949          * cannot proceed beyond set_pf_worke !! 2358          * cannot proceed beyond worker_detach_from_pool() in its self-destruct
2950          * This is required as a previously-p !! 2359          * path. This is required as a previously-preempted worker could run after
2951          * set_worker_dying() has happened bu !! 2360          * set_worker_dying() has happened but before wake_dying_workers() did.
2952          */                                      2361          */
2953         mutex_lock(&wq_pool_attach_mutex);       2362         mutex_lock(&wq_pool_attach_mutex);
2954         raw_spin_lock_irq(&pool->lock);          2363         raw_spin_lock_irq(&pool->lock);
2955                                                  2364 
2956         while (too_many_workers(pool)) {         2365         while (too_many_workers(pool)) {
2957                 struct worker *worker;           2366                 struct worker *worker;
2958                 unsigned long expires;           2367                 unsigned long expires;
2959                                                  2368 
2960                 worker = list_last_entry(&poo !! 2369                 worker = list_entry(pool->idle_list.prev, struct worker, entry);
2961                 expires = worker->last_active    2370                 expires = worker->last_active + IDLE_WORKER_TIMEOUT;
2962                                                  2371 
2963                 if (time_before(jiffies, expi    2372                 if (time_before(jiffies, expires)) {
2964                         mod_timer(&pool->idle    2373                         mod_timer(&pool->idle_timer, expires);
2965                         break;                   2374                         break;
2966                 }                                2375                 }
2967                                                  2376 
2968                 set_worker_dying(worker, &cul    2377                 set_worker_dying(worker, &cull_list);
2969         }                                        2378         }
2970                                                  2379 
2971         raw_spin_unlock_irq(&pool->lock);        2380         raw_spin_unlock_irq(&pool->lock);
2972         detach_dying_workers(&cull_list);     !! 2381         wake_dying_workers(&cull_list);
2973         mutex_unlock(&wq_pool_attach_mutex);     2382         mutex_unlock(&wq_pool_attach_mutex);
2974                                               << 
2975         reap_dying_workers(&cull_list);       << 
2976 }                                                2383 }
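
The timer-plus-work split above is a general pattern: the timer callback runs in
atomic context, so anything that must sleep (here, resetting the affinity of culled
workers) is deferred to a work item on system_unbound_wq.  A generic, hedged sketch
of the same shape; my_obj, my_timer_fn, my_cull_fn and my_obj_init are illustrative
names:

#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct my_obj {
	struct timer_list timer;	/* atomic context: only decides */
	struct work_struct cull_work;	/* sleepable context: does the work */
};

static void my_cull_fn(struct work_struct *work)
{
	struct my_obj *obj = container_of(work, struct my_obj, cull_work);

	/* may sleep here: take mutexes, change cpumasks, etc. */

	/* re-arm for the next period */
	mod_timer(&obj->timer, jiffies + HZ);
}

static void my_timer_fn(struct timer_list *t)
{
	struct my_obj *obj = from_timer(obj, t, timer);

	/* cheap check only, then punt the sleepable part to a workqueue */
	queue_work(system_unbound_wq, &obj->cull_work);
}

static void my_obj_init(struct my_obj *obj)
{
	timer_setup(&obj->timer, my_timer_fn, 0);
	INIT_WORK(&obj->cull_work, my_cull_fn);
	mod_timer(&obj->timer, jiffies + HZ);
}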
2977                                                  2384 
2978 static void send_mayday(struct work_struct *w    2385 static void send_mayday(struct work_struct *work)
2979 {                                                2386 {
2980         struct pool_workqueue *pwq = get_work    2387         struct pool_workqueue *pwq = get_work_pwq(work);
2981         struct workqueue_struct *wq = pwq->wq    2388         struct workqueue_struct *wq = pwq->wq;
2982                                                  2389 
2983         lockdep_assert_held(&wq_mayday_lock);    2390         lockdep_assert_held(&wq_mayday_lock);
2984                                                  2391 
2985         if (!wq->rescuer)                        2392         if (!wq->rescuer)
2986                 return;                          2393                 return;
2987                                                  2394 
2988         /* mayday mayday mayday */               2395         /* mayday mayday mayday */
2989         if (list_empty(&pwq->mayday_node)) {     2396         if (list_empty(&pwq->mayday_node)) {
2990                 /*                               2397                 /*
2991                  * If @pwq is for an unbound     2398                  * If @pwq is for an unbound wq, its base ref may be put at
2992                  * any time due to an attribu    2399                  * any time due to an attribute change.  Pin @pwq until the
2993                  * rescuer is done with it.      2400                  * rescuer is done with it.
2994                  */                              2401                  */
2995                 get_pwq(pwq);                    2402                 get_pwq(pwq);
2996                 list_add_tail(&pwq->mayday_no    2403                 list_add_tail(&pwq->mayday_node, &wq->maydays);
2997                 wake_up_process(wq->rescuer->    2404                 wake_up_process(wq->rescuer->task);
2998                 pwq->stats[PWQ_STAT_MAYDAY]++    2405                 pwq->stats[PWQ_STAT_MAYDAY]++;
2999         }                                        2406         }
3000 }                                                2407 }
3001                                                  2408 
3002 static void pool_mayday_timeout(struct timer_    2409 static void pool_mayday_timeout(struct timer_list *t)
3003 {                                                2410 {
3004         struct worker_pool *pool = from_timer    2411         struct worker_pool *pool = from_timer(pool, t, mayday_timer);
3005         struct work_struct *work;                2412         struct work_struct *work;
3006                                                  2413 
3007         raw_spin_lock_irq(&pool->lock);          2414         raw_spin_lock_irq(&pool->lock);
3008         raw_spin_lock(&wq_mayday_lock);          2415         raw_spin_lock(&wq_mayday_lock);         /* for wq->maydays */
3009                                                  2416 
3010         if (need_to_create_worker(pool)) {       2417         if (need_to_create_worker(pool)) {
3011                 /*                               2418                 /*
3012                  * We've been trying to creat    2419                  * We've been trying to create a new worker but
3013                  * haven't been successful.      2420                  * haven't been successful.  We might be hitting an
3014                  * allocation deadlock.  Send    2421                  * allocation deadlock.  Send distress signals to
3015                  * rescuers.                     2422                  * rescuers.
3016                  */                              2423                  */
3017                 list_for_each_entry(work, &po    2424                 list_for_each_entry(work, &pool->worklist, entry)
3018                         send_mayday(work);       2425                         send_mayday(work);
3019         }                                        2426         }
3020                                                  2427 
3021         raw_spin_unlock(&wq_mayday_lock);        2428         raw_spin_unlock(&wq_mayday_lock);
3022         raw_spin_unlock_irq(&pool->lock);        2429         raw_spin_unlock_irq(&pool->lock);
3023                                                  2430 
3024         mod_timer(&pool->mayday_timer, jiffie    2431         mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INTERVAL);
3025 }                                                2432 }
3026                                                  2433 
3027 /**                                              2434 /**
3028  * maybe_create_worker - create a new worker     2435  * maybe_create_worker - create a new worker if necessary
3029  * @pool: pool to create a new worker for        2436  * @pool: pool to create a new worker for
3030  *                                               2437  *
3031  * Create a new worker for @pool if necessary    2438  * Create a new worker for @pool if necessary.  @pool is guaranteed to
3032  * have at least one idle worker on return fr    2439  * have at least one idle worker on return from this function.  If
3033  * creating a new worker takes longer than MA    2440  * creating a new worker takes longer than MAYDAY_INTERVAL, mayday is
3034  * sent to all rescuers with works scheduled     2441  * sent to all rescuers with works scheduled on @pool to resolve
3035  * possible allocation deadlock.                 2442  * possible allocation deadlock.
3036  *                                               2443  *
3037  * On return, need_to_create_worker() is guar    2444  * On return, need_to_create_worker() is guaranteed to be %false and
3038  * may_start_working() %true.                    2445  * may_start_working() %true.
3039  *                                               2446  *
3040  * LOCKING:                                      2447  * LOCKING:
3041  * raw_spin_lock_irq(pool->lock) which may be    2448  * raw_spin_lock_irq(pool->lock) which may be released and regrabbed
3042  * multiple times.  Does GFP_KERNEL allocatio    2449  * multiple times.  Does GFP_KERNEL allocations.  Called only from
3043  * manager.                                      2450  * manager.
3044  */                                              2451  */
3045 static void maybe_create_worker(struct worker    2452 static void maybe_create_worker(struct worker_pool *pool)
3046 __releases(&pool->lock)                          2453 __releases(&pool->lock)
3047 __acquires(&pool->lock)                          2454 __acquires(&pool->lock)
3048 {                                                2455 {
3049 restart:                                         2456 restart:
3050         raw_spin_unlock_irq(&pool->lock);        2457         raw_spin_unlock_irq(&pool->lock);
3051                                                  2458 
3052         /* if we don't make progress in MAYDA    2459         /* if we don't make progress in MAYDAY_INITIAL_TIMEOUT, call for help */
3053         mod_timer(&pool->mayday_timer, jiffie    2460         mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT);
3054                                                  2461 
3055         while (true) {                           2462         while (true) {
3056                 if (create_worker(pool) || !n    2463                 if (create_worker(pool) || !need_to_create_worker(pool))
3057                         break;                   2464                         break;
3058                                                  2465 
3059                 schedule_timeout_interruptibl    2466                 schedule_timeout_interruptible(CREATE_COOLDOWN);
3060                                                  2467 
3061                 if (!need_to_create_worker(po    2468                 if (!need_to_create_worker(pool))
3062                         break;                   2469                         break;
3063         }                                        2470         }
3064                                                  2471 
3065         del_timer_sync(&pool->mayday_timer);     2472         del_timer_sync(&pool->mayday_timer);
3066         raw_spin_lock_irq(&pool->lock);          2473         raw_spin_lock_irq(&pool->lock);
3067         /*                                       2474         /*
3068          * This is necessary even after a new    2475          * This is necessary even after a new worker was just successfully
3069          * created as @pool->lock was dropped    2476          * created as @pool->lock was dropped and the new worker might have
3070          * already become busy.                  2477          * already become busy.
3071          */                                      2478          */
3072         if (need_to_create_worker(pool))         2479         if (need_to_create_worker(pool))
3073                 goto restart;                    2480                 goto restart;
3074 }                                                2481 }
3075                                                  2482 
3076 /**                                              2483 /**
3077  * manage_workers - manage worker pool           2484  * manage_workers - manage worker pool
3078  * @worker: self                                 2485  * @worker: self
3079  *                                               2486  *
3080  * Assume the manager role and manage the wor    2487  * Assume the manager role and manage the worker pool @worker belongs
3081  * to.  At any given time, there can be only     2488  * to.  At any given time, there can be only zero or one manager per
3082  * pool.  The exclusion is handled automatica    2489  * pool.  The exclusion is handled automatically by this function.
3083  *                                               2490  *
3084  * The caller can safely start processing wor    2491  * The caller can safely start processing works on false return.  On
3085  * true return, it's guaranteed that need_to_    2492  * true return, it's guaranteed that need_to_create_worker() is false
3086  * and may_start_working() is true.              2493  * and may_start_working() is true.
3087  *                                               2494  *
3088  * CONTEXT:                                      2495  * CONTEXT:
3089  * raw_spin_lock_irq(pool->lock) which may be    2496  * raw_spin_lock_irq(pool->lock) which may be released and regrabbed
3090  * multiple times.  Does GFP_KERNEL allocatio    2497  * multiple times.  Does GFP_KERNEL allocations.
3091  *                                               2498  *
3092  * Return:                                       2499  * Return:
3093  * %false if the pool doesn't need management    2500  * %false if the pool doesn't need management and the caller can safely
3094  * start processing works, %true if managemen    2501  * start processing works, %true if management function was performed and
3095  * the conditions that the caller verified be    2502  * the conditions that the caller verified before calling the function may
3096  * no longer be true.                            2503  * no longer be true.
3097  */                                              2504  */
3098 static bool manage_workers(struct worker *wor    2505 static bool manage_workers(struct worker *worker)
3099 {                                                2506 {
3100         struct worker_pool *pool = worker->po    2507         struct worker_pool *pool = worker->pool;
3101                                                  2508 
3102         if (pool->flags & POOL_MANAGER_ACTIVE    2509         if (pool->flags & POOL_MANAGER_ACTIVE)
3103                 return false;                    2510                 return false;
3104                                                  2511 
3105         pool->flags |= POOL_MANAGER_ACTIVE;      2512         pool->flags |= POOL_MANAGER_ACTIVE;
3106         pool->manager = worker;                  2513         pool->manager = worker;
3107                                                  2514 
3108         maybe_create_worker(pool);               2515         maybe_create_worker(pool);
3109                                                  2516 
3110         pool->manager = NULL;                    2517         pool->manager = NULL;
3111         pool->flags &= ~POOL_MANAGER_ACTIVE;     2518         pool->flags &= ~POOL_MANAGER_ACTIVE;
3112         rcuwait_wake_up(&manager_wait);          2519         rcuwait_wake_up(&manager_wait);
3113         return true;                             2520         return true;
3114 }                                                2521 }
3115                                                  2522 
3116 /**                                              2523 /**
3117  * process_one_work - process single work        2524  * process_one_work - process single work
3118  * @worker: self                                 2525  * @worker: self
3119  * @work: work to process                        2526  * @work: work to process
3120  *                                               2527  *
3121  * Process @work.  This function contains all    2528  * Process @work.  This function contains all the logic necessary to
3122  * process a single work including synchroniz    2529  * process a single work including synchronization against and
3123  * interaction with other workers on the same    2530  * interaction with other workers on the same cpu, queueing and
3124  * flushing.  As long as context requirement     2531  * flushing.  As long as context requirement is met, any worker can
3125  * call this function to process a work.         2532  * call this function to process a work.
3126  *                                               2533  *
3127  * CONTEXT:                                      2534  * CONTEXT:
3128  * raw_spin_lock_irq(pool->lock) which is rel    2535  * raw_spin_lock_irq(pool->lock) which is released and regrabbed.
3129  */                                              2536  */
3130 static void process_one_work(struct worker *w    2537 static void process_one_work(struct worker *worker, struct work_struct *work)
3131 __releases(&pool->lock)                          2538 __releases(&pool->lock)
3132 __acquires(&pool->lock)                          2539 __acquires(&pool->lock)
3133 {                                                2540 {
3134         struct pool_workqueue *pwq = get_work    2541         struct pool_workqueue *pwq = get_work_pwq(work);
3135         struct worker_pool *pool = worker->po    2542         struct worker_pool *pool = worker->pool;
3136         unsigned long work_data;                 2543         unsigned long work_data;
3137         int lockdep_start_depth, rcu_start_de << 
3138         bool bh_draining = pool->flags & POOL << 
3139 #ifdef CONFIG_LOCKDEP                            2544 #ifdef CONFIG_LOCKDEP
3140         /*                                       2545         /*
3141          * It is permissible to free the stru    2546          * It is permissible to free the struct work_struct from
3142          * inside the function that is called    2547          * inside the function that is called from it, this we need to
3143          * take into account for lockdep too.    2548          * take into account for lockdep too.  To avoid bogus "held
3144          * lock freed" warnings as well as pr    2549          * lock freed" warnings as well as problems when looking into
3145          * work->lockdep_map, make a copy and    2550          * work->lockdep_map, make a copy and use that here.
3146          */                                      2551          */
3147         struct lockdep_map lockdep_map;          2552         struct lockdep_map lockdep_map;
3148                                                  2553 
3149         lockdep_copy_map(&lockdep_map, &work-    2554         lockdep_copy_map(&lockdep_map, &work->lockdep_map);
3150 #endif                                           2555 #endif
3151         /* ensure we're on the correct CPU */    2556         /* ensure we're on the correct CPU */
3152         WARN_ON_ONCE(!(pool->flags & POOL_DIS    2557         WARN_ON_ONCE(!(pool->flags & POOL_DISASSOCIATED) &&
3153                      raw_smp_processor_id() !    2558                      raw_smp_processor_id() != pool->cpu);
3154                                                  2559 
3155         /* claim and dequeue */                  2560         /* claim and dequeue */
3156         debug_work_deactivate(work);             2561         debug_work_deactivate(work);
3157         hash_add(pool->busy_hash, &worker->he    2562         hash_add(pool->busy_hash, &worker->hentry, (unsigned long)work);
3158         worker->current_work = work;             2563         worker->current_work = work;
3159         worker->current_func = work->func;       2564         worker->current_func = work->func;
3160         worker->current_pwq = pwq;               2565         worker->current_pwq = pwq;
3161         if (worker->task)                     !! 2566         worker->current_at = worker->task->se.sum_exec_runtime;
3162                 worker->current_at = worker-> << 
3163         work_data = *work_data_bits(work);       2567         work_data = *work_data_bits(work);
3164         worker->current_color = get_work_colo    2568         worker->current_color = get_work_color(work_data);
3165                                                  2569 
3166         /*                                       2570         /*
3167          * Record wq name for cmdline and deb    2571          * Record wq name for cmdline and debug reporting, may get
3168          * overridden through set_worker_desc    2572          * overridden through set_worker_desc().
3169          */                                      2573          */
3170         strscpy(worker->desc, pwq->wq->name,     2574         strscpy(worker->desc, pwq->wq->name, WORKER_DESC_LEN);
3171                                                  2575 
3172         list_del_init(&work->entry);             2576         list_del_init(&work->entry);
3173                                                  2577 
3174         /*                                       2578         /*
3175          * CPU intensive works don't particip    2579          * CPU intensive works don't participate in concurrency management.
3176          * They're the scheduler's responsibi    2580          * They're the scheduler's responsibility.  This takes @worker out
3177          * of concurrency management and the     2581          * of concurrency management and the next code block will chain
3178          * execution of the pending work item    2582          * execution of the pending work items.
3179          */                                      2583          */
3180         if (unlikely(pwq->wq->flags & WQ_CPU_    2584         if (unlikely(pwq->wq->flags & WQ_CPU_INTENSIVE))
3181                 worker_set_flags(worker, WORK    2585                 worker_set_flags(worker, WORKER_CPU_INTENSIVE);
3182                                                  2586 
3183         /*                                       2587         /*
3184          * Kick @pool if necessary. It's alwa    2588          * Kick @pool if necessary. It's always noop for per-cpu worker pools
3185          * since nr_running would always be >    2589          * since nr_running would always be >= 1 at this point. This is used to
3186          * chain execution of the pending wor    2590          * chain execution of the pending work items for WORKER_NOT_RUNNING
3187          * workers such as the UNBOUND and CP    2591          * workers such as the UNBOUND and CPU_INTENSIVE ones.
3188          */                                      2592          */
3189         kick_pool(pool);                         2593         kick_pool(pool);
3190                                                  2594 
3191         /*                                       2595         /*
3192          * Record the last pool and clear PEN    2596          * Record the last pool and clear PENDING which should be the last
3193          * update to @work.  Also, do this in    2597          * update to @work.  Also, do this inside @pool->lock so that
3194          * PENDING and queued state changes h    2598          * PENDING and queued state changes happen together while IRQ is
3195          * disabled.                             2599          * disabled.
3196          */                                      2600          */
3197         set_work_pool_and_clear_pending(work, !! 2601         set_work_pool_and_clear_pending(work, pool->id);
3198                                                  2602 
3199         pwq->stats[PWQ_STAT_STARTED]++;          2603         pwq->stats[PWQ_STAT_STARTED]++;
3200         raw_spin_unlock_irq(&pool->lock);        2604         raw_spin_unlock_irq(&pool->lock);
3201                                                  2605 
3202         rcu_start_depth = rcu_preempt_depth() !! 2606         lock_map_acquire(&pwq->wq->lockdep_map);
3203         lockdep_start_depth = lockdep_depth(c << 
3204         /* see drain_dead_softirq_workfn() */ << 
3205         if (!bh_draining)                     << 
3206                 lock_map_acquire(&pwq->wq->lo << 
3207         lock_map_acquire(&lockdep_map);          2607         lock_map_acquire(&lockdep_map);
3208         /*                                       2608         /*
3209          * Strictly speaking we should mark t    2609          * Strictly speaking we should mark the invariant state without holding
3210          * any locks, that is, before these t    2610          * any locks, that is, before these two lock_map_acquire()'s.
3211          *                                       2611          *
3212          * However, that would result in:        2612          * However, that would result in:
3213          *                                       2613          *
3214          *   A(W1)                               2614          *   A(W1)
3215          *   WFC(C)                              2615          *   WFC(C)
3216          *              A(W1)                    2616          *              A(W1)
3217          *              C(C)                     2617          *              C(C)
3218          *                                       2618          *
3219          * Which would create W1->C->W1 depen    2619          * Which would create W1->C->W1 dependencies, even though there is no
3220          * actual deadlock possible. There ar    2620          * actual deadlock possible. There are two solutions, using a
3221          * read-recursive acquire on the work    2621          * read-recursive acquire on the work(queue) 'locks', but this will then
3222          * hit the lockdep limitation on recu    2622          * hit the lockdep limitation on recursive locks, or simply discard
3223          * these locks.                          2623          * these locks.
3224          *                                       2624          *
3225          * AFAICT there is no possible deadlo    2625          * AFAICT there is no possible deadlock scenario between the
3226          * flush_work() and complete() primit    2626          * flush_work() and complete() primitives (except for single-threaded
3227          * workqueues), so hiding them isn't     2627          * workqueues), so hiding them isn't a problem.
3228          */                                      2628          */
3229         lockdep_invariant_state(true);           2629         lockdep_invariant_state(true);
3230         trace_workqueue_execute_start(work);     2630         trace_workqueue_execute_start(work);
3231         worker->current_func(work);              2631         worker->current_func(work);
3232         /*                                       2632         /*
3233          * While we must be careful to not us    2633          * While we must be careful to not use "work" after this, the trace
3234          * point will only record its address    2634          * point will only record its address.
3235          */                                      2635          */
3236         trace_workqueue_execute_end(work, wor    2636         trace_workqueue_execute_end(work, worker->current_func);
3237         pwq->stats[PWQ_STAT_COMPLETED]++;        2637         pwq->stats[PWQ_STAT_COMPLETED]++;
3238         lock_map_release(&lockdep_map);          2638         lock_map_release(&lockdep_map);
3239         if (!bh_draining)                     !! 2639         lock_map_release(&pwq->wq->lockdep_map);
3240                 lock_map_release(&pwq->wq->lo << 
3241                                                  2640 
3242         if (unlikely((worker->task && in_atom !! 2641         if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
3243                      lockdep_depth(current) ! !! 2642                 pr_err("BUG: workqueue leaked lock or atomic: %s/0x%08x/%d\n"
3244                      rcu_preempt_depth() != r !! 2643                        "     last function: %ps\n",
3245                 pr_err("BUG: workqueue leaked !! 2644                        current->comm, preempt_count(), task_pid_nr(current),
3246                        "     preempt=0x%08x l << 
3247                        current->comm, task_pi << 
3248                        lockdep_start_depth, l << 
3249                        rcu_start_depth, rcu_p << 
3250                        worker->current_func);    2645                        worker->current_func);
3251                 debug_show_held_locks(current    2646                 debug_show_held_locks(current);
3252                 dump_stack();                    2647                 dump_stack();
3253         }                                        2648         }
3254                                                  2649 
3255         /*                                       2650         /*
3256          * The following prevents a kworker f    2651          * The following prevents a kworker from hogging CPU on !PREEMPTION
3257          * kernels, where a requeueing work i    2652          * kernels, where a requeueing work item waiting for something to
3258          * happen could deadlock with stop_ma    2653          * happen could deadlock with stop_machine as such work item could
3259          * indefinitely requeue itself while     2654          * indefinitely requeue itself while all other CPUs are trapped in
3260          * stop_machine. At the same time, re    2655          * stop_machine. At the same time, report a quiescent RCU state so
3261          * the same condition doesn't freeze     2656          * the same condition doesn't freeze RCU.
3262          */                                      2657          */
3263         if (worker->task)                     !! 2658         cond_resched();
3264                 cond_resched();               << 
3265                                                  2659 
3266         raw_spin_lock_irq(&pool->lock);          2660         raw_spin_lock_irq(&pool->lock);
3267                                                  2661 
3268         /*                                       2662         /*
3269          * In addition to %WQ_CPU_INTENSIVE,     2663          * In addition to %WQ_CPU_INTENSIVE, @worker may also have been marked
3270          * CPU intensive by wq_worker_tick()     2664          * CPU intensive by wq_worker_tick() if @work hogged CPU longer than
3271          * wq_cpu_intensive_thresh_us. Clear     2665          * wq_cpu_intensive_thresh_us. Clear it.
3272          */                                      2666          */
3273         worker_clr_flags(worker, WORKER_CPU_I    2667         worker_clr_flags(worker, WORKER_CPU_INTENSIVE);
3274                                                  2668 
3275         /* tag the worker for identification     2669         /* tag the worker for identification in schedule() */
3276         worker->last_func = worker->current_f    2670         worker->last_func = worker->current_func;
3277                                                  2671 
3278         /* we're done with it, release */        2672         /* we're done with it, release */
3279         hash_del(&worker->hentry);               2673         hash_del(&worker->hentry);
3280         worker->current_work = NULL;             2674         worker->current_work = NULL;
3281         worker->current_func = NULL;             2675         worker->current_func = NULL;
3282         worker->current_pwq = NULL;              2676         worker->current_pwq = NULL;
3283         worker->current_color = INT_MAX;         2677         worker->current_color = INT_MAX;
3284                                               << 
3285         /* must be the last step, see the fun << 
3286         pwq_dec_nr_in_flight(pwq, work_data);    2678         pwq_dec_nr_in_flight(pwq, work_data);
3287 }                                                2679 }
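/*
 * Illustrative sketch (not part of workqueue.c): a minimal, well-behaved
 * work item of the kind process_one_work() above expects.  The handler
 * runs in process context and releases every lock it takes before
 * returning, so the "BUG: workqueue leaked lock or atomic" check above
 * never fires.  All names below are hypothetical.
 */
#include <linux/workqueue.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);
static unsigned long example_count;

static void example_workfn(struct work_struct *work)
{
	/* take and release the lock symmetrically; never return holding it */
	spin_lock(&example_lock);
	example_count++;		/* placeholder for real processing */
	spin_unlock(&example_lock);
}

static DECLARE_WORK(example_work, example_workfn);

/* queued e.g. from an interrupt handler or another thread */
static void example_kick(void)
{
	queue_work(system_wq, &example_work);
}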
3288                                                  2680 
3289 /**                                              2681 /**
3290  * process_scheduled_works - process schedule    2682  * process_scheduled_works - process scheduled works
3291  * @worker: self                                 2683  * @worker: self
3292  *                                               2684  *
3293  * Process all scheduled works.  Please note     2685  * Process all scheduled works.  Please note that the scheduled list
3294  * may change while processing a work, so thi    2686  * may change while processing a work, so this function repeatedly
3295  * fetches a work from the top and executes i    2687  * fetches a work from the top and executes it.
3296  *                                               2688  *
3297  * CONTEXT:                                      2689  * CONTEXT:
3298  * raw_spin_lock_irq(pool->lock) which may be    2690  * raw_spin_lock_irq(pool->lock) which may be released and regrabbed
3299  * multiple times.                               2691  * multiple times.
3300  */                                              2692  */
3301 static void process_scheduled_works(struct wo    2693 static void process_scheduled_works(struct worker *worker)
3302 {                                                2694 {
3303         struct work_struct *work;                2695         struct work_struct *work;
3304         bool first = true;                       2696         bool first = true;
3305                                                  2697 
3306         while ((work = list_first_entry_or_nu    2698         while ((work = list_first_entry_or_null(&worker->scheduled,
3307                                                  2699                                                 struct work_struct, entry))) {
3308                 if (first) {                     2700                 if (first) {
3309                         worker->pool->watchdo    2701                         worker->pool->watchdog_ts = jiffies;
3310                         first = false;           2702                         first = false;
3311                 }                                2703                 }
3312                 process_one_work(worker, work    2704                 process_one_work(worker, work);
3313         }                                        2705         }
3314 }                                                2706 }
3315                                                  2707 
3316 static void set_pf_worker(bool val)              2708 static void set_pf_worker(bool val)
3317 {                                                2709 {
3318         mutex_lock(&wq_pool_attach_mutex);       2710         mutex_lock(&wq_pool_attach_mutex);
3319         if (val)                                 2711         if (val)
3320                 current->flags |= PF_WQ_WORKE    2712                 current->flags |= PF_WQ_WORKER;
3321         else                                     2713         else
3322                 current->flags &= ~PF_WQ_WORK    2714                 current->flags &= ~PF_WQ_WORKER;
3323         mutex_unlock(&wq_pool_attach_mutex);     2715         mutex_unlock(&wq_pool_attach_mutex);
3324 }                                                2716 }
3325                                                  2717 
3326 /**                                              2718 /**
3327  * worker_thread - the worker thread function    2719  * worker_thread - the worker thread function
3328  * @__worker: self                               2720  * @__worker: self
3329  *                                               2721  *
3330  * The worker thread function.  All workers b    2722  * The worker thread function.  All workers belong to a worker_pool -
3331  * either a per-cpu one or dynamic unbound on    2723  * either a per-cpu one or dynamic unbound one.  These workers process all
3332  * work items regardless of their specific ta    2724  * work items regardless of their specific target workqueue.  The only
3333  * exception is work items which belong to wo    2725  * exception is work items which belong to workqueues with a rescuer which
3334  * will be explained in rescuer_thread().        2726  * will be explained in rescuer_thread().
3335  *                                               2727  *
3336  * Return: 0                                     2728  * Return: 0
3337  */                                              2729  */
3338 static int worker_thread(void *__worker)         2730 static int worker_thread(void *__worker)
3339 {                                                2731 {
3340         struct worker *worker = __worker;        2732         struct worker *worker = __worker;
3341         struct worker_pool *pool = worker->po    2733         struct worker_pool *pool = worker->pool;
3342                                                  2734 
3343         /* tell the scheduler that this is a     2735         /* tell the scheduler that this is a workqueue worker */
3344         set_pf_worker(true);                     2736         set_pf_worker(true);
3345 woke_up:                                         2737 woke_up:
3346         raw_spin_lock_irq(&pool->lock);          2738         raw_spin_lock_irq(&pool->lock);
3347                                                  2739 
3348         /* am I supposed to die? */              2740         /* am I supposed to die? */
3349         if (unlikely(worker->flags & WORKER_D    2741         if (unlikely(worker->flags & WORKER_DIE)) {
3350                 raw_spin_unlock_irq(&pool->lo    2742                 raw_spin_unlock_irq(&pool->lock);
3351                 set_pf_worker(false);            2743                 set_pf_worker(false);
3352                 /*                            !! 2744 
3353                  * The worker is dead and PF_ !! 2745                 set_task_comm(worker->task, "kworker/dying");
3354                  * shouldn't be accessed, res << 
3355                  */                           << 
3356                 worker->pool = NULL;          << 
3357                 ida_free(&pool->worker_ida, w    2746                 ida_free(&pool->worker_ida, worker->id);
                                                   >> 2747                 worker_detach_from_pool(worker);
                                                   >> 2748                 WARN_ON_ONCE(!list_empty(&worker->entry));
                                                   >> 2749                 kfree(worker);
3358                 return 0;                        2750                 return 0;
3359         }                                        2751         }
3360                                                  2752 
3361         worker_leave_idle(worker);               2753         worker_leave_idle(worker);
3362 recheck:                                         2754 recheck:
3363         /* no more worker necessary? */          2755         /* no more worker necessary? */
3364         if (!need_more_worker(pool))             2756         if (!need_more_worker(pool))
3365                 goto sleep;                      2757                 goto sleep;
3366                                                  2758 
3367         /* do we need to manage? */              2759         /* do we need to manage? */
3368         if (unlikely(!may_start_working(pool)    2760         if (unlikely(!may_start_working(pool)) && manage_workers(worker))
3369                 goto recheck;                    2761                 goto recheck;
3370                                                  2762 
3371         /*                                       2763         /*
3372          * ->scheduled list can only be fille    2764          * ->scheduled list can only be filled while a worker is
3373          * preparing to process a work or act    2765          * preparing to process a work or actually processing it.
3374          * Make sure nobody diddled with it w    2766          * Make sure nobody diddled with it while I was sleeping.
3375          */                                      2767          */
3376         WARN_ON_ONCE(!list_empty(&worker->sch    2768         WARN_ON_ONCE(!list_empty(&worker->scheduled));
3377                                                  2769 
3378         /*                                       2770         /*
3379          * Finish PREP stage.  We're guarante    2771          * Finish PREP stage.  We're guaranteed to have at least one idle
3380          * worker or that someone else has al    2772          * worker or that someone else has already assumed the manager
3381          * role.  This is where @worker start    2773          * role.  This is where @worker starts participating in concurrency
3382          * management if applicable and concu    2774          * management if applicable and concurrency management is restored
3383          * after being rebound.  See rebind_w    2775          * after being rebound.  See rebind_workers() for details.
3384          */                                      2776          */
3385         worker_clr_flags(worker, WORKER_PREP     2777         worker_clr_flags(worker, WORKER_PREP | WORKER_REBOUND);
3386                                                  2778 
3387         do {                                     2779         do {
3388                 struct work_struct *work =       2780                 struct work_struct *work =
3389                         list_first_entry(&poo    2781                         list_first_entry(&pool->worklist,
3390                                          stru    2782                                          struct work_struct, entry);
3391                                                  2783 
3392                 if (assign_work(work, worker,    2784                 if (assign_work(work, worker, NULL))
3393                         process_scheduled_wor    2785                         process_scheduled_works(worker);
3394         } while (keep_working(pool));            2786         } while (keep_working(pool));
3395                                                  2787 
3396         worker_set_flags(worker, WORKER_PREP)    2788         worker_set_flags(worker, WORKER_PREP);
3397 sleep:                                           2789 sleep:
3398         /*                                       2790         /*
3399          * pool->lock is held and there's no     2791          * pool->lock is held and there's no work to process and no need to
3400          * manage, sleep.  Workers are woken     2792          * manage, sleep.  Workers are woken up only while holding
3401          * pool->lock or from local cpu, so s    2793          * pool->lock or from local cpu, so setting the current state
3402          * before releasing pool->lock is eno    2794          * before releasing pool->lock is enough to prevent losing any
3403          * event.                                2795          * event.
3404          */                                      2796          */
3405         worker_enter_idle(worker);               2797         worker_enter_idle(worker);
3406         __set_current_state(TASK_IDLE);          2798         __set_current_state(TASK_IDLE);
3407         raw_spin_unlock_irq(&pool->lock);        2799         raw_spin_unlock_irq(&pool->lock);
3408         schedule();                              2800         schedule();
3409         goto woke_up;                            2801         goto woke_up;
3410 }                                                2802 }
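/*
 * Illustrative sketch (not part of workqueue.c): work queued to a bound
 * workqueue is executed by the shared per-cpu worker pool, i.e. by a
 * kworker running worker_thread() above, regardless of which workqueue
 * the item was queued on.  queue_work_on() pins the item to one CPU;
 * plain queue_work() picks the local CPU.  Names are hypothetical.
 */
#include <linux/workqueue.h>
#include <linux/printk.h>
#include <linux/smp.h>

static void per_cpu_workfn(struct work_struct *work)
{
	pr_info("executed by a pool worker on CPU %d\n",
		raw_smp_processor_id());
}

static DECLARE_WORK(per_cpu_work, per_cpu_workfn);

static void queue_on_cpu1(void)
{
	/* runs from CPU 1's normal-priority per-cpu pool (if CPU 1 is online) */
	queue_work_on(1, system_wq, &per_cpu_work);
}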
3411                                                  2803 
3412 /**                                              2804 /**
3413  * rescuer_thread - the rescuer thread functi    2805  * rescuer_thread - the rescuer thread function
3414  * @__rescuer: self                              2806  * @__rescuer: self
3415  *                                               2807  *
3416  * Workqueue rescuer thread function.  There'    2808  * Workqueue rescuer thread function.  There's one rescuer for each
3417  * workqueue which has WQ_MEM_RECLAIM set.       2809  * workqueue which has WQ_MEM_RECLAIM set.
3418  *                                               2810  *
3419  * Regular work processing on a pool may bloc    2811  * Regular work processing on a pool may block trying to create a new
3420  * worker which uses GFP_KERNEL allocation wh    2812  * worker which uses GFP_KERNEL allocation which has slight chance of
3421  * developing into deadlock if some works cur    2813  * developing into deadlock if some works currently on the same queue
3422  * need to be processed to satisfy the GFP_KE    2814  * need to be processed to satisfy the GFP_KERNEL allocation.  This is
3423  * the problem rescuer solves.                   2815  * the problem rescuer solves.
3424  *                                               2816  *
3425  * When such condition is possible, the pool     2817  * When such condition is possible, the pool summons rescuers of all
3426  * workqueues which have works queued on the     2818  * workqueues which have works queued on the pool and lets them process
3427  * those works so that forward progress can b    2819  * those works so that forward progress can be guaranteed.
3428  *                                               2820  *
3429  * This should happen rarely.                    2821  * This should happen rarely.
3430  *                                               2822  *
3431  * Return: 0                                     2823  * Return: 0
3432  */                                              2824  */
3433 static int rescuer_thread(void *__rescuer)       2825 static int rescuer_thread(void *__rescuer)
3434 {                                                2826 {
3435         struct worker *rescuer = __rescuer;      2827         struct worker *rescuer = __rescuer;
3436         struct workqueue_struct *wq = rescuer    2828         struct workqueue_struct *wq = rescuer->rescue_wq;
3437         bool should_stop;                        2829         bool should_stop;
3438                                                  2830 
3439         set_user_nice(current, RESCUER_NICE_L    2831         set_user_nice(current, RESCUER_NICE_LEVEL);
3440                                                  2832 
3441         /*                                       2833         /*
3442          * Mark rescuer as worker too.  As WO    2834          * Mark rescuer as worker too.  As WORKER_PREP is never cleared, it
3443          * doesn't participate in concurrency    2835          * doesn't participate in concurrency management.
3444          */                                      2836          */
3445         set_pf_worker(true);                     2837         set_pf_worker(true);
3446 repeat:                                          2838 repeat:
3447         set_current_state(TASK_IDLE);            2839         set_current_state(TASK_IDLE);
3448                                                  2840 
3449         /*                                       2841         /*
3450          * By the time the rescuer is request    2842          * By the time the rescuer is requested to stop, the workqueue
3451          * shouldn't have any work pending, b    2843          * shouldn't have any work pending, but @wq->maydays may still have
3452          * pwq(s) queued.  This can happen by    2844          * pwq(s) queued.  This can happen by non-rescuer workers consuming
3453          * all the work items before the resc    2845          * all the work items before the rescuer got to them.  Go through
3454          * @wq->maydays processing before act    2846          * @wq->maydays processing before acting on should_stop so that the
3455          * list is always empty on exit.         2847          * list is always empty on exit.
3456          */                                      2848          */
3457         should_stop = kthread_should_stop();     2849         should_stop = kthread_should_stop();
3458                                                  2850 
3459         /* see whether any pwq is asking for     2851         /* see whether any pwq is asking for help */
3460         raw_spin_lock_irq(&wq_mayday_lock);      2852         raw_spin_lock_irq(&wq_mayday_lock);
3461                                                  2853 
3462         while (!list_empty(&wq->maydays)) {      2854         while (!list_empty(&wq->maydays)) {
3463                 struct pool_workqueue *pwq =     2855                 struct pool_workqueue *pwq = list_first_entry(&wq->maydays,
3464                                         struc    2856                                         struct pool_workqueue, mayday_node);
3465                 struct worker_pool *pool = pw    2857                 struct worker_pool *pool = pwq->pool;
3466                 struct work_struct *work, *n;    2858                 struct work_struct *work, *n;
3467                                                  2859 
3468                 __set_current_state(TASK_RUNN    2860                 __set_current_state(TASK_RUNNING);
3469                 list_del_init(&pwq->mayday_no    2861                 list_del_init(&pwq->mayday_node);
3470                                                  2862 
3471                 raw_spin_unlock_irq(&wq_mayda    2863                 raw_spin_unlock_irq(&wq_mayday_lock);
3472                                                  2864 
3473                 worker_attach_to_pool(rescuer    2865                 worker_attach_to_pool(rescuer, pool);
3474                                                  2866 
3475                 raw_spin_lock_irq(&pool->lock    2867                 raw_spin_lock_irq(&pool->lock);
3476                                                  2868 
3477                 /*                               2869                 /*
3478                  * Slurp in all works issued     2870                  * Slurp in all works issued via this workqueue and
3479                  * process'em.                   2871                  * process'em.
3480                  */                              2872                  */
3481                 WARN_ON_ONCE(!list_empty(&res    2873                 WARN_ON_ONCE(!list_empty(&rescuer->scheduled));
3482                 list_for_each_entry_safe(work    2874                 list_for_each_entry_safe(work, n, &pool->worklist, entry) {
3483                         if (get_work_pwq(work    2875                         if (get_work_pwq(work) == pwq &&
3484                             assign_work(work,    2876                             assign_work(work, rescuer, &n))
3485                                 pwq->stats[PW    2877                                 pwq->stats[PWQ_STAT_RESCUED]++;
3486                 }                                2878                 }
3487                                                  2879 
3488                 if (!list_empty(&rescuer->sch    2880                 if (!list_empty(&rescuer->scheduled)) {
3489                         process_scheduled_wor    2881                         process_scheduled_works(rescuer);
3490                                                  2882 
3491                         /*                       2883                         /*
3492                          * The above executio    2884                          * The above execution of rescued work items could
3493                          * have created more     2885                          * have created more to rescue through
3494                          * pwq_activate_first    2886                          * pwq_activate_first_inactive() or chained
3495                          * queueing.  Let's p    2887                          * queueing.  Let's put @pwq back on mayday list so
3496                          * that such back-to-    2888                          * that such back-to-back work items, which may be
3497                          * being used to reli    2889                          * being used to relieve memory pressure, don't
3498                          * incur MAYDAY_INTER    2890                          * incur MAYDAY_INTERVAL delay in between.
3499                          */                      2891                          */
3500                         if (pwq->nr_active &&    2892                         if (pwq->nr_active && need_to_create_worker(pool)) {
3501                                 raw_spin_lock    2893                                 raw_spin_lock(&wq_mayday_lock);
3502                                 /*               2894                                 /*
3503                                  * Queue iff     2895                                  * Queue iff we aren't racing destruction
3504                                  * and somebo    2896                                  * and somebody else hasn't queued it already.
3505                                  */              2897                                  */
3506                                 if (wq->rescu    2898                                 if (wq->rescuer && list_empty(&pwq->mayday_node)) {
3507                                         get_p    2899                                         get_pwq(pwq);
3508                                         list_    2900                                         list_add_tail(&pwq->mayday_node, &wq->maydays);
3509                                 }                2901                                 }
3510                                 raw_spin_unlo    2902                                 raw_spin_unlock(&wq_mayday_lock);
3511                         }                        2903                         }
3512                 }                                2904                 }
3513                                                  2905 
3514                 /*                               2906                 /*
3515                  * Put the reference grabbed     2907                  * Put the reference grabbed by send_mayday().  @pool won't
3516                  * go away while we're still     2908                  * go away while we're still attached to it.
3517                  */                              2909                  */
3518                 put_pwq(pwq);                    2910                 put_pwq(pwq);
3519                                                  2911 
3520                 /*                               2912                 /*
3521                  * Leave this pool. Notify re    2913                  * Leave this pool. Notify regular workers; otherwise, we end up
3522                  * with 0 concurrency and sta    2914                  * with 0 concurrency and stalling the execution.
3523                  */                              2915                  */
3524                 kick_pool(pool);                 2916                 kick_pool(pool);
3525                                                  2917 
3526                 raw_spin_unlock_irq(&pool->lo    2918                 raw_spin_unlock_irq(&pool->lock);
3527                                                  2919 
3528                 worker_detach_from_pool(rescu    2920                 worker_detach_from_pool(rescuer);
3529                                                  2921 
3530                 raw_spin_lock_irq(&wq_mayday_    2922                 raw_spin_lock_irq(&wq_mayday_lock);
3531         }                                        2923         }
3532                                                  2924 
3533         raw_spin_unlock_irq(&wq_mayday_lock);    2925         raw_spin_unlock_irq(&wq_mayday_lock);
3534                                                  2926 
3535         if (should_stop) {                       2927         if (should_stop) {
3536                 __set_current_state(TASK_RUNN    2928                 __set_current_state(TASK_RUNNING);
3537                 set_pf_worker(false);            2929                 set_pf_worker(false);
3538                 return 0;                        2930                 return 0;
3539         }                                        2931         }
3540                                                  2932 
3541         /* rescuers should never participate     2933         /* rescuers should never participate in concurrency management */
3542         WARN_ON_ONCE(!(rescuer->flags & WORKE    2934         WARN_ON_ONCE(!(rescuer->flags & WORKER_NOT_RUNNING));
3543         schedule();                              2935         schedule();
3544         goto repeat;                             2936         goto repeat;
3545 }                                                2937 }
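/*
 * Illustrative sketch (not part of workqueue.c): a workqueue that sits on
 * a memory-reclaim path should be allocated with WQ_MEM_RECLAIM so that it
 * gets the rescuer implemented above and can make forward progress even
 * when new kworkers cannot be created.  The names are hypothetical.
 */
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/errno.h>

static struct workqueue_struct *example_reclaim_wq;

static int __init example_init(void)
{
	/* WQ_MEM_RECLAIM guarantees a rescuer thread for this workqueue */
	example_reclaim_wq = alloc_workqueue("example_reclaim",
					     WQ_MEM_RECLAIM, 0);
	if (!example_reclaim_wq)
		return -ENOMEM;
	return 0;
}

static void __exit example_exit(void)
{
	destroy_workqueue(example_reclaim_wq);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");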
3546                                                  2938 
3547 static void bh_worker(struct worker *worker)  << 
3548 {                                             << 
3549         struct worker_pool *pool = worker->po << 
3550         int nr_restarts = BH_WORKER_RESTARTS; << 
3551         unsigned long end = jiffies + BH_WORK << 
3552                                               << 
3553         raw_spin_lock_irq(&pool->lock);       << 
3554         worker_leave_idle(worker);            << 
3555                                               << 
3556         /*                                    << 
3557          * This function follows the structur << 
3558          * explanations on each step.         << 
3559          */                                   << 
3560         if (!need_more_worker(pool))          << 
3561                 goto done;                    << 
3562                                               << 
3563         WARN_ON_ONCE(!list_empty(&worker->sch << 
3564         worker_clr_flags(worker, WORKER_PREP  << 
3565                                               << 
3566         do {                                  << 
3567                 struct work_struct *work =    << 
3568                         list_first_entry(&poo << 
3569                                          stru << 
3570                                               << 
3571                 if (assign_work(work, worker, << 
3572                         process_scheduled_wor << 
3573         } while (keep_working(pool) &&        << 
3574                  --nr_restarts && time_before << 
3575                                               << 
3576         worker_set_flags(worker, WORKER_PREP) << 
3577 done:                                         << 
3578         worker_enter_idle(worker);            << 
3579         kick_pool(pool);                      << 
3580         raw_spin_unlock_irq(&pool->lock);     << 
3581 }                                             << 
3582                                               << 
3583 /*                                            << 
3584  * TODO: Convert all tasklet users to workque << 
3585  *                                            << 
3586  * This is currently called from tasklet[_hi] << 
3587  * whenever there are tasklets to run. Let's  << 
3588  * queued. Once conversion from tasklet is co << 
3589  * can be dropped.                            << 
3590  *                                            << 
3591  * After full conversion, we'll add worker->s << 
3592  * softirq action and obtain the worker point << 
3593  */                                           << 
3594 void workqueue_softirq_action(bool highpri)   << 
3595 {                                             << 
3596         struct worker_pool *pool =            << 
3597                 &per_cpu(bh_worker_pools, smp << 
3598         if (need_more_worker(pool))           << 
3599                 bh_worker(list_first_entry(&p << 
3600 }                                             << 
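/*
 * Illustrative sketch (not part of workqueue.c): the BH workqueue usage
 * that the TODO above steers tasklet users towards, assuming this kernel
 * provides system_bh_wq as referenced in the code above.  The work item
 * runs from softirq context via workqueue_softirq_action() rather than
 * from a kworker task.  Names are hypothetical.
 */
#include <linux/workqueue.h>

static void example_bh_workfn(struct work_struct *work)
{
	/* runs in BH (softirq) context; must not sleep */
}

static DECLARE_WORK(example_bh_work, example_bh_workfn);

static void example_irq_bottom_half_kick(void)
{
	/* rough replacement for tasklet_schedule() */
	queue_work(system_bh_wq, &example_bh_work);
}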
3601                                               << 
3602 struct wq_drain_dead_softirq_work {           << 
3603         struct work_struct      work;         << 
3604         struct worker_pool      *pool;        << 
3605         struct completion       done;         << 
3606 };                                            << 
3607                                               << 
3608 static void drain_dead_softirq_workfn(struct  << 
3609 {                                             << 
3610         struct wq_drain_dead_softirq_work *de << 
3611                 container_of(work, struct wq_ << 
3612         struct worker_pool *pool = dead_work- << 
3613         bool repeat;                          << 
3614                                               << 
3615         /*                                    << 
3616          * @pool's CPU is dead and we want to << 
3617          * items from this BH work item which << 
3618          * its CPU is dead, @pool can't be ki << 
3619          * will be nested, a lockdep annotati << 
3620          * @pool with %POOL_BH_DRAINING for t << 
3621          */                                   << 
3622         raw_spin_lock_irq(&pool->lock);       << 
3623         pool->flags |= POOL_BH_DRAINING;      << 
3624         raw_spin_unlock_irq(&pool->lock);     << 
3625                                               << 
3626         bh_worker(list_first_entry(&pool->wor << 
3627                                               << 
3628         raw_spin_lock_irq(&pool->lock);       << 
3629         pool->flags &= ~POOL_BH_DRAINING;     << 
3630         repeat = need_more_worker(pool);      << 
3631         raw_spin_unlock_irq(&pool->lock);     << 
3632                                               << 
3633         /*                                    << 
3634          * bh_worker() might hit consecutive  << 
3635          * still are pending work items, resc << 
3636          * don't hog this CPU's BH.           << 
3637          */                                   << 
3638         if (repeat) {                         << 
3639                 if (pool->attrs->nice == HIGH << 
3640                         queue_work(system_bh_ << 
3641                 else                          << 
3642                         queue_work(system_bh_ << 
3643         } else {                              << 
3644                 complete(&dead_work->done);   << 
3645         }                                     << 
3646 }                                             << 
3647                                               << 
3648 /*                                            << 
3649  * @cpu is dead. Drain the remaining BH work  << 
3650  * possible to allocate dead_work per CPU and << 
3651  * have to worry about draining overlapping w << 
3652  * nesting (one CPU's dead_work queued on ano << 
3653  * on). Let's keep it simple and drain them s << 
3654  * items which shouldn't be requeued on the s << 
3655  */                                           << 
3656 void workqueue_softirq_dead(unsigned int cpu) << 
3657 {                                             << 
3658         int i;                                << 
3659                                               << 
3660         for (i = 0; i < NR_STD_WORKER_POOLS;  << 
3661                 struct worker_pool *pool = &p << 
3662                 struct wq_drain_dead_softirq_ << 
3663                                               << 
3664                 if (!need_more_worker(pool))  << 
3665                         continue;             << 
3666                                               << 
3667                 INIT_WORK_ONSTACK(&dead_work. << 
3668                 dead_work.pool = pool;        << 
3669                 init_completion(&dead_work.do << 
3670                                               << 
3671                 if (pool->attrs->nice == HIGH << 
3672                         queue_work(system_bh_ << 
3673                 else                          << 
3674                         queue_work(system_bh_ << 
3675                                               << 
3676                 wait_for_completion(&dead_wor << 
3677                 destroy_work_on_stack(&dead_w << 
3678         }                                     << 
3679 }                                             << 
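/*
 * Illustrative sketch (not part of workqueue.c): the generic on-stack
 * work + completion pattern used by workqueue_softirq_dead() above -
 * queue a work item that lives on the caller's stack, wait for it to
 * signal completion, then tear down its debugobjects state.  Names are
 * hypothetical.
 */
#include <linux/workqueue.h>
#include <linux/completion.h>
#include <linux/container_of.h>

struct example_sync_work {
	struct work_struct	work;
	struct completion	done;
};

static void example_sync_workfn(struct work_struct *work)
{
	struct example_sync_work *sw =
		container_of(work, struct example_sync_work, work);

	/* ... do the actual processing here ... */
	complete(&sw->done);
}

static void example_run_and_wait(struct workqueue_struct *wq)
{
	struct example_sync_work sw;

	INIT_WORK_ONSTACK(&sw.work, example_sync_workfn);
	init_completion(&sw.done);

	queue_work(wq, &sw.work);
	wait_for_completion(&sw.done);
	destroy_work_on_stack(&sw.work);
}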
3680                                               << 
3681 /**                                              2939 /**
3682  * check_flush_dependency - check for flush d    2940  * check_flush_dependency - check for flush dependency sanity
3683  * @target_wq: workqueue being flushed           2941  * @target_wq: workqueue being flushed
3684  * @target_work: work item being flushed (NUL    2942  * @target_work: work item being flushed (NULL for workqueue flushes)
3685  *                                               2943  *
3686  * %current is trying to flush the whole @tar    2944  * %current is trying to flush the whole @target_wq or @target_work on it.
3687  * If @target_wq doesn't have %WQ_MEM_RECLAIM    2945  * If @target_wq doesn't have %WQ_MEM_RECLAIM, verify that %current is not
3688  * reclaiming memory or running on a workqueu    2946  * reclaiming memory or running on a workqueue which doesn't have
3689  * %WQ_MEM_RECLAIM as that can break forward-    2947  * %WQ_MEM_RECLAIM as that can break forward-progress guarantee leading to
3690  * a deadlock.                                   2948  * a deadlock.
3691  */                                              2949  */
3692 static void check_flush_dependency(struct wor    2950 static void check_flush_dependency(struct workqueue_struct *target_wq,
3693                                    struct wor    2951                                    struct work_struct *target_work)
3694 {                                                2952 {
3695         work_func_t target_func = target_work    2953         work_func_t target_func = target_work ? target_work->func : NULL;
3696         struct worker *worker;                   2954         struct worker *worker;
3697                                                  2955 
3698         if (target_wq->flags & WQ_MEM_RECLAIM    2956         if (target_wq->flags & WQ_MEM_RECLAIM)
3699                 return;                          2957                 return;
3700                                                  2958 
3701         worker = current_wq_worker();            2959         worker = current_wq_worker();
3702                                                  2960 
3703         WARN_ONCE(current->flags & PF_MEMALLO    2961         WARN_ONCE(current->flags & PF_MEMALLOC,
3704                   "workqueue: PF_MEMALLOC tas    2962                   "workqueue: PF_MEMALLOC task %d(%s) is flushing !WQ_MEM_RECLAIM %s:%ps",
3705                   current->pid, current->comm    2963                   current->pid, current->comm, target_wq->name, target_func);
3706         WARN_ONCE(worker && ((worker->current    2964         WARN_ONCE(worker && ((worker->current_pwq->wq->flags &
3707                               (WQ_MEM_RECLAIM    2965                               (WQ_MEM_RECLAIM | __WQ_LEGACY)) == WQ_MEM_RECLAIM),
3708                   "workqueue: WQ_MEM_RECLAIM     2966                   "workqueue: WQ_MEM_RECLAIM %s:%ps is flushing !WQ_MEM_RECLAIM %s:%ps",
3709                   worker->current_pwq->wq->na    2967                   worker->current_pwq->wq->name, worker->current_func,
3710                   target_wq->name, target_fun    2968                   target_wq->name, target_func);
3711 }                                                2969 }
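/*
 * Illustrative sketch (not part of workqueue.c): the dependency that
 * check_flush_dependency() above warns about.  A work item running on a
 * WQ_MEM_RECLAIM workqueue flushes a plain workqueue; if the flushed queue
 * is stuck waiting for a new kworker that can only be created once memory
 * is reclaimed, forward progress is lost.  Names are hypothetical and this
 * is the pattern to avoid.
 */
#include <linux/workqueue.h>

static struct workqueue_struct *plain_wq;	/* allocated without WQ_MEM_RECLAIM */
static struct workqueue_struct *reclaim_wq;	/* allocated with WQ_MEM_RECLAIM   */

static void reclaim_workfn(struct work_struct *work)
{
	/*
	 * Would trip the WARN_ONCE() above: a WQ_MEM_RECLAIM worker is
	 * flushing a !WQ_MEM_RECLAIM workqueue.
	 */
	flush_workqueue(plain_wq);
}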
3712                                                  2970 
3713 struct wq_barrier {                              2971 struct wq_barrier {
3714         struct work_struct      work;            2972         struct work_struct      work;
3715         struct completion       done;            2973         struct completion       done;
3716         struct task_struct      *task;  /* pu    2974         struct task_struct      *task;  /* purely informational */
3717 };                                               2975 };
3718                                                  2976 
3719 static void wq_barrier_func(struct work_struc    2977 static void wq_barrier_func(struct work_struct *work)
3720 {                                                2978 {
3721         struct wq_barrier *barr = container_o    2979         struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
3722         complete(&barr->done);                   2980         complete(&barr->done);
3723 }                                                2981 }
3724                                                  2982 
3725 /**                                              2983 /**
3726  * insert_wq_barrier - insert a barrier work     2984  * insert_wq_barrier - insert a barrier work
3727  * @pwq: pwq to insert barrier into              2985  * @pwq: pwq to insert barrier into
3728  * @barr: wq_barrier to insert                   2986  * @barr: wq_barrier to insert
3729  * @target: target work to attach @barr to       2987  * @target: target work to attach @barr to
3730  * @worker: worker currently executing @targe    2988  * @worker: worker currently executing @target, NULL if @target is not executing
3731  *                                               2989  *
3732  * @barr is linked to @target such that @barr    2990  * @barr is linked to @target such that @barr is completed only after
3733  * @target finishes execution.  Please note t    2991  * @target finishes execution.  Please note that the ordering
3734  * guarantee is observed only with respect to    2992  * guarantee is observed only with respect to @target and on the local
3735  * cpu.                                          2993  * cpu.
3736  *                                               2994  *
3737  * Currently, a queued barrier can't be cance    2995  * Currently, a queued barrier can't be canceled.  This is because
3738  * try_to_grab_pending() can't determine whet    2996  * try_to_grab_pending() can't determine whether the work to be
3739  * grabbed is at the head of the queue and th    2997  * grabbed is at the head of the queue and thus can't clear LINKED
3740  * flag of the previous work while there must    2998  * flag of the previous work while there must be a valid next work
3741  * after a work with LINKED flag set.            2999  * after a work with LINKED flag set.
3742  *                                               3000  *
3743  * Note that when @worker is non-NULL, @targe    3001  * Note that when @worker is non-NULL, @target may be modified
3744  * underneath us, so we can't reliably determ    3002  * underneath us, so we can't reliably determine pwq from @target.
3745  *                                               3003  *
3746  * CONTEXT:                                      3004  * CONTEXT:
3747  * raw_spin_lock_irq(pool->lock).                3005  * raw_spin_lock_irq(pool->lock).
3748  */                                              3006  */
3749 static void insert_wq_barrier(struct pool_wor    3007 static void insert_wq_barrier(struct pool_workqueue *pwq,
3750                               struct wq_barri    3008                               struct wq_barrier *barr,
3751                               struct work_str    3009                               struct work_struct *target, struct worker *worker)
3752 {                                                3010 {
3753         static __maybe_unused struct lock_cla << 
3754         unsigned int work_flags = 0;             3011         unsigned int work_flags = 0;
3755         unsigned int work_color;                 3012         unsigned int work_color;
3756         struct list_head *head;                  3013         struct list_head *head;
3757                                                  3014 
3758         /*                                       3015         /*
3759          * debugobject calls are safe here ev    3016          * debugobject calls are safe here even with pool->lock locked
3760          * as we know for sure that this will    3017          * as we know for sure that this will not trigger any of the
3761          * checks and call back into the fixu    3018          * checks and call back into the fixup functions where we
3762          * might deadlock.                       3019          * might deadlock.
3763          *                                    << 
3764          * BH and threaded workqueues need se << 
3765          * spuriously triggering "inconsisten << 
3766          * usage".                            << 
3767          */                                      3020          */
3768         INIT_WORK_ONSTACK_KEY(&barr->work, wq !! 3021         INIT_WORK_ONSTACK(&barr->work, wq_barrier_func);
3769                               (pwq->wq->flags << 
3770         __set_bit(WORK_STRUCT_PENDING_BIT, wo    3022         __set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work));
3771                                                  3023 
3772         init_completion_map(&barr->done, &tar    3024         init_completion_map(&barr->done, &target->lockdep_map);
3773                                                  3025 
3774         barr->task = current;                    3026         barr->task = current;
3775                                                  3027 
3776         /* The barrier work item does not par !! 3028         /* The barrier work item does not participate in pwq->nr_active. */
3777         work_flags |= WORK_STRUCT_INACTIVE;      3029         work_flags |= WORK_STRUCT_INACTIVE;
3778                                                  3030 
3779         /*                                       3031         /*
3780          * If @target is currently being exec    3032          * If @target is currently being executed, schedule the
3781          * barrier to the worker; otherwise,     3033          * barrier to the worker; otherwise, put it after @target.
3782          */                                      3034          */
3783         if (worker) {                            3035         if (worker) {
3784                 head = worker->scheduled.next    3036                 head = worker->scheduled.next;
3785                 work_color = worker->current_    3037                 work_color = worker->current_color;
3786         } else {                                 3038         } else {
3787                 unsigned long *bits = work_da    3039                 unsigned long *bits = work_data_bits(target);
3788                                                  3040 
3789                 head = target->entry.next;       3041                 head = target->entry.next;
3790                 /* there can already be other    3042                 /* there can already be other linked works, inherit and set */
3791                 work_flags |= *bits & WORK_ST    3043                 work_flags |= *bits & WORK_STRUCT_LINKED;
3792                 work_color = get_work_color(*    3044                 work_color = get_work_color(*bits);
3793                 __set_bit(WORK_STRUCT_LINKED_    3045                 __set_bit(WORK_STRUCT_LINKED_BIT, bits);
3794         }                                        3046         }
3795                                                  3047 
3796         pwq->nr_in_flight[work_color]++;         3048         pwq->nr_in_flight[work_color]++;
3797         work_flags |= work_color_to_flags(wor    3049         work_flags |= work_color_to_flags(work_color);
3798                                                  3050 
3799         insert_work(pwq, &barr->work, head, w    3051         insert_work(pwq, &barr->work, head, work_flags);
3800 }                                                3052 }
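/*
 * Illustrative sketch (not part of workqueue.c): typical callers never see
 * struct wq_barrier directly - they call flush_work(), which waits for the
 * target by having a barrier work item inserted behind it as set up above.
 * Names are hypothetical.
 */
#include <linux/workqueue.h>

static void example_flush_workfn(struct work_struct *work)
{
	/* ... */
}

static DECLARE_WORK(example_flush_work, example_flush_workfn);

static void example_sync_point(void)
{
	queue_work(system_wq, &example_flush_work);

	/* returns only after example_flush_workfn() has finished */
	flush_work(&example_flush_work);
}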
3801                                                  3053 
3802 /**                                              3054 /**
3803  * flush_workqueue_prep_pwqs - prepare pwqs f    3055  * flush_workqueue_prep_pwqs - prepare pwqs for workqueue flushing
3804  * @wq: workqueue being flushed                  3056  * @wq: workqueue being flushed
3805  * @flush_color: new flush color, < 0 for no-    3057  * @flush_color: new flush color, < 0 for no-op
3806  * @work_color: new work color, < 0 for no-op    3058  * @work_color: new work color, < 0 for no-op
3807  *                                               3059  *
3808  * Prepare pwqs for workqueue flushing.          3060  * Prepare pwqs for workqueue flushing.
3809  *                                               3061  *
3810  * If @flush_color is non-negative, flush_col    3062  * If @flush_color is non-negative, flush_color on all pwqs should be
3811  * -1.  If no pwq has in-flight commands at t    3063  * -1.  If no pwq has in-flight commands at the specified color, all
3812  * pwq->flush_color's stay at -1 and %false i    3064  * pwq->flush_color's stay at -1 and %false is returned.  If any pwq
3813  * has in flight commands, its pwq->flush_col    3065  * has in flight commands, its pwq->flush_color is set to
3814  * @flush_color, @wq->nr_pwqs_to_flush is upd    3066  * @flush_color, @wq->nr_pwqs_to_flush is updated accordingly, pwq
3815  * wakeup logic is armed and %true is returne    3067  * wakeup logic is armed and %true is returned.
3816  *                                               3068  *
3817  * The caller should have initialized @wq->fi    3069  * The caller should have initialized @wq->first_flusher prior to
3818  * calling this function with non-negative @f    3070  * calling this function with non-negative @flush_color.  If
3819  * @flush_color is negative, no flush color u    3071  * @flush_color is negative, no flush color update is done and %false
3820  * is returned.                                  3072  * is returned.
3821  *                                               3073  *
3822  * If @work_color is non-negative, all pwqs s    3074  * If @work_color is non-negative, all pwqs should have the same
3823  * work_color which is previous to @work_colo    3075  * work_color which is previous to @work_color and all will be
3824  * advanced to @work_color.                      3076  * advanced to @work_color.
3825  *                                               3077  *
3826  * CONTEXT:                                      3078  * CONTEXT:
3827  * mutex_lock(wq->mutex).                        3079  * mutex_lock(wq->mutex).
3828  *                                               3080  *
3829  * Return:                                       3081  * Return:
3830  * %true if @flush_color >= 0 and there's som    3082  * %true if @flush_color >= 0 and there's something to flush.  %false
3831  * otherwise.                                    3083  * otherwise.
3832  */                                              3084  */
3833 static bool flush_workqueue_prep_pwqs(struct     3085 static bool flush_workqueue_prep_pwqs(struct workqueue_struct *wq,
3834                                       int flu    3086                                       int flush_color, int work_color)
3835 {                                                3087 {
3836         bool wait = false;                       3088         bool wait = false;
3837         struct pool_workqueue *pwq;              3089         struct pool_workqueue *pwq;
3838                                                  3090 
3839         if (flush_color >= 0) {                  3091         if (flush_color >= 0) {
3840                 WARN_ON_ONCE(atomic_read(&wq-    3092                 WARN_ON_ONCE(atomic_read(&wq->nr_pwqs_to_flush));
3841                 atomic_set(&wq->nr_pwqs_to_fl    3093                 atomic_set(&wq->nr_pwqs_to_flush, 1);
3842         }                                        3094         }
3843                                                  3095 
3844         for_each_pwq(pwq, wq) {                  3096         for_each_pwq(pwq, wq) {
3845                 struct worker_pool *pool = pw    3097                 struct worker_pool *pool = pwq->pool;
3846                                                  3098 
3847                 raw_spin_lock_irq(&pool->lock    3099                 raw_spin_lock_irq(&pool->lock);
3848                                                  3100 
3849                 if (flush_color >= 0) {          3101                 if (flush_color >= 0) {
3850                         WARN_ON_ONCE(pwq->flu    3102                         WARN_ON_ONCE(pwq->flush_color != -1);
3851                                                  3103 
3852                         if (pwq->nr_in_flight    3104                         if (pwq->nr_in_flight[flush_color]) {
3853                                 pwq->flush_co    3105                                 pwq->flush_color = flush_color;
3854                                 atomic_inc(&w    3106                                 atomic_inc(&wq->nr_pwqs_to_flush);
3855                                 wait = true;     3107                                 wait = true;
3856                         }                        3108                         }
3857                 }                                3109                 }
3858                                                  3110 
3859                 if (work_color >= 0) {           3111                 if (work_color >= 0) {
3860                         WARN_ON_ONCE(work_col    3112                         WARN_ON_ONCE(work_color != work_next_color(pwq->work_color));
3861                         pwq->work_color = wor    3113                         pwq->work_color = work_color;
3862                 }                                3114                 }
3863                                                  3115 
3864                 raw_spin_unlock_irq(&pool->lo    3116                 raw_spin_unlock_irq(&pool->lock);
3865         }                                        3117         }
3866                                                  3118 
3867         if (flush_color >= 0 && atomic_dec_an    3119         if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_pwqs_to_flush))
3868                 complete(&wq->first_flusher->    3120                 complete(&wq->first_flusher->done);
3869                                                  3121 
3870         return wait;                             3122         return wait;
3871 }                                                3123 }
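
/*
 * Illustrative sketch, not part of workqueue.c: flush progress is tracked
 * per pool_workqueue with a small, cyclic set of colors.  Each queued work
 * item carries the pwq's current work_color, and a flusher only waits for
 * nr_in_flight[flush_color] of its own color to drain.  Assuming
 * work_next_color() advances modulo WORK_NR_COLORS, the colors wrap:
 */
static void example_flush_color_cycle(void)
{
	int color = 0;
	int i;

	for (i = 0; i < WORK_NR_COLORS; i++)
		color = work_next_color(color);

	/* after a full cycle the color is back where it started */
	WARN_ON_ONCE(color != 0);
}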
3872                                                  3124 
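/*
 * The two helpers below "touch" the workqueue's or work item's lockdep map:
 * they acquire and immediately release it so lockdep records the flush
 * dependency without any real lock being held.  For WQ_BH workqueues the
 * annotation is done with bottom halves disabled, matching the softirq
 * context in which BH work items actually execute.
 */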
3873 static void touch_wq_lockdep_map(struct workq << 
3874 {                                             << 
3875 #ifdef CONFIG_LOCKDEP                         << 
3876         if (wq->flags & WQ_BH)                << 
3877                 local_bh_disable();           << 
3878                                               << 
3879         lock_map_acquire(&wq->lockdep_map);   << 
3880         lock_map_release(&wq->lockdep_map);   << 
3881                                               << 
3882         if (wq->flags & WQ_BH)                << 
3883                 local_bh_enable();            << 
3884 #endif                                        << 
3885 }                                             << 
3886                                               << 
3887 static void touch_work_lockdep_map(struct wor << 
3888                                    struct wor << 
3889 {                                             << 
3890 #ifdef CONFIG_LOCKDEP                         << 
3891         if (wq->flags & WQ_BH)                << 
3892                 local_bh_disable();           << 
3893                                               << 
3894         lock_map_acquire(&work->lockdep_map); << 
3895         lock_map_release(&work->lockdep_map); << 
3896                                               << 
3897         if (wq->flags & WQ_BH)                << 
3898                 local_bh_enable();            << 
3899 #endif                                        << 
3900 }                                             << 
3901                                               << 
3902 /**                                              3125 /**
3903  * __flush_workqueue - ensure that any schedu    3126  * __flush_workqueue - ensure that any scheduled work has run to completion.
3904  * @wq: workqueue to flush                       3127  * @wq: workqueue to flush
3905  *                                               3128  *
3906  * This function sleeps until all work items     3129  * This function sleeps until all work items which were queued on entry
3907  * have finished execution, but it is not liv    3130  * have finished execution, but it is not livelocked by new incoming ones.
3908  */                                              3131  */
3909 void __flush_workqueue(struct workqueue_struc    3132 void __flush_workqueue(struct workqueue_struct *wq)
3910 {                                                3133 {
3911         struct wq_flusher this_flusher = {       3134         struct wq_flusher this_flusher = {
3912                 .list = LIST_HEAD_INIT(this_f    3135                 .list = LIST_HEAD_INIT(this_flusher.list),
3913                 .flush_color = -1,               3136                 .flush_color = -1,
3914                 .done = COMPLETION_INITIALIZE    3137                 .done = COMPLETION_INITIALIZER_ONSTACK_MAP(this_flusher.done, wq->lockdep_map),
3915         };                                       3138         };
3916         int next_color;                          3139         int next_color;
3917                                                  3140 
3918         if (WARN_ON(!wq_online))                 3141         if (WARN_ON(!wq_online))
3919                 return;                          3142                 return;
3920                                                  3143 
3921         touch_wq_lockdep_map(wq);             !! 3144         lock_map_acquire(&wq->lockdep_map);
                                                   >> 3145         lock_map_release(&wq->lockdep_map);
3922                                                  3146 
3923         mutex_lock(&wq->mutex);                  3147         mutex_lock(&wq->mutex);
3924                                                  3148 
3925         /*                                       3149         /*
3926          * Start-to-wait phase                   3150          * Start-to-wait phase
3927          */                                      3151          */
3928         next_color = work_next_color(wq->work    3152         next_color = work_next_color(wq->work_color);
3929                                                  3153 
3930         if (next_color != wq->flush_color) {     3154         if (next_color != wq->flush_color) {
3931                 /*                               3155                 /*
3932                  * Color space is not full.      3156                  * Color space is not full.  The current work_color
3933                  * becomes our flush_color an    3157                  * becomes our flush_color and work_color is advanced
3934                  * by one.                       3158                  * by one.
3935                  */                              3159                  */
3936                 WARN_ON_ONCE(!list_empty(&wq-    3160                 WARN_ON_ONCE(!list_empty(&wq->flusher_overflow));
3937                 this_flusher.flush_color = wq    3161                 this_flusher.flush_color = wq->work_color;
3938                 wq->work_color = next_color;     3162                 wq->work_color = next_color;
3939                                                  3163 
3940                 if (!wq->first_flusher) {        3164                 if (!wq->first_flusher) {
3941                         /* no flush in progre    3165                         /* no flush in progress, become the first flusher */
3942                         WARN_ON_ONCE(wq->flus    3166                         WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color);
3943                                                  3167 
3944                         wq->first_flusher = &    3168                         wq->first_flusher = &this_flusher;
3945                                                  3169 
3946                         if (!flush_workqueue_    3170                         if (!flush_workqueue_prep_pwqs(wq, wq->flush_color,
3947                                                  3171                                                        wq->work_color)) {
3948                                 /* nothing to    3172                                 /* nothing to flush, done */
3949                                 wq->flush_col    3173                                 wq->flush_color = next_color;
3950                                 wq->first_flu    3174                                 wq->first_flusher = NULL;
3951                                 goto out_unlo    3175                                 goto out_unlock;
3952                         }                        3176                         }
3953                 } else {                         3177                 } else {
3954                         /* wait in queue */      3178                         /* wait in queue */
3955                         WARN_ON_ONCE(wq->flus    3179                         WARN_ON_ONCE(wq->flush_color == this_flusher.flush_color);
3956                         list_add_tail(&this_f    3180                         list_add_tail(&this_flusher.list, &wq->flusher_queue);
3957                         flush_workqueue_prep_    3181                         flush_workqueue_prep_pwqs(wq, -1, wq->work_color);
3958                 }                                3182                 }
3959         } else {                                 3183         } else {
3960                 /*                               3184                 /*
3961                  * Oops, color space is full,    3185                  * Oops, color space is full, wait on overflow queue.
3962                  * The next flush completion     3186                  * The next flush completion will assign us
3963                  * flush_color and transfer t    3187                  * flush_color and transfer to flusher_queue.
3964                  */                              3188                  */
3965                 list_add_tail(&this_flusher.l    3189                 list_add_tail(&this_flusher.list, &wq->flusher_overflow);
3966         }                                        3190         }
3967                                                  3191 
3968         check_flush_dependency(wq, NULL);        3192         check_flush_dependency(wq, NULL);
3969                                                  3193 
3970         mutex_unlock(&wq->mutex);                3194         mutex_unlock(&wq->mutex);
3971                                                  3195 
3972         wait_for_completion(&this_flusher.don    3196         wait_for_completion(&this_flusher.done);
3973                                                  3197 
3974         /*                                       3198         /*
3975          * Wake-up-and-cascade phase             3199          * Wake-up-and-cascade phase
3976          *                                       3200          *
3977          * First flushers are responsible for    3201          * First flushers are responsible for cascading flushes and
3978          * handling overflow.  Non-first flus    3202          * handling overflow.  Non-first flushers can simply return.
3979          */                                      3203          */
3980         if (READ_ONCE(wq->first_flusher) != &    3204         if (READ_ONCE(wq->first_flusher) != &this_flusher)
3981                 return;                          3205                 return;
3982                                                  3206 
3983         mutex_lock(&wq->mutex);                  3207         mutex_lock(&wq->mutex);
3984                                                  3208 
3985         /* we might have raced, check again w    3209         /* we might have raced, check again with mutex held */
3986         if (wq->first_flusher != &this_flushe    3210         if (wq->first_flusher != &this_flusher)
3987                 goto out_unlock;                 3211                 goto out_unlock;
3988                                                  3212 
3989         WRITE_ONCE(wq->first_flusher, NULL);     3213         WRITE_ONCE(wq->first_flusher, NULL);
3990                                                  3214 
3991         WARN_ON_ONCE(!list_empty(&this_flushe    3215         WARN_ON_ONCE(!list_empty(&this_flusher.list));
3992         WARN_ON_ONCE(wq->flush_color != this_    3216         WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color);
3993                                                  3217 
3994         while (true) {                           3218         while (true) {
3995                 struct wq_flusher *next, *tmp    3219                 struct wq_flusher *next, *tmp;
3996                                                  3220 
3997                 /* complete all the flushers     3221                 /* complete all the flushers sharing the current flush color */
3998                 list_for_each_entry_safe(next    3222                 list_for_each_entry_safe(next, tmp, &wq->flusher_queue, list) {
3999                         if (next->flush_color    3223                         if (next->flush_color != wq->flush_color)
4000                                 break;           3224                                 break;
4001                         list_del_init(&next->    3225                         list_del_init(&next->list);
4002                         complete(&next->done)    3226                         complete(&next->done);
4003                 }                                3227                 }
4004                                                  3228 
4005                 WARN_ON_ONCE(!list_empty(&wq-    3229                 WARN_ON_ONCE(!list_empty(&wq->flusher_overflow) &&
4006                              wq->flush_color     3230                              wq->flush_color != work_next_color(wq->work_color));
4007                                                  3231 
4008                 /* this flush_color is finish    3232                 /* this flush_color is finished, advance by one */
4009                 wq->flush_color = work_next_c    3233                 wq->flush_color = work_next_color(wq->flush_color);
4010                                                  3234 
4011                 /* one color has been freed,     3235                 /* one color has been freed, handle overflow queue */
4012                 if (!list_empty(&wq->flusher_    3236                 if (!list_empty(&wq->flusher_overflow)) {
4013                         /*                       3237                         /*
4014                          * Assign the same co    3238                          * Assign the same color to all overflowed
4015                          * flushers, advance     3239                          * flushers, advance work_color and append to
4016                          * flusher_queue.  Th    3240                          * flusher_queue.  This is the start-to-wait
4017                          * phase for these ov    3241                          * phase for these overflowed flushers.
4018                          */                      3242                          */
4019                         list_for_each_entry(t    3243                         list_for_each_entry(tmp, &wq->flusher_overflow, list)
4020                                 tmp->flush_co    3244                                 tmp->flush_color = wq->work_color;
4021                                                  3245 
4022                         wq->work_color = work    3246                         wq->work_color = work_next_color(wq->work_color);
4023                                                  3247 
4024                         list_splice_tail_init    3248                         list_splice_tail_init(&wq->flusher_overflow,
4025                                                  3249                                               &wq->flusher_queue);
4026                         flush_workqueue_prep_    3250                         flush_workqueue_prep_pwqs(wq, -1, wq->work_color);
4027                 }                                3251                 }
4028                                                  3252 
4029                 if (list_empty(&wq->flusher_q    3253                 if (list_empty(&wq->flusher_queue)) {
4030                         WARN_ON_ONCE(wq->flus    3254                         WARN_ON_ONCE(wq->flush_color != wq->work_color);
4031                         break;                   3255                         break;
4032                 }                                3256                 }
4033                                                  3257 
4034                 /*                               3258                 /*
4035                  * Need to flush more colors.    3259                  * Need to flush more colors.  Make the next flusher
4036                  * the new first flusher and     3260                  * the new first flusher and arm pwqs.
4037                  */                              3261                  */
4038                 WARN_ON_ONCE(wq->flush_color     3262                 WARN_ON_ONCE(wq->flush_color == wq->work_color);
4039                 WARN_ON_ONCE(wq->flush_color     3263                 WARN_ON_ONCE(wq->flush_color != next->flush_color);
4040                                                  3264 
4041                 list_del_init(&next->list);      3265                 list_del_init(&next->list);
4042                 wq->first_flusher = next;        3266                 wq->first_flusher = next;
4043                                                  3267 
4044                 if (flush_workqueue_prep_pwqs    3268                 if (flush_workqueue_prep_pwqs(wq, wq->flush_color, -1))
4045                         break;                   3269                         break;
4046                                                  3270 
4047                 /*                               3271                 /*
4048                  * Meh... this color is alrea    3272                  * Meh... this color is already done, clear first
4049                  * flusher and repeat cascadi    3273                  * flusher and repeat cascading.
4050                  */                              3274                  */
4051                 wq->first_flusher = NULL;        3275                 wq->first_flusher = NULL;
4052         }                                        3276         }
4053                                                  3277 
4054 out_unlock:                                      3278 out_unlock:
4055         mutex_unlock(&wq->mutex);                3279         mutex_unlock(&wq->mutex);
4056 }                                                3280 }
4057 EXPORT_SYMBOL(__flush_workqueue);                3281 EXPORT_SYMBOL(__flush_workqueue);
4058                                                  3282 
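/*
 * Minimal usage sketch (not from the kernel tree; "my_wq", "my_work" and
 * "my_handler" are hypothetical names): flush a driver-owned workqueue so
 * that everything queued before this point has finished executing.
 */
#include <linux/workqueue.h>

static struct workqueue_struct *my_wq;
static void my_handler(struct work_struct *work) { }
static DECLARE_WORK(my_work, my_handler);

static void example_flush(void)
{
	my_wq = alloc_workqueue("my_wq", 0, 0);
	if (!my_wq)
		return;

	queue_work(my_wq, &my_work);

	/* sleeps until my_work (and anything else already queued) has run */
	flush_workqueue(my_wq);

	destroy_workqueue(my_wq);
}
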
4059 /**                                              3283 /**
4060  * drain_workqueue - drain a workqueue           3284  * drain_workqueue - drain a workqueue
4061  * @wq: workqueue to drain                       3285  * @wq: workqueue to drain
4062  *                                               3286  *
4063  * Wait until the workqueue becomes empty.  W    3287  * Wait until the workqueue becomes empty.  While draining is in progress,
4064  * only chain queueing is allowed.  IOW, only    3288  * only chain queueing is allowed.  IOW, only currently pending or running
4065  * work items on @wq can queue further work i    3289  * work items on @wq can queue further work items on it.  @wq is flushed
4066  * repeatedly until it becomes empty.  The nu    3290  * repeatedly until it becomes empty.  The number of flushing is determined
4067  * by the depth of chaining and should be rel    3291  * by the depth of chaining and should be relatively short.  Whine if it
4068  * takes too long.                               3292  * takes too long.
4069  */                                              3293  */
4070 void drain_workqueue(struct workqueue_struct     3294 void drain_workqueue(struct workqueue_struct *wq)
4071 {                                                3295 {
4072         unsigned int flush_cnt = 0;              3296         unsigned int flush_cnt = 0;
4073         struct pool_workqueue *pwq;              3297         struct pool_workqueue *pwq;
4074                                                  3298 
4075         /*                                       3299         /*
4076          * __queue_work() needs to test wheth    3300          * __queue_work() needs to test whether there are drainers, is much
4077          * hotter than drain_workqueue() and     3301          * hotter than drain_workqueue() and already looks at @wq->flags.
4078          * Use __WQ_DRAINING so that queue do    3302          * Use __WQ_DRAINING so that queue doesn't have to check nr_drainers.
4079          */                                      3303          */
4080         mutex_lock(&wq->mutex);                  3304         mutex_lock(&wq->mutex);
4081         if (!wq->nr_drainers++)                  3305         if (!wq->nr_drainers++)
4082                 wq->flags |= __WQ_DRAINING;      3306                 wq->flags |= __WQ_DRAINING;
4083         mutex_unlock(&wq->mutex);                3307         mutex_unlock(&wq->mutex);
4084 reflush:                                         3308 reflush:
4085         __flush_workqueue(wq);                   3309         __flush_workqueue(wq);
4086                                                  3310 
4087         mutex_lock(&wq->mutex);                  3311         mutex_lock(&wq->mutex);
4088                                                  3312 
4089         for_each_pwq(pwq, wq) {                  3313         for_each_pwq(pwq, wq) {
4090                 bool drained;                    3314                 bool drained;
4091                                                  3315 
4092                 raw_spin_lock_irq(&pwq->pool-    3316                 raw_spin_lock_irq(&pwq->pool->lock);
4093                 drained = pwq_is_empty(pwq);  !! 3317                 drained = !pwq->nr_active && list_empty(&pwq->inactive_works);
4094                 raw_spin_unlock_irq(&pwq->poo    3318                 raw_spin_unlock_irq(&pwq->pool->lock);
4095                                                  3319 
4096                 if (drained)                     3320                 if (drained)
4097                         continue;                3321                         continue;
4098                                                  3322 
4099                 if (++flush_cnt == 10 ||         3323                 if (++flush_cnt == 10 ||
4100                     (flush_cnt % 100 == 0 &&     3324                     (flush_cnt % 100 == 0 && flush_cnt <= 1000))
4101                         pr_warn("workqueue %s    3325                         pr_warn("workqueue %s: %s() isn't complete after %u tries\n",
4102                                 wq->name, __f    3326                                 wq->name, __func__, flush_cnt);
4103                                                  3327 
4104                 mutex_unlock(&wq->mutex);        3328                 mutex_unlock(&wq->mutex);
4105                 goto reflush;                    3329                 goto reflush;
4106         }                                        3330         }
4107                                                  3331 
4108         if (!--wq->nr_drainers)                  3332         if (!--wq->nr_drainers)
4109                 wq->flags &= ~__WQ_DRAINING;     3333                 wq->flags &= ~__WQ_DRAINING;
4110         mutex_unlock(&wq->mutex);                3334         mutex_unlock(&wq->mutex);
4111 }                                                3335 }
4112 EXPORT_SYMBOL_GPL(drain_workqueue);              3336 EXPORT_SYMBOL_GPL(drain_workqueue);
4113                                                  3337 
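/*
 * Usage sketch (hypothetical names): drain_workqueue() is what you want
 * when queued work items may queue follow-up items on the same workqueue;
 * a single flush would only cover the first generation.
 */
#include <linux/workqueue.h>

static struct workqueue_struct *chain_wq;
static void chain_fn(struct work_struct *work);
static DECLARE_WORK(chain_work, chain_fn);
static bool chain_again = true;

static void chain_fn(struct work_struct *work)
{
	if (chain_again) {
		chain_again = false;
		/* chain queueing: still allowed while the workqueue drains */
		queue_work(chain_wq, &chain_work);
	}
}

static void example_drain(void)
{
	chain_wq = alloc_workqueue("chain_wq", 0, 0);
	if (!chain_wq)
		return;

	queue_work(chain_wq, &chain_work);

	/* flushes repeatedly until nothing, chained work included, is left */
	drain_workqueue(chain_wq);
	destroy_workqueue(chain_wq);
}
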
4114 static bool start_flush_work(struct work_stru    3338 static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
4115                              bool from_cancel    3339                              bool from_cancel)
4116 {                                                3340 {
4117         struct worker *worker = NULL;            3341         struct worker *worker = NULL;
4118         struct worker_pool *pool;                3342         struct worker_pool *pool;
4119         struct pool_workqueue *pwq;              3343         struct pool_workqueue *pwq;
4120         struct workqueue_struct *wq;          !! 3344 
                                                   >> 3345         might_sleep();
4121                                                  3346 
4122         rcu_read_lock();                         3347         rcu_read_lock();
4123         pool = get_work_pool(work);              3348         pool = get_work_pool(work);
4124         if (!pool) {                             3349         if (!pool) {
4125                 rcu_read_unlock();               3350                 rcu_read_unlock();
4126                 return false;                    3351                 return false;
4127         }                                        3352         }
4128                                                  3353 
4129         raw_spin_lock_irq(&pool->lock);          3354         raw_spin_lock_irq(&pool->lock);
4130         /* see the comment in try_to_grab_pen    3355         /* see the comment in try_to_grab_pending() with the same code */
4131         pwq = get_work_pwq(work);                3356         pwq = get_work_pwq(work);
4132         if (pwq) {                               3357         if (pwq) {
4133                 if (unlikely(pwq->pool != poo    3358                 if (unlikely(pwq->pool != pool))
4134                         goto already_gone;       3359                         goto already_gone;
4135         } else {                                 3360         } else {
4136                 worker = find_worker_executin    3361                 worker = find_worker_executing_work(pool, work);
4137                 if (!worker)                     3362                 if (!worker)
4138                         goto already_gone;       3363                         goto already_gone;
4139                 pwq = worker->current_pwq;       3364                 pwq = worker->current_pwq;
4140         }                                        3365         }
4141                                                  3366 
4142         wq = pwq->wq;                         !! 3367         check_flush_dependency(pwq->wq, work);
4143         check_flush_dependency(wq, work);     << 
4144                                                  3368 
4145         insert_wq_barrier(pwq, barr, work, wo    3369         insert_wq_barrier(pwq, barr, work, worker);
4146         raw_spin_unlock_irq(&pool->lock);        3370         raw_spin_unlock_irq(&pool->lock);
4147                                                  3371 
4148         touch_work_lockdep_map(work, wq);     << 
4149                                               << 
4150         /*                                       3372         /*
4151          * Force a lock recursion deadlock wh    3373          * Force a lock recursion deadlock when using flush_work() inside a
4152          * single-threaded or rescuer equippe    3374          * single-threaded or rescuer equipped workqueue.
4153          *                                       3375          *
4154          * For single threaded workqueues the    3376          * For single threaded workqueues the deadlock happens when the work
4155          * is after the work issuing the flus    3377          * is after the work issuing the flush_work(). For rescuer equipped
4156          * workqueues the deadlock happens wh    3378          * workqueues the deadlock happens when the rescuer stalls, blocking
4157          * forward progress.                     3379          * forward progress.
4158          */                                      3380          */
4159         if (!from_cancel && (wq->saved_max_ac !! 3381         if (!from_cancel &&
4160                 touch_wq_lockdep_map(wq);     !! 3382             (pwq->wq->saved_max_active == 1 || pwq->wq->rescuer)) {
4161                                               !! 3383                 lock_map_acquire(&pwq->wq->lockdep_map);
                                                   >> 3384                 lock_map_release(&pwq->wq->lockdep_map);
                                                   >> 3385         }
4162         rcu_read_unlock();                       3386         rcu_read_unlock();
4163         return true;                             3387         return true;
4164 already_gone:                                    3388 already_gone:
4165         raw_spin_unlock_irq(&pool->lock);        3389         raw_spin_unlock_irq(&pool->lock);
4166         rcu_read_unlock();                       3390         rcu_read_unlock();
4167         return false;                            3391         return false;
4168 }                                                3392 }
4169                                                  3393 
4170 static bool __flush_work(struct work_struct *    3394 static bool __flush_work(struct work_struct *work, bool from_cancel)
4171 {                                                3395 {
4172         struct wq_barrier barr;                  3396         struct wq_barrier barr;
4173                                                  3397 
4174         if (WARN_ON(!wq_online))                 3398         if (WARN_ON(!wq_online))
4175                 return false;                    3399                 return false;
4176                                                  3400 
4177         if (WARN_ON(!work->func))                3401         if (WARN_ON(!work->func))
4178                 return false;                    3402                 return false;
4179                                                  3403 
4180         if (!start_flush_work(work, &barr, fr !! 3404         lock_map_acquire(&work->lockdep_map);
4181                 return false;                 !! 3405         lock_map_release(&work->lockdep_map);
4182                                               << 
4183         /*                                    << 
4184          * start_flush_work() returned %true. << 
4185          * that @work must have been executin << 
4186          * can't currently be queued. Its dat << 
4187          * was queued on a BH workqueue, we a << 
4188          * BH context and thus can be busy-wa << 
4189          */                                   << 
4190         if (from_cancel) {                    << 
4191                 unsigned long data = *work_da << 
4192                                                  3406 
4193                 if (!WARN_ON_ONCE(data & WORK !! 3407         if (start_flush_work(work, &barr, from_cancel)) {
4194                     (data & WORK_OFFQ_BH)) {  !! 3408                 wait_for_completion(&barr.done);
4195                         /*                    !! 3409                 destroy_work_on_stack(&barr.work);
4196                          * On RT, prevent a l !! 3410                 return true;
4197                          * soft interrupt pro !! 3411         } else {
4198                          * running by keeping !! 3412                 return false;
4199                          * runs on a differen << 
4200                          * than doing the BH  << 
4201                          * This is copied fro << 
4202                          * kernel/softirq.c:: << 
4203                          */                   << 
4204                         while (!try_wait_for_ << 
4205                                 if (IS_ENABLE << 
4206                                         local << 
4207                                         local << 
4208                                 } else {      << 
4209                                         cpu_r << 
4210                                 }             << 
4211                         }                     << 
4212                         goto out_destroy;     << 
4213                 }                             << 
4214         }                                        3413         }
4215                                               << 
4216         wait_for_completion(&barr.done);      << 
4217                                               << 
4218 out_destroy:                                  << 
4219         destroy_work_on_stack(&barr.work);    << 
4220         return true;                          << 
4221 }                                                3414 }
4222                                                  3415 
4223 /**                                              3416 /**
4224  * flush_work - wait for a work to finish exe    3417  * flush_work - wait for a work to finish executing the last queueing instance
4225  * @work: the work to flush                      3418  * @work: the work to flush
4226  *                                               3419  *
4227  * Wait until @work has finished execution.      3420  * Wait until @work has finished execution.  @work is guaranteed to be idle
4228  * on return if it hasn't been requeued since    3421  * on return if it hasn't been requeued since flush started.
4229  *                                               3422  *
4230  * Return:                                       3423  * Return:
4231  * %true if flush_work() waited for the work     3424  * %true if flush_work() waited for the work to finish execution,
4232  * %false if it was already idle.                3425  * %false if it was already idle.
4233  */                                              3426  */
4234 bool flush_work(struct work_struct *work)        3427 bool flush_work(struct work_struct *work)
4235 {                                                3428 {
4236         might_sleep();                        << 
4237         return __flush_work(work, false);        3429         return __flush_work(work, false);
4238 }                                                3430 }
4239 EXPORT_SYMBOL_GPL(flush_work);                   3431 EXPORT_SYMBOL_GPL(flush_work);
4240                                                  3432 
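/*
 * Usage sketch (hypothetical driver context): flush_work() waits for one
 * specific work item rather than the whole workqueue, e.g. before reading
 * results that the handler fills in.
 */
#include <linux/workqueue.h>

struct my_dev {
	struct work_struct refresh_work;
	int cached_value;
};

static int my_dev_read(struct my_dev *dev)
{
	/* returns %true if it had to wait for the handler to finish */
	flush_work(&dev->refresh_work);

	return dev->cached_value;
}
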
                                                   >> 3433 struct cwt_wait {
                                                   >> 3434         wait_queue_entry_t              wait;
                                                   >> 3435         struct work_struct      *work;
                                                   >> 3436 };
                                                   >> 3437 
                                                   >> 3438 static int cwt_wakefn(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
                                                   >> 3439 {
                                                   >> 3440         struct cwt_wait *cwait = container_of(wait, struct cwt_wait, wait);
                                                   >> 3441 
                                                   >> 3442         if (cwait->work != key)
                                                   >> 3443                 return 0;
                                                   >> 3444         return autoremove_wake_function(wait, mode, sync, key);
                                                   >> 3445 }
                                                   >> 3446 
                                                   >> 3447 static bool __cancel_work_timer(struct work_struct *work, bool is_dwork)
                                                   >> 3448 {
                                                   >> 3449         static DECLARE_WAIT_QUEUE_HEAD(cancel_waitq);
                                                   >> 3450         unsigned long flags;
                                                   >> 3451         int ret;
                                                   >> 3452 
                                                   >> 3453         do {
                                                   >> 3454                 ret = try_to_grab_pending(work, is_dwork, &flags);
                                                   >> 3455                 /*
                                                   >> 3456                  * If someone else is already canceling, wait for it to
                                                   >> 3457                  * finish.  flush_work() doesn't work for PREEMPT_NONE
                                                   >> 3458                  * because we may get scheduled between @work's completion
                                                   >> 3459                  * and the other canceling task resuming and clearing
                                                   >> 3460                  * CANCELING - flush_work() will return false immediately
                                                   >> 3461                  * as @work is no longer busy, try_to_grab_pending() will
                                                   >> 3462                  * return -ENOENT as @work is still being canceled and the
                                                   >> 3463                  * other canceling task won't be able to clear CANCELING as
                                                   >> 3464                  * we're hogging the CPU.
                                                   >> 3465                  *
                                                   >> 3466                  * Let's wait for completion using a waitqueue.  As this
                                                   >> 3467                  * may lead to the thundering herd problem, use a custom
                                                   >> 3468                  * wake function which matches @work along with exclusive
                                                   >> 3469                  * wait and wakeup.
                                                   >> 3470                  */
                                                   >> 3471                 if (unlikely(ret == -ENOENT)) {
                                                   >> 3472                         struct cwt_wait cwait;
                                                   >> 3473 
                                                   >> 3474                         init_wait(&cwait.wait);
                                                   >> 3475                         cwait.wait.func = cwt_wakefn;
                                                   >> 3476                         cwait.work = work;
                                                   >> 3477 
                                                   >> 3478                         prepare_to_wait_exclusive(&cancel_waitq, &cwait.wait,
                                                   >> 3479                                                   TASK_UNINTERRUPTIBLE);
                                                   >> 3480                         if (work_is_canceling(work))
                                                   >> 3481                                 schedule();
                                                   >> 3482                         finish_wait(&cancel_waitq, &cwait.wait);
                                                   >> 3483                 }
                                                   >> 3484         } while (unlikely(ret < 0));
                                                   >> 3485 
                                                   >> 3486         /* tell other tasks trying to grab @work to back off */
                                                   >> 3487         mark_work_canceling(work);
                                                   >> 3488         local_irq_restore(flags);
                                                   >> 3489 
                                                   >> 3490         /*
                                                   >> 3491          * This allows canceling during early boot.  We know that @work
                                                   >> 3492          * isn't executing.
                                                   >> 3493          */
                                                   >> 3494         if (wq_online)
                                                   >> 3495                 __flush_work(work, true);
                                                   >> 3496 
                                                   >> 3497         clear_work_data(work);
                                                   >> 3498 
                                                   >> 3499         /*
                                                   >> 3500          * Paired with prepare_to_wait() above so that either
                                                   >> 3501          * waitqueue_active() is visible here or !work_is_canceling() is
                                                   >> 3502          * visible there.
                                                   >> 3503          */
                                                   >> 3504         smp_mb();
                                                   >> 3505         if (waitqueue_active(&cancel_waitq))
                                                   >> 3506                 __wake_up(&cancel_waitq, TASK_NORMAL, 1, work);
                                                   >> 3507 
                                                   >> 3508         return ret;
                                                   >> 3509 }
                                                   >> 3510 
                                                   >> 3511 /**
                                                   >> 3512  * cancel_work_sync - cancel a work and wait for it to finish
                                                   >> 3513  * @work: the work to cancel
                                                   >> 3514  *
                                                   >> 3515  * Cancel @work and wait for its execution to finish.  This function
                                                   >> 3516  * can be used even if the work re-queues itself or migrates to
                                                   >> 3517  * another workqueue.  On return from this function, @work is
                                                   >> 3518  * guaranteed to be not pending or executing on any CPU.
                                                   >> 3519  *
                                                   >> 3520  * cancel_work_sync(&delayed_work->work) must not be used for
                                                   >> 3521  * delayed_work's.  Use cancel_delayed_work_sync() instead.
                                                   >> 3522  *
                                                   >> 3523  * The caller must ensure that the workqueue on which @work was last
                                                   >> 3524  * queued can't be destroyed before this function returns.
                                                   >> 3525  *
                                                   >> 3526  * Return:
                                                   >> 3527  * %true if @work was pending, %false otherwise.
                                                   >> 3528  */
                                                   >> 3529 bool cancel_work_sync(struct work_struct *work)
                                                   >> 3530 {
                                                   >> 3531         return __cancel_work_timer(work, false);
                                                   >> 3532 }
                                                   >> 3533 EXPORT_SYMBOL_GPL(cancel_work_sync);
                                                   >> 3534 
4241 /**                                              3535 /**
4242  * flush_delayed_work - wait for a dwork to f    3536  * flush_delayed_work - wait for a dwork to finish executing the last queueing
4243  * @dwork: the delayed work to flush             3537  * @dwork: the delayed work to flush
4244  *                                               3538  *
4245  * Delayed timer is cancelled and the pending    3539  * Delayed timer is cancelled and the pending work is queued for
4246  * immediate execution.  Like flush_work(), t    3540  * immediate execution.  Like flush_work(), this function only
4247  * considers the last queueing instance of @d    3541  * considers the last queueing instance of @dwork.
4248  *                                               3542  *
4249  * Return:                                       3543  * Return:
4250  * %true if flush_work() waited for the work     3544  * %true if flush_work() waited for the work to finish execution,
4251  * %false if it was already idle.                3545  * %false if it was already idle.
4252  */                                              3546  */
4253 bool flush_delayed_work(struct delayed_work *    3547 bool flush_delayed_work(struct delayed_work *dwork)
4254 {                                                3548 {
4255         local_irq_disable();                     3549         local_irq_disable();
4256         if (del_timer_sync(&dwork->timer))       3550         if (del_timer_sync(&dwork->timer))
4257                 __queue_work(dwork->cpu, dwor    3551                 __queue_work(dwork->cpu, dwork->wq, &dwork->work);
4258         local_irq_enable();                      3552         local_irq_enable();
4259         return flush_work(&dwork->work);         3553         return flush_work(&dwork->work);
4260 }                                                3554 }
4261 EXPORT_SYMBOL(flush_delayed_work);               3555 EXPORT_SYMBOL(flush_delayed_work);
4262                                                  3556 
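/*
 * Usage sketch (hypothetical names): flush_delayed_work() cancels the
 * pending timer, queues the work for immediate execution and waits for it,
 * so deferred state is written out now rather than at the deadline.
 */
#include <linux/workqueue.h>

struct my_cache {
	struct delayed_work writeback_work;
};

static void my_cache_sync(struct my_cache *cache)
{
	/* run any pending deferred writeback immediately and wait for it */
	flush_delayed_work(&cache->writeback_work);
}
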
4263 /**                                              3557 /**
4264  * flush_rcu_work - wait for a rwork to finis    3558  * flush_rcu_work - wait for a rwork to finish executing the last queueing
4265  * @rwork: the rcu work to flush                 3559  * @rwork: the rcu work to flush
4266  *                                               3560  *
4267  * Return:                                       3561  * Return:
4268  * %true if flush_rcu_work() waited for the w    3562  * %true if flush_rcu_work() waited for the work to finish execution,
4269  * %false if it was already idle.                3563  * %false if it was already idle.
4270  */                                              3564  */
4271 bool flush_rcu_work(struct rcu_work *rwork)      3565 bool flush_rcu_work(struct rcu_work *rwork)
4272 {                                                3566 {
4273         if (test_bit(WORK_STRUCT_PENDING_BIT,    3567         if (test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&rwork->work))) {
4274                 rcu_barrier();                   3568                 rcu_barrier();
4275                 flush_work(&rwork->work);        3569                 flush_work(&rwork->work);
4276                 return true;                     3570                 return true;
4277         } else {                                 3571         } else {
4278                 return flush_work(&rwork->wor    3572                 return flush_work(&rwork->work);
4279         }                                        3573         }
4280 }                                                3574 }
4281 EXPORT_SYMBOL(flush_rcu_work);                   3575 EXPORT_SYMBOL(flush_rcu_work);
4282                                                  3576 
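/*
 * Usage sketch (hypothetical names): an rcu_work runs after an RCU grace
 * period; flush_rcu_work() waits for both the grace period and the handler
 * when the item is still pending.
 */
#include <linux/workqueue.h>

static void reclaim_fn(struct work_struct *work) { }
static struct rcu_work reclaim_rwork;

static void example_rcu_flush(void)
{
	INIT_RCU_WORK(&reclaim_rwork, reclaim_fn);
	queue_rcu_work(system_wq, &reclaim_rwork);

	/* returns %true if it waited for reclaim_fn to finish */
	flush_rcu_work(&reclaim_rwork);
}
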
4283 static void work_offqd_disable(struct work_of !! 3577 static bool __cancel_work(struct work_struct *work, bool is_dwork)
4284 {                                             << 
4285         const unsigned long max = (1lu << WOR << 
4286                                               << 
4287         if (likely(offqd->disable < max))     << 
4288                 offqd->disable++;             << 
4289         else                                  << 
4290                 WARN_ONCE(true, "workqueue: w << 
4291 }                                             << 
4292                                               << 
4293 static void work_offqd_enable(struct work_off << 
4294 {                                                3578 {
4295         if (likely(offqd->disable > 0))       !! 3579         unsigned long flags;
4296                 offqd->disable--;             << 
4297         else                                  << 
4298                 WARN_ONCE(true, "workqueue: w << 
4299 }                                             << 
4300                                               << 
4301 static bool __cancel_work(struct work_struct  << 
4302 {                                             << 
4303         struct work_offq_data offqd;          << 
4304         unsigned long irq_flags;              << 
4305         int ret;                                 3580         int ret;
4306                                                  3581 
4307         ret = work_grab_pending(work, cflags, !! 3582         do {
4308                                               !! 3583                 ret = try_to_grab_pending(work, is_dwork, &flags);
4309         work_offqd_unpack(&offqd, *work_data_ !! 3584         } while (unlikely(ret == -EAGAIN));
4310                                               << 
4311         if (cflags & WORK_CANCEL_DISABLE)     << 
4312                 work_offqd_disable(&offqd);   << 
4313                                               << 
4314         set_work_pool_and_clear_pending(work, << 
4315                                         work_ << 
4316         local_irq_restore(irq_flags);         << 
4317         return ret;                           << 
4318 }                                             << 
4319                                               << 
4320 static bool __cancel_work_sync(struct work_st << 
4321 {                                             << 
4322         bool ret;                             << 
4323                                               << 
4324         ret = __cancel_work(work, cflags | WO << 
4325                                               << 
4326         if (*work_data_bits(work) & WORK_OFFQ << 
4327                 WARN_ON_ONCE(in_hardirq());   << 
4328         else                                  << 
4329                 might_sleep();                << 
4330                                               << 
4331         /*                                    << 
4332          * Skip __flush_work() during early b << 
4333          * executing. This allows canceling d << 
4334          */                                   << 
4335         if (wq_online)                        << 
4336                 __flush_work(work, true);     << 
4337                                                  3585 
4338         if (!(cflags & WORK_CANCEL_DISABLE))  !! 3586         if (unlikely(ret < 0))
4339                 enable_work(work);            !! 3587                 return false;
4340                                                  3588 
                                                   >> 3589         set_work_pool_and_clear_pending(work, get_work_pool_id(work));
                                                   >> 3590         local_irq_restore(flags);
4341         return ret;                              3591         return ret;
4342 }                                                3592 }
4343                                                  3593 
4344 /*                                               3594 /*
4345  * See cancel_delayed_work()                     3595  * See cancel_delayed_work()
4346  */                                              3596  */
4347 bool cancel_work(struct work_struct *work)       3597 bool cancel_work(struct work_struct *work)
4348 {                                                3598 {
4349         return __cancel_work(work, 0);        !! 3599         return __cancel_work(work, false);
4350 }                                                3600 }
4351 EXPORT_SYMBOL(cancel_work);                      3601 EXPORT_SYMBOL(cancel_work);
4352                                                  3602 
4353 /**                                              3603 /**
4354  * cancel_work_sync - cancel a work and wait  << 
4355  * @work: the work to cancel                  << 
4356  *                                            << 
4357  * Cancel @work and wait for its execution to << 
4358  * even if the work re-queues itself or migra << 
4359  * from this function, @work is guaranteed to << 
4360  * CPU as long as there aren't racing enqueue << 
4361  *                                            << 
4362  * cancel_work_sync(&delayed_work->work) must << 
4363  * Use cancel_delayed_work_sync() instead.    << 
4364  *                                            << 
4365  * Must be called from a sleepable context if << 
4366  * workqueue. Can also be called from non-har << 
4367  * if @work was last queued on a BH workqueue << 
4368  *                                            << 
4369  * Returns %true if @work was pending, %false << 
4370  */                                           << 
4371 bool cancel_work_sync(struct work_struct *wor << 
4372 {                                             << 
4373         return __cancel_work_sync(work, 0);   << 
4374 }                                             << 
4375 EXPORT_SYMBOL_GPL(cancel_work_sync);          << 
4376                                               << 
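/*
 * Usage sketch (hypothetical driver): cancel_work_sync() in a teardown
 * path guarantees the handler is neither pending nor running afterwards,
 * even if it had re-queued itself.
 */
#include <linux/workqueue.h>

struct my_port {
	struct work_struct rx_work;
	void *rx_buf;
};

static void my_port_shutdown(struct my_port *port)
{
	/* after this, rx_work can no longer touch rx_buf */
	cancel_work_sync(&port->rx_work);
	port->rx_buf = NULL;
}
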
4377 /**                                           << 
4378  * cancel_delayed_work - cancel a delayed wor    3604  * cancel_delayed_work - cancel a delayed work
4379  * @dwork: delayed_work to cancel                3605  * @dwork: delayed_work to cancel
4380  *                                               3606  *
4381  * Kill off a pending delayed_work.              3607  * Kill off a pending delayed_work.
4382  *                                               3608  *
4383  * Return: %true if @dwork was pending and ca    3609  * Return: %true if @dwork was pending and canceled; %false if it wasn't
4384  * pending.                                      3610  * pending.
4385  *                                               3611  *
4386  * Note:                                         3612  * Note:
4387  * The work callback function may still be ru    3613  * The work callback function may still be running on return, unless
4388  * it returns %true and the work doesn't re-a    3614  * it returns %true and the work doesn't re-arm itself.  Explicitly flush or
4389  * use cancel_delayed_work_sync() to wait on     3615  * use cancel_delayed_work_sync() to wait on it.
4390  *                                               3616  *
4391  * This function is safe to call from any con    3617  * This function is safe to call from any context including IRQ handler.
4392  */                                              3618  */
4393 bool cancel_delayed_work(struct delayed_work     3619 bool cancel_delayed_work(struct delayed_work *dwork)
4394 {                                                3620 {
4395         return __cancel_work(&dwork->work, WO !! 3621         return __cancel_work(&dwork->work, true);
4396 }                                                3622 }
4397 EXPORT_SYMBOL(cancel_delayed_work);              3623 EXPORT_SYMBOL(cancel_delayed_work);
4398                                                  3624 
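/*
 * Usage sketch (hypothetical names): cancel_delayed_work() only stops a
 * not-yet-started item and is safe from atomic context; pair it with
 * cancel_delayed_work_sync() or a flush when the handler must also have
 * finished.  cancel_work() is the same operation for plain work items.
 */
#include <linux/workqueue.h>

struct my_link {
	struct delayed_work timeout_work;
};

/* called from the interrupt handler when the awaited event arrives */
static void my_link_event_irq(struct my_link *link)
{
	/* %false here means the timeout handler had already started */
	cancel_delayed_work(&link->timeout_work);
}
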
4399 /**                                              3625 /**
4400  * cancel_delayed_work_sync - cancel a delaye    3626  * cancel_delayed_work_sync - cancel a delayed work and wait for it to finish
4401  * @dwork: the delayed work to cancel            3627  * @dwork: the delayed work to cancel
4402  *                                               3628  *
4403  * This is cancel_work_sync() for delayed wor    3629  * This is cancel_work_sync() for delayed works.
4404  *                                               3630  *
4405  * Return:                                       3631  * Return:
4406  * %true if @dwork was pending, %false otherw    3632  * %true if @dwork was pending, %false otherwise.
4407  */                                              3633  */
4408 bool cancel_delayed_work_sync(struct delayed_    3634 bool cancel_delayed_work_sync(struct delayed_work *dwork)
4409 {                                                3635 {
4410         return __cancel_work_sync(&dwork->wor !! 3636         return __cancel_work_timer(&dwork->work, true);
4411 }                                                3637 }
4412 EXPORT_SYMBOL(cancel_delayed_work_sync);         3638 EXPORT_SYMBOL(cancel_delayed_work_sync);
4413                                                  3639 
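/*
 * Usage sketch (hypothetical driver remove path): the _sync variant also
 * waits for a handler that has already started, so per-device state can be
 * freed safely afterwards.  Typically called from a context that may sleep.
 */
#include <linux/workqueue.h>
#include <linux/slab.h>

struct my_sensor {
	struct delayed_work poll_work;
	void *state;
};

static void my_sensor_remove(struct my_sensor *sensor)
{
	cancel_delayed_work_sync(&sensor->poll_work);
	kfree(sensor->state);		/* no poll_work user left */
}
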
4414 /**                                              3640 /**
4415  * disable_work - Disable and cancel a work i << 
4416  * @work: work item to disable                << 
4417  *                                            << 
4418  * Disable @work by incrementing its disable  << 
4419  * pending. As long as the disable count is n << 
4420  * will fail and return %false. The maximum s << 
4421  * power of %WORK_OFFQ_DISABLE_BITS, currentl << 
4422  *                                            << 
4423  * Can be called from any context. Returns %t << 
4424  * otherwise.                                 << 
4425  */                                           << 
4426 bool disable_work(struct work_struct *work)   << 
4427 {                                             << 
4428         return __cancel_work(work, WORK_CANCE << 
4429 }                                             << 
4430 EXPORT_SYMBOL_GPL(disable_work);              << 
4431                                               << 
4432 /**                                           << 
4433  * disable_work_sync - Disable, cancel and dr << 
4434  * @work: work item to disable                << 
4435  *                                            << 
4436  * Similar to disable_work() but also wait fo << 
4437  * executing.                                 << 
4438  *                                            << 
4439  * Must be called from a sleepable context if << 
4440  * workqueue. Can also be called from non-har << 
4441  * if @work was last queued on a BH workqueue << 
4442  *                                            << 
4443  * Returns %true if @work was pending, %false << 
4444  */                                           << 
4445 bool disable_work_sync(struct work_struct *wo << 
4446 {                                             << 
4447         return __cancel_work_sync(work, WORK_ << 
4448 }                                             << 
4449 EXPORT_SYMBOL_GPL(disable_work_sync);         << 
4450                                               << 
4451 /**                                           << 
4452  * enable_work - Enable a work item           << 
4453  * @work: work item to enable                 << 
4454  *                                            << 
4455  * Undo disable_work[_sync]() by decrementing << 
4456  * only be queued if its disable count is 0.  << 
4457  *                                            << 
4458  * Can be called from any context. Returns %t << 
4459  * Otherwise, %false.                         << 
4460  */                                           << 
4461 bool enable_work(struct work_struct *work)    << 
4462 {                                             << 
4463         struct work_offq_data offqd;          << 
4464         unsigned long irq_flags;              << 
4465                                               << 
4466         work_grab_pending(work, 0, &irq_flags << 
4467                                               << 
4468         work_offqd_unpack(&offqd, *work_data_ << 
4469         work_offqd_enable(&offqd);            << 
4470         set_work_pool_and_clear_pending(work, << 
4471                                         work_ << 
4472         local_irq_restore(irq_flags);         << 
4473                                               << 
4474         return !offqd.disable;                << 
4475 }                                             << 
4476 EXPORT_SYMBOL_GPL(enable_work);               << 
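
Hedged sketch of the disable/enable pairing introduced above (these helpers exist only on the 6.11 side of this diff). The scenario and identifiers are assumptions; the point is that while the disable count is non-zero, concurrent queueing attempts fail instead of racing with the reconfiguration:

	#include <linux/workqueue.h>

	static struct workqueue_struct *example_wq;	/* assumed to exist */
	static struct work_struct example_work;

	static void example_reconfigure(void)
	{
		/* cancel @example_work, wait for it, and block further queueing */
		disable_work_sync(&example_work);

		/* ... reprogram state that the work function also touches ... */

		/* drop the disable count; queueing succeeds again afterwards */
		enable_work(&example_work);
	}

	static bool example_try_kick(void)
	{
		/* returns %false while the disable count is non-zero */
		return queue_work(example_wq, &example_work);
	}
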
4477                                               << 
4478 /**                                           << 
4479  * disable_delayed_work - Disable and cancel  << 
4480  * @dwork: delayed work item to disable       << 
4481  *                                            << 
4482  * disable_work() for delayed work items.     << 
4483  */                                           << 
4484 bool disable_delayed_work(struct delayed_work << 
4485 {                                             << 
4486         return __cancel_work(&dwork->work,    << 
4487                              WORK_CANCEL_DELA << 
4488 }                                             << 
4489 EXPORT_SYMBOL_GPL(disable_delayed_work);      << 
4490                                               << 
4491 /**                                           << 
4492  * disable_delayed_work_sync - Disable, cance << 
4493  * @dwork: delayed work item to disable       << 
4494  *                                            << 
4495  * disable_work_sync() for delayed work items << 
4496  */                                           << 
4497 bool disable_delayed_work_sync(struct delayed << 
4498 {                                             << 
4499         return __cancel_work_sync(&dwork->wor << 
4500                                   WORK_CANCEL << 
4501 }                                             << 
4502 EXPORT_SYMBOL_GPL(disable_delayed_work_sync); << 
4503                                               << 
4504 /**                                           << 
4505  * enable_delayed_work - Enable a delayed wor << 
4506  * @dwork: delayed work item to enable        << 
4507  *                                            << 
4508  * enable_work() for delayed work items.      << 
4509  */                                           << 
4510 bool enable_delayed_work(struct delayed_work  << 
4511 {                                             << 
4512         return enable_work(&dwork->work);     << 
4513 }                                             << 
4514 EXPORT_SYMBOL_GPL(enable_delayed_work);       << 
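
The delayed-work wrappers above follow the same pattern; a plausible (hypothetical) use is parking a periodic, self-rearming work item across a suspend/resume cycle:

	#include <linux/workqueue.h>
	#include <linux/jiffies.h>

	static struct delayed_work stats_dwork;		/* assumed to re-arm itself */

	static void example_suspend(void)
	{
		/* cancel the pending timer/work and forbid re-queueing */
		disable_delayed_work_sync(&stats_dwork);
	}

	static void example_resume(void)
	{
		enable_delayed_work(&stats_dwork);
		schedule_delayed_work(&stats_dwork, HZ);
	}
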
4515                                               << 
4516 /**                                           << 
4517  * schedule_on_each_cpu - execute a function     3641  * schedule_on_each_cpu - execute a function synchronously on each online CPU
4518  * @func: the function to call                   3642  * @func: the function to call
4519  *                                               3643  *
4520  * schedule_on_each_cpu() executes @func on e    3644  * schedule_on_each_cpu() executes @func on each online CPU using the
4521  * system workqueue and blocks until all CPUs    3645  * system workqueue and blocks until all CPUs have completed.
4522  * schedule_on_each_cpu() is very slow.          3646  * schedule_on_each_cpu() is very slow.
4523  *                                               3647  *
4524  * Return:                                       3648  * Return:
4525  * 0 on success, -errno on failure.              3649  * 0 on success, -errno on failure.
4526  */                                              3650  */
4527 int schedule_on_each_cpu(work_func_t func)       3651 int schedule_on_each_cpu(work_func_t func)
4528 {                                                3652 {
4529         int cpu;                                 3653         int cpu;
4530         struct work_struct __percpu *works;      3654         struct work_struct __percpu *works;
4531                                                  3655 
4532         works = alloc_percpu(struct work_stru    3656         works = alloc_percpu(struct work_struct);
4533         if (!works)                              3657         if (!works)
4534                 return -ENOMEM;                  3658                 return -ENOMEM;
4535                                                  3659 
4536         cpus_read_lock();                        3660         cpus_read_lock();
4537                                                  3661 
4538         for_each_online_cpu(cpu) {               3662         for_each_online_cpu(cpu) {
4539                 struct work_struct *work = pe    3663                 struct work_struct *work = per_cpu_ptr(works, cpu);
4540                                                  3664 
4541                 INIT_WORK(work, func);           3665                 INIT_WORK(work, func);
4542                 schedule_work_on(cpu, work);     3666                 schedule_work_on(cpu, work);
4543         }                                        3667         }
4544                                                  3668 
4545         for_each_online_cpu(cpu)                 3669         for_each_online_cpu(cpu)
4546                 flush_work(per_cpu_ptr(works,    3670                 flush_work(per_cpu_ptr(works, cpu));
4547                                                  3671 
4548         cpus_read_unlock();                      3672         cpus_read_unlock();
4549         free_percpu(works);                      3673         free_percpu(works);
4550         return 0;                                3674         return 0;
4551 }                                                3675 }
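
A minimal caller sketch, assuming a hypothetical per-CPU cache that must be drained everywhere before some global operation; the callback runs in process context on each online CPU and the call sleeps until all of them have completed:

	#include <linux/workqueue.h>

	static void drain_local_cache(struct work_struct *work)
	{
		/* executes once on each online CPU via the system workqueue */
	}

	static int example_drain_all(void)
	{
		return schedule_on_each_cpu(drain_local_cache);	/* 0 or -ENOMEM */
	}
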
4552                                                  3676 
4553 /**                                              3677 /**
4554  * execute_in_process_context - reliably exec    3678  * execute_in_process_context - reliably execute the routine with user context
4555  * @fn:         the function to execute          3679  * @fn:         the function to execute
4556  * @ew:         guaranteed storage for the ex    3680  * @ew:         guaranteed storage for the execute work structure (must
4557  *              be available when the work ex    3681  *              be available when the work executes)
4558  *                                               3682  *
4559  * Executes the function immediately if proce    3683  * Executes the function immediately if process context is available,
4560  * otherwise schedules the function for delay    3684  * otherwise schedules the function for delayed execution.
4561  *                                               3685  *
4562  * Return:      0 - function was executed        3686  * Return:      0 - function was executed
4563  *              1 - function was scheduled fo    3687  *              1 - function was scheduled for execution
4564  */                                              3688  */
4565 int execute_in_process_context(work_func_t fn    3689 int execute_in_process_context(work_func_t fn, struct execute_work *ew)
4566 {                                                3690 {
4567         if (!in_interrupt()) {                   3691         if (!in_interrupt()) {
4568                 fn(&ew->work);                   3692                 fn(&ew->work);
4569                 return 0;                        3693                 return 0;
4570         }                                        3694         }
4571                                                  3695 
4572         INIT_WORK(&ew->work, fn);                3696         INIT_WORK(&ew->work, fn);
4573         schedule_work(&ew->work);                3697         schedule_work(&ew->work);
4574                                                  3698 
4575         return 1;                                3699         return 1;
4576 }                                                3700 }
4577 EXPORT_SYMBOL_GPL(execute_in_process_context)    3701 EXPORT_SYMBOL_GPL(execute_in_process_context);
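
Illustrative caller (hypothetical object and names): a final-put path that may be reached from interrupt context frees the object immediately when process context is available and defers to the system workqueue otherwise. The execute_work storage must live until the deferred call has run, so it is embedded in the object being freed:

	#include <linux/slab.h>
	#include <linux/workqueue.h>

	struct example_obj {
		struct execute_work ew;		/* must outlive a deferred call */
		/* ... payload ... */
	};

	static void example_obj_free(struct work_struct *work)
	{
		struct example_obj *obj =
			container_of(work, struct example_obj, ew.work);

		kfree(obj);
	}

	static void example_obj_put_final(struct example_obj *obj)
	{
		/* runs example_obj_free() now, or schedules it if in_interrupt() */
		execute_in_process_context(example_obj_free, &obj->ew);
	}
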
4578                                                  3702 
4579 /**                                              3703 /**
4580  * free_workqueue_attrs - free a workqueue_at    3704  * free_workqueue_attrs - free a workqueue_attrs
4581  * @attrs: workqueue_attrs to free               3705  * @attrs: workqueue_attrs to free
4582  *                                               3706  *
4583  * Undo alloc_workqueue_attrs().                 3707  * Undo alloc_workqueue_attrs().
4584  */                                              3708  */
4585 void free_workqueue_attrs(struct workqueue_at    3709 void free_workqueue_attrs(struct workqueue_attrs *attrs)
4586 {                                                3710 {
4587         if (attrs) {                             3711         if (attrs) {
4588                 free_cpumask_var(attrs->cpuma    3712                 free_cpumask_var(attrs->cpumask);
4589                 free_cpumask_var(attrs->__pod    3713                 free_cpumask_var(attrs->__pod_cpumask);
4590                 kfree(attrs);                    3714                 kfree(attrs);
4591         }                                        3715         }
4592 }                                                3716 }
4593                                                  3717 
4594 /**                                              3718 /**
4595  * alloc_workqueue_attrs - allocate a workque    3719  * alloc_workqueue_attrs - allocate a workqueue_attrs
4596  *                                               3720  *
4597  * Allocate a new workqueue_attrs, initialize    3721  * Allocate a new workqueue_attrs, initialize with default settings and
4598  * return it.                                    3722  * return it.
4599  *                                               3723  *
4600  * Return: The allocated new workqueue_attr o    3724  * Return: The allocated new workqueue_attrs on success. %NULL on failure.
4601  */                                              3725  */
4602 struct workqueue_attrs *alloc_workqueue_attrs    3726 struct workqueue_attrs *alloc_workqueue_attrs(void)
4603 {                                                3727 {
4604         struct workqueue_attrs *attrs;           3728         struct workqueue_attrs *attrs;
4605                                                  3729 
4606         attrs = kzalloc(sizeof(*attrs), GFP_K    3730         attrs = kzalloc(sizeof(*attrs), GFP_KERNEL);
4607         if (!attrs)                              3731         if (!attrs)
4608                 goto fail;                       3732                 goto fail;
4609         if (!alloc_cpumask_var(&attrs->cpumas    3733         if (!alloc_cpumask_var(&attrs->cpumask, GFP_KERNEL))
4610                 goto fail;                       3734                 goto fail;
4611         if (!alloc_cpumask_var(&attrs->__pod_    3735         if (!alloc_cpumask_var(&attrs->__pod_cpumask, GFP_KERNEL))
4612                 goto fail;                       3736                 goto fail;
4613                                                  3737 
4614         cpumask_copy(attrs->cpumask, cpu_poss    3738         cpumask_copy(attrs->cpumask, cpu_possible_mask);
4615         attrs->affn_scope = WQ_AFFN_DFL;         3739         attrs->affn_scope = WQ_AFFN_DFL;
4616         return attrs;                            3740         return attrs;
4617 fail:                                            3741 fail:
4618         free_workqueue_attrs(attrs);             3742         free_workqueue_attrs(attrs);
4619         return NULL;                             3743         return NULL;
4620 }                                                3744 }
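
Sketch of the allocate / tweak / apply / free pattern these helpers support, assuming an existing WQ_UNBOUND workqueue and built-in (non-module) code since not all of these helpers are exported; the attrs are copied by apply_workqueue_attrs(), so freeing them afterwards is safe:

	static int example_set_nice(struct workqueue_struct *unbound_wq)
	{
		struct workqueue_attrs *attrs;
		int ret;

		attrs = alloc_workqueue_attrs();
		if (!attrs)
			return -ENOMEM;

		attrs->nice = 10;		/* run this wq's workers at nice 10 */
		ret = apply_workqueue_attrs(unbound_wq, attrs);
		free_workqueue_attrs(attrs);	/* attrs were copied, safe to free */
		return ret;
	}
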
4621                                                  3745 
4622 static void copy_workqueue_attrs(struct workq    3746 static void copy_workqueue_attrs(struct workqueue_attrs *to,
4623                                  const struct    3747                                  const struct workqueue_attrs *from)
4624 {                                                3748 {
4625         to->nice = from->nice;                   3749         to->nice = from->nice;
4626         cpumask_copy(to->cpumask, from->cpuma    3750         cpumask_copy(to->cpumask, from->cpumask);
4627         cpumask_copy(to->__pod_cpumask, from-    3751         cpumask_copy(to->__pod_cpumask, from->__pod_cpumask);
4628         to->affn_strict = from->affn_strict;     3752         to->affn_strict = from->affn_strict;
4629                                                  3753 
4630         /*                                       3754         /*
4631          * Unlike hash and equality test, cop    3755          * Unlike hash and equality test, copying shouldn't ignore wq-only
4632          * fields as copying is used for both    3756          * fields as copying is used for both pool and wq attrs. Instead,
4633          * get_unbound_pool() explicitly clea    3757          * get_unbound_pool() explicitly clears the fields.
4634          */                                      3758          */
4635         to->affn_scope = from->affn_scope;       3759         to->affn_scope = from->affn_scope;
4636         to->ordered = from->ordered;             3760         to->ordered = from->ordered;
4637 }                                                3761 }
4638                                                  3762 
4639 /*                                               3763 /*
4640  * Some attrs fields are workqueue-only. Clea    3764  * Some attrs fields are workqueue-only. Clear them for worker_pool's. See the
4641  * comments in 'struct workqueue_attrs' defin    3765  * comments in 'struct workqueue_attrs' definition.
4642  */                                              3766  */
4643 static void wqattrs_clear_for_pool(struct wor    3767 static void wqattrs_clear_for_pool(struct workqueue_attrs *attrs)
4644 {                                                3768 {
4645         attrs->affn_scope = WQ_AFFN_NR_TYPES;    3769         attrs->affn_scope = WQ_AFFN_NR_TYPES;
4646         attrs->ordered = false;                  3770         attrs->ordered = false;
4647         if (attrs->affn_strict)               << 
4648                 cpumask_copy(attrs->cpumask,  << 
4649 }                                                3771 }
4650                                                  3772 
4651 /* hash value of the content of @attr */         3773 /* hash value of the content of @attr */
4652 static u32 wqattrs_hash(const struct workqueu    3774 static u32 wqattrs_hash(const struct workqueue_attrs *attrs)
4653 {                                                3775 {
4654         u32 hash = 0;                            3776         u32 hash = 0;
4655                                                  3777 
4656         hash = jhash_1word(attrs->nice, hash)    3778         hash = jhash_1word(attrs->nice, hash);
4657         hash = jhash_1word(attrs->affn_strict !! 3779         hash = jhash(cpumask_bits(attrs->cpumask),
                                                   >> 3780                      BITS_TO_LONGS(nr_cpumask_bits) * sizeof(long), hash);
4658         hash = jhash(cpumask_bits(attrs->__po    3781         hash = jhash(cpumask_bits(attrs->__pod_cpumask),
4659                      BITS_TO_LONGS(nr_cpumask    3782                      BITS_TO_LONGS(nr_cpumask_bits) * sizeof(long), hash);
4660         if (!attrs->affn_strict)              !! 3783         hash = jhash_1word(attrs->affn_strict, hash);
4661                 hash = jhash(cpumask_bits(att << 
4662                              BITS_TO_LONGS(nr << 
4663         return hash;                             3784         return hash;
4664 }                                                3785 }
4665                                                  3786 
4666 /* content equality test */                      3787 /* content equality test */
4667 static bool wqattrs_equal(const struct workqu    3788 static bool wqattrs_equal(const struct workqueue_attrs *a,
4668                           const struct workqu    3789                           const struct workqueue_attrs *b)
4669 {                                                3790 {
4670         if (a->nice != b->nice)                  3791         if (a->nice != b->nice)
4671                 return false;                    3792                 return false;
4672         if (a->affn_strict != b->affn_strict) !! 3793         if (!cpumask_equal(a->cpumask, b->cpumask))
4673                 return false;                    3794                 return false;
4674         if (!cpumask_equal(a->__pod_cpumask,     3795         if (!cpumask_equal(a->__pod_cpumask, b->__pod_cpumask))
4675                 return false;                    3796                 return false;
4676         if (!a->affn_strict && !cpumask_equal !! 3797         if (a->affn_strict != b->affn_strict)
4677                 return false;                    3798                 return false;
4678         return true;                             3799         return true;
4679 }                                                3800 }
4680                                                  3801 
4681 /* Update @attrs with actually available CPUs    3802 /* Update @attrs with actually available CPUs */
4682 static void wqattrs_actualize_cpumask(struct     3803 static void wqattrs_actualize_cpumask(struct workqueue_attrs *attrs,
4683                                       const c    3804                                       const cpumask_t *unbound_cpumask)
4684 {                                                3805 {
4685         /*                                       3806         /*
4686          * Calculate the effective CPU mask o    3807          * Calculate the effective CPU mask of @attrs given @unbound_cpumask. If
4687          * @attrs->cpumask doesn't overlap wi    3808          * @attrs->cpumask doesn't overlap with @unbound_cpumask, we fallback to
4688          * @unbound_cpumask.                     3809          * @unbound_cpumask.
4689          */                                      3810          */
4690         cpumask_and(attrs->cpumask, attrs->cp    3811         cpumask_and(attrs->cpumask, attrs->cpumask, unbound_cpumask);
4691         if (unlikely(cpumask_empty(attrs->cpu    3812         if (unlikely(cpumask_empty(attrs->cpumask)))
4692                 cpumask_copy(attrs->cpumask,     3813                 cpumask_copy(attrs->cpumask, unbound_cpumask);
4693 }                                                3814 }
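
Worked illustration of the fallback above, with hypothetical masks on an 8-CPU system:

	/*
	 * attrs->cpumask  = 0xf0  (CPUs 4-7, requested by the workqueue)
	 * unbound_cpumask = 0x0f  (CPUs 0-3, the current wq_unbound_cpumask)
	 * cpumask_and()   -> empty, so the unlikely() branch restores
	 * attrs->cpumask  = 0x0f  and the workqueue falls back to CPUs 0-3.
	 */
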
4694                                                  3815 
4695 /* find wq_pod_type to use for @attrs */         3816 /* find wq_pod_type to use for @attrs */
4696 static const struct wq_pod_type *                3817 static const struct wq_pod_type *
4697 wqattrs_pod_type(const struct workqueue_attrs    3818 wqattrs_pod_type(const struct workqueue_attrs *attrs)
4698 {                                                3819 {
4699         enum wq_affn_scope scope;                3820         enum wq_affn_scope scope;
4700         struct wq_pod_type *pt;                  3821         struct wq_pod_type *pt;
4701                                                  3822 
4702         /* to synchronize access to wq_affn_d    3823         /* to synchronize access to wq_affn_dfl */
4703         lockdep_assert_held(&wq_pool_mutex);     3824         lockdep_assert_held(&wq_pool_mutex);
4704                                                  3825 
4705         if (attrs->affn_scope == WQ_AFFN_DFL)    3826         if (attrs->affn_scope == WQ_AFFN_DFL)
4706                 scope = wq_affn_dfl;             3827                 scope = wq_affn_dfl;
4707         else                                     3828         else
4708                 scope = attrs->affn_scope;       3829                 scope = attrs->affn_scope;
4709                                                  3830 
4710         pt = &wq_pod_types[scope];               3831         pt = &wq_pod_types[scope];
4711                                                  3832 
4712         if (!WARN_ON_ONCE(attrs->affn_scope =    3833         if (!WARN_ON_ONCE(attrs->affn_scope == WQ_AFFN_NR_TYPES) &&
4713             likely(pt->nr_pods))                 3834             likely(pt->nr_pods))
4714                 return pt;                       3835                 return pt;
4715                                                  3836 
4716         /*                                       3837         /*
4717          * Before workqueue_init_topology(),     3838          * Before workqueue_init_topology(), only SYSTEM is available which is
4718          * initialized in workqueue_init_earl    3839          * initialized in workqueue_init_early().
4719          */                                      3840          */
4720         pt = &wq_pod_types[WQ_AFFN_SYSTEM];      3841         pt = &wq_pod_types[WQ_AFFN_SYSTEM];
4721         BUG_ON(!pt->nr_pods);                    3842         BUG_ON(!pt->nr_pods);
4722         return pt;                               3843         return pt;
4723 }                                                3844 }
4724                                                  3845 
4725 /**                                              3846 /**
4726  * init_worker_pool - initialize a newly zall    3847  * init_worker_pool - initialize a newly zalloc'd worker_pool
4727  * @pool: worker_pool to initialize              3848  * @pool: worker_pool to initialize
4728  *                                               3849  *
4729  * Initialize a newly zalloc'd @pool.  It als    3850  * Initialize a newly zalloc'd @pool.  It also allocates @pool->attrs.
4730  *                                               3851  *
4731  * Return: 0 on success, -errno on failure.      3852  * Return: 0 on success, -errno on failure.  Even on failure, all fields
4732  * inside @pool proper are initialized and pu    3853  * inside @pool proper are initialized and put_unbound_pool() can be called
4733  * on @pool safely to release it.                3854  * on @pool safely to release it.
4734  */                                              3855  */
4735 static int init_worker_pool(struct worker_poo    3856 static int init_worker_pool(struct worker_pool *pool)
4736 {                                                3857 {
4737         raw_spin_lock_init(&pool->lock);         3858         raw_spin_lock_init(&pool->lock);
4738         pool->id = -1;                           3859         pool->id = -1;
4739         pool->cpu = -1;                          3860         pool->cpu = -1;
4740         pool->node = NUMA_NO_NODE;               3861         pool->node = NUMA_NO_NODE;
4741         pool->flags |= POOL_DISASSOCIATED;       3862         pool->flags |= POOL_DISASSOCIATED;
4742         pool->watchdog_ts = jiffies;             3863         pool->watchdog_ts = jiffies;
4743         INIT_LIST_HEAD(&pool->worklist);         3864         INIT_LIST_HEAD(&pool->worklist);
4744         INIT_LIST_HEAD(&pool->idle_list);        3865         INIT_LIST_HEAD(&pool->idle_list);
4745         hash_init(pool->busy_hash);              3866         hash_init(pool->busy_hash);
4746                                                  3867 
4747         timer_setup(&pool->idle_timer, idle_w    3868         timer_setup(&pool->idle_timer, idle_worker_timeout, TIMER_DEFERRABLE);
4748         INIT_WORK(&pool->idle_cull_work, idle    3869         INIT_WORK(&pool->idle_cull_work, idle_cull_fn);
4749                                                  3870 
4750         timer_setup(&pool->mayday_timer, pool    3871         timer_setup(&pool->mayday_timer, pool_mayday_timeout, 0);
4751                                                  3872 
4752         INIT_LIST_HEAD(&pool->workers);          3873         INIT_LIST_HEAD(&pool->workers);
                                                   >> 3874         INIT_LIST_HEAD(&pool->dying_workers);
4753                                                  3875 
4754         ida_init(&pool->worker_ida);             3876         ida_init(&pool->worker_ida);
4755         INIT_HLIST_NODE(&pool->hash_node);       3877         INIT_HLIST_NODE(&pool->hash_node);
4756         pool->refcnt = 1;                        3878         pool->refcnt = 1;
4757                                                  3879 
4758         /* shouldn't fail above this point */    3880         /* shouldn't fail above this point */
4759         pool->attrs = alloc_workqueue_attrs()    3881         pool->attrs = alloc_workqueue_attrs();
4760         if (!pool->attrs)                        3882         if (!pool->attrs)
4761                 return -ENOMEM;                  3883                 return -ENOMEM;
4762                                                  3884 
4763         wqattrs_clear_for_pool(pool->attrs);     3885         wqattrs_clear_for_pool(pool->attrs);
4764                                                  3886 
4765         return 0;                                3887         return 0;
4766 }                                                3888 }
4767                                                  3889 
4768 #ifdef CONFIG_LOCKDEP                            3890 #ifdef CONFIG_LOCKDEP
4769 static void wq_init_lockdep(struct workqueue_    3891 static void wq_init_lockdep(struct workqueue_struct *wq)
4770 {                                                3892 {
4771         char *lock_name;                         3893         char *lock_name;
4772                                                  3894 
4773         lockdep_register_key(&wq->key);          3895         lockdep_register_key(&wq->key);
4774         lock_name = kasprintf(GFP_KERNEL, "%s    3896         lock_name = kasprintf(GFP_KERNEL, "%s%s", "(wq_completion)", wq->name);
4775         if (!lock_name)                          3897         if (!lock_name)
4776                 lock_name = wq->name;            3898                 lock_name = wq->name;
4777                                                  3899 
4778         wq->lock_name = lock_name;               3900         wq->lock_name = lock_name;
4779         lockdep_init_map(&wq->lockdep_map, lo    3901         lockdep_init_map(&wq->lockdep_map, lock_name, &wq->key, 0);
4780 }                                                3902 }
4781                                                  3903 
4782 static void wq_unregister_lockdep(struct work    3904 static void wq_unregister_lockdep(struct workqueue_struct *wq)
4783 {                                                3905 {
4784         lockdep_unregister_key(&wq->key);        3906         lockdep_unregister_key(&wq->key);
4785 }                                                3907 }
4786                                                  3908 
4787 static void wq_free_lockdep(struct workqueue_    3909 static void wq_free_lockdep(struct workqueue_struct *wq)
4788 {                                                3910 {
4789         if (wq->lock_name != wq->name)           3911         if (wq->lock_name != wq->name)
4790                 kfree(wq->lock_name);            3912                 kfree(wq->lock_name);
4791 }                                                3913 }
4792 #else                                            3914 #else
4793 static void wq_init_lockdep(struct workqueue_    3915 static void wq_init_lockdep(struct workqueue_struct *wq)
4794 {                                                3916 {
4795 }                                                3917 }
4796                                                  3918 
4797 static void wq_unregister_lockdep(struct work    3919 static void wq_unregister_lockdep(struct workqueue_struct *wq)
4798 {                                                3920 {
4799 }                                                3921 }
4800                                                  3922 
4801 static void wq_free_lockdep(struct workqueue_    3923 static void wq_free_lockdep(struct workqueue_struct *wq)
4802 {                                                3924 {
4803 }                                                3925 }
4804 #endif                                           3926 #endif
4805                                                  3927 
4806 static void free_node_nr_active(struct wq_nod << 
4807 {                                             << 
4808         int node;                             << 
4809                                               << 
4810         for_each_node(node) {                 << 
4811                 kfree(nna_ar[node]);          << 
4812                 nna_ar[node] = NULL;          << 
4813         }                                     << 
4814                                               << 
4815         kfree(nna_ar[nr_node_ids]);           << 
4816         nna_ar[nr_node_ids] = NULL;           << 
4817 }                                             << 
4818                                               << 
4819 static void init_node_nr_active(struct wq_nod << 
4820 {                                             << 
4821         nna->max = WQ_DFL_MIN_ACTIVE;         << 
4822         atomic_set(&nna->nr, 0);              << 
4823         raw_spin_lock_init(&nna->lock);       << 
4824         INIT_LIST_HEAD(&nna->pending_pwqs);   << 
4825 }                                             << 
4826                                               << 
4827 /*                                            << 
4828  * Each node's nr_active counter will be acce << 
4829  * should be allocated in the node.           << 
4830  */                                           << 
4831 static int alloc_node_nr_active(struct wq_nod << 
4832 {                                             << 
4833         struct wq_node_nr_active *nna;        << 
4834         int node;                             << 
4835                                               << 
4836         for_each_node(node) {                 << 
4837                 nna = kzalloc_node(sizeof(*nn << 
4838                 if (!nna)                     << 
4839                         goto err_free;        << 
4840                 init_node_nr_active(nna);     << 
4841                 nna_ar[node] = nna;           << 
4842         }                                     << 
4843                                               << 
4844         /* [nr_node_ids] is used as the fallb << 
4845         nna = kzalloc_node(sizeof(*nna), GFP_ << 
4846         if (!nna)                             << 
4847                 goto err_free;                << 
4848         init_node_nr_active(nna);             << 
4849         nna_ar[nr_node_ids] = nna;            << 
4850                                               << 
4851         return 0;                             << 
4852                                               << 
4853 err_free:                                     << 
4854         free_node_nr_active(nna_ar);          << 
4855         return -ENOMEM;                       << 
4856 }                                             << 
4857                                               << 
4858 static void rcu_free_wq(struct rcu_head *rcu)    3928 static void rcu_free_wq(struct rcu_head *rcu)
4859 {                                                3929 {
4860         struct workqueue_struct *wq =            3930         struct workqueue_struct *wq =
4861                 container_of(rcu, struct work    3931                 container_of(rcu, struct workqueue_struct, rcu);
4862                                                  3932 
4863         if (wq->flags & WQ_UNBOUND)           << 
4864                 free_node_nr_active(wq->node_ << 
4865                                               << 
4866         wq_free_lockdep(wq);                     3933         wq_free_lockdep(wq);
4867         free_percpu(wq->cpu_pwq);                3934         free_percpu(wq->cpu_pwq);
4868         free_workqueue_attrs(wq->unbound_attr    3935         free_workqueue_attrs(wq->unbound_attrs);
4869         kfree(wq);                               3936         kfree(wq);
4870 }                                                3937 }
4871                                                  3938 
4872 static void rcu_free_pool(struct rcu_head *rc    3939 static void rcu_free_pool(struct rcu_head *rcu)
4873 {                                                3940 {
4874         struct worker_pool *pool = container_    3941         struct worker_pool *pool = container_of(rcu, struct worker_pool, rcu);
4875                                                  3942 
4876         ida_destroy(&pool->worker_ida);          3943         ida_destroy(&pool->worker_ida);
4877         free_workqueue_attrs(pool->attrs);       3944         free_workqueue_attrs(pool->attrs);
4878         kfree(pool);                             3945         kfree(pool);
4879 }                                                3946 }
4880                                                  3947 
4881 /**                                              3948 /**
4882  * put_unbound_pool - put a worker_pool          3949  * put_unbound_pool - put a worker_pool
4883  * @pool: worker_pool to put                     3950  * @pool: worker_pool to put
4884  *                                               3951  *
4885  * Put @pool.  If its refcnt reaches zero, it    3952  * Put @pool.  If its refcnt reaches zero, it gets destroyed in RCU
4886  * safe manner.  get_unbound_pool() calls thi    3953  * safe manner.  get_unbound_pool() calls this function on its failure path
4887  * and this function should be able to releas    3954  * and this function should be able to release pools which went through,
4888  * successfully or not, init_worker_pool().      3955  * successfully or not, init_worker_pool().
4889  *                                               3956  *
4890  * Should be called with wq_pool_mutex held.     3957  * Should be called with wq_pool_mutex held.
4891  */                                              3958  */
4892 static void put_unbound_pool(struct worker_po    3959 static void put_unbound_pool(struct worker_pool *pool)
4893 {                                                3960 {
                                                   >> 3961         DECLARE_COMPLETION_ONSTACK(detach_completion);
4894         struct worker *worker;                   3962         struct worker *worker;
4895         LIST_HEAD(cull_list);                    3963         LIST_HEAD(cull_list);
4896                                                  3964 
4897         lockdep_assert_held(&wq_pool_mutex);     3965         lockdep_assert_held(&wq_pool_mutex);
4898                                                  3966 
4899         if (--pool->refcnt)                      3967         if (--pool->refcnt)
4900                 return;                          3968                 return;
4901                                                  3969 
4902         /* sanity checks */                      3970         /* sanity checks */
4903         if (WARN_ON(!(pool->cpu < 0)) ||         3971         if (WARN_ON(!(pool->cpu < 0)) ||
4904             WARN_ON(!list_empty(&pool->workli    3972             WARN_ON(!list_empty(&pool->worklist)))
4905                 return;                          3973                 return;
4906                                                  3974 
4907         /* release id and unhash */              3975         /* release id and unhash */
4908         if (pool->id >= 0)                       3976         if (pool->id >= 0)
4909                 idr_remove(&worker_pool_idr,     3977                 idr_remove(&worker_pool_idr, pool->id);
4910         hash_del(&pool->hash_node);              3978         hash_del(&pool->hash_node);
4911                                                  3979 
4912         /*                                       3980         /*
4913          * Become the manager and destroy all    3981          * Become the manager and destroy all workers.  This prevents
4914          * @pool's workers from blocking on a    3982          * @pool's workers from blocking on attach_mutex.  We're the last
4915          * manager and @pool gets freed with     3983          * manager and @pool gets freed with the flag set.
4916          *                                       3984          *
4917          * Having a concurrent manager is qui    3985          * Having a concurrent manager is quite unlikely to happen as we can
4918          * only get here with                    3986          * only get here with
4919          *   pwq->refcnt == pool->refcnt == 0    3987          *   pwq->refcnt == pool->refcnt == 0
4920          * which implies no work queued to th    3988          * which implies no work queued to the pool, which implies no worker can
4921          * become the manager. However a work    3989          * become the manager. However a worker could have taken the role of
4922          * manager before the refcnts dropped    3990          * manager before the refcnts dropped to 0, since maybe_create_worker()
4923          * drops pool->lock                      3991          * drops pool->lock
4924          */                                      3992          */
4925         while (true) {                           3993         while (true) {
4926                 rcuwait_wait_event(&manager_w    3994                 rcuwait_wait_event(&manager_wait,
4927                                    !(pool->fl    3995                                    !(pool->flags & POOL_MANAGER_ACTIVE),
4928                                    TASK_UNINT    3996                                    TASK_UNINTERRUPTIBLE);
4929                                                  3997 
4930                 mutex_lock(&wq_pool_attach_mu    3998                 mutex_lock(&wq_pool_attach_mutex);
4931                 raw_spin_lock_irq(&pool->lock    3999                 raw_spin_lock_irq(&pool->lock);
4932                 if (!(pool->flags & POOL_MANA    4000                 if (!(pool->flags & POOL_MANAGER_ACTIVE)) {
4933                         pool->flags |= POOL_M    4001                         pool->flags |= POOL_MANAGER_ACTIVE;
4934                         break;                   4002                         break;
4935                 }                                4003                 }
4936                 raw_spin_unlock_irq(&pool->lo    4004                 raw_spin_unlock_irq(&pool->lock);
4937                 mutex_unlock(&wq_pool_attach_    4005                 mutex_unlock(&wq_pool_attach_mutex);
4938         }                                        4006         }
4939                                                  4007 
4940         while ((worker = first_idle_worker(po    4008         while ((worker = first_idle_worker(pool)))
4941                 set_worker_dying(worker, &cul    4009                 set_worker_dying(worker, &cull_list);
4942         WARN_ON(pool->nr_workers || pool->nr_    4010         WARN_ON(pool->nr_workers || pool->nr_idle);
4943         raw_spin_unlock_irq(&pool->lock);        4011         raw_spin_unlock_irq(&pool->lock);
4944                                                  4012 
4945         detach_dying_workers(&cull_list);     !! 4013         wake_dying_workers(&cull_list);
4946                                                  4014 
                                                   >> 4015         if (!list_empty(&pool->workers) || !list_empty(&pool->dying_workers))
                                                   >> 4016                 pool->detach_completion = &detach_completion;
4947         mutex_unlock(&wq_pool_attach_mutex);     4017         mutex_unlock(&wq_pool_attach_mutex);
4948                                                  4018 
4949         reap_dying_workers(&cull_list);       !! 4019         if (pool->detach_completion)
                                                   >> 4020                 wait_for_completion(pool->detach_completion);
4950                                                  4021 
4951         /* shut down the timers */               4022         /* shut down the timers */
4952         del_timer_sync(&pool->idle_timer);       4023         del_timer_sync(&pool->idle_timer);
4953         cancel_work_sync(&pool->idle_cull_wor    4024         cancel_work_sync(&pool->idle_cull_work);
4954         del_timer_sync(&pool->mayday_timer);     4025         del_timer_sync(&pool->mayday_timer);
4955                                                  4026 
4956         /* RCU protected to allow dereference    4027         /* RCU protected to allow dereferences from get_work_pool() */
4957         call_rcu(&pool->rcu, rcu_free_pool);     4028         call_rcu(&pool->rcu, rcu_free_pool);
4958 }                                                4029 }
4959                                                  4030 
4960 /**                                              4031 /**
4961  * get_unbound_pool - get a worker_pool with     4032  * get_unbound_pool - get a worker_pool with the specified attributes
4962  * @attrs: the attributes of the worker_pool     4033  * @attrs: the attributes of the worker_pool to get
4963  *                                               4034  *
4964  * Obtain a worker_pool which has the same at    4035  * Obtain a worker_pool which has the same attributes as @attrs, bump the
4965  * reference count and return it.  If there a    4036  * reference count and return it.  If there already is a matching
4966  * worker_pool, it will be used; otherwise, t    4037  * worker_pool, it will be used; otherwise, this function attempts to
4967  * create a new one.                             4038  * create a new one.
4968  *                                               4039  *
4969  * Should be called with wq_pool_mutex held.     4040  * Should be called with wq_pool_mutex held.
4970  *                                               4041  *
4971  * Return: On success, a worker_pool with the    4042  * Return: On success, a worker_pool with the same attributes as @attrs.
4972  * On failure, %NULL.                            4043  * On failure, %NULL.
4973  */                                              4044  */
4974 static struct worker_pool *get_unbound_pool(c    4045 static struct worker_pool *get_unbound_pool(const struct workqueue_attrs *attrs)
4975 {                                                4046 {
4976         struct wq_pod_type *pt = &wq_pod_type    4047         struct wq_pod_type *pt = &wq_pod_types[WQ_AFFN_NUMA];
4977         u32 hash = wqattrs_hash(attrs);          4048         u32 hash = wqattrs_hash(attrs);
4978         struct worker_pool *pool;                4049         struct worker_pool *pool;
4979         int pod, node = NUMA_NO_NODE;            4050         int pod, node = NUMA_NO_NODE;
4980                                                  4051 
4981         lockdep_assert_held(&wq_pool_mutex);     4052         lockdep_assert_held(&wq_pool_mutex);
4982                                                  4053 
4983         /* do we already have a matching pool    4054         /* do we already have a matching pool? */
4984         hash_for_each_possible(unbound_pool_h    4055         hash_for_each_possible(unbound_pool_hash, pool, hash_node, hash) {
4985                 if (wqattrs_equal(pool->attrs    4056                 if (wqattrs_equal(pool->attrs, attrs)) {
4986                         pool->refcnt++;          4057                         pool->refcnt++;
4987                         return pool;             4058                         return pool;
4988                 }                                4059                 }
4989         }                                        4060         }
4990                                                  4061 
4991         /* If __pod_cpumask is contained insi    4062         /* If __pod_cpumask is contained inside a NUMA pod, that's our node */
4992         for (pod = 0; pod < pt->nr_pods; pod+    4063         for (pod = 0; pod < pt->nr_pods; pod++) {
4993                 if (cpumask_subset(attrs->__p    4064                 if (cpumask_subset(attrs->__pod_cpumask, pt->pod_cpus[pod])) {
4994                         node = pt->pod_node[p    4065                         node = pt->pod_node[pod];
4995                         break;                   4066                         break;
4996                 }                                4067                 }
4997         }                                        4068         }
4998                                                  4069 
4999         /* nope, create a new one */             4070         /* nope, create a new one */
5000         pool = kzalloc_node(sizeof(*pool), GF    4071         pool = kzalloc_node(sizeof(*pool), GFP_KERNEL, node);
5001         if (!pool || init_worker_pool(pool) <    4072         if (!pool || init_worker_pool(pool) < 0)
5002                 goto fail;                       4073                 goto fail;
5003                                                  4074 
5004         pool->node = node;                       4075         pool->node = node;
5005         copy_workqueue_attrs(pool->attrs, att    4076         copy_workqueue_attrs(pool->attrs, attrs);
5006         wqattrs_clear_for_pool(pool->attrs);     4077         wqattrs_clear_for_pool(pool->attrs);
5007                                                  4078 
5008         if (worker_pool_assign_id(pool) < 0)     4079         if (worker_pool_assign_id(pool) < 0)
5009                 goto fail;                       4080                 goto fail;
5010                                                  4081 
5011         /* create and start the initial worke    4082         /* create and start the initial worker */
5012         if (wq_online && !create_worker(pool)    4083         if (wq_online && !create_worker(pool))
5013                 goto fail;                       4084                 goto fail;
5014                                                  4085 
5015         /* install */                            4086         /* install */
5016         hash_add(unbound_pool_hash, &pool->ha    4087         hash_add(unbound_pool_hash, &pool->hash_node, hash);
5017                                                  4088 
5018         return pool;                             4089         return pool;
5019 fail:                                            4090 fail:
5020         if (pool)                                4091         if (pool)
5021                 put_unbound_pool(pool);          4092                 put_unbound_pool(pool);
5022         return NULL;                             4093         return NULL;
5023 }                                                4094 }
5024                                                  4095 
                                                   >> 4096 static void rcu_free_pwq(struct rcu_head *rcu)
                                                   >> 4097 {
                                                   >> 4098         kmem_cache_free(pwq_cache,
                                                   >> 4099                         container_of(rcu, struct pool_workqueue, rcu));
                                                   >> 4100 }
                                                   >> 4101 
5025 /*                                               4102 /*
5026  * Scheduled on pwq_release_worker by put_pwq    4103  * Scheduled on pwq_release_worker by put_pwq() when an unbound pwq hits zero
5027  * refcnt and needs to be destroyed.             4104  * refcnt and needs to be destroyed.
5028  */                                              4105  */
5029 static void pwq_release_workfn(struct kthread    4106 static void pwq_release_workfn(struct kthread_work *work)
5030 {                                                4107 {
5031         struct pool_workqueue *pwq = containe    4108         struct pool_workqueue *pwq = container_of(work, struct pool_workqueue,
5032                                                  4109                                                   release_work);
5033         struct workqueue_struct *wq = pwq->wq    4110         struct workqueue_struct *wq = pwq->wq;
5034         struct worker_pool *pool = pwq->pool;    4111         struct worker_pool *pool = pwq->pool;
5035         bool is_last = false;                    4112         bool is_last = false;
5036                                                  4113 
5037         /*                                       4114         /*
5038          * When @pwq is not linked, it doesn'    4115          * When @pwq is not linked, it doesn't hold any reference to the
5039          * @wq, and @wq is invalid to access.    4116          * @wq, and @wq is invalid to access.
5040          */                                      4117          */
5041         if (!list_empty(&pwq->pwqs_node)) {      4118         if (!list_empty(&pwq->pwqs_node)) {
5042                 mutex_lock(&wq->mutex);          4119                 mutex_lock(&wq->mutex);
5043                 list_del_rcu(&pwq->pwqs_node)    4120                 list_del_rcu(&pwq->pwqs_node);
5044                 is_last = list_empty(&wq->pwq    4121                 is_last = list_empty(&wq->pwqs);
5045                                               << 
5046                 /*                            << 
5047                  * For ordered workqueue with << 
5048                  */                           << 
5049                 if (!is_last && (wq->flags &  << 
5050                         unplug_oldest_pwq(wq) << 
5051                                               << 
5052                 mutex_unlock(&wq->mutex);        4122                 mutex_unlock(&wq->mutex);
5053         }                                        4123         }
5054                                                  4124 
5055         if (wq->flags & WQ_UNBOUND) {            4125         if (wq->flags & WQ_UNBOUND) {
5056                 mutex_lock(&wq_pool_mutex);      4126                 mutex_lock(&wq_pool_mutex);
5057                 put_unbound_pool(pool);          4127                 put_unbound_pool(pool);
5058                 mutex_unlock(&wq_pool_mutex);    4128                 mutex_unlock(&wq_pool_mutex);
5059         }                                        4129         }
5060                                                  4130 
5061         if (!list_empty(&pwq->pending_node))  !! 4131         call_rcu(&pwq->rcu, rcu_free_pwq);
5062                 struct wq_node_nr_active *nna << 
5063                         wq_node_nr_active(pwq << 
5064                                               << 
5065                 raw_spin_lock_irq(&nna->lock) << 
5066                 list_del_init(&pwq->pending_n << 
5067                 raw_spin_unlock_irq(&nna->loc << 
5068         }                                     << 
5069                                               << 
5070         kfree_rcu(pwq, rcu);                  << 
5071                                                  4132 
5072         /*                                       4133         /*
5073          * If we're the last pwq going away,     4134          * If we're the last pwq going away, @wq is already dead and no one
5074          * is gonna access it anymore.  Sched    4135          * is gonna access it anymore.  Schedule RCU free.
5075          */                                      4136          */
5076         if (is_last) {                           4137         if (is_last) {
5077                 wq_unregister_lockdep(wq);       4138                 wq_unregister_lockdep(wq);
5078                 call_rcu(&wq->rcu, rcu_free_w    4139                 call_rcu(&wq->rcu, rcu_free_wq);
5079         }                                        4140         }
5080 }                                                4141 }
5081                                                  4142 
                                                   >> 4143 /**
                                                   >> 4144  * pwq_adjust_max_active - update a pwq's max_active to the current setting
                                                   >> 4145  * @pwq: target pool_workqueue
                                                   >> 4146  *
                                                   >> 4147  * If @pwq isn't freezing, set @pwq->max_active to the associated
                                                   >> 4148  * workqueue's saved_max_active and activate inactive work items
                                                   >> 4149  * accordingly.  If @pwq is freezing, clear @pwq->max_active to zero.
                                                   >> 4150  */
                                                   >> 4151 static void pwq_adjust_max_active(struct pool_workqueue *pwq)
                                                   >> 4152 {
                                                   >> 4153         struct workqueue_struct *wq = pwq->wq;
                                                   >> 4154         bool freezable = wq->flags & WQ_FREEZABLE;
                                                   >> 4155         unsigned long flags;
                                                   >> 4156 
                                                   >> 4157         /* for @wq->saved_max_active */
                                                   >> 4158         lockdep_assert_held(&wq->mutex);
                                                   >> 4159 
                                                   >> 4160         /* fast exit for non-freezable wqs */
                                                   >> 4161         if (!freezable && pwq->max_active == wq->saved_max_active)
                                                   >> 4162                 return;
                                                   >> 4163 
                                                   >> 4164         /* this function can be called during early boot w/ irq disabled */
                                                   >> 4165         raw_spin_lock_irqsave(&pwq->pool->lock, flags);
                                                   >> 4166 
                                                   >> 4167         /*
                                                   >> 4168          * During [un]freezing, the caller is responsible for ensuring that
                                                   >> 4169          * this function is called at least once after @workqueue_freezing
                                                   >> 4170          * is updated and visible.
                                                   >> 4171          */
                                                   >> 4172         if (!freezable || !workqueue_freezing) {
                                                   >> 4173                 pwq->max_active = wq->saved_max_active;
                                                   >> 4174 
                                                   >> 4175                 while (!list_empty(&pwq->inactive_works) &&
                                                   >> 4176                        pwq->nr_active < pwq->max_active)
                                                   >> 4177                         pwq_activate_first_inactive(pwq);
                                                   >> 4178 
                                                   >> 4179                 kick_pool(pwq->pool);
                                                   >> 4180         } else {
                                                   >> 4181                 pwq->max_active = 0;
                                                   >> 4182         }
                                                   >> 4183 
                                                   >> 4184         raw_spin_unlock_irqrestore(&pwq->pool->lock, flags);
                                                   >> 4185 }
                                                   >> 4186 
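The main consumers of pwq_adjust_max_active() on the 6.6.58 side are the freezer paths, which walk every workqueue and refresh each pwq under wq->mutex after flipping workqueue_freezing. A simplified sketch of that caller pattern (hypothetical helper name, not the verbatim freeze/thaw code):

    static void adjust_all_max_active(void)
    {
            struct workqueue_struct *wq;
            struct pool_workqueue *pwq;

            mutex_lock(&wq_pool_mutex);
            list_for_each_entry(wq, &workqueues, list) {
                    mutex_lock(&wq->mutex);         /* covers wq->saved_max_active */
                    for_each_pwq(pwq, wq)           /* safe under wq->mutex */
                            pwq_adjust_max_active(pwq);
                    mutex_unlock(&wq->mutex);
            }
            mutex_unlock(&wq_pool_mutex);
    }
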
5082 /* initialize newly allocated @pwq which is a    4187 /* initialize newly allocated @pwq which is associated with @wq and @pool */
5083 static void init_pwq(struct pool_workqueue *p    4188 static void init_pwq(struct pool_workqueue *pwq, struct workqueue_struct *wq,
5084                      struct worker_pool *pool    4189                      struct worker_pool *pool)
5085 {                                                4190 {
5086         BUG_ON((unsigned long)pwq & ~WORK_STR !! 4191         BUG_ON((unsigned long)pwq & WORK_STRUCT_FLAG_MASK);
5087                                                  4192 
5088         memset(pwq, 0, sizeof(*pwq));            4193         memset(pwq, 0, sizeof(*pwq));
5089                                                  4194 
5090         pwq->pool = pool;                        4195         pwq->pool = pool;
5091         pwq->wq = wq;                            4196         pwq->wq = wq;
5092         pwq->flush_color = -1;                   4197         pwq->flush_color = -1;
5093         pwq->refcnt = 1;                         4198         pwq->refcnt = 1;
5094         INIT_LIST_HEAD(&pwq->inactive_works);    4199         INIT_LIST_HEAD(&pwq->inactive_works);
5095         INIT_LIST_HEAD(&pwq->pending_node);   << 
5096         INIT_LIST_HEAD(&pwq->pwqs_node);         4200         INIT_LIST_HEAD(&pwq->pwqs_node);
5097         INIT_LIST_HEAD(&pwq->mayday_node);       4201         INIT_LIST_HEAD(&pwq->mayday_node);
5098         kthread_init_work(&pwq->release_work,    4202         kthread_init_work(&pwq->release_work, pwq_release_workfn);
5099 }                                                4203 }
5100                                                  4204 
5101 /* sync @pwq with the current state of its as    4205 /* sync @pwq with the current state of its associated wq and link it */
5102 static void link_pwq(struct pool_workqueue *p    4206 static void link_pwq(struct pool_workqueue *pwq)
5103 {                                                4207 {
5104         struct workqueue_struct *wq = pwq->wq    4208         struct workqueue_struct *wq = pwq->wq;
5105                                                  4209 
5106         lockdep_assert_held(&wq->mutex);         4210         lockdep_assert_held(&wq->mutex);
5107                                                  4211 
5108         /* may be called multiple times, igno    4212         /* may be called multiple times, ignore if already linked */
5109         if (!list_empty(&pwq->pwqs_node))        4213         if (!list_empty(&pwq->pwqs_node))
5110                 return;                          4214                 return;
5111                                                  4215 
5112         /* set the matching work_color */        4216         /* set the matching work_color */
5113         pwq->work_color = wq->work_color;        4217         pwq->work_color = wq->work_color;
5114                                                  4218 
                                                   >> 4219         /* sync max_active to the current setting */
                                                   >> 4220         pwq_adjust_max_active(pwq);
                                                   >> 4221 
5115         /* link in @pwq */                       4222         /* link in @pwq */
5116         list_add_tail_rcu(&pwq->pwqs_node, &w !! 4223         list_add_rcu(&pwq->pwqs_node, &wq->pwqs);
5117 }                                                4224 }
5118                                                  4225 
5119 /* obtain a pool matching @attr and create a     4226 /* obtain a pool matching @attr and create a pwq associating the pool and @wq */
5120 static struct pool_workqueue *alloc_unbound_p    4227 static struct pool_workqueue *alloc_unbound_pwq(struct workqueue_struct *wq,
5121                                         const    4228                                         const struct workqueue_attrs *attrs)
5122 {                                                4229 {
5123         struct worker_pool *pool;                4230         struct worker_pool *pool;
5124         struct pool_workqueue *pwq;              4231         struct pool_workqueue *pwq;
5125                                                  4232 
5126         lockdep_assert_held(&wq_pool_mutex);     4233         lockdep_assert_held(&wq_pool_mutex);
5127                                                  4234 
5128         pool = get_unbound_pool(attrs);          4235         pool = get_unbound_pool(attrs);
5129         if (!pool)                               4236         if (!pool)
5130                 return NULL;                     4237                 return NULL;
5131                                                  4238 
5132         pwq = kmem_cache_alloc_node(pwq_cache    4239         pwq = kmem_cache_alloc_node(pwq_cache, GFP_KERNEL, pool->node);
5133         if (!pwq) {                              4240         if (!pwq) {
5134                 put_unbound_pool(pool);          4241                 put_unbound_pool(pool);
5135                 return NULL;                     4242                 return NULL;
5136         }                                        4243         }
5137                                                  4244 
5138         init_pwq(pwq, wq, pool);                 4245         init_pwq(pwq, wq, pool);
5139         return pwq;                              4246         return pwq;
5140 }                                                4247 }
5141                                                  4248 
5142 static void apply_wqattrs_lock(void)          << 
5143 {                                             << 
5144         mutex_lock(&wq_pool_mutex);           << 
5145 }                                             << 
5146                                               << 
5147 static void apply_wqattrs_unlock(void)        << 
5148 {                                             << 
5149         mutex_unlock(&wq_pool_mutex);         << 
5150 }                                             << 
5151                                               << 
5152 /**                                              4249 /**
5153  * wq_calc_pod_cpumask - calculate a wq_attrs    4250  * wq_calc_pod_cpumask - calculate a wq_attrs' cpumask for a pod
5154  * @attrs: the wq_attrs of the default pwq of    4251  * @attrs: the wq_attrs of the default pwq of the target workqueue
5155  * @cpu: the target CPU                          4252  * @cpu: the target CPU
                                                   >> 4253  * @cpu_going_down: if >= 0, the CPU to consider as offline
5156  *                                               4254  *
5157  * Calculate the cpumask a workqueue with @at !! 4255  * Calculate the cpumask a workqueue with @attrs should use on @pod. If
                                                   >> 4256  * @cpu_going_down is >= 0, that cpu is considered offline during calculation.
5158  * The result is stored in @attrs->__pod_cpum    4257  * The result is stored in @attrs->__pod_cpumask.
5159  *                                               4258  *
5160  * If pod affinity is not enabled, @attrs->cp    4259  * If pod affinity is not enabled, @attrs->cpumask is always used. If enabled
5161  * and @pod has online CPUs requested by @att    4260  * and @pod has online CPUs requested by @attrs, the returned cpumask is the
5162  * intersection of the possible CPUs of @pod     4261  * intersection of the possible CPUs of @pod and @attrs->cpumask.
5163  *                                               4262  *
5164  * The caller is responsible for ensuring tha    4263  * The caller is responsible for ensuring that the cpumask of @pod stays stable.
5165  */                                              4264  */
5166 static void wq_calc_pod_cpumask(struct workqu !! 4265 static void wq_calc_pod_cpumask(struct workqueue_attrs *attrs, int cpu,
                                                   >> 4266                                 int cpu_going_down)
5167 {                                                4267 {
5168         const struct wq_pod_type *pt = wqattr    4268         const struct wq_pod_type *pt = wqattrs_pod_type(attrs);
5169         int pod = pt->cpu_pod[cpu];              4269         int pod = pt->cpu_pod[cpu];
5170                                                  4270 
5171         /* calculate possible CPUs in @pod th << 
5172         cpumask_and(attrs->__pod_cpumask, pt- << 
5173         /* does @pod have any online CPUs @at    4271         /* does @pod have any online CPUs @attrs wants? */
5174         if (!cpumask_intersects(attrs->__pod_ !! 4272         cpumask_and(attrs->__pod_cpumask, pt->pod_cpus[pod], attrs->cpumask);
                                                   >> 4273         cpumask_and(attrs->__pod_cpumask, attrs->__pod_cpumask, cpu_online_mask);
                                                   >> 4274         if (cpu_going_down >= 0)
                                                   >> 4275                 cpumask_clear_cpu(cpu_going_down, attrs->__pod_cpumask);
                                                   >> 4276 
                                                   >> 4277         if (cpumask_empty(attrs->__pod_cpumask)) {
5175                 cpumask_copy(attrs->__pod_cpu    4278                 cpumask_copy(attrs->__pod_cpumask, attrs->cpumask);
5176                 return;                          4279                 return;
5177         }                                        4280         }
                                                   >> 4281 
                                                   >> 4282         /* yeap, return possible CPUs in @pod that @attrs wants */
                                                   >> 4283         cpumask_and(attrs->__pod_cpumask, attrs->cpumask, pt->pod_cpus[pod]);
                                                   >> 4284 
                                                   >> 4285         if (cpumask_empty(attrs->__pod_cpumask))
                                                   >> 4286                 pr_warn_once("WARNING: workqueue cpumask: online intersect > "
                                                   >> 4287                                 "possible intersect\n");
5178 }                                                4288 }
5179                                                  4289 
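The 6.11.5 side of wq_calc_pod_cpumask() reduces to: intersect the pod's possible CPUs with the workqueue's cpumask and, if that intersection contains no online CPU, fall back to the whole cpumask. A freestanding plain-C analogue of that decision (hypothetical names, bitmasks instead of struct cpumask):

    /* analogue of the pod-cpumask computation using plain bitmasks */
    unsigned long pod_effective_mask(unsigned long wq_mask,
                                     unsigned long pod_possible,
                                     unsigned long online)
    {
            unsigned long m = wq_mask & pod_possible;

            if (!(m & online))
                    return wq_mask; /* pod unusable, fall back to full cpumask */
            return m;
    }

    /* e.g. wq_mask 0x0f, pod CPUs 0x3c -> 0x0c while CPU 2 or 3 is online;
     * with both offline the result falls back to 0x0f */
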
5180 /* install @pwq into @wq and return the old p !! 4290 /* install @pwq into @wq's cpu_pwq and return the old pwq */
5181 static struct pool_workqueue *install_unbound    4291 static struct pool_workqueue *install_unbound_pwq(struct workqueue_struct *wq,
5182                                         int c    4292                                         int cpu, struct pool_workqueue *pwq)
5183 {                                                4293 {
5184         struct pool_workqueue __rcu **slot =  << 
5185         struct pool_workqueue *old_pwq;          4294         struct pool_workqueue *old_pwq;
5186                                                  4295 
5187         lockdep_assert_held(&wq_pool_mutex);     4296         lockdep_assert_held(&wq_pool_mutex);
5188         lockdep_assert_held(&wq->mutex);         4297         lockdep_assert_held(&wq->mutex);
5189                                                  4298 
5190         /* link_pwq() can handle duplicate ca    4299         /* link_pwq() can handle duplicate calls */
5191         link_pwq(pwq);                           4300         link_pwq(pwq);
5192                                                  4301 
5193         old_pwq = rcu_access_pointer(*slot);  !! 4302         old_pwq = rcu_access_pointer(*per_cpu_ptr(wq->cpu_pwq, cpu));
5194         rcu_assign_pointer(*slot, pwq);       !! 4303         rcu_assign_pointer(*per_cpu_ptr(wq->cpu_pwq, cpu), pwq);
5195         return old_pwq;                          4304         return old_pwq;
5196 }                                                4305 }
5197                                                  4306 
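The rcu_assign_pointer() in install_unbound_pwq() pairs with lockless readers on the queueing path, which pick up the per-CPU pwq slot under rcu_read_lock() rather than wq->mutex. Roughly (simplified reader-side sketch against the 6.6.58 layout):

    rcu_read_lock();
    pwq = rcu_dereference(*per_cpu_ptr(wq->cpu_pwq, cpu));
    /* ... queue the work item on pwq, pinning it via get_pwq() under pool->lock ... */
    rcu_read_unlock();
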
5198 /* context to store the prepared attrs & pwqs    4307 /* context to store the prepared attrs & pwqs before applying */
5199 struct apply_wqattrs_ctx {                       4308 struct apply_wqattrs_ctx {
5200         struct workqueue_struct *wq;             4309         struct workqueue_struct *wq;            /* target workqueue */
5201         struct workqueue_attrs  *attrs;          4310         struct workqueue_attrs  *attrs;         /* attrs to apply */
5202         struct list_head        list;            4311         struct list_head        list;           /* queued for batching commit */
5203         struct pool_workqueue   *dfl_pwq;        4312         struct pool_workqueue   *dfl_pwq;
5204         struct pool_workqueue   *pwq_tbl[];      4313         struct pool_workqueue   *pwq_tbl[];
5205 };                                               4314 };
5206                                                  4315 
5207 /* free the resources after success or abort     4316 /* free the resources after success or abort */
5208 static void apply_wqattrs_cleanup(struct appl    4317 static void apply_wqattrs_cleanup(struct apply_wqattrs_ctx *ctx)
5209 {                                                4318 {
5210         if (ctx) {                               4319         if (ctx) {
5211                 int cpu;                         4320                 int cpu;
5212                                                  4321 
5213                 for_each_possible_cpu(cpu)       4322                 for_each_possible_cpu(cpu)
5214                         put_pwq_unlocked(ctx-    4323                         put_pwq_unlocked(ctx->pwq_tbl[cpu]);
5215                 put_pwq_unlocked(ctx->dfl_pwq    4324                 put_pwq_unlocked(ctx->dfl_pwq);
5216                                                  4325 
5217                 free_workqueue_attrs(ctx->att    4326                 free_workqueue_attrs(ctx->attrs);
5218                                                  4327 
5219                 kfree(ctx);                      4328                 kfree(ctx);
5220         }                                        4329         }
5221 }                                                4330 }
5222                                                  4331 
5223 /* allocate the attrs and pwqs for later inst    4332 /* allocate the attrs and pwqs for later installation */
5224 static struct apply_wqattrs_ctx *                4333 static struct apply_wqattrs_ctx *
5225 apply_wqattrs_prepare(struct workqueue_struct    4334 apply_wqattrs_prepare(struct workqueue_struct *wq,
5226                       const struct workqueue_    4335                       const struct workqueue_attrs *attrs,
5227                       const cpumask_var_t unb    4336                       const cpumask_var_t unbound_cpumask)
5228 {                                                4337 {
5229         struct apply_wqattrs_ctx *ctx;           4338         struct apply_wqattrs_ctx *ctx;
5230         struct workqueue_attrs *new_attrs;       4339         struct workqueue_attrs *new_attrs;
5231         int cpu;                                 4340         int cpu;
5232                                                  4341 
5233         lockdep_assert_held(&wq_pool_mutex);     4342         lockdep_assert_held(&wq_pool_mutex);
5234                                                  4343 
5235         if (WARN_ON(attrs->affn_scope < 0 ||     4344         if (WARN_ON(attrs->affn_scope < 0 ||
5236                     attrs->affn_scope >= WQ_A    4345                     attrs->affn_scope >= WQ_AFFN_NR_TYPES))
5237                 return ERR_PTR(-EINVAL);         4346                 return ERR_PTR(-EINVAL);
5238                                                  4347 
5239         ctx = kzalloc(struct_size(ctx, pwq_tb    4348         ctx = kzalloc(struct_size(ctx, pwq_tbl, nr_cpu_ids), GFP_KERNEL);
5240                                                  4349 
5241         new_attrs = alloc_workqueue_attrs();     4350         new_attrs = alloc_workqueue_attrs();
5242         if (!ctx || !new_attrs)                  4351         if (!ctx || !new_attrs)
5243                 goto out_free;                   4352                 goto out_free;
5244                                                  4353 
5245         /*                                       4354         /*
5246          * If something goes wrong during CPU    4355          * If something goes wrong during CPU up/down, we'll fall back to
5247          * the default pwq covering whole @at    4356          * the default pwq covering whole @attrs->cpumask.  Always create
5248          * it even if we don't use it immedia    4357          * it even if we don't use it immediately.
5249          */                                      4358          */
5250         copy_workqueue_attrs(new_attrs, attrs    4359         copy_workqueue_attrs(new_attrs, attrs);
5251         wqattrs_actualize_cpumask(new_attrs,     4360         wqattrs_actualize_cpumask(new_attrs, unbound_cpumask);
5252         cpumask_copy(new_attrs->__pod_cpumask    4361         cpumask_copy(new_attrs->__pod_cpumask, new_attrs->cpumask);
5253         ctx->dfl_pwq = alloc_unbound_pwq(wq,     4362         ctx->dfl_pwq = alloc_unbound_pwq(wq, new_attrs);
5254         if (!ctx->dfl_pwq)                       4363         if (!ctx->dfl_pwq)
5255                 goto out_free;                   4364                 goto out_free;
5256                                                  4365 
5257         for_each_possible_cpu(cpu) {             4366         for_each_possible_cpu(cpu) {
5258                 if (new_attrs->ordered) {        4367                 if (new_attrs->ordered) {
5259                         ctx->dfl_pwq->refcnt+    4368                         ctx->dfl_pwq->refcnt++;
5260                         ctx->pwq_tbl[cpu] = c    4369                         ctx->pwq_tbl[cpu] = ctx->dfl_pwq;
5261                 } else {                         4370                 } else {
5262                         wq_calc_pod_cpumask(n !! 4371                         wq_calc_pod_cpumask(new_attrs, cpu, -1);
5263                         ctx->pwq_tbl[cpu] = a    4372                         ctx->pwq_tbl[cpu] = alloc_unbound_pwq(wq, new_attrs);
5264                         if (!ctx->pwq_tbl[cpu    4373                         if (!ctx->pwq_tbl[cpu])
5265                                 goto out_free    4374                                 goto out_free;
5266                 }                                4375                 }
5267         }                                        4376         }
5268                                                  4377 
5269         /* save the user configured attrs and    4378         /* save the user configured attrs and sanitize it. */
5270         copy_workqueue_attrs(new_attrs, attrs    4379         copy_workqueue_attrs(new_attrs, attrs);
5271         cpumask_and(new_attrs->cpumask, new_a    4380         cpumask_and(new_attrs->cpumask, new_attrs->cpumask, cpu_possible_mask);
5272         cpumask_copy(new_attrs->__pod_cpumask    4381         cpumask_copy(new_attrs->__pod_cpumask, new_attrs->cpumask);
5273         ctx->attrs = new_attrs;                  4382         ctx->attrs = new_attrs;
5274                                                  4383 
5275         /*                                    << 
5276          * For initialized ordered workqueues << 
5277          * (dfl_pwq). Set the plugged flag of << 
5278          * of newly queued work items until e << 
5279          * the old pwq's have completed.      << 
5280          */                                   << 
5281         if ((wq->flags & __WQ_ORDERED) && !li << 
5282                 ctx->dfl_pwq->plugged = true; << 
5283                                               << 
5284         ctx->wq = wq;                            4384         ctx->wq = wq;
5285         return ctx;                              4385         return ctx;
5286                                                  4386 
5287 out_free:                                        4387 out_free:
5288         free_workqueue_attrs(new_attrs);         4388         free_workqueue_attrs(new_attrs);
5289         apply_wqattrs_cleanup(ctx);              4389         apply_wqattrs_cleanup(ctx);
5290         return ERR_PTR(-ENOMEM);                 4390         return ERR_PTR(-ENOMEM);
5291 }                                                4391 }
5292                                                  4392 
5293 /* set attrs and install prepared pwqs, @ctx     4393 /* set attrs and install prepared pwqs, @ctx points to old pwqs on return */
5294 static void apply_wqattrs_commit(struct apply    4394 static void apply_wqattrs_commit(struct apply_wqattrs_ctx *ctx)
5295 {                                                4395 {
5296         int cpu;                                 4396         int cpu;
5297                                                  4397 
5298         /* all pwqs have been created success    4398         /* all pwqs have been created successfully, let's install'em */
5299         mutex_lock(&ctx->wq->mutex);             4399         mutex_lock(&ctx->wq->mutex);
5300                                                  4400 
5301         copy_workqueue_attrs(ctx->wq->unbound    4401         copy_workqueue_attrs(ctx->wq->unbound_attrs, ctx->attrs);
5302                                                  4402 
5303         /* save the previous pwqs and install !! 4403         /* save the previous pwq and install the new one */
5304         for_each_possible_cpu(cpu)               4404         for_each_possible_cpu(cpu)
5305                 ctx->pwq_tbl[cpu] = install_u    4405                 ctx->pwq_tbl[cpu] = install_unbound_pwq(ctx->wq, cpu,
5306                                                  4406                                                         ctx->pwq_tbl[cpu]);
5307         ctx->dfl_pwq = install_unbound_pwq(ct << 
5308                                                  4407 
5309         /* update node_nr_active->max */      !! 4408         /* @dfl_pwq might not have been used, ensure it's linked */
5310         wq_update_node_max_active(ctx->wq, -1 !! 4409         link_pwq(ctx->dfl_pwq);
5311                                               !! 4410         swap(ctx->wq->dfl_pwq, ctx->dfl_pwq);
5312         /* rescuer needs to respect wq cpumas << 
5313         if (ctx->wq->rescuer)                 << 
5314                 set_cpus_allowed_ptr(ctx->wq- << 
5315                                      unbound_ << 
5316                                                  4411 
5317         mutex_unlock(&ctx->wq->mutex);           4412         mutex_unlock(&ctx->wq->mutex);
5318 }                                                4413 }
5319                                                  4414 
                                                   >> 4415 static void apply_wqattrs_lock(void)
                                                   >> 4416 {
                                                   >> 4417         /* CPUs should stay stable across pwq creations and installations */
                                                   >> 4418         cpus_read_lock();
                                                   >> 4419         mutex_lock(&wq_pool_mutex);
                                                   >> 4420 }
                                                   >> 4421 
                                                   >> 4422 static void apply_wqattrs_unlock(void)
                                                   >> 4423 {
                                                   >> 4424         mutex_unlock(&wq_pool_mutex);
                                                   >> 4425         cpus_read_unlock();
                                                   >> 4426 }
                                                   >> 4427 
5320 static int apply_workqueue_attrs_locked(struc    4428 static int apply_workqueue_attrs_locked(struct workqueue_struct *wq,
5321                                         const    4429                                         const struct workqueue_attrs *attrs)
5322 {                                                4430 {
5323         struct apply_wqattrs_ctx *ctx;           4431         struct apply_wqattrs_ctx *ctx;
5324                                                  4432 
5325         /* only unbound workqueues can change    4433         /* only unbound workqueues can change attributes */
5326         if (WARN_ON(!(wq->flags & WQ_UNBOUND)    4434         if (WARN_ON(!(wq->flags & WQ_UNBOUND)))
5327                 return -EINVAL;                  4435                 return -EINVAL;
5328                                                  4436 
                                                   >> 4437         /* creating multiple pwqs breaks ordering guarantee */
                                                   >> 4438         if (!list_empty(&wq->pwqs)) {
                                                   >> 4439                 if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
                                                   >> 4440                         return -EINVAL;
                                                   >> 4441 
                                                   >> 4442                 wq->flags &= ~__WQ_ORDERED;
                                                   >> 4443         }
                                                   >> 4444 
5329         ctx = apply_wqattrs_prepare(wq, attrs    4445         ctx = apply_wqattrs_prepare(wq, attrs, wq_unbound_cpumask);
5330         if (IS_ERR(ctx))                         4446         if (IS_ERR(ctx))
5331                 return PTR_ERR(ctx);             4447                 return PTR_ERR(ctx);
5332                                                  4448 
5333         /* the ctx has been prepared successf    4449         /* the ctx has been prepared successfully, let's commit it */
5334         apply_wqattrs_commit(ctx);               4450         apply_wqattrs_commit(ctx);
5335         apply_wqattrs_cleanup(ctx);              4451         apply_wqattrs_cleanup(ctx);
5336                                                  4452 
5337         return 0;                                4453         return 0;
5338 }                                                4454 }
5339                                                  4455 
5340 /**                                              4456 /**
5341  * apply_workqueue_attrs - apply new workqueu    4457  * apply_workqueue_attrs - apply new workqueue_attrs to an unbound workqueue
5342  * @wq: the target workqueue                     4458  * @wq: the target workqueue
5343  * @attrs: the workqueue_attrs to apply, allo    4459  * @attrs: the workqueue_attrs to apply, allocated with alloc_workqueue_attrs()
5344  *                                               4460  *
5345  * Apply @attrs to an unbound workqueue @wq.     4461  * Apply @attrs to an unbound workqueue @wq. Unless disabled, this function maps
5346  * a separate pwq to each CPU pod with possib    4462  * a separate pwq to each CPU pod with possible CPUs in @attrs->cpumask so that
5347  * work items are affine to the pod it was is    4463  * work items are affine to the pod they were issued on. Older pwqs are released as
5348  * in-flight work items finish. Note that a w    4464  * in-flight work items finish. Note that a work item which repeatedly requeues
5349  * itself back-to-back will stay on its curre    4465  * itself back-to-back will stay on its current pwq.
5350  *                                               4466  *
5351  * Performs GFP_KERNEL allocations.              4467  * Performs GFP_KERNEL allocations.
5352  *                                               4468  *
                                                   >> 4469  * Assumes caller has CPU hotplug read exclusion, i.e. cpus_read_lock().
                                                   >> 4470  *
5353  * Return: 0 on success and -errno on failure    4471  * Return: 0 on success and -errno on failure.
5354  */                                              4472  */
5355 int apply_workqueue_attrs(struct workqueue_st    4473 int apply_workqueue_attrs(struct workqueue_struct *wq,
5356                           const struct workqu    4474                           const struct workqueue_attrs *attrs)
5357 {                                                4475 {
5358         int ret;                                 4476         int ret;
5359                                                  4477 
                                                   >> 4478         lockdep_assert_cpus_held();
                                                   >> 4479 
5360         mutex_lock(&wq_pool_mutex);              4480         mutex_lock(&wq_pool_mutex);
5361         ret = apply_workqueue_attrs_locked(wq    4481         ret = apply_workqueue_attrs_locked(wq, attrs);
5362         mutex_unlock(&wq_pool_mutex);            4482         mutex_unlock(&wq_pool_mutex);
5363                                                  4483 
5364         return ret;                              4484         return ret;
5365 }                                                4485 }
5366                                                  4486 
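A minimal caller builds a workqueue_attrs with alloc_workqueue_attrs(), fills in the fields it cares about and hands it to apply_workqueue_attrs(). Sketch only: "my_wq" is a hypothetical WQ_UNBOUND (non-ordered) workqueue, and whether the caller must also hold CPU hotplug read exclusion depends on which side of this diff is in use (see the kerneldoc above):

    static int my_wq_pin_to_low_cpus(struct workqueue_struct *my_wq)
    {
            struct workqueue_attrs *attrs;
            int cpu, ret;

            attrs = alloc_workqueue_attrs();
            if (!attrs)
                    return -ENOMEM;

            attrs->nice = -5;                       /* boost the workers a bit */
            cpumask_clear(attrs->cpumask);
            for (cpu = 0; cpu < 4 && cpu < nr_cpu_ids; cpu++)
                    cpumask_set_cpu(cpu, attrs->cpumask);

            ret = apply_workqueue_attrs(my_wq, attrs);
            free_workqueue_attrs(attrs);
            return ret;
    }
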
5367 /**                                              4487 /**
5368  * unbound_wq_update_pwq - update a pwq slot  !! 4488  * wq_update_pod - update pod affinity of a wq for CPU hot[un]plug
5369  * @wq: the target workqueue                     4489  * @wq: the target workqueue
5370  * @cpu: the CPU to update the pwq slot for   !! 4490  * @cpu: the CPU to update pool association for
                                                   >> 4491  * @hotplug_cpu: the CPU coming up or going down
                                                   >> 4492  * @online: whether @cpu is coming up or going down
5371  *                                               4493  *
5372  * This function is to be called from %CPU_DO    4494  * This function is to be called from %CPU_DOWN_PREPARE, %CPU_ONLINE and
5373  * %CPU_DOWN_FAILED.  @cpu is in the same pod !! 4495  * %CPU_DOWN_FAILED.  @cpu is being hot[un]plugged, update pod affinity of
                                                   >> 4496  * @wq accordingly.
5374  *                                               4497  *
5375  *                                               4498  *
5376  * If pod affinity can't be adjusted due to m    4499  * If pod affinity can't be adjusted due to memory allocation failure, it falls
5377  * back to @wq->dfl_pwq which may not be opti    4500  * back to @wq->dfl_pwq which may not be optimal but is always correct.
5378  *                                               4501  *
5379  * Note that when the last allowed CPU of a p    4502  * Note that when the last allowed CPU of a pod goes offline for a workqueue
5380  * with a cpumask spanning multiple pods, the    4503  * with a cpumask spanning multiple pods, the workers which were already
5381  * executing the work items for the workqueue    4504  * executing the work items for the workqueue will lose their CPU affinity and
5382  * may execute on any CPU. This is similar to    4505  * may execute on any CPU. This is similar to how per-cpu workqueues behave on
5383  * CPU_DOWN. If a workqueue user wants strict    4506  * CPU_DOWN. If a workqueue user wants strict affinity, it's the user's
5384  * responsibility to flush the work item from    4507  * responsibility to flush the work item from CPU_DOWN_PREPARE.
5385  */                                              4508  */
5386 static void unbound_wq_update_pwq(struct work !! 4509 static void wq_update_pod(struct workqueue_struct *wq, int cpu,
                                                   >> 4510                           int hotplug_cpu, bool online)
5387 {                                                4511 {
                                                   >> 4512         int off_cpu = online ? -1 : hotplug_cpu;
5388         struct pool_workqueue *old_pwq = NULL    4513         struct pool_workqueue *old_pwq = NULL, *pwq;
5389         struct workqueue_attrs *target_attrs;    4514         struct workqueue_attrs *target_attrs;
5390                                                  4515 
5391         lockdep_assert_held(&wq_pool_mutex);     4516         lockdep_assert_held(&wq_pool_mutex);
5392                                                  4517 
5393         if (!(wq->flags & WQ_UNBOUND) || wq->    4518         if (!(wq->flags & WQ_UNBOUND) || wq->unbound_attrs->ordered)
5394                 return;                          4519                 return;
5395                                                  4520 
5396         /*                                       4521         /*
5397          * We don't wanna alloc/free wq_attrs    4522          * We don't wanna alloc/free wq_attrs for each wq for each CPU.
5398          * Let's use a preallocated one.  The    4523          * Let's use a preallocated one.  The following buf is protected by
5399          * CPU hotplug exclusion.                4524          * CPU hotplug exclusion.
5400          */                                      4525          */
5401         target_attrs = unbound_wq_update_pwq_ !! 4526         target_attrs = wq_update_pod_attrs_buf;
5402                                                  4527 
5403         copy_workqueue_attrs(target_attrs, wq    4528         copy_workqueue_attrs(target_attrs, wq->unbound_attrs);
5404         wqattrs_actualize_cpumask(target_attr    4529         wqattrs_actualize_cpumask(target_attrs, wq_unbound_cpumask);
5405                                                  4530 
5406         /* nothing to do if the target cpumas    4531         /* nothing to do if the target cpumask matches the current pwq */
5407         wq_calc_pod_cpumask(target_attrs, cpu !! 4532         wq_calc_pod_cpumask(target_attrs, cpu, off_cpu);
5408         if (wqattrs_equal(target_attrs, unbou !! 4533         pwq = rcu_dereference_protected(*per_cpu_ptr(wq->cpu_pwq, cpu),
                                                   >> 4534                                         lockdep_is_held(&wq_pool_mutex));
                                                   >> 4535         if (wqattrs_equal(target_attrs, pwq->pool->attrs))
5409                 return;                          4536                 return;
5410                                                  4537 
5411         /* create a new pwq */                   4538         /* create a new pwq */
5412         pwq = alloc_unbound_pwq(wq, target_at    4539         pwq = alloc_unbound_pwq(wq, target_attrs);
5413         if (!pwq) {                              4540         if (!pwq) {
5414                 pr_warn("workqueue: allocatio    4541                 pr_warn("workqueue: allocation failed while updating CPU pod affinity of \"%s\"\n",
5415                         wq->name);               4542                         wq->name);
5416                 goto use_dfl_pwq;                4543                 goto use_dfl_pwq;
5417         }                                        4544         }
5418                                                  4545 
5419         /* Install the new pwq. */               4546         /* Install the new pwq. */
5420         mutex_lock(&wq->mutex);                  4547         mutex_lock(&wq->mutex);
5421         old_pwq = install_unbound_pwq(wq, cpu    4548         old_pwq = install_unbound_pwq(wq, cpu, pwq);
5422         goto out_unlock;                         4549         goto out_unlock;
5423                                                  4550 
5424 use_dfl_pwq:                                     4551 use_dfl_pwq:
5425         mutex_lock(&wq->mutex);                  4552         mutex_lock(&wq->mutex);
5426         pwq = unbound_pwq(wq, -1);            !! 4553         raw_spin_lock_irq(&wq->dfl_pwq->pool->lock);
5427         raw_spin_lock_irq(&pwq->pool->lock);  !! 4554         get_pwq(wq->dfl_pwq);
5428         get_pwq(pwq);                         !! 4555         raw_spin_unlock_irq(&wq->dfl_pwq->pool->lock);
5429         raw_spin_unlock_irq(&pwq->pool->lock) !! 4556         old_pwq = install_unbound_pwq(wq, cpu, wq->dfl_pwq);
5430         old_pwq = install_unbound_pwq(wq, cpu << 
5431 out_unlock:                                      4557 out_unlock:
5432         mutex_unlock(&wq->mutex);                4558         mutex_unlock(&wq->mutex);
5433         put_pwq_unlocked(old_pwq);               4559         put_pwq_unlocked(old_pwq);
5434 }                                                4560 }
5435                                                  4561 
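On the 6.6.58 side this function is driven from the CPU hotplug callbacks, which walk every workqueue and refresh the pwq slot of every CPU sharing a pod with the CPU going up or down. Simplified sketch of that caller (close to, but not verbatim, workqueue_online_cpu()):

    /* inside the CPU-online callback, with wq_pool_mutex held */
    list_for_each_entry(wq, &workqueues, list) {
            struct workqueue_attrs *attrs = wq->unbound_attrs;

            if (attrs) {
                    const struct wq_pod_type *pt = wqattrs_pod_type(attrs);
                    int tcpu;

                    for_each_cpu(tcpu, pt->pod_cpus[pt->cpu_pod[cpu]])
                            wq_update_pod(wq, tcpu, cpu, true);
            }
    }
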
5436 static int alloc_and_link_pwqs(struct workque    4562 static int alloc_and_link_pwqs(struct workqueue_struct *wq)
5437 {                                                4563 {
5438         bool highpri = wq->flags & WQ_HIGHPRI    4564         bool highpri = wq->flags & WQ_HIGHPRI;
5439         int cpu, ret;                            4565         int cpu, ret;
5440                                                  4566 
5441         lockdep_assert_held(&wq_pool_mutex);  << 
5442                                               << 
5443         wq->cpu_pwq = alloc_percpu(struct poo    4567         wq->cpu_pwq = alloc_percpu(struct pool_workqueue *);
5444         if (!wq->cpu_pwq)                        4568         if (!wq->cpu_pwq)
5445                 goto enomem;                     4569                 goto enomem;
5446                                                  4570 
5447         if (!(wq->flags & WQ_UNBOUND)) {         4571         if (!(wq->flags & WQ_UNBOUND)) {
5448                 struct worker_pool __percpu * << 
5449                                               << 
5450                 if (wq->flags & WQ_BH)        << 
5451                         pools = bh_worker_poo << 
5452                 else                          << 
5453                         pools = cpu_worker_po << 
5454                                               << 
5455                 for_each_possible_cpu(cpu) {     4572                 for_each_possible_cpu(cpu) {
5456                         struct pool_workqueue !! 4573                         struct pool_workqueue **pwq_p =
5457                         struct worker_pool *p !! 4574                                 per_cpu_ptr(wq->cpu_pwq, cpu);
5458                                               !! 4575                         struct worker_pool *pool =
5459                         pool = &(per_cpu_ptr( !! 4576                                 &(per_cpu_ptr(cpu_worker_pools, cpu)[highpri]);
5460                         pwq_p = per_cpu_ptr(w << 
5461                                                  4577 
5462                         *pwq_p = kmem_cache_a    4578                         *pwq_p = kmem_cache_alloc_node(pwq_cache, GFP_KERNEL,
5463                                                  4579                                                        pool->node);
5464                         if (!*pwq_p)             4580                         if (!*pwq_p)
5465                                 goto enomem;     4581                                 goto enomem;
5466                                                  4582 
5467                         init_pwq(*pwq_p, wq,     4583                         init_pwq(*pwq_p, wq, pool);
5468                                                  4584 
5469                         mutex_lock(&wq->mutex    4585                         mutex_lock(&wq->mutex);
5470                         link_pwq(*pwq_p);        4586                         link_pwq(*pwq_p);
5471                         mutex_unlock(&wq->mut    4587                         mutex_unlock(&wq->mutex);
5472                 }                                4588                 }
5473                 return 0;                        4589                 return 0;
5474         }                                        4590         }
5475                                                  4591 
                                                   >> 4592         cpus_read_lock();
5476         if (wq->flags & __WQ_ORDERED) {          4593         if (wq->flags & __WQ_ORDERED) {
5477                 struct pool_workqueue *dfl_pw !! 4594                 ret = apply_workqueue_attrs(wq, ordered_wq_attrs[highpri]);
5478                                               << 
5479                 ret = apply_workqueue_attrs_l << 
5480                 /* there should only be singl    4595                 /* there should only be single pwq for ordering guarantee */
5481                 dfl_pwq = rcu_access_pointer( !! 4596                 WARN(!ret && (wq->pwqs.next != &wq->dfl_pwq->pwqs_node ||
5482                 WARN(!ret && (wq->pwqs.next ! !! 4597                               wq->pwqs.prev != &wq->dfl_pwq->pwqs_node),
5483                               wq->pwqs.prev ! << 
5484                      "ordering guarantee brok    4598                      "ordering guarantee broken for workqueue %s\n", wq->name);
5485         } else {                                 4599         } else {
5486                 ret = apply_workqueue_attrs_l !! 4600                 ret = apply_workqueue_attrs(wq, unbound_std_wq_attrs[highpri]);
5487         }                                        4601         }
                                                   >> 4602         cpus_read_unlock();
                                                   >> 4603 
                                                   >> 4604         /* for unbound pwq, flushing pwq_release_worker ensures that the
                                                   >> 4605          * pwq_release_workfn() completes before calling kfree(wq).
                                                   >> 4606          */
                                                   >> 4607         if (ret)
                                                   >> 4608                 kthread_flush_worker(pwq_release_worker);
5488                                                  4609 
5489         return ret;                              4610         return ret;
5490                                                  4611 
5491 enomem:                                          4612 enomem:
5492         if (wq->cpu_pwq) {                       4613         if (wq->cpu_pwq) {
5493                 for_each_possible_cpu(cpu) {     4614                 for_each_possible_cpu(cpu) {
5494                         struct pool_workqueue    4615                         struct pool_workqueue *pwq = *per_cpu_ptr(wq->cpu_pwq, cpu);
5495                                                  4616 
5496                         if (pwq)                 4617                         if (pwq)
5497                                 kmem_cache_fr    4618                                 kmem_cache_free(pwq_cache, pwq);
5498                 }                                4619                 }
5499                 free_percpu(wq->cpu_pwq);        4620                 free_percpu(wq->cpu_pwq);
5500                 wq->cpu_pwq = NULL;              4621                 wq->cpu_pwq = NULL;
5501         }                                        4622         }
5502         return -ENOMEM;                          4623         return -ENOMEM;
5503 }                                                4624 }
5504                                                  4625 
5505 static int wq_clamp_max_active(int max_active    4626 static int wq_clamp_max_active(int max_active, unsigned int flags,
5506                                const char *na    4627                                const char *name)
5507 {                                                4628 {
5508         if (max_active < 1 || max_active > WQ    4629         if (max_active < 1 || max_active > WQ_MAX_ACTIVE)
5509                 pr_warn("workqueue: max_activ    4630                 pr_warn("workqueue: max_active %d requested for %s is out of range, clamping between %d and %d\n",
5510                         max_active, name, 1,     4631                         max_active, name, 1, WQ_MAX_ACTIVE);
5511                                                  4632 
5512         return clamp_val(max_active, 1, WQ_MA    4633         return clamp_val(max_active, 1, WQ_MAX_ACTIVE);
5513 }                                                4634 }
5514                                                  4635 
5515 /*                                               4636 /*
5516  * Workqueues which may be used during memory    4637  * Workqueues which may be used during memory reclaim should have a rescuer
5517  * to guarantee forward progress.                4638  * to guarantee forward progress.
5518  */                                              4639  */
5519 static int init_rescuer(struct workqueue_stru    4640 static int init_rescuer(struct workqueue_struct *wq)
5520 {                                                4641 {
5521         struct worker *rescuer;                  4642         struct worker *rescuer;
5522         char id_buf[WORKER_ID_LEN];           << 
5523         int ret;                                 4643         int ret;
5524                                                  4644 
5525         lockdep_assert_held(&wq_pool_mutex);  << 
5526                                               << 
5527         if (!(wq->flags & WQ_MEM_RECLAIM))       4645         if (!(wq->flags & WQ_MEM_RECLAIM))
5528                 return 0;                        4646                 return 0;
5529                                                  4647 
5530         rescuer = alloc_worker(NUMA_NO_NODE);    4648         rescuer = alloc_worker(NUMA_NO_NODE);
5531         if (!rescuer) {                          4649         if (!rescuer) {
5532                 pr_err("workqueue: Failed to     4650                 pr_err("workqueue: Failed to allocate a rescuer for wq \"%s\"\n",
5533                        wq->name);                4651                        wq->name);
5534                 return -ENOMEM;                  4652                 return -ENOMEM;
5535         }                                        4653         }
5536                                                  4654 
5537         rescuer->rescue_wq = wq;                 4655         rescuer->rescue_wq = wq;
5538         format_worker_id(id_buf, sizeof(id_bu !! 4656         rescuer->task = kthread_create(rescuer_thread, rescuer, "kworker/R-%s", wq->name);
5539                                               << 
5540         rescuer->task = kthread_create(rescue << 
5541         if (IS_ERR(rescuer->task)) {             4657         if (IS_ERR(rescuer->task)) {
5542                 ret = PTR_ERR(rescuer->task);    4658                 ret = PTR_ERR(rescuer->task);
5543                 pr_err("workqueue: Failed to     4659                 pr_err("workqueue: Failed to create a rescuer kthread for wq \"%s\": %pe",
5544                        wq->name, ERR_PTR(ret)    4660                        wq->name, ERR_PTR(ret));
5545                 kfree(rescuer);                  4661                 kfree(rescuer);
5546                 return ret;                      4662                 return ret;
5547         }                                        4663         }
5548                                                  4664 
5549         wq->rescuer = rescuer;                   4665         wq->rescuer = rescuer;
5550         if (wq->flags & WQ_UNBOUND)           !! 4666         kthread_bind_mask(rescuer->task, cpu_possible_mask);
5551                 kthread_bind_mask(rescuer->ta << 
5552         else                                  << 
5553                 kthread_bind_mask(rescuer->ta << 
5554         wake_up_process(rescuer->task);          4667         wake_up_process(rescuer->task);
5555                                                  4668 
5556         return 0;                                4669         return 0;
5557 }                                                4670 }
5558                                                  4671 
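For API users the rule is: any workqueue that may be flushed or depended upon on the memory-reclaim path must be created with WQ_MEM_RECLAIM so that this rescuer ("kworker/R-<name>") exists. A typical allocation (hypothetical names):

    static struct workqueue_struct *my_writeback_wq;

    static int __init my_driver_init(void)
    {
            /* WQ_MEM_RECLAIM attaches a rescuer; max_active 0 means the default */
            my_writeback_wq = alloc_workqueue("my_writeback", WQ_MEM_RECLAIM, 0);
            if (!my_writeback_wq)
                    return -ENOMEM;
            return 0;
    }
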
5559 /**                                           << 
5560  * wq_adjust_max_active - update a wq's max_a << 
5561  * @wq: target workqueue                      << 
5562  *                                            << 
5563  * If @wq isn't freezing, set @wq->max_active << 
5564  * activate inactive work items accordingly.  << 
5565  * @wq->max_active to zero.                   << 
5566  */                                           << 
5567 static void wq_adjust_max_active(struct workq << 
5568 {                                             << 
5569         bool activated;                       << 
5570         int new_max, new_min;                 << 
5571                                               << 
5572         lockdep_assert_held(&wq->mutex);      << 
5573                                               << 
5574         if ((wq->flags & WQ_FREEZABLE) && wor << 
5575                 new_max = 0;                  << 
5576                 new_min = 0;                  << 
5577         } else {                              << 
5578                 new_max = wq->saved_max_activ << 
5579                 new_min = wq->saved_min_activ << 
5580         }                                     << 
5581                                               << 
5582         if (wq->max_active == new_max && wq-> << 
5583                 return;                       << 
5584                                               << 
5585         /*                                    << 
5586          * Update @wq->max/min_active and the << 
5587          * active work items are allowed. Thi << 
5588          * because new work items are always  << 
5589          * work items if there are any.       << 
5590          */                                   << 
5591         WRITE_ONCE(wq->max_active, new_max);  << 
5592         WRITE_ONCE(wq->min_active, new_min);  << 
5593                                               << 
5594         if (wq->flags & WQ_UNBOUND)           << 
5595                 wq_update_node_max_active(wq, << 
5596                                               << 
5597         if (new_max == 0)                     << 
5598                 return;                       << 
5599                                               << 
5600         /*                                    << 
5601          * Round-robin through pwq's activati << 
5602          * until max_active is filled.        << 
5603          */                                   << 
5604         do {                                  << 
5605                 struct pool_workqueue *pwq;   << 
5606                                               << 
5607                 activated = false;            << 
5608                 for_each_pwq(pwq, wq) {       << 
5609                         unsigned long irq_fla << 
5610                                               << 
5611                         /* can be called duri << 
5612                         raw_spin_lock_irqsave << 
5613                         if (pwq_activate_firs << 
5614                                 activated = t << 
5615                                 kick_pool(pwq << 
5616                         }                     << 
5617                         raw_spin_unlock_irqre << 
5618                 }                             << 
5619         } while (activated);                  << 
5620 }                                             << 
5621                                               << 
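saved_max_active/saved_min_active hold what the API asked for; wq_adjust_max_active() folds in the freeze state and propagates the effective limits. A runtime change therefore takes effect through this path, e.g. (hypothetical, non-ordered unbound workqueue):

    /* updates wq->saved_max_active and then calls wq_adjust_max_active() */
    workqueue_set_max_active(my_wq, 16);
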
5622 __printf(1, 4)                                   4672 __printf(1, 4)
5623 struct workqueue_struct *alloc_workqueue(cons    4673 struct workqueue_struct *alloc_workqueue(const char *fmt,
5624                                          unsi    4674                                          unsigned int flags,
5625                                          int     4675                                          int max_active, ...)
5626 {                                                4676 {
5627         va_list args;                            4677         va_list args;
5628         struct workqueue_struct *wq;             4678         struct workqueue_struct *wq;
5629         size_t wq_size;                       !! 4679         struct pool_workqueue *pwq;
5630         int name_len;                         << 
5631                                                  4680 
5632         if (flags & WQ_BH) {                  !! 4681         /*
5633                 if (WARN_ON_ONCE(flags & ~__W !! 4682          * Unbound && max_active == 1 used to imply ordered, which is no longer
5634                         return NULL;          !! 4683          * the case on many machines due to per-pod pools. While
5635                 if (WARN_ON_ONCE(max_active)) !! 4684          * alloc_ordered_workqueue() is the right way to create an ordered
5636                         return NULL;          !! 4685          * workqueue, keep the previous behavior to avoid subtle breakages.
5637         }                                     !! 4686          */
                                                   >> 4687         if ((flags & WQ_UNBOUND) && max_active == 1)
                                                   >> 4688                 flags |= __WQ_ORDERED;
5638                                                  4689 
5639         /* see the comment above the definiti    4690         /* see the comment above the definition of WQ_POWER_EFFICIENT */
5640         if ((flags & WQ_POWER_EFFICIENT) && w    4691         if ((flags & WQ_POWER_EFFICIENT) && wq_power_efficient)
5641                 flags |= WQ_UNBOUND;             4692                 flags |= WQ_UNBOUND;
5642                                                  4693 
5643         /* allocate wq and format name */        4694         /* allocate wq and format name */
5644         if (flags & WQ_UNBOUND)               !! 4695         wq = kzalloc(sizeof(*wq), GFP_KERNEL);
5645                 wq_size = struct_size(wq, nod << 
5646         else                                  << 
5647                 wq_size = sizeof(*wq);        << 
5648                                               << 
5649         wq = kzalloc(wq_size, GFP_KERNEL);    << 
5650         if (!wq)                                 4696         if (!wq)
5651                 return NULL;                     4697                 return NULL;
5652                                                  4698 
5653         if (flags & WQ_UNBOUND) {                4699         if (flags & WQ_UNBOUND) {
5654                 wq->unbound_attrs = alloc_wor    4700                 wq->unbound_attrs = alloc_workqueue_attrs();
5655                 if (!wq->unbound_attrs)          4701                 if (!wq->unbound_attrs)
5656                         goto err_free_wq;        4702                         goto err_free_wq;
5657         }                                        4703         }
5658                                                  4704 
5659         va_start(args, max_active);              4705         va_start(args, max_active);
5660         name_len = vsnprintf(wq->name, sizeof !! 4706         vsnprintf(wq->name, sizeof(wq->name), fmt, args);
5661         va_end(args);                            4707         va_end(args);
5662                                                  4708 
5663         if (name_len >= WQ_NAME_LEN)          !! 4709         max_active = max_active ?: WQ_DFL_ACTIVE;
5664                 pr_warn_once("workqueue: name !! 4710         max_active = wq_clamp_max_active(max_active, flags, wq->name);
5665                              wq->name);       << 
5666                                               << 
5667         if (flags & WQ_BH) {                  << 
5668                 /*                            << 
5669                  * BH workqueues always share << 
5670                  * and don't impose any max_a << 
5671                  */                           << 
5672                 max_active = INT_MAX;         << 
5673         } else {                              << 
5674                 max_active = max_active ?: WQ << 
5675                 max_active = wq_clamp_max_act << 
5676         }                                     << 
5677                                                  4711 
5678         /* init wq */                            4712         /* init wq */
5679         wq->flags = flags;                       4713         wq->flags = flags;
5680         wq->max_active = max_active;          !! 4714         wq->saved_max_active = max_active;
5681         wq->min_active = min(max_active, WQ_D << 
5682         wq->saved_max_active = wq->max_active << 
5683         wq->saved_min_active = wq->min_active << 
5684         mutex_init(&wq->mutex);                  4715         mutex_init(&wq->mutex);
5685         atomic_set(&wq->nr_pwqs_to_flush, 0);    4716         atomic_set(&wq->nr_pwqs_to_flush, 0);
5686         INIT_LIST_HEAD(&wq->pwqs);               4717         INIT_LIST_HEAD(&wq->pwqs);
5687         INIT_LIST_HEAD(&wq->flusher_queue);      4718         INIT_LIST_HEAD(&wq->flusher_queue);
5688         INIT_LIST_HEAD(&wq->flusher_overflow)    4719         INIT_LIST_HEAD(&wq->flusher_overflow);
5689         INIT_LIST_HEAD(&wq->maydays);            4720         INIT_LIST_HEAD(&wq->maydays);
5690                                                  4721 
5691         wq_init_lockdep(wq);                     4722         wq_init_lockdep(wq);
5692         INIT_LIST_HEAD(&wq->list);               4723         INIT_LIST_HEAD(&wq->list);
5693                                                  4724 
5694         if (flags & WQ_UNBOUND) {             !! 4725         if (alloc_and_link_pwqs(wq) < 0)
5695                 if (alloc_node_nr_active(wq-> !! 4726                 goto err_unreg_lockdep;
5696                         goto err_unreg_lockde !! 4727 
5697         }                                     !! 4728         if (wq_online && init_rescuer(wq) < 0)
                                                   >> 4729                 goto err_destroy;
                                                   >> 4730 
                                                   >> 4731         if ((wq->flags & WQ_SYSFS) && workqueue_sysfs_register(wq))
                                                   >> 4732                 goto err_destroy;
5698                                                  4733 
5699         /*                                       4734         /*
5700          * wq_pool_mutex protects the workque !! 4735          * wq_pool_mutex protects global freeze state and workqueues list.
5701          * and the global freeze state.       !! 4736          * Grab it, adjust max_active and add the new @wq to workqueues
                                                   >> 4737          * list.
5702          */                                      4738          */
5703         apply_wqattrs_lock();                 !! 4739         mutex_lock(&wq_pool_mutex);
5704                                               << 
5705         if (alloc_and_link_pwqs(wq) < 0)      << 
5706                 goto err_unlock_free_node_nr_ << 
5707                                                  4740 
5708         mutex_lock(&wq->mutex);                  4741         mutex_lock(&wq->mutex);
5709         wq_adjust_max_active(wq);             !! 4742         for_each_pwq(pwq, wq)
                                                   >> 4743                 pwq_adjust_max_active(pwq);
5710         mutex_unlock(&wq->mutex);                4744         mutex_unlock(&wq->mutex);
5711                                                  4745 
5712         list_add_tail_rcu(&wq->list, &workque    4746         list_add_tail_rcu(&wq->list, &workqueues);
5713                                                  4747 
5714         if (wq_online && init_rescuer(wq) < 0 !! 4748         mutex_unlock(&wq_pool_mutex);
5715                 goto err_unlock_destroy;      << 
5716                                               << 
5717         apply_wqattrs_unlock();               << 
5718                                               << 
5719         if ((wq->flags & WQ_SYSFS) && workque << 
5720                 goto err_destroy;             << 
5721                                                  4749 
5722         return wq;                               4750         return wq;
5723                                                  4751 
5724 err_unlock_free_node_nr_active:               << 
5725         apply_wqattrs_unlock();               << 
5726         /*                                    << 
5727          * Failed alloc_and_link_pwqs() may l << 
5728          * flushing the pwq_release_worker en << 
5729          * completes before calling kfree(wq) << 
5730          */                                   << 
5731         if (wq->flags & WQ_UNBOUND) {         << 
5732                 kthread_flush_worker(pwq_rele << 
5733                 free_node_nr_active(wq->node_ << 
5734         }                                     << 
5735 err_unreg_lockdep:                               4752 err_unreg_lockdep:
5736         wq_unregister_lockdep(wq);               4753         wq_unregister_lockdep(wq);
5737         wq_free_lockdep(wq);                     4754         wq_free_lockdep(wq);
5738 err_free_wq:                                     4755 err_free_wq:
5739         free_workqueue_attrs(wq->unbound_attr    4756         free_workqueue_attrs(wq->unbound_attrs);
5740         kfree(wq);                               4757         kfree(wq);
5741         return NULL;                             4758         return NULL;
5742 err_unlock_destroy:                           << 
5743         apply_wqattrs_unlock();               << 
5744 err_destroy:                                     4759 err_destroy:
5745         destroy_workqueue(wq);                   4760         destroy_workqueue(wq);
5746         return NULL;                             4761         return NULL;
5747 }                                                4762 }
5748 EXPORT_SYMBOL_GPL(alloc_workqueue);              4763 EXPORT_SYMBOL_GPL(alloc_workqueue);
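/*
 * [Editor's sketch, not part of workqueue.c] Minimal caller-side example of
 * alloc_workqueue(), assuming a module context with <linux/workqueue.h> and
 * <linux/init.h> available. The names "example_wq" and "example_init" are
 * hypothetical. A max_active of 0 selects the default (WQ_DFL_ACTIVE), and
 * WQ_MEM_RECLAIM asks for a rescuer so the queue can make forward progress
 * under memory pressure.
 */
static struct workqueue_struct *example_wq;

static int __init example_init(void)
{
	example_wq = alloc_workqueue("example_wq", WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
	if (!example_wq)
		return -ENOMEM;
	return 0;
}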
5749                                                  4764 
5750 static bool pwq_busy(struct pool_workqueue *p    4765 static bool pwq_busy(struct pool_workqueue *pwq)
5751 {                                                4766 {
5752         int i;                                   4767         int i;
5753                                                  4768 
5754         for (i = 0; i < WORK_NR_COLORS; i++)     4769         for (i = 0; i < WORK_NR_COLORS; i++)
5755                 if (pwq->nr_in_flight[i])        4770                 if (pwq->nr_in_flight[i])
5756                         return true;             4771                         return true;
5757                                                  4772 
5758         if ((pwq != rcu_access_pointer(pwq->w !! 4773         if ((pwq != pwq->wq->dfl_pwq) && (pwq->refcnt > 1))
5759                 return true;                     4774                 return true;
5760         if (!pwq_is_empty(pwq))               !! 4775         if (pwq->nr_active || !list_empty(&pwq->inactive_works))
5761                 return true;                     4776                 return true;
5762                                                  4777 
5763         return false;                            4778         return false;
5764 }                                                4779 }
5765                                                  4780 
5766 /**                                              4781 /**
5767  * destroy_workqueue - safely terminate a wor    4782  * destroy_workqueue - safely terminate a workqueue
5768  * @wq: target workqueue                         4783  * @wq: target workqueue
5769  *                                               4784  *
5770  * Safely destroy a workqueue. All work curre    4785  * Safely destroy a workqueue. All work currently pending will be done first.
5771  */                                              4786  */
5772 void destroy_workqueue(struct workqueue_struc    4787 void destroy_workqueue(struct workqueue_struct *wq)
5773 {                                                4788 {
5774         struct pool_workqueue *pwq;              4789         struct pool_workqueue *pwq;
5775         int cpu;                                 4790         int cpu;
5776                                                  4791 
5777         /*                                       4792         /*
5778          * Remove it from sysfs first so that    4793          * Remove it from sysfs first so that sanity check failure doesn't
5779          * lead to sysfs name conflicts.         4794          * lead to sysfs name conflicts.
5780          */                                      4795          */
5781         workqueue_sysfs_unregister(wq);          4796         workqueue_sysfs_unregister(wq);
5782                                                  4797 
5783         /* mark that workqueue destruction is    4798         /* mark that workqueue destruction is in progress */
5784         mutex_lock(&wq->mutex);                  4799         mutex_lock(&wq->mutex);
5785         wq->flags |= __WQ_DESTROYING;            4800         wq->flags |= __WQ_DESTROYING;
5786         mutex_unlock(&wq->mutex);                4801         mutex_unlock(&wq->mutex);
5787                                                  4802 
5788         /* drain it before proceeding with de    4803         /* drain it before proceeding with destruction */
5789         drain_workqueue(wq);                     4804         drain_workqueue(wq);
5790                                                  4805 
5791         /* kill rescuer, if sanity checks fai    4806         /* kill rescuer, if sanity checks fail, leave it w/o rescuer */
5792         if (wq->rescuer) {                       4807         if (wq->rescuer) {
5793                 struct worker *rescuer = wq->    4808                 struct worker *rescuer = wq->rescuer;
5794                                                  4809 
5795                 /* this prevents new queueing    4810                 /* this prevents new queueing */
5796                 raw_spin_lock_irq(&wq_mayday_    4811                 raw_spin_lock_irq(&wq_mayday_lock);
5797                 wq->rescuer = NULL;              4812                 wq->rescuer = NULL;
5798                 raw_spin_unlock_irq(&wq_mayda    4813                 raw_spin_unlock_irq(&wq_mayday_lock);
5799                                                  4814 
5800                 /* rescuer will empty maydays    4815                 /* rescuer will empty maydays list before exiting */
5801                 kthread_stop(rescuer->task);     4816                 kthread_stop(rescuer->task);
5802                 kfree(rescuer);                  4817                 kfree(rescuer);
5803         }                                        4818         }
5804                                                  4819 
5805         /*                                       4820         /*
5806          * Sanity checks - grab all the locks    4821          * Sanity checks - grab all the locks so that we wait for all
5807          * in-flight operations which may do     4822          * in-flight operations which may do put_pwq().
5808          */                                      4823          */
5809         mutex_lock(&wq_pool_mutex);              4824         mutex_lock(&wq_pool_mutex);
5810         mutex_lock(&wq->mutex);                  4825         mutex_lock(&wq->mutex);
5811         for_each_pwq(pwq, wq) {                  4826         for_each_pwq(pwq, wq) {
5812                 raw_spin_lock_irq(&pwq->pool-    4827                 raw_spin_lock_irq(&pwq->pool->lock);
5813                 if (WARN_ON(pwq_busy(pwq))) {    4828                 if (WARN_ON(pwq_busy(pwq))) {
5814                         pr_warn("%s: %s has t    4829                         pr_warn("%s: %s has the following busy pwq\n",
5815                                 __func__, wq-    4830                                 __func__, wq->name);
5816                         show_pwq(pwq);           4831                         show_pwq(pwq);
5817                         raw_spin_unlock_irq(&    4832                         raw_spin_unlock_irq(&pwq->pool->lock);
5818                         mutex_unlock(&wq->mut    4833                         mutex_unlock(&wq->mutex);
5819                         mutex_unlock(&wq_pool    4834                         mutex_unlock(&wq_pool_mutex);
5820                         show_one_workqueue(wq    4835                         show_one_workqueue(wq);
5821                         return;                  4836                         return;
5822                 }                                4837                 }
5823                 raw_spin_unlock_irq(&pwq->poo    4838                 raw_spin_unlock_irq(&pwq->pool->lock);
5824         }                                        4839         }
5825         mutex_unlock(&wq->mutex);                4840         mutex_unlock(&wq->mutex);
5826                                                  4841 
5827         /*                                       4842         /*
5828          * wq list is used to freeze wq, remo    4843          * wq list is used to freeze wq, remove from list after
5829          * flushing is complete in case freez    4844          * flushing is complete in case freeze races us.
5830          */                                      4845          */
5831         list_del_rcu(&wq->list);                 4846         list_del_rcu(&wq->list);
5832         mutex_unlock(&wq_pool_mutex);            4847         mutex_unlock(&wq_pool_mutex);
5833                                                  4848 
5834         /*                                       4849         /*
5835          * We're the sole accessor of @wq. Di    4850          * We're the sole accessor of @wq. Directly access cpu_pwq and dfl_pwq
5836          * to put the base refs. @wq will be     4851          * to put the base refs. @wq will be auto-destroyed from the last
5837          * pwq_put. RCU read lock prevents @w    4852          * pwq_put. RCU read lock prevents @wq from going away from under us.
5838          */                                      4853          */
5839         rcu_read_lock();                         4854         rcu_read_lock();
5840                                                  4855 
5841         for_each_possible_cpu(cpu) {             4856         for_each_possible_cpu(cpu) {
5842                 put_pwq_unlocked(unbound_pwq( !! 4857                 pwq = rcu_access_pointer(*per_cpu_ptr(wq->cpu_pwq, cpu));
5843                 RCU_INIT_POINTER(*unbound_pwq !! 4858                 RCU_INIT_POINTER(*per_cpu_ptr(wq->cpu_pwq, cpu), NULL);
                                                   >> 4859                 put_pwq_unlocked(pwq);
5844         }                                        4860         }
5845                                                  4861 
5846         put_pwq_unlocked(unbound_pwq(wq, -1)) !! 4862         put_pwq_unlocked(wq->dfl_pwq);
5847         RCU_INIT_POINTER(*unbound_pwq_slot(wq !! 4863         wq->dfl_pwq = NULL;
5848                                                  4864 
5849         rcu_read_unlock();                       4865         rcu_read_unlock();
5850 }                                                4866 }
5851 EXPORT_SYMBOL_GPL(destroy_workqueue);            4867 EXPORT_SYMBOL_GPL(destroy_workqueue);
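/*
 * [Editor's sketch, not part of workqueue.c] Matching teardown for the
 * hypothetical example_wq above. destroy_workqueue() drains pending and
 * in-flight work itself, so no separate flush_workqueue() call is needed
 * first; the caller only has to guarantee nothing queues new work afterwards.
 */
static void __exit example_exit(void)
{
	destroy_workqueue(example_wq);
	example_wq = NULL;
}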
5852                                                  4868 
5853 /**                                              4869 /**
5854  * workqueue_set_max_active - adjust max_acti    4870  * workqueue_set_max_active - adjust max_active of a workqueue
5855  * @wq: target workqueue                         4871  * @wq: target workqueue
5856  * @max_active: new max_active value.            4872  * @max_active: new max_active value.
5857  *                                               4873  *
5858  * Set max_active of @wq to @max_active. See  !! 4874  * Set max_active of @wq to @max_active.
5859  * comment.                                   << 
5860  *                                               4875  *
5861  * CONTEXT:                                      4876  * CONTEXT:
5862  * Don't call from IRQ context.                  4877  * Don't call from IRQ context.
5863  */                                              4878  */
5864 void workqueue_set_max_active(struct workqueu    4879 void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
5865 {                                                4880 {
5866         /* max_active doesn't mean anything f !! 4881         struct pool_workqueue *pwq;
5867         if (WARN_ON(wq->flags & WQ_BH))       !! 4882 
5868                 return;                       << 
5869         /* disallow meddling with max_active     4883         /* disallow meddling with max_active for ordered workqueues */
5870         if (WARN_ON(wq->flags & __WQ_ORDERED) !! 4884         if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
5871                 return;                          4885                 return;
5872                                                  4886 
5873         max_active = wq_clamp_max_active(max_    4887         max_active = wq_clamp_max_active(max_active, wq->flags, wq->name);
5874                                                  4888 
5875         mutex_lock(&wq->mutex);                  4889         mutex_lock(&wq->mutex);
5876                                                  4890 
                                                   >> 4891         wq->flags &= ~__WQ_ORDERED;
5877         wq->saved_max_active = max_active;       4892         wq->saved_max_active = max_active;
5878         if (wq->flags & WQ_UNBOUND)           << 
5879                 wq->saved_min_active = min(wq << 
5880                                                  4893 
5881         wq_adjust_max_active(wq);             !! 4894         for_each_pwq(pwq, wq)
                                                   >> 4895                 pwq_adjust_max_active(pwq);
5882                                                  4896 
5883         mutex_unlock(&wq->mutex);                4897         mutex_unlock(&wq->mutex);
5884 }                                                4898 }
5885 EXPORT_SYMBOL_GPL(workqueue_set_max_active);     4899 EXPORT_SYMBOL_GPL(workqueue_set_max_active);
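/*
 * [Editor's sketch, not part of workqueue.c] Run-time concurrency tuning,
 * e.g. from a hypothetical sysfs store handler. The value is clamped to
 * [1, WQ_MAX_ACTIVE] by wq_clamp_max_active(), and the call is rejected for
 * ordered and BH workqueues, as the WARN_ONs above show.
 */
static void example_set_concurrency(struct workqueue_struct *wq, int n)
{
	workqueue_set_max_active(wq, n);	/* takes effect immediately */
}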
5886                                                  4900 
5887 /**                                              4901 /**
5888  * workqueue_set_min_active - adjust min_acti << 
5889  * @wq: target unbound workqueue              << 
5890  * @min_active: new min_active value          << 
5891  *                                            << 
5892  * Set min_active of an unbound workqueue. Un << 
5893  * unbound workqueue is not guaranteed to be  << 
5894  * interdependent work items. Instead, an unb << 
5895  * able to process min_active number of inter << 
5896  * %WQ_DFL_MIN_ACTIVE by default.             << 
5897  *                                            << 
5898  * Use this function to adjust the min_active << 
5899  * max_active.                                << 
5900  */                                           << 
5901 void workqueue_set_min_active(struct workqueu << 
5902 {                                             << 
5903         /* min_active is only meaningful for  << 
5904         if (WARN_ON((wq->flags & (WQ_BH | WQ_ << 
5905                     WQ_UNBOUND))              << 
5906                 return;                       << 
5907                                               << 
5908         mutex_lock(&wq->mutex);               << 
5909         wq->saved_min_active = clamp(min_acti << 
5910         wq_adjust_max_active(wq);             << 
5911         mutex_unlock(&wq->mutex);             << 
5912 }                                             << 
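/*
 * [Editor's sketch, not part of workqueue.c] min_active is only meaningful
 * for unbound workqueues and, unlike workqueue_set_max_active(), this helper
 * is not exported, so the hypothetical caller below would have to be
 * built-in. The value 4 is arbitrary.
 */
static void example_reserve_concurrency(struct workqueue_struct *unbound_wq)
{
	/* guarantee forward progress for up to 4 interdependent work items */
	workqueue_set_min_active(unbound_wq, 4);
}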
5913                                               << 
5914 /**                                           << 
5915  * current_work - retrieve %current task's wo    4902  * current_work - retrieve %current task's work struct
5916  *                                               4903  *
5917  * Determine if %current task is a workqueue     4904  * Determine if %current task is a workqueue worker and what it's working on.
5918  * Useful to find out the context that the %c    4905  * Useful to find out the context that the %current task is running in.
5919  *                                               4906  *
5920  * Return: work struct if %current task is a     4907  * Return: work struct if %current task is a workqueue worker, %NULL otherwise.
5921  */                                              4908  */
5922 struct work_struct *current_work(void)           4909 struct work_struct *current_work(void)
5923 {                                                4910 {
5924         struct worker *worker = current_wq_wo    4911         struct worker *worker = current_wq_worker();
5925                                                  4912 
5926         return worker ? worker->current_work     4913         return worker ? worker->current_work : NULL;
5927 }                                                4914 }
5928 EXPORT_SYMBOL(current_work);                     4915 EXPORT_SYMBOL(current_work);
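/*
 * [Editor's sketch, not part of workqueue.c] current_work() lets shared code
 * detect that it is running from a particular work item, e.g. to avoid
 * flushing the very work item it is executing. "example_work" is a
 * hypothetical caller-owned work_struct.
 */
static bool example_called_from_own_work(struct work_struct *example_work)
{
	return current_work() == example_work;
}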
5929                                                  4916 
5930 /**                                              4917 /**
5931  * current_is_workqueue_rescuer - is %current    4918  * current_is_workqueue_rescuer - is %current workqueue rescuer?
5932  *                                               4919  *
5933  * Determine whether %current is a workqueue     4920  * Determine whether %current is a workqueue rescuer.  Can be used from
5934  * work functions to determine whether it's b    4921  * work functions to determine whether it's being run off the rescuer task.
5935  *                                               4922  *
5936  * Return: %true if %current is a workqueue r    4923  * Return: %true if %current is a workqueue rescuer. %false otherwise.
5937  */                                              4924  */
5938 bool current_is_workqueue_rescuer(void)          4925 bool current_is_workqueue_rescuer(void)
5939 {                                                4926 {
5940         struct worker *worker = current_wq_wo    4927         struct worker *worker = current_wq_worker();
5941                                                  4928 
5942         return worker && worker->rescue_wq;      4929         return worker && worker->rescue_wq;
5943 }                                                4930 }
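/*
 * [Editor's sketch, not part of workqueue.c] A work function can behave more
 * conservatively when it finds itself running off the rescuer, which only
 * happens under memory pressure. The GFP policy below is just an example;
 * kmalloc() needs <linux/slab.h>.
 */
static void *example_alloc(size_t size)
{
	gfp_t gfp = current_is_workqueue_rescuer() ? GFP_NOWAIT : GFP_KERNEL;

	return kmalloc(size, gfp);
}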
5944                                                  4931 
5945 /**                                              4932 /**
5946  * workqueue_congested - test whether a workq    4933  * workqueue_congested - test whether a workqueue is congested
5947  * @cpu: CPU in question                         4934  * @cpu: CPU in question
5948  * @wq: target workqueue                         4935  * @wq: target workqueue
5949  *                                               4936  *
5950  * Test whether @wq's cpu workqueue for @cpu     4937  * Test whether @wq's cpu workqueue for @cpu is congested.  There is
5951  * no synchronization around this function an    4938  * no synchronization around this function and the test result is
5952  * unreliable and only useful as advisory hin    4939  * unreliable and only useful as advisory hints or for debugging.
5953  *                                               4940  *
5954  * If @cpu is WORK_CPU_UNBOUND, the test is p    4941  * If @cpu is WORK_CPU_UNBOUND, the test is performed on the local CPU.
5955  *                                               4942  *
5956  * With the exception of ordered workqueues,     4943  * With the exception of ordered workqueues, all workqueues have per-cpu
5957  * pool_workqueues, each with its own congest    4944  * pool_workqueues, each with its own congested state. A workqueue being
5958  * congested on one CPU doesn't mean that the    4945  * congested on one CPU doesn't mean that the workqueue is congested on any
5959  * other CPUs.                                   4946  * other CPUs.
5960  *                                               4947  *
5961  * Return:                                       4948  * Return:
5962  * %true if congested, %false otherwise.         4949  * %true if congested, %false otherwise.
5963  */                                              4950  */
5964 bool workqueue_congested(int cpu, struct work    4951 bool workqueue_congested(int cpu, struct workqueue_struct *wq)
5965 {                                                4952 {
5966         struct pool_workqueue *pwq;              4953         struct pool_workqueue *pwq;
5967         bool ret;                                4954         bool ret;
5968                                                  4955 
5969         rcu_read_lock();                         4956         rcu_read_lock();
5970         preempt_disable();                       4957         preempt_disable();
5971                                                  4958 
5972         if (cpu == WORK_CPU_UNBOUND)             4959         if (cpu == WORK_CPU_UNBOUND)
5973                 cpu = smp_processor_id();        4960                 cpu = smp_processor_id();
5974                                                  4961 
5975         pwq = *per_cpu_ptr(wq->cpu_pwq, cpu);    4962         pwq = *per_cpu_ptr(wq->cpu_pwq, cpu);
5976         ret = !list_empty(&pwq->inactive_work    4963         ret = !list_empty(&pwq->inactive_works);
5977                                                  4964 
5978         preempt_enable();                        4965         preempt_enable();
5979         rcu_read_unlock();                       4966         rcu_read_unlock();
5980                                                  4967 
5981         return ret;                              4968         return ret;
5982 }                                                4969 }
5983 EXPORT_SYMBOL_GPL(workqueue_congested);          4970 EXPORT_SYMBOL_GPL(workqueue_congested);
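/*
 * [Editor's sketch, not part of workqueue.c] Because the result is only an
 * advisory hint, a typical use is load shedding: skip optional work while
 * the local CPU's pool_workqueue still has inactive (queued but throttled)
 * items. "wq" is a hypothetical caller-owned workqueue.
 */
static bool example_should_queue_optional_work(struct workqueue_struct *wq)
{
	return !workqueue_congested(WORK_CPU_UNBOUND, wq);
}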
5984                                                  4971 
5985 /**                                              4972 /**
5986  * work_busy - test whether a work is current    4973  * work_busy - test whether a work is currently pending or running
5987  * @work: the work to be tested                  4974  * @work: the work to be tested
5988  *                                               4975  *
5989  * Test whether @work is currently pending or    4976  * Test whether @work is currently pending or running.  There is no
5990  * synchronization around this function and t    4977  * synchronization around this function and the test result is
5991  * unreliable and only useful as advisory hin    4978  * unreliable and only useful as advisory hints or for debugging.
5992  *                                               4979  *
5993  * Return:                                       4980  * Return:
5994  * OR'd bitmask of WORK_BUSY_* bits.             4981  * OR'd bitmask of WORK_BUSY_* bits.
5995  */                                              4982  */
5996 unsigned int work_busy(struct work_struct *wo    4983 unsigned int work_busy(struct work_struct *work)
5997 {                                                4984 {
5998         struct worker_pool *pool;                4985         struct worker_pool *pool;
5999         unsigned long irq_flags;              !! 4986         unsigned long flags;
6000         unsigned int ret = 0;                    4987         unsigned int ret = 0;
6001                                                  4988 
6002         if (work_pending(work))                  4989         if (work_pending(work))
6003                 ret |= WORK_BUSY_PENDING;        4990                 ret |= WORK_BUSY_PENDING;
6004                                                  4991 
6005         rcu_read_lock();                         4992         rcu_read_lock();
6006         pool = get_work_pool(work);              4993         pool = get_work_pool(work);
6007         if (pool) {                              4994         if (pool) {
6008                 raw_spin_lock_irqsave(&pool-> !! 4995                 raw_spin_lock_irqsave(&pool->lock, flags);
6009                 if (find_worker_executing_wor    4996                 if (find_worker_executing_work(pool, work))
6010                         ret |= WORK_BUSY_RUNN    4997                         ret |= WORK_BUSY_RUNNING;
6011                 raw_spin_unlock_irqrestore(&p !! 4998                 raw_spin_unlock_irqrestore(&pool->lock, flags);
6012         }                                        4999         }
6013         rcu_read_unlock();                       5000         rcu_read_unlock();
6014                                                  5001 
6015         return ret;                              5002         return ret;
6016 }                                                5003 }
6017 EXPORT_SYMBOL_GPL(work_busy);                    5004 EXPORT_SYMBOL_GPL(work_busy);
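/*
 * [Editor's sketch, not part of workqueue.c] work_busy() is a debugging aid;
 * the bitmask is unsynchronized and may be stale by the time it is printed.
 * "example_work" is a hypothetical caller-owned work_struct.
 */
static void example_report(struct work_struct *example_work)
{
	unsigned int busy = work_busy(example_work);

	pr_info("work %ps: pending=%d running=%d\n", example_work->func,
		!!(busy & WORK_BUSY_PENDING), !!(busy & WORK_BUSY_RUNNING));
}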
6018                                                  5005 
6019 /**                                              5006 /**
6020  * set_worker_desc - set description for the     5007  * set_worker_desc - set description for the current work item
6021  * @fmt: printf-style format string              5008  * @fmt: printf-style format string
6022  * @...: arguments for the format string         5009  * @...: arguments for the format string
6023  *                                               5010  *
6024  * This function can be called by a running w    5011  * This function can be called by a running work function to describe what
6025  * the work item is about.  If the worker tas    5012  * the work item is about.  If the worker task gets dumped, this
6026  * information will be printed out together t    5013  * information will be printed out together to help debugging.  The
6027  * description can be at most WORKER_DESC_LEN    5014  * description can be at most WORKER_DESC_LEN including the trailing '\0'.
6028  */                                              5015  */
6029 void set_worker_desc(const char *fmt, ...)       5016 void set_worker_desc(const char *fmt, ...)
6030 {                                                5017 {
6031         struct worker *worker = current_wq_wo    5018         struct worker *worker = current_wq_worker();
6032         va_list args;                            5019         va_list args;
6033                                                  5020 
6034         if (worker) {                            5021         if (worker) {
6035                 va_start(args, fmt);             5022                 va_start(args, fmt);
6036                 vsnprintf(worker->desc, sizeo    5023                 vsnprintf(worker->desc, sizeof(worker->desc), fmt, args);
6037                 va_end(args);                    5024                 va_end(args);
6038         }                                        5025         }
6039 }                                                5026 }
6040 EXPORT_SYMBOL_GPL(set_worker_desc);              5027 EXPORT_SYMBOL_GPL(set_worker_desc);
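/*
 * [Editor's sketch, not part of workqueue.c] A work function can record what
 * it is currently operating on so that print_worker_info() below includes it
 * in task dumps. The device name string is hypothetical; the description is
 * truncated to WORKER_DESC_LEN bytes including the trailing '\0'.
 */
static void example_work_fn(struct work_struct *work)
{
	set_worker_desc("flushing %s", "example-dev0");
	/* ... do the actual work for the device ... */
}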
6041                                                  5028 
6042 /**                                              5029 /**
6043  * print_worker_info - print out worker infor    5030  * print_worker_info - print out worker information and description
6044  * @log_lvl: the log level to use when printi    5031  * @log_lvl: the log level to use when printing
6045  * @task: target task                            5032  * @task: target task
6046  *                                               5033  *
6047  * If @task is a worker and currently executi    5034  * If @task is a worker and currently executing a work item, print out the
6048  * name of the workqueue being serviced and w    5035  * name of the workqueue being serviced and worker description set with
6049  * set_worker_desc() by the currently executi    5036  * set_worker_desc() by the currently executing work item.
6050  *                                               5037  *
6051  * This function can be safely called on any     5038  * This function can be safely called on any task as long as the
6052  * task_struct itself is accessible.  While s    5039  * task_struct itself is accessible.  While safe, this function isn't
6053  * synchronized and may print out mixups or g    5040  * synchronized and may print out mixups or garbage of limited length.
6054  */                                              5041  */
6055 void print_worker_info(const char *log_lvl, s    5042 void print_worker_info(const char *log_lvl, struct task_struct *task)
6056 {                                                5043 {
6057         work_func_t *fn = NULL;                  5044         work_func_t *fn = NULL;
6058         char name[WQ_NAME_LEN] = { };            5045         char name[WQ_NAME_LEN] = { };
6059         char desc[WORKER_DESC_LEN] = { };        5046         char desc[WORKER_DESC_LEN] = { };
6060         struct pool_workqueue *pwq = NULL;       5047         struct pool_workqueue *pwq = NULL;
6061         struct workqueue_struct *wq = NULL;      5048         struct workqueue_struct *wq = NULL;
6062         struct worker *worker;                   5049         struct worker *worker;
6063                                                  5050 
6064         if (!(task->flags & PF_WQ_WORKER))       5051         if (!(task->flags & PF_WQ_WORKER))
6065                 return;                          5052                 return;
6066                                                  5053 
6067         /*                                       5054         /*
6068          * This function is called without an    5055          * This function is called without any synchronization and @task
6069          * could be in any state.  Be careful    5056          * could be in any state.  Be careful with dereferences.
6070          */                                      5057          */
6071         worker = kthread_probe_data(task);       5058         worker = kthread_probe_data(task);
6072                                                  5059 
6073         /*                                       5060         /*
6074          * Carefully copy the associated work    5061          * Carefully copy the associated workqueue's workfn, name and desc.
6075          * Keep the original last '\0' in cas    5062          * Keep the original last '\0' in case the original is garbage.
6076          */                                      5063          */
6077         copy_from_kernel_nofault(&fn, &worker    5064         copy_from_kernel_nofault(&fn, &worker->current_func, sizeof(fn));
6078         copy_from_kernel_nofault(&pwq, &worke    5065         copy_from_kernel_nofault(&pwq, &worker->current_pwq, sizeof(pwq));
6079         copy_from_kernel_nofault(&wq, &pwq->w    5066         copy_from_kernel_nofault(&wq, &pwq->wq, sizeof(wq));
6080         copy_from_kernel_nofault(name, wq->na    5067         copy_from_kernel_nofault(name, wq->name, sizeof(name) - 1);
6081         copy_from_kernel_nofault(desc, worker    5068         copy_from_kernel_nofault(desc, worker->desc, sizeof(desc) - 1);
6082                                                  5069 
6083         if (fn || name[0] || desc[0]) {          5070         if (fn || name[0] || desc[0]) {
6084                 printk("%sWorkqueue: %s %ps",    5071                 printk("%sWorkqueue: %s %ps", log_lvl, name, fn);
6085                 if (strcmp(name, desc))          5072                 if (strcmp(name, desc))
6086                         pr_cont(" (%s)", desc    5073                         pr_cont(" (%s)", desc);
6087                 pr_cont("\n");                   5074                 pr_cont("\n");
6088         }                                        5075         }
6089 }                                                5076 }
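/*
 * [Editor's sketch, not part of workqueue.c] print_worker_info() is normally
 * reached via task-dump paths such as sched_show_task(); a debugging caller
 * only needs a printk log level and the task of interest. It silently does
 * nothing when @task is not a workqueue worker.
 */
static void example_dump_worker(struct task_struct *task)
{
	print_worker_info(KERN_INFO, task);
}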
6090                                                  5077 
6091 static void pr_cont_pool_info(struct worker_p    5078 static void pr_cont_pool_info(struct worker_pool *pool)
6092 {                                                5079 {
6093         pr_cont(" cpus=%*pbl", nr_cpumask_bit    5080         pr_cont(" cpus=%*pbl", nr_cpumask_bits, pool->attrs->cpumask);
6094         if (pool->node != NUMA_NO_NODE)          5081         if (pool->node != NUMA_NO_NODE)
6095                 pr_cont(" node=%d", pool->nod    5082                 pr_cont(" node=%d", pool->node);
6096         pr_cont(" flags=0x%x", pool->flags);  !! 5083         pr_cont(" flags=0x%x nice=%d", pool->flags, pool->attrs->nice);
6097         if (pool->flags & POOL_BH)            << 
6098                 pr_cont(" bh%s",              << 
6099                         pool->attrs->nice ==  << 
6100         else                                  << 
6101                 pr_cont(" nice=%d", pool->att << 
6102 }                                             << 
6103                                               << 
6104 static void pr_cont_worker_id(struct worker * << 
6105 {                                             << 
6106         struct worker_pool *pool = worker->po << 
6107                                               << 
6108         if (pool->flags & POOL_BH)            << 
6109                 pr_cont("bh%s",               << 
6110                         pool->attrs->nice ==  << 
6111         else                                  << 
6112                 pr_cont("%d%s", task_pid_nr(w << 
6113                         worker->rescue_wq ? " << 
6114 }                                                5084 }
6115                                                  5085 
6116 struct pr_cont_work_struct {                     5086 struct pr_cont_work_struct {
6117         bool comma;                              5087         bool comma;
6118         work_func_t func;                        5088         work_func_t func;
6119         long ctr;                                5089         long ctr;
6120 };                                               5090 };
6121                                                  5091 
6122 static void pr_cont_work_flush(bool comma, wo    5092 static void pr_cont_work_flush(bool comma, work_func_t func, struct pr_cont_work_struct *pcwsp)
6123 {                                                5093 {
6124         if (!pcwsp->ctr)                         5094         if (!pcwsp->ctr)
6125                 goto out_record;                 5095                 goto out_record;
6126         if (func == pcwsp->func) {               5096         if (func == pcwsp->func) {
6127                 pcwsp->ctr++;                    5097                 pcwsp->ctr++;
6128                 return;                          5098                 return;
6129         }                                        5099         }
6130         if (pcwsp->ctr == 1)                     5100         if (pcwsp->ctr == 1)
6131                 pr_cont("%s %ps", pcwsp->comm    5101                 pr_cont("%s %ps", pcwsp->comma ? "," : "", pcwsp->func);
6132         else                                     5102         else
6133                 pr_cont("%s %ld*%ps", pcwsp->    5103                 pr_cont("%s %ld*%ps", pcwsp->comma ? "," : "", pcwsp->ctr, pcwsp->func);
6134         pcwsp->ctr = 0;                          5104         pcwsp->ctr = 0;
6135 out_record:                                      5105 out_record:
6136         if ((long)func == -1L)                   5106         if ((long)func == -1L)
6137                 return;                          5107                 return;
6138         pcwsp->comma = comma;                    5108         pcwsp->comma = comma;
6139         pcwsp->func = func;                      5109         pcwsp->func = func;
6140         pcwsp->ctr = 1;                          5110         pcwsp->ctr = 1;
6141 }                                                5111 }
6142                                                  5112 
6143 static void pr_cont_work(bool comma, struct w    5113 static void pr_cont_work(bool comma, struct work_struct *work, struct pr_cont_work_struct *pcwsp)
6144 {                                                5114 {
6145         if (work->func == wq_barrier_func) {     5115         if (work->func == wq_barrier_func) {
6146                 struct wq_barrier *barr;         5116                 struct wq_barrier *barr;
6147                                                  5117 
6148                 barr = container_of(work, str    5118                 barr = container_of(work, struct wq_barrier, work);
6149                                                  5119 
6150                 pr_cont_work_flush(comma, (wo    5120                 pr_cont_work_flush(comma, (work_func_t)-1, pcwsp);
6151                 pr_cont("%s BAR(%d)", comma ?    5121                 pr_cont("%s BAR(%d)", comma ? "," : "",
6152                         task_pid_nr(barr->tas    5122                         task_pid_nr(barr->task));
6153         } else {                                 5123         } else {
6154                 if (!comma)                      5124                 if (!comma)
6155                         pr_cont_work_flush(co    5125                         pr_cont_work_flush(comma, (work_func_t)-1, pcwsp);
6156                 pr_cont_work_flush(comma, wor    5126                 pr_cont_work_flush(comma, work->func, pcwsp);
6157         }                                        5127         }
6158 }                                                5128 }
6159                                                  5129 
6160 static void show_pwq(struct pool_workqueue *p    5130 static void show_pwq(struct pool_workqueue *pwq)
6161 {                                                5131 {
6162         struct pr_cont_work_struct pcws = { .    5132         struct pr_cont_work_struct pcws = { .ctr = 0, };
6163         struct worker_pool *pool = pwq->pool;    5133         struct worker_pool *pool = pwq->pool;
6164         struct work_struct *work;                5134         struct work_struct *work;
6165         struct worker *worker;                   5135         struct worker *worker;
6166         bool has_in_flight = false, has_pendi    5136         bool has_in_flight = false, has_pending = false;
6167         int bkt;                                 5137         int bkt;
6168                                                  5138 
6169         pr_info("  pwq %d:", pool->id);          5139         pr_info("  pwq %d:", pool->id);
6170         pr_cont_pool_info(pool);                 5140         pr_cont_pool_info(pool);
6171                                                  5141 
6172         pr_cont(" active=%d refcnt=%d%s\n",   !! 5142         pr_cont(" active=%d/%d refcnt=%d%s\n",
6173                 pwq->nr_active, pwq->refcnt,  !! 5143                 pwq->nr_active, pwq->max_active, pwq->refcnt,
6174                 !list_empty(&pwq->mayday_node    5144                 !list_empty(&pwq->mayday_node) ? " MAYDAY" : "");
6175                                                  5145 
6176         hash_for_each(pool->busy_hash, bkt, w    5146         hash_for_each(pool->busy_hash, bkt, worker, hentry) {
6177                 if (worker->current_pwq == pw    5147                 if (worker->current_pwq == pwq) {
6178                         has_in_flight = true;    5148                         has_in_flight = true;
6179                         break;                   5149                         break;
6180                 }                                5150                 }
6181         }                                        5151         }
6182         if (has_in_flight) {                     5152         if (has_in_flight) {
6183                 bool comma = false;              5153                 bool comma = false;
6184                                                  5154 
6185                 pr_info("    in-flight:");       5155                 pr_info("    in-flight:");
6186                 hash_for_each(pool->busy_hash    5156                 hash_for_each(pool->busy_hash, bkt, worker, hentry) {
6187                         if (worker->current_p    5157                         if (worker->current_pwq != pwq)
6188                                 continue;        5158                                 continue;
6189                                                  5159 
6190                         pr_cont(" %s", comma  !! 5160                         pr_cont("%s %d%s:%ps", comma ? "," : "",
6191                         pr_cont_worker_id(wor !! 5161                                 task_pid_nr(worker->task),
6192                         pr_cont(":%ps", worke !! 5162                                 worker->rescue_wq ? "(RESCUER)" : "",
                                                   >> 5163                                 worker->current_func);
6193                         list_for_each_entry(w    5164                         list_for_each_entry(work, &worker->scheduled, entry)
6194                                 pr_cont_work(    5165                                 pr_cont_work(false, work, &pcws);
6195                         pr_cont_work_flush(co    5166                         pr_cont_work_flush(comma, (work_func_t)-1L, &pcws);
6196                         comma = true;            5167                         comma = true;
6197                 }                                5168                 }
6198                 pr_cont("\n");                   5169                 pr_cont("\n");
6199         }                                        5170         }
6200                                                  5171 
6201         list_for_each_entry(work, &pool->work    5172         list_for_each_entry(work, &pool->worklist, entry) {
6202                 if (get_work_pwq(work) == pwq    5173                 if (get_work_pwq(work) == pwq) {
6203                         has_pending = true;      5174                         has_pending = true;
6204                         break;                   5175                         break;
6205                 }                                5176                 }
6206         }                                        5177         }
6207         if (has_pending) {                       5178         if (has_pending) {
6208                 bool comma = false;              5179                 bool comma = false;
6209                                                  5180 
6210                 pr_info("    pending:");         5181                 pr_info("    pending:");
6211                 list_for_each_entry(work, &po    5182                 list_for_each_entry(work, &pool->worklist, entry) {
6212                         if (get_work_pwq(work    5183                         if (get_work_pwq(work) != pwq)
6213                                 continue;        5184                                 continue;
6214                                                  5185 
6215                         pr_cont_work(comma, w    5186                         pr_cont_work(comma, work, &pcws);
6216                         comma = !(*work_data_    5187                         comma = !(*work_data_bits(work) & WORK_STRUCT_LINKED);
6217                 }                                5188                 }
6218                 pr_cont_work_flush(comma, (wo    5189                 pr_cont_work_flush(comma, (work_func_t)-1L, &pcws);
6219                 pr_cont("\n");                   5190                 pr_cont("\n");
6220         }                                        5191         }
6221                                                  5192 
6222         if (!list_empty(&pwq->inactive_works)    5193         if (!list_empty(&pwq->inactive_works)) {
6223                 bool comma = false;              5194                 bool comma = false;
6224                                                  5195 
6225                 pr_info("    inactive:");        5196                 pr_info("    inactive:");
6226                 list_for_each_entry(work, &pw    5197                 list_for_each_entry(work, &pwq->inactive_works, entry) {
6227                         pr_cont_work(comma, w    5198                         pr_cont_work(comma, work, &pcws);
6228                         comma = !(*work_data_    5199                         comma = !(*work_data_bits(work) & WORK_STRUCT_LINKED);
6229                 }                                5200                 }
6230                 pr_cont_work_flush(comma, (wo    5201                 pr_cont_work_flush(comma, (work_func_t)-1L, &pcws);
6231                 pr_cont("\n");                   5202                 pr_cont("\n");
6232         }                                        5203         }
6233 }                                                5204 }
6234                                                  5205 
6235 /**                                              5206 /**
6236  * show_one_workqueue - dump state of specifi    5207  * show_one_workqueue - dump state of specified workqueue
6237  * @wq: workqueue whose state will be printed    5208  * @wq: workqueue whose state will be printed
6238  */                                              5209  */
6239 void show_one_workqueue(struct workqueue_stru    5210 void show_one_workqueue(struct workqueue_struct *wq)
6240 {                                                5211 {
6241         struct pool_workqueue *pwq;              5212         struct pool_workqueue *pwq;
6242         bool idle = true;                        5213         bool idle = true;
6243         unsigned long irq_flags;              !! 5214         unsigned long flags;
6244                                                  5215 
6245         for_each_pwq(pwq, wq) {                  5216         for_each_pwq(pwq, wq) {
6246                 if (!pwq_is_empty(pwq)) {     !! 5217                 if (pwq->nr_active || !list_empty(&pwq->inactive_works)) {
6247                         idle = false;            5218                         idle = false;
6248                         break;                   5219                         break;
6249                 }                                5220                 }
6250         }                                        5221         }
6251         if (idle) /* Nothing to print for idl    5222         if (idle) /* Nothing to print for idle workqueue */
6252                 return;                          5223                 return;
6253                                                  5224 
6254         pr_info("workqueue %s: flags=0x%x\n",    5225         pr_info("workqueue %s: flags=0x%x\n", wq->name, wq->flags);
6255                                                  5226 
6256         for_each_pwq(pwq, wq) {                  5227         for_each_pwq(pwq, wq) {
6257                 raw_spin_lock_irqsave(&pwq->p !! 5228                 raw_spin_lock_irqsave(&pwq->pool->lock, flags);
6258                 if (!pwq_is_empty(pwq)) {     !! 5229                 if (pwq->nr_active || !list_empty(&pwq->inactive_works)) {
6259                         /*                       5230                         /*
6260                          * Defer printing to     5231                          * Defer printing to avoid deadlocks in console
6261                          * drivers that queue    5232                          * drivers that queue work while holding locks
6262                          * also taken in thei    5233                          * also taken in their write paths.
6263                          */                      5234                          */
6264                         printk_deferred_enter    5235                         printk_deferred_enter();
6265                         show_pwq(pwq);           5236                         show_pwq(pwq);
6266                         printk_deferred_exit(    5237                         printk_deferred_exit();
6267                 }                                5238                 }
6268                 raw_spin_unlock_irqrestore(&p !! 5239                 raw_spin_unlock_irqrestore(&pwq->pool->lock, flags);
6269                 /*                               5240                 /*
6270                  * We could be printing a lot    5241                  * We could be printing a lot from atomic context, e.g.
6271                  * sysrq-t -> show_all_workqu    5242                  * sysrq-t -> show_all_workqueues(). Avoid triggering
6272                  * hard lockup.                  5243                  * hard lockup.
6273                  */                              5244                  */
6274                 touch_nmi_watchdog();            5245                 touch_nmi_watchdog();
6275         }                                        5246         }
6276                                                  5247 
6277 }                                                5248 }
6278                                                  5249 
6279 /**                                              5250 /**
6280  * show_one_worker_pool - dump state of speci    5251  * show_one_worker_pool - dump state of specified worker pool
6281  * @pool: worker pool whose state will be pri    5252  * @pool: worker pool whose state will be printed
6282  */                                              5253  */
6283 static void show_one_worker_pool(struct worke    5254 static void show_one_worker_pool(struct worker_pool *pool)
6284 {                                                5255 {
6285         struct worker *worker;                   5256         struct worker *worker;
6286         bool first = true;                       5257         bool first = true;
6287         unsigned long irq_flags;              !! 5258         unsigned long flags;
6288         unsigned long hung = 0;                  5259         unsigned long hung = 0;
6289                                                  5260 
6290         raw_spin_lock_irqsave(&pool->lock, ir !! 5261         raw_spin_lock_irqsave(&pool->lock, flags);
6291         if (pool->nr_workers == pool->nr_idle    5262         if (pool->nr_workers == pool->nr_idle)
6292                 goto next_pool;                  5263                 goto next_pool;
6293                                                  5264 
6294         /* How long the first pending work is    5265         /* How long the first pending work is waiting for a worker. */
6295         if (!list_empty(&pool->worklist))        5266         if (!list_empty(&pool->worklist))
6296                 hung = jiffies_to_msecs(jiffi    5267                 hung = jiffies_to_msecs(jiffies - pool->watchdog_ts) / 1000;
6297                                                  5268 
6298         /*                                       5269         /*
6299          * Defer printing to avoid deadlocks     5270          * Defer printing to avoid deadlocks in console drivers that
6300          * queue work while holding locks als    5271          * queue work while holding locks also taken in their write
6301          * paths.                                5272          * paths.
6302          */                                      5273          */
6303         printk_deferred_enter();                 5274         printk_deferred_enter();
6304         pr_info("pool %d:", pool->id);           5275         pr_info("pool %d:", pool->id);
6305         pr_cont_pool_info(pool);                 5276         pr_cont_pool_info(pool);
6306         pr_cont(" hung=%lus workers=%d", hung    5277         pr_cont(" hung=%lus workers=%d", hung, pool->nr_workers);
6307         if (pool->manager)                       5278         if (pool->manager)
6308                 pr_cont(" manager: %d",          5279                 pr_cont(" manager: %d",
6309                         task_pid_nr(pool->man    5280                         task_pid_nr(pool->manager->task));
6310         list_for_each_entry(worker, &pool->id    5281         list_for_each_entry(worker, &pool->idle_list, entry) {
6311                 pr_cont(" %s", first ? "idle: " : ""); !! 5282                 pr_cont(" %s%d", first ? "idle: " : "",
6312                 pr_cont_worker_id(worker);    !! 5283                         task_pid_nr(worker->task));
6313                 first = false;                   5284                 first = false;
6314         }                                        5285         }
6315         pr_cont("\n");                           5286         pr_cont("\n");
6316         printk_deferred_exit();                  5287         printk_deferred_exit();
6317 next_pool:                                       5288 next_pool:
6318         raw_spin_unlock_irqrestore(&pool->lock, irq_flags); !! 5289         raw_spin_unlock_irqrestore(&pool->lock, flags);
6319         /*                                       5290         /*
6320          * We could be printing a lot from at    5291          * We could be printing a lot from atomic context, e.g.
6321          * sysrq-t -> show_all_workqueues().     5292          * sysrq-t -> show_all_workqueues(). Avoid triggering
6322          * hard lockup.                          5293          * hard lockup.
6323          */                                      5294          */
6324         touch_nmi_watchdog();                    5295         touch_nmi_watchdog();
6325                                                  5296 
6326 }                                                5297 }
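
For reference, the pr_info()/pr_cont() sequence above emits a single line per non-idle pool, roughly of the form below (all values illustrative only, not taken from a real trace):

        pool 12: cpus=0-7 node=0 flags=0x0 nice=0 hung=8s workers=4 manager: 2146 idle: 2147 2150 2151

where the trailing idle entries come from pr_cont_worker_id() (PIDs for task-backed workers).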
6327                                                  5298 
6328 /**                                              5299 /**
6329  * show_all_workqueues - dump workqueue state    5300  * show_all_workqueues - dump workqueue state
6330  *                                               5301  *
6331  * Called from a sysrq handler and prints out    5302  * Called from a sysrq handler and prints out all busy workqueues and pools.
6332  */                                              5303  */
6333 void show_all_workqueues(void)                   5304 void show_all_workqueues(void)
6334 {                                                5305 {
6335         struct workqueue_struct *wq;             5306         struct workqueue_struct *wq;
6336         struct worker_pool *pool;                5307         struct worker_pool *pool;
6337         int pi;                                  5308         int pi;
6338                                                  5309 
6339         rcu_read_lock();                         5310         rcu_read_lock();
6340                                                  5311 
6341         pr_info("Showing busy workqueues and     5312         pr_info("Showing busy workqueues and worker pools:\n");
6342                                                  5313 
6343         list_for_each_entry_rcu(wq, &workqueu    5314         list_for_each_entry_rcu(wq, &workqueues, list)
6344                 show_one_workqueue(wq);          5315                 show_one_workqueue(wq);
6345                                                  5316 
6346         for_each_pool(pool, pi)                  5317         for_each_pool(pool, pi)
6347                 show_one_worker_pool(pool);      5318                 show_one_worker_pool(pool);
6348                                                  5319 
6349         rcu_read_unlock();                       5320         rcu_read_unlock();
6350 }                                                5321 }
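
The sysrq path mentioned in the comments can be exercised by hand: with sysrq enabled, writing 't' to /proc/sysrq-trigger dumps task state and, via this function, every busy workqueue and worker pool.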
6351                                                  5322 
6352 /**                                              5323 /**
6353  * show_freezable_workqueues - dump freezable    5324  * show_freezable_workqueues - dump freezable workqueue state
6354  *                                               5325  *
6355  * Called from try_to_freeze_tasks() and prin    5326  * Called from try_to_freeze_tasks() and prints out all freezable workqueues
6356  * still busy.                                   5327  * still busy.
6357  */                                              5328  */
6358 void show_freezable_workqueues(void)             5329 void show_freezable_workqueues(void)
6359 {                                                5330 {
6360         struct workqueue_struct *wq;             5331         struct workqueue_struct *wq;
6361                                                  5332 
6362         rcu_read_lock();                         5333         rcu_read_lock();
6363                                                  5334 
6364         pr_info("Showing freezable workqueues    5335         pr_info("Showing freezable workqueues that are still busy:\n");
6365                                                  5336 
6366         list_for_each_entry_rcu(wq, &workqueu    5337         list_for_each_entry_rcu(wq, &workqueues, list) {
6367                 if (!(wq->flags & WQ_FREEZABL    5338                 if (!(wq->flags & WQ_FREEZABLE))
6368                         continue;                5339                         continue;
6369                 show_one_workqueue(wq);          5340                 show_one_workqueue(wq);
6370         }                                        5341         }
6371                                                  5342 
6372         rcu_read_unlock();                       5343         rcu_read_unlock();
6373 }                                                5344 }
6374                                                  5345 
6375 /* used to show worker information through /p    5346 /* used to show worker information through /proc/PID/{comm,stat,status} */
6376 void wq_worker_comm(char *buf, size_t size, s    5347 void wq_worker_comm(char *buf, size_t size, struct task_struct *task)
6377 {                                                5348 {
                                                   >> 5349         int off;
                                                   >> 5350 
                                                   >> 5351         /* always show the actual comm */
                                                   >> 5352         off = strscpy(buf, task->comm, size);
                                                   >> 5353         if (off < 0)
                                                   >> 5354                 return;
                                                   >> 5355 
6378         /* stabilize PF_WQ_WORKER and worker     5356         /* stabilize PF_WQ_WORKER and worker pool association */
6379         mutex_lock(&wq_pool_attach_mutex);       5357         mutex_lock(&wq_pool_attach_mutex);
6380                                                  5358 
6381         if (task->flags & PF_WQ_WORKER) {        5359         if (task->flags & PF_WQ_WORKER) {
6382                 struct worker *worker = kthre    5360                 struct worker *worker = kthread_data(task);
6383                 struct worker_pool *pool = wo    5361                 struct worker_pool *pool = worker->pool;
6384                 int off;                      << 
6385                                               << 
6386                 off = format_worker_id(buf, size, worker, pool); << 
6387                                                  5362 
6388                 if (pool) {                      5363                 if (pool) {
6389                         raw_spin_lock_irq(&po    5364                         raw_spin_lock_irq(&pool->lock);
6390                         /*                       5365                         /*
6391                          * ->desc tracks info    5366                          * ->desc tracks information (wq name or
6392                          * set_worker_desc())    5367                          * set_worker_desc()) for the latest execution.  If
6393                          * current, prepend '    5368                          * current, prepend '+', otherwise '-'.
6394                          */                      5369                          */
6395                         if (worker->desc[0] !    5370                         if (worker->desc[0] != '\0') {
6396                                 if (worker->c    5371                                 if (worker->current_work)
6397                                         scnpr    5372                                         scnprintf(buf + off, size - off, "+%s",
6398                                                  5373                                                   worker->desc);
6399                                 else             5374                                 else
6400                                         scnpr    5375                                         scnprintf(buf + off, size - off, "-%s",
6401                                                  5376                                                   worker->desc);
6402                         }                        5377                         }
6403                         raw_spin_unlock_irq(&    5378                         raw_spin_unlock_irq(&pool->lock);
6404                 }                                5379                 }
6405         } else {                              << 
6406                 strscpy(buf, task->comm, size); << 
6407         }                                        5380         }
6408                                                  5381 
6409         mutex_unlock(&wq_pool_attach_mutex);     5382         mutex_unlock(&wq_pool_attach_mutex);
6410 }                                                5383 }
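
As a usage sketch (hypothetical driver code, not part of this file): a work function can call set_worker_desc() so that the '+desc'/'-desc' suffix handled above becomes visible in the executing kworker's /proc/<pid>/comm:

        static void myfs_writeback_fn(struct work_struct *work)
        {
                /*
                 * Tag the executing kworker; while this item runs, its comm
                 * reads like "kworker/u16:2+myfs-wb" ('+' marks the current
                 * work item, '-' the most recent one once it has finished).
                 */
                set_worker_desc("myfs-wb");

                /* ... perform the actual writeback ... */
        }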
6411                                                  5384 
6412 #ifdef CONFIG_SMP                                5385 #ifdef CONFIG_SMP
6413                                                  5386 
6414 /*                                               5387 /*
6415  * CPU hotplug.                                  5388  * CPU hotplug.
6416  *                                               5389  *
6417  * There are two challenges in supporting CPU    5390  * There are two challenges in supporting CPU hotplug.  Firstly, there
6418  * are a lot of assumptions on strong associa    5391  * are a lot of assumptions on strong associations among work, pwq and
6419  * pool which make migrating pending and sche    5392  * pool which make migrating pending and scheduled works very
6420  * difficult to implement without impacting h    5393  * difficult to implement without impacting hot paths.  Secondly,
6421  * worker pools serve mix of short, long and     5394  * worker pools serve mix of short, long and very long running works making
6422  * blocked draining impractical.                 5395  * blocked draining impractical.
6423  *                                               5396  *
6424  * This is solved by allowing the pools to be    5397  * This is solved by allowing the pools to be disassociated from the CPU
6425  * running as an unbound one and allowing it     5398  * running as an unbound one and allowing it to be reattached later if the
6426  * cpu comes back online.                        5399  * cpu comes back online.
6427  */                                              5400  */
6428                                                  5401 
6429 static void unbind_workers(int cpu)              5402 static void unbind_workers(int cpu)
6430 {                                                5403 {
6431         struct worker_pool *pool;                5404         struct worker_pool *pool;
6432         struct worker *worker;                   5405         struct worker *worker;
6433                                                  5406 
6434         for_each_cpu_worker_pool(pool, cpu) {    5407         for_each_cpu_worker_pool(pool, cpu) {
6435                 mutex_lock(&wq_pool_attach_mu    5408                 mutex_lock(&wq_pool_attach_mutex);
6436                 raw_spin_lock_irq(&pool->lock    5409                 raw_spin_lock_irq(&pool->lock);
6437                                                  5410 
6438                 /*                               5411                 /*
6439                  * We've blocked all attach/d    5412                  * We've blocked all attach/detach operations. Make all workers
6440                  * unbound and set DISASSOCIA    5413                  * unbound and set DISASSOCIATED.  Before this, all workers
6441                  * must be on the cpu.  After    5414                  * must be on the cpu.  After this, they may become diasporas.
6442                  * And the preemption disable    5415                  * And the preemption disabled section in their sched callbacks
6443                  * are guaranteed to see WORK    5416                  * are guaranteed to see WORKER_UNBOUND since the code here
6444                  * is on the same cpu.           5417                  * is on the same cpu.
6445                  */                              5418                  */
6446                 for_each_pool_worker(worker,     5419                 for_each_pool_worker(worker, pool)
6447                         worker->flags |= WORK    5420                         worker->flags |= WORKER_UNBOUND;
6448                                                  5421 
6449                 pool->flags |= POOL_DISASSOCI    5422                 pool->flags |= POOL_DISASSOCIATED;
6450                                                  5423 
6451                 /*                               5424                 /*
6452                  * The handling of nr_running    5425                  * The handling of nr_running in sched callbacks are disabled
6453                  * now.  Zap nr_running.  Aft    5426                  * now.  Zap nr_running.  After this, nr_running stays zero and
6454                  * need_more_worker() and kee    5427                  * need_more_worker() and keep_working() are always true as
6455                  * long as the worklist is no    5428                  * long as the worklist is not empty.  This pool now behaves as
6456                  * an unbound (in terms of co    5429                  * an unbound (in terms of concurrency management) pool which
6457                  * are served by workers tied    5430                  * are served by workers tied to the pool.
6458                  */                              5431                  */
6459                 pool->nr_running = 0;            5432                 pool->nr_running = 0;
6460                                                  5433 
6461                 /*                               5434                 /*
6462                  * With concurrency managemen    5435                  * With concurrency management just turned off, a busy
6463                  * worker blocking could lead    5436                  * worker blocking could lead to lengthy stalls.  Kick off
6464                  * unbound chain execution of    5437                  * unbound chain execution of currently pending work items.
6465                  */                              5438                  */
6466                 kick_pool(pool);                 5439                 kick_pool(pool);
6467                                                  5440 
6468                 raw_spin_unlock_irq(&pool->lo    5441                 raw_spin_unlock_irq(&pool->lock);
6469                                                  5442 
6470                 for_each_pool_worker(worker,     5443                 for_each_pool_worker(worker, pool)
6471                         unbind_worker(worker)    5444                         unbind_worker(worker);
6472                                                  5445 
6473                 mutex_unlock(&wq_pool_attach_    5446                 mutex_unlock(&wq_pool_attach_mutex);
6474         }                                        5447         }
6475 }                                                5448 }
6476                                                  5449 
6477 /**                                              5450 /**
6478  * rebind_workers - rebind all workers of a p    5451  * rebind_workers - rebind all workers of a pool to the associated CPU
6479  * @pool: pool of interest                       5452  * @pool: pool of interest
6480  *                                               5453  *
6481  * @pool->cpu is coming online.  Rebind all w    5454  * @pool->cpu is coming online.  Rebind all workers to the CPU.
6482  */                                              5455  */
6483 static void rebind_workers(struct worker_pool    5456 static void rebind_workers(struct worker_pool *pool)
6484 {                                                5457 {
6485         struct worker *worker;                   5458         struct worker *worker;
6486                                                  5459 
6487         lockdep_assert_held(&wq_pool_attach_m    5460         lockdep_assert_held(&wq_pool_attach_mutex);
6488                                                  5461 
6489         /*                                       5462         /*
6490          * Restore CPU affinity of all worker    5463          * Restore CPU affinity of all workers.  As all idle workers should
6491          * be on the run-queue of the associa    5464          * be on the run-queue of the associated CPU before any local
6492          * wake-ups for concurrency managemen    5465          * wake-ups for concurrency management happen, restore CPU affinity
6493          * of all workers first and then clea    5466          * of all workers first and then clear UNBOUND.  As we're called
6494          * from CPU_ONLINE, the following sho    5467          * from CPU_ONLINE, the following shouldn't fail.
6495          */                                      5468          */
6496         for_each_pool_worker(worker, pool) {     5469         for_each_pool_worker(worker, pool) {
6497                 kthread_set_per_cpu(worker->t    5470                 kthread_set_per_cpu(worker->task, pool->cpu);
6498                 WARN_ON_ONCE(set_cpus_allowed    5471                 WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task,
6499                                                  5472                                                   pool_allowed_cpus(pool)) < 0);
6500         }                                        5473         }
6501                                                  5474 
6502         raw_spin_lock_irq(&pool->lock);          5475         raw_spin_lock_irq(&pool->lock);
6503                                                  5476 
6504         pool->flags &= ~POOL_DISASSOCIATED;      5477         pool->flags &= ~POOL_DISASSOCIATED;
6505                                                  5478 
6506         for_each_pool_worker(worker, pool) {     5479         for_each_pool_worker(worker, pool) {
6507                 unsigned int worker_flags = w    5480                 unsigned int worker_flags = worker->flags;
6508                                                  5481 
6509                 /*                               5482                 /*
6510                  * We want to clear UNBOUND b    5483                  * We want to clear UNBOUND but can't directly call
6511                  * worker_clr_flags() or adju    5484                  * worker_clr_flags() or adjust nr_running.  Atomically
6512                  * replace UNBOUND with anoth    5485                  * replace UNBOUND with another NOT_RUNNING flag REBOUND.
6513                  * @worker will clear REBOUND    5486                  * @worker will clear REBOUND using worker_clr_flags() when
6514                  * it initiates the next exec    5487                  * it initiates the next execution cycle thus restoring
6515                  * concurrency management.  N    5488                  * concurrency management.  Note that when or whether
6516                  * @worker clears REBOUND doe    5489                  * @worker clears REBOUND doesn't affect correctness.
6517                  *                               5490                  *
6518                  * WRITE_ONCE() is necessary     5491                  * WRITE_ONCE() is necessary because @worker->flags may be
6519                  * tested without holding any    5492                  * tested without holding any lock in
6520                  * wq_worker_running().  With    5493                  * wq_worker_running().  Without it, NOT_RUNNING test may
6521                  * fail incorrectly leading t    5494                  * fail incorrectly leading to premature concurrency
6522                  * management operations.        5495                  * management operations.
6523                  */                              5496                  */
6524                 WARN_ON_ONCE(!(worker_flags &    5497                 WARN_ON_ONCE(!(worker_flags & WORKER_UNBOUND));
6525                 worker_flags |= WORKER_REBOUN    5498                 worker_flags |= WORKER_REBOUND;
6526                 worker_flags &= ~WORKER_UNBOU    5499                 worker_flags &= ~WORKER_UNBOUND;
6527                 WRITE_ONCE(worker->flags, wor    5500                 WRITE_ONCE(worker->flags, worker_flags);
6528         }                                        5501         }
6529                                                  5502 
6530         raw_spin_unlock_irq(&pool->lock);        5503         raw_spin_unlock_irq(&pool->lock);
6531 }                                                5504 }
6532                                                  5505 
6533 /**                                              5506 /**
6534  * restore_unbound_workers_cpumask - restore     5507  * restore_unbound_workers_cpumask - restore cpumask of unbound workers
6535  * @pool: unbound pool of interest               5508  * @pool: unbound pool of interest
6536  * @cpu: the CPU which is coming up              5509  * @cpu: the CPU which is coming up
6537  *                                               5510  *
6538  * An unbound pool may end up with a cpumask     5511  * An unbound pool may end up with a cpumask which doesn't have any online
6539  * CPUs.  When a worker of such pool get sche    5512  * CPUs.  When a worker of such pool get scheduled, the scheduler resets
6540  * its cpus_allowed.  If @cpu is in @pool's c    5513  * its cpus_allowed.  If @cpu is in @pool's cpumask which didn't have any
6541  * online CPU before, cpus_allowed of all its    5514  * online CPU before, cpus_allowed of all its workers should be restored.
6542  */                                              5515  */
6543 static void restore_unbound_workers_cpumask(s    5516 static void restore_unbound_workers_cpumask(struct worker_pool *pool, int cpu)
6544 {                                                5517 {
6545         static cpumask_t cpumask;                5518         static cpumask_t cpumask;
6546         struct worker *worker;                   5519         struct worker *worker;
6547                                                  5520 
6548         lockdep_assert_held(&wq_pool_attach_m    5521         lockdep_assert_held(&wq_pool_attach_mutex);
6549                                                  5522 
6550         /* is @cpu allowed for @pool? */         5523         /* is @cpu allowed for @pool? */
6551         if (!cpumask_test_cpu(cpu, pool->attr    5524         if (!cpumask_test_cpu(cpu, pool->attrs->cpumask))
6552                 return;                          5525                 return;
6553                                                  5526 
6554         cpumask_and(&cpumask, pool->attrs->cp    5527         cpumask_and(&cpumask, pool->attrs->cpumask, cpu_online_mask);
6555                                                  5528 
6556         /* as we're called from CPU_ONLINE, t    5529         /* as we're called from CPU_ONLINE, the following shouldn't fail */
6557         for_each_pool_worker(worker, pool)       5530         for_each_pool_worker(worker, pool)
6558                 WARN_ON_ONCE(set_cpus_allowed    5531                 WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, &cpumask) < 0);
6559 }                                                5532 }
6560                                                  5533 
6561 int workqueue_prepare_cpu(unsigned int cpu)      5534 int workqueue_prepare_cpu(unsigned int cpu)
6562 {                                                5535 {
6563         struct worker_pool *pool;                5536         struct worker_pool *pool;
6564                                                  5537 
6565         for_each_cpu_worker_pool(pool, cpu) {    5538         for_each_cpu_worker_pool(pool, cpu) {
6566                 if (pool->nr_workers)            5539                 if (pool->nr_workers)
6567                         continue;                5540                         continue;
6568                 if (!create_worker(pool))        5541                 if (!create_worker(pool))
6569                         return -ENOMEM;          5542                         return -ENOMEM;
6570         }                                        5543         }
6571         return 0;                                5544         return 0;
6572 }                                                5545 }
6573                                                  5546 
6574 int workqueue_online_cpu(unsigned int cpu)       5547 int workqueue_online_cpu(unsigned int cpu)
6575 {                                                5548 {
6576         struct worker_pool *pool;                5549         struct worker_pool *pool;
6577         struct workqueue_struct *wq;             5550         struct workqueue_struct *wq;
6578         int pi;                                  5551         int pi;
6579                                                  5552 
6580         mutex_lock(&wq_pool_mutex);              5553         mutex_lock(&wq_pool_mutex);
6581                                                  5554 
6582         cpumask_set_cpu(cpu, wq_online_cpumask); << 
6583                                               << 
6584         for_each_pool(pool, pi) {                5555         for_each_pool(pool, pi) {
6585                 /* BH pools aren't affected by hotplug */ << 
6586                 if (pool->flags & POOL_BH)    << 
6587                         continue;             << 
6588                                               << 
6589                 mutex_lock(&wq_pool_attach_mu    5556                 mutex_lock(&wq_pool_attach_mutex);
                                                   >> 5557 
6590                 if (pool->cpu == cpu)            5558                 if (pool->cpu == cpu)
6591                         rebind_workers(pool);    5559                         rebind_workers(pool);
6592                 else if (pool->cpu < 0)          5560                 else if (pool->cpu < 0)
6593                         restore_unbound_worke    5561                         restore_unbound_workers_cpumask(pool, cpu);
                                                   >> 5562 
6594                 mutex_unlock(&wq_pool_attach_    5563                 mutex_unlock(&wq_pool_attach_mutex);
6595         }                                        5564         }
6596                                                  5565 
6597         /* update pod affinity of unbound wor    5566         /* update pod affinity of unbound workqueues */
6598         list_for_each_entry(wq, &workqueues,     5567         list_for_each_entry(wq, &workqueues, list) {
6599                 struct workqueue_attrs *attrs    5568                 struct workqueue_attrs *attrs = wq->unbound_attrs;
6600                                                  5569 
6601                 if (attrs) {                     5570                 if (attrs) {
6602                         const struct wq_pod_t    5571                         const struct wq_pod_type *pt = wqattrs_pod_type(attrs);
6603                         int tcpu;                5572                         int tcpu;
6604                                                  5573 
6605                         for_each_cpu(tcpu, pt    5574                         for_each_cpu(tcpu, pt->pod_cpus[pt->cpu_pod[cpu]])
6606                                 unbound_wq_update_pwq(wq, tcpu); !! 5575                                 wq_update_pod(wq, tcpu, cpu, true);
6607                                               << 
6608                         mutex_lock(&wq->mutex); << 
6609                         wq_update_node_max_active(wq, -1); << 
6610                         mutex_unlock(&wq->mutex); << 
6611                 }                                5576                 }
6612         }                                        5577         }
6613                                                  5578 
6614         mutex_unlock(&wq_pool_mutex);            5579         mutex_unlock(&wq_pool_mutex);
6615         return 0;                                5580         return 0;
6616 }                                                5581 }
6617                                                  5582 
6618 int workqueue_offline_cpu(unsigned int cpu)      5583 int workqueue_offline_cpu(unsigned int cpu)
6619 {                                                5584 {
6620         struct workqueue_struct *wq;             5585         struct workqueue_struct *wq;
6621                                                  5586 
6622         /* unbinding per-cpu workers should h    5587         /* unbinding per-cpu workers should happen on the local CPU */
6623         if (WARN_ON(cpu != smp_processor_id()    5588         if (WARN_ON(cpu != smp_processor_id()))
6624                 return -1;                       5589                 return -1;
6625                                                  5590 
6626         unbind_workers(cpu);                     5591         unbind_workers(cpu);
6627                                                  5592 
6628         /* update pod affinity of unbound wor    5593         /* update pod affinity of unbound workqueues */
6629         mutex_lock(&wq_pool_mutex);              5594         mutex_lock(&wq_pool_mutex);
6630                                               << 
6631         cpumask_clear_cpu(cpu, wq_online_cpumask); << 
6632                                               << 
6633         list_for_each_entry(wq, &workqueues,     5595         list_for_each_entry(wq, &workqueues, list) {
6634                 struct workqueue_attrs *attrs    5596                 struct workqueue_attrs *attrs = wq->unbound_attrs;
6635                                                  5597 
6636                 if (attrs) {                     5598                 if (attrs) {
6637                         const struct wq_pod_t    5599                         const struct wq_pod_type *pt = wqattrs_pod_type(attrs);
6638                         int tcpu;                5600                         int tcpu;
6639                                                  5601 
6640                         for_each_cpu(tcpu, pt    5602                         for_each_cpu(tcpu, pt->pod_cpus[pt->cpu_pod[cpu]])
6641                                 unbound_wq_update_pwq(wq, tcpu); !! 5603                                 wq_update_pod(wq, tcpu, cpu, false);
6642                                               << 
6643                         mutex_lock(&wq->mutex); << 
6644                         wq_update_node_max_active(wq, cpu); << 
6645                         mutex_unlock(&wq->mutex); << 
6646                 }                                5604                 }
6647         }                                        5605         }
6648         mutex_unlock(&wq_pool_mutex);            5606         mutex_unlock(&wq_pool_mutex);
6649                                                  5607 
6650         return 0;                                5608         return 0;
6651 }                                                5609 }
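
For context, these callbacks are not invoked from this file; they are wired into the CPU hotplug state machine in kernel/cpu.c. A simplified sketch of the relevant cpuhp_hp_states[] entries (field layout abbreviated):

        [CPUHP_WORKQUEUE_PREP] = {
                .name                   = "workqueue:prepare",
                .startup.single         = workqueue_prepare_cpu,
                .teardown.single        = NULL,
        },
        [CPUHP_AP_WORKQUEUE_ONLINE] = {
                .name                   = "workqueue:online",
                .startup.single         = workqueue_online_cpu,
                .teardown.single        = workqueue_offline_cpu,
        },

so the prepare step runs before the new CPU comes up, while the online/offline steps run around the hotplugged CPU going up or down.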
6652                                                  5610 
6653 struct work_for_cpu {                            5611 struct work_for_cpu {
6654         struct work_struct work;                 5612         struct work_struct work;
6655         long (*fn)(void *);                      5613         long (*fn)(void *);
6656         void *arg;                               5614         void *arg;
6657         long ret;                                5615         long ret;
6658 };                                               5616 };
6659                                                  5617 
6660 static void work_for_cpu_fn(struct work_struc    5618 static void work_for_cpu_fn(struct work_struct *work)
6661 {                                                5619 {
6662         struct work_for_cpu *wfc = container_    5620         struct work_for_cpu *wfc = container_of(work, struct work_for_cpu, work);
6663                                                  5621 
6664         wfc->ret = wfc->fn(wfc->arg);            5622         wfc->ret = wfc->fn(wfc->arg);
6665 }                                                5623 }
6666                                                  5624 
6667 /**                                              5625 /**
6668  * work_on_cpu_key - run a function in thread    5626  * work_on_cpu_key - run a function in thread context on a particular cpu
6669  * @cpu: the cpu to run on                       5627  * @cpu: the cpu to run on
6670  * @fn: the function to run                      5628  * @fn: the function to run
6671  * @arg: the function arg                        5629  * @arg: the function arg
6672  * @key: The lock class key for lock debuggin    5630  * @key: The lock class key for lock debugging purposes
6673  *                                               5631  *
6674  * It is up to the caller to ensure that the     5632  * It is up to the caller to ensure that the cpu doesn't go offline.
6675  * The caller must not hold any locks which w    5633  * The caller must not hold any locks which would prevent @fn from completing.
6676  *                                               5634  *
6677  * Return: The value @fn returns.                5635  * Return: The value @fn returns.
6678  */                                              5636  */
6679 long work_on_cpu_key(int cpu, long (*fn)(void    5637 long work_on_cpu_key(int cpu, long (*fn)(void *),
6680                      void *arg, struct lock_c    5638                      void *arg, struct lock_class_key *key)
6681 {                                                5639 {
6682         struct work_for_cpu wfc = { .fn = fn,    5640         struct work_for_cpu wfc = { .fn = fn, .arg = arg };
6683                                                  5641 
6684         INIT_WORK_ONSTACK_KEY(&wfc.work, work    5642         INIT_WORK_ONSTACK_KEY(&wfc.work, work_for_cpu_fn, key);
6685         schedule_work_on(cpu, &wfc.work);        5643         schedule_work_on(cpu, &wfc.work);
6686         flush_work(&wfc.work);                   5644         flush_work(&wfc.work);
6687         destroy_work_on_stack(&wfc.work);        5645         destroy_work_on_stack(&wfc.work);
6688         return wfc.ret;                          5646         return wfc.ret;
6689 }                                                5647 }
6690 EXPORT_SYMBOL_GPL(work_on_cpu_key);              5648 EXPORT_SYMBOL_GPL(work_on_cpu_key);
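
Callers normally use the work_on_cpu() wrapper from workqueue.h, which supplies the lock class key. A minimal sketch (hypothetical helper, assuming the caller keeps the target CPU online):

        /* Executed by a kworker bound to the requested CPU. */
        static long node_of_self(void *unused)
        {
                return numa_node_id();
        }

        static long node_of_cpu(int cpu)
        {
                /* Queues on system_wq via schedule_work_on() and flushes. */
                return work_on_cpu(cpu, node_of_self, NULL);
        }

When the CPU may be unplugged concurrently, the work_on_cpu_safe() variant below is the one to use; it pins hotplug with cpus_read_lock() first.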
6691                                                  5649 
6692 /**                                              5650 /**
6693  * work_on_cpu_safe_key - run a function in t    5651  * work_on_cpu_safe_key - run a function in thread context on a particular cpu
6694  * @cpu: the cpu to run on                       5652  * @cpu: the cpu to run on
6695  * @fn:  the function to run                     5653  * @fn:  the function to run
6696  * @arg: the function argument                   5654  * @arg: the function argument
6697  * @key: The lock class key for lock debuggin    5655  * @key: The lock class key for lock debugging purposes
6698  *                                               5656  *
6699  * Disables CPU hotplug and calls work_on_cpu    5657  * Disables CPU hotplug and calls work_on_cpu(). The caller must not hold
6700  * any locks which would prevent @fn from com    5658  * any locks which would prevent @fn from completing.
6701  *                                               5659  *
6702  * Return: The value @fn returns.                5660  * Return: The value @fn returns.
6703  */                                              5661  */
6704 long work_on_cpu_safe_key(int cpu, long (*fn)    5662 long work_on_cpu_safe_key(int cpu, long (*fn)(void *),
6705                           void *arg, struct l    5663                           void *arg, struct lock_class_key *key)
6706 {                                                5664 {
6707         long ret = -ENODEV;                      5665         long ret = -ENODEV;
6708                                                  5666 
6709         cpus_read_lock();                        5667         cpus_read_lock();
6710         if (cpu_online(cpu))                     5668         if (cpu_online(cpu))
6711                 ret = work_on_cpu_key(cpu, fn    5669                 ret = work_on_cpu_key(cpu, fn, arg, key);
6712         cpus_read_unlock();                      5670         cpus_read_unlock();
6713         return ret;                              5671         return ret;
6714 }                                                5672 }
6715 EXPORT_SYMBOL_GPL(work_on_cpu_safe_key);         5673 EXPORT_SYMBOL_GPL(work_on_cpu_safe_key);
6716 #endif /* CONFIG_SMP */                          5674 #endif /* CONFIG_SMP */
6717                                                  5675 
6718 #ifdef CONFIG_FREEZER                            5676 #ifdef CONFIG_FREEZER
6719                                                  5677 
6720 /**                                              5678 /**
6721  * freeze_workqueues_begin - begin freezing w    5679  * freeze_workqueues_begin - begin freezing workqueues
6722  *                                               5680  *
6723  * Start freezing workqueues.  After this fun    5681  * Start freezing workqueues.  After this function returns, all freezable
6724  * workqueues will queue new works to their i    5682  * workqueues will queue new works to their inactive_works list instead of
6725  * pool->worklist.                               5683  * pool->worklist.
6726  *                                               5684  *
6727  * CONTEXT:                                      5685  * CONTEXT:
6728  * Grabs and releases wq_pool_mutex, wq->mute    5686  * Grabs and releases wq_pool_mutex, wq->mutex and pool->lock's.
6729  */                                              5687  */
6730 void freeze_workqueues_begin(void)               5688 void freeze_workqueues_begin(void)
6731 {                                                5689 {
6732         struct workqueue_struct *wq;             5690         struct workqueue_struct *wq;
                                                   >> 5691         struct pool_workqueue *pwq;
6733                                                  5692 
6734         mutex_lock(&wq_pool_mutex);              5693         mutex_lock(&wq_pool_mutex);
6735                                                  5694 
6736         WARN_ON_ONCE(workqueue_freezing);        5695         WARN_ON_ONCE(workqueue_freezing);
6737         workqueue_freezing = true;               5696         workqueue_freezing = true;
6738                                                  5697 
6739         list_for_each_entry(wq, &workqueues,     5698         list_for_each_entry(wq, &workqueues, list) {
6740                 mutex_lock(&wq->mutex);          5699                 mutex_lock(&wq->mutex);
6741                 wq_adjust_max_active(wq);      !! 5700                 for_each_pwq(pwq, wq)
                                                   >> 5701                         pwq_adjust_max_active(pwq);
6742                 mutex_unlock(&wq->mutex);        5702                 mutex_unlock(&wq->mutex);
6743         }                                        5703         }
6744                                                  5704 
6745         mutex_unlock(&wq_pool_mutex);            5705         mutex_unlock(&wq_pool_mutex);
6746 }                                                5706 }
6747                                                  5707 
6748 /**                                              5708 /**
6749  * freeze_workqueues_busy - are freezable wor    5709  * freeze_workqueues_busy - are freezable workqueues still busy?
6750  *                                               5710  *
6751  * Check whether freezing is complete.  This     5711  * Check whether freezing is complete.  This function must be called
6752  * between freeze_workqueues_begin() and thaw    5712  * between freeze_workqueues_begin() and thaw_workqueues().
6753  *                                               5713  *
6754  * CONTEXT:                                      5714  * CONTEXT:
6755  * Grabs and releases wq_pool_mutex.             5715  * Grabs and releases wq_pool_mutex.
6756  *                                               5716  *
6757  * Return:                                       5717  * Return:
6758  * %true if some freezable workqueues are sti    5718  * %true if some freezable workqueues are still busy.  %false if freezing
6759  * is complete.                                  5719  * is complete.
6760  */                                              5720  */
6761 bool freeze_workqueues_busy(void)                5721 bool freeze_workqueues_busy(void)
6762 {                                                5722 {
6763         bool busy = false;                       5723         bool busy = false;
6764         struct workqueue_struct *wq;             5724         struct workqueue_struct *wq;
6765         struct pool_workqueue *pwq;              5725         struct pool_workqueue *pwq;
6766                                                  5726 
6767         mutex_lock(&wq_pool_mutex);              5727         mutex_lock(&wq_pool_mutex);
6768                                                  5728 
6769         WARN_ON_ONCE(!workqueue_freezing);       5729         WARN_ON_ONCE(!workqueue_freezing);
6770                                                  5730 
6771         list_for_each_entry(wq, &workqueues,     5731         list_for_each_entry(wq, &workqueues, list) {
6772                 if (!(wq->flags & WQ_FREEZABL    5732                 if (!(wq->flags & WQ_FREEZABLE))
6773                         continue;                5733                         continue;
6774                 /*                               5734                 /*
6775                  * nr_active is monotonically    5735                  * nr_active is monotonically decreasing.  It's safe
6776                  * to peek without lock.         5736                  * to peek without lock.
6777                  */                              5737                  */
6778                 rcu_read_lock();                 5738                 rcu_read_lock();
6779                 for_each_pwq(pwq, wq) {          5739                 for_each_pwq(pwq, wq) {
6780                         WARN_ON_ONCE(pwq->nr_    5740                         WARN_ON_ONCE(pwq->nr_active < 0);
6781                         if (pwq->nr_active) {    5741                         if (pwq->nr_active) {
6782                                 busy = true;     5742                                 busy = true;
6783                                 rcu_read_unlo    5743                                 rcu_read_unlock();
6784                                 goto out_unlo    5744                                 goto out_unlock;
6785                         }                        5745                         }
6786                 }                                5746                 }
6787                 rcu_read_unlock();               5747                 rcu_read_unlock();
6788         }                                        5748         }
6789 out_unlock:                                      5749 out_unlock:
6790         mutex_unlock(&wq_pool_mutex);            5750         mutex_unlock(&wq_pool_mutex);
6791         return busy;                             5751         return busy;
6792 }                                                5752 }
6793                                                  5753 
6794 /**                                              5754 /**
6795  * thaw_workqueues - thaw workqueues             5755  * thaw_workqueues - thaw workqueues
6796  *                                               5756  *
6797  * Thaw workqueues.  Normal queueing is resto    5757  * Thaw workqueues.  Normal queueing is restored and all collected
6798  * frozen works are transferred to their resp    5758  * frozen works are transferred to their respective pool worklists.
6799  *                                               5759  *
6800  * CONTEXT:                                      5760  * CONTEXT:
6801  * Grabs and releases wq_pool_mutex, wq->mute    5761  * Grabs and releases wq_pool_mutex, wq->mutex and pool->lock's.
6802  */                                              5762  */
6803 void thaw_workqueues(void)                       5763 void thaw_workqueues(void)
6804 {                                                5764 {
6805         struct workqueue_struct *wq;             5765         struct workqueue_struct *wq;
                                                   >> 5766         struct pool_workqueue *pwq;
6806                                                  5767 
6807         mutex_lock(&wq_pool_mutex);              5768         mutex_lock(&wq_pool_mutex);
6808                                                  5769 
6809         if (!workqueue_freezing)                 5770         if (!workqueue_freezing)
6810                 goto out_unlock;                 5771                 goto out_unlock;
6811                                                  5772 
6812         workqueue_freezing = false;              5773         workqueue_freezing = false;
6813                                                  5774 
6814         /* restore max_active and repopulate     5775         /* restore max_active and repopulate worklist */
6815         list_for_each_entry(wq, &workqueues,     5776         list_for_each_entry(wq, &workqueues, list) {
6816                 mutex_lock(&wq->mutex);          5777                 mutex_lock(&wq->mutex);
6817                 wq_adjust_max_active(wq);     !! 5778                 for_each_pwq(pwq, wq)
                                                   >> 5779                         pwq_adjust_max_active(pwq);
6818                 mutex_unlock(&wq->mutex);        5780                 mutex_unlock(&wq->mutex);
6819         }                                        5781         }
6820                                                  5782 
6821 out_unlock:                                      5783 out_unlock:
6822         mutex_unlock(&wq_pool_mutex);            5784         mutex_unlock(&wq_pool_mutex);
6823 }                                                5785 }
6824 #endif /* CONFIG_FREEZER */                      5786 #endif /* CONFIG_FREEZER */
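
A rough model of how the freezer core is expected to drive the three entry points above (the real sequencing and timeout handling live in kernel/power/process.c; this is a simplified sketch with a made-up timeout):

        static int freeze_wq_sketch(void)
        {
                unsigned long timeout = jiffies + msecs_to_jiffies(20000);

                freeze_workqueues_begin();      /* divert new work to inactive_works */

                while (freeze_workqueues_busy()) {      /* some pwq still has nr_active */
                        if (time_after(jiffies, timeout)) {
                                show_freezable_workqueues();    /* name the stragglers */
                                thaw_workqueues();              /* abort, restore queueing */
                                return -EBUSY;
                        }
                        msleep(10);
                }
                return 0;       /* frozen; thaw_workqueues() runs again after resume */
        }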
6825                                                  5787 
6826 static int workqueue_apply_unbound_cpumask(co    5788 static int workqueue_apply_unbound_cpumask(const cpumask_var_t unbound_cpumask)
6827 {                                                5789 {
6828         LIST_HEAD(ctxs);                         5790         LIST_HEAD(ctxs);
6829         int ret = 0;                             5791         int ret = 0;
6830         struct workqueue_struct *wq;             5792         struct workqueue_struct *wq;
6831         struct apply_wqattrs_ctx *ctx, *n;       5793         struct apply_wqattrs_ctx *ctx, *n;
6832                                                  5794 
6833         lockdep_assert_held(&wq_pool_mutex);     5795         lockdep_assert_held(&wq_pool_mutex);
6834                                                  5796 
6835         list_for_each_entry(wq, &workqueues,     5797         list_for_each_entry(wq, &workqueues, list) {
6836                 if (!(wq->flags & WQ_UNBOUND) !! 5798                 if (!(wq->flags & WQ_UNBOUND))
                                                   >> 5799                         continue;
                                                   >> 5800                 /* creating multiple pwqs breaks ordering guarantee */
                                                   >> 5801                 if (wq->flags & __WQ_ORDERED)
6837                         continue;                5802                         continue;
6838                                                  5803 
6839                 ctx = apply_wqattrs_prepare(w    5804                 ctx = apply_wqattrs_prepare(wq, wq->unbound_attrs, unbound_cpumask);
6840                 if (IS_ERR(ctx)) {               5805                 if (IS_ERR(ctx)) {
6841                         ret = PTR_ERR(ctx);      5806                         ret = PTR_ERR(ctx);
6842                         break;                   5807                         break;
6843                 }                                5808                 }
6844                                                  5809 
6845                 list_add_tail(&ctx->list, &ct    5810                 list_add_tail(&ctx->list, &ctxs);
6846         }                                        5811         }
6847                                                  5812 
6848         list_for_each_entry_safe(ctx, n, &ctx    5813         list_for_each_entry_safe(ctx, n, &ctxs, list) {
6849                 if (!ret)                        5814                 if (!ret)
6850                         apply_wqattrs_commit(    5815                         apply_wqattrs_commit(ctx);
6851                 apply_wqattrs_cleanup(ctx);      5816                 apply_wqattrs_cleanup(ctx);
6852         }                                        5817         }
6853                                                  5818 
6854         if (!ret) {                              5819         if (!ret) {
6855                 mutex_lock(&wq_pool_attach_mu    5820                 mutex_lock(&wq_pool_attach_mutex);
6856                 cpumask_copy(wq_unbound_cpuma    5821                 cpumask_copy(wq_unbound_cpumask, unbound_cpumask);
6857                 mutex_unlock(&wq_pool_attach_    5822                 mutex_unlock(&wq_pool_attach_mutex);
6858         }                                        5823         }
6859         return ret;                              5824         return ret;
6860 }                                                5825 }
6861                                                  5826 
6862 /**                                              5827 /**
6863  * workqueue_unbound_exclude_cpumask - Exclude given CPUs from unbound cpumask !! 5828  *  workqueue_set_unbound_cpumask - Set the low-level unbound cpumask
6864  * @exclude_cpumask: the cpumask to be excluded from wq_unbound_cpumask !! 5829  *  @cpumask: the cpumask to set
6865  *                                               5830  *
6866  * This function can be called from cpuset code to provide a set of isolated !! 5831  *  The low-level workqueues cpumask is a global cpumask that limits
6867  * CPUs that should be excluded from wq_unbound_cpumask. !! 5832  *  the affinity of all unbound workqueues.  This function check the @cpumask
                                                   >> 5833  *  and apply it to all unbound workqueues and updates all pwqs of them.
                                                   >> 5834  *
                                                   >> 5835  *  Return:     0       - Success
                                                   >> 5836  *              -EINVAL - Invalid @cpumask
                                                   >> 5837  *              -ENOMEM - Failed to allocate memory for attrs or pwqs.
6868  */                                              5838  */
6869 int workqueue_unbound_exclude_cpumask(cpumask_var_t exclude_cpumask) !! 5839 int workqueue_set_unbound_cpumask(cpumask_var_t cpumask)
6870 {                                                5840 {
6871         cpumask_var_t cpumask;                !! 5841         int ret = -EINVAL;
6872         int ret = 0;                          << 
6873                                               << 
6874         if (!zalloc_cpumask_var(&cpumask, GFP_KERNEL)) << 
6875                 return -ENOMEM;               << 
6876                                               << 
6877         mutex_lock(&wq_pool_mutex);           << 
6878                                                  5842 
6879         /*                                       5843         /*
6880          * If the operation fails, it will fa !! 5844          * Not excluding isolated cpus on purpose.
6881          * wq_requested_unbound_cpumask which !! 5845          * If the user wishes to include them, we allow that.
6882          * (HK_TYPE_WQ ∩ HK_TYPE_DOMAIN) ho !! 5846          */
6883          * by any subsequent write to workque !! 5847         cpumask_and(cpumask, cpumask, cpu_possible_mask);
6884          */                                   !! 5848         if (!cpumask_empty(cpumask)) {
6885         if (!cpumask_andnot(cpumask, wq_requested_unbound_cpumask, exclude_cpumask)) !! 5850                 if (cpumask_equal(cpumask, wq_unbound_cpumask)) {
6886                 cpumask_copy(cpumask, wq_requested_unbound_cpumask); !! 5851                         ret = 0;
6887         if (!cpumask_equal(cpumask, wq_unbound_cpumask)) !! 5852                         goto out_unlock;
                                                   >> 5852                         goto out_unlock;
                                                   >> 5853                 }
                                                   >> 5854 
6888                 ret = workqueue_apply_unbound    5855                 ret = workqueue_apply_unbound_cpumask(cpumask);
6889                                                  5856 
6890         /* Save the current isolated cpumask  !! 5857 out_unlock:
6891         if (!ret)                             !! 5858                 apply_wqattrs_unlock();
6892                 cpumask_copy(wq_isolated_cpumask, exclude_cpumask); !! 5859         }
6893                                                  5860 
6894         mutex_unlock(&wq_pool_mutex);         << 
6895         free_cpumask_var(cpumask);            << 
6896         return ret;                              5861         return ret;
6897 }                                                5862 }
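
A usage sketch of the exclusion interface (the in-tree caller is the cpuset controller; the names below are illustrative, not real kernel symbols):

        /*
         * Hand the currently isolated CPUs to the workqueue core so that
         * unbound workers are steered away from them; "isolated_cpus" is a
         * hypothetical cpumask owned by the isolation code.
         */
        if (workqueue_unbound_exclude_cpumask(isolated_cpus))
                pr_warn("workqueue: failed to exclude isolated CPUs\n");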
6898                                                  5863 
6899 static int parse_affn_scope(const char *val)     5864 static int parse_affn_scope(const char *val)
6900 {                                                5865 {
6901         int i;                                   5866         int i;
6902                                                  5867 
6903         for (i = 0; i < ARRAY_SIZE(wq_affn_na    5868         for (i = 0; i < ARRAY_SIZE(wq_affn_names); i++) {
6904                 if (!strncasecmp(val, wq_affn    5869                 if (!strncasecmp(val, wq_affn_names[i], strlen(wq_affn_names[i])))
6905                         return i;                5870                         return i;
6906         }                                        5871         }
6907         return -EINVAL;                          5872         return -EINVAL;
6908 }                                                5873 }
6909                                                  5874 
6910 static int wq_affn_dfl_set(const char *val, c    5875 static int wq_affn_dfl_set(const char *val, const struct kernel_param *kp)
6911 {                                                5876 {
6912         struct workqueue_struct *wq;             5877         struct workqueue_struct *wq;
6913         int affn, cpu;                           5878         int affn, cpu;
6914                                                  5879 
6915         affn = parse_affn_scope(val);            5880         affn = parse_affn_scope(val);
6916         if (affn < 0)                            5881         if (affn < 0)
6917                 return affn;                     5882                 return affn;
6918         if (affn == WQ_AFFN_DFL)                 5883         if (affn == WQ_AFFN_DFL)
6919                 return -EINVAL;                  5884                 return -EINVAL;
6920                                                  5885 
6921         cpus_read_lock();                        5886         cpus_read_lock();
6922         mutex_lock(&wq_pool_mutex);              5887         mutex_lock(&wq_pool_mutex);
6923                                                  5888 
6924         wq_affn_dfl = affn;                      5889         wq_affn_dfl = affn;
6925                                                  5890 
6926         list_for_each_entry(wq, &workqueues,     5891         list_for_each_entry(wq, &workqueues, list) {
6927                 for_each_online_cpu(cpu)      !! 5892                 for_each_online_cpu(cpu) {
6928                         unbound_wq_update_pwq !! 5893                         wq_update_pod(wq, cpu, cpu, true);
                                                   >> 5894                 }
6929         }                                        5895         }
6930                                                  5896 
6931         mutex_unlock(&wq_pool_mutex);            5897         mutex_unlock(&wq_pool_mutex);
6932         cpus_read_unlock();                      5898         cpus_read_unlock();
6933                                                  5899 
6934         return 0;                                5900         return 0;
6935 }                                                5901 }
6936                                                  5902 
6937 static int wq_affn_dfl_get(char *buffer, cons    5903 static int wq_affn_dfl_get(char *buffer, const struct kernel_param *kp)
6938 {                                                5904 {
6939         return scnprintf(buffer, PAGE_SIZE, "    5905         return scnprintf(buffer, PAGE_SIZE, "%s\n", wq_affn_names[wq_affn_dfl]);
6940 }                                                5906 }
6941                                                  5907 
6942 static const struct kernel_param_ops wq_affn_    5908 static const struct kernel_param_ops wq_affn_dfl_ops = {
6943         .set    = wq_affn_dfl_set,               5909         .set    = wq_affn_dfl_set,
6944         .get    = wq_affn_dfl_get,               5910         .get    = wq_affn_dfl_get,
6945 };                                               5911 };
6946                                                  5912 
6947 module_param_cb(default_affinity_scope, &wq_a    5913 module_param_cb(default_affinity_scope, &wq_affn_dfl_ops, NULL, 0644);
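The module_param_cb() above exposes the default affinity scope as the kernel parameter workqueue.default_affinity_scope, writable at runtime through the usual module-parameter file. A minimal userspace sketch, assuming the conventional /sys/module/workqueue/parameters/ path and one of the scope names accepted by parse_affn_scope() (e.g. "cache"):

#include <stdio.h>

int main(void)
{
	/*
	 * Path and value are assumptions; any name rejected by
	 * parse_affn_scope(), or "default" itself, makes the write fail
	 * with -EINVAL (reported via the failing fclose()).
	 */
	FILE *f = fopen("/sys/module/workqueue/parameters/default_affinity_scope", "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	fputs("cache\n", f);
	return fclose(f) ? 1 : 0;
}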
6948                                                  5914 
6949 #ifdef CONFIG_SYSFS                              5915 #ifdef CONFIG_SYSFS
6950 /*                                               5916 /*
6951  * Workqueues with WQ_SYSFS flag set are visi    5917  * Workqueues with WQ_SYSFS flag set are visible to userland via
6952  * /sys/bus/workqueue/devices/WQ_NAME.  All v    5918  * /sys/bus/workqueue/devices/WQ_NAME.  All visible workqueues have the
6953  * following attributes.                         5919  * following attributes.
6954  *                                               5920  *
6955  *  per_cpu             RO bool : whether the    5921  *  per_cpu             RO bool : whether the workqueue is per-cpu or unbound
6956  *  max_active          RW int  : maximum num    5922  *  max_active          RW int  : maximum number of in-flight work items
6957  *                                               5923  *
6958  * Unbound workqueues have the following extr    5924  * Unbound workqueues have the following extra attributes.
6959  *                                               5925  *
6960  *  nice                RW int  : nice value     5926  *  nice                RW int  : nice value of the workers
6961  *  cpumask             RW mask : bitmask of     5927  *  cpumask             RW mask : bitmask of allowed CPUs for the workers
6962  *  affinity_scope      RW str  : worker CPU     5928  *  affinity_scope      RW str  : worker CPU affinity scope (cache, numa, none)
6963  *  affinity_strict     RW bool : worker CPU     5929  *  affinity_strict     RW bool : worker CPU affinity is strict
6964  */                                              5930  */
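A minimal kernel-side sketch of how such a workqueue comes to exist; the module and workqueue name are illustrative. Passing WQ_SYSFS to alloc_workqueue() is what makes the attributes listed above appear under /sys/bus/workqueue/devices/<name>/.

#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *my_driver_wq;	/* name is illustrative */

static int __init my_driver_init(void)
{
	/*
	 * WQ_SYSFS exposes per_cpu/max_active (and, for unbound
	 * workqueues, nice/cpumask/affinity_*) to userland.
	 */
	my_driver_wq = alloc_workqueue("my_driver_wq",
				       WQ_UNBOUND | WQ_SYSFS, 0);
	return my_driver_wq ? 0 : -ENOMEM;
}

static void __exit my_driver_exit(void)
{
	destroy_workqueue(my_driver_wq);
}

module_init(my_driver_init);
module_exit(my_driver_exit);
MODULE_LICENSE("GPL");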
6965 struct wq_device {                               5931 struct wq_device {
6966         struct workqueue_struct         *wq;     5932         struct workqueue_struct         *wq;
6967         struct device                   dev;     5933         struct device                   dev;
6968 };                                               5934 };
6969                                                  5935 
6970 static struct workqueue_struct *dev_to_wq(str    5936 static struct workqueue_struct *dev_to_wq(struct device *dev)
6971 {                                                5937 {
6972         struct wq_device *wq_dev = container_    5938         struct wq_device *wq_dev = container_of(dev, struct wq_device, dev);
6973                                                  5939 
6974         return wq_dev->wq;                       5940         return wq_dev->wq;
6975 }                                                5941 }
6976                                                  5942 
6977 static ssize_t per_cpu_show(struct device *de    5943 static ssize_t per_cpu_show(struct device *dev, struct device_attribute *attr,
6978                             char *buf)           5944                             char *buf)
6979 {                                                5945 {
6980         struct workqueue_struct *wq = dev_to_    5946         struct workqueue_struct *wq = dev_to_wq(dev);
6981                                                  5947 
6982         return scnprintf(buf, PAGE_SIZE, "%d\    5948         return scnprintf(buf, PAGE_SIZE, "%d\n", (bool)!(wq->flags & WQ_UNBOUND));
6983 }                                                5949 }
6984 static DEVICE_ATTR_RO(per_cpu);                  5950 static DEVICE_ATTR_RO(per_cpu);
6985                                                  5951 
6986 static ssize_t max_active_show(struct device     5952 static ssize_t max_active_show(struct device *dev,
6987                                struct device_    5953                                struct device_attribute *attr, char *buf)
6988 {                                                5954 {
6989         struct workqueue_struct *wq = dev_to_    5955         struct workqueue_struct *wq = dev_to_wq(dev);
6990                                                  5956 
6991         return scnprintf(buf, PAGE_SIZE, "%d\    5957         return scnprintf(buf, PAGE_SIZE, "%d\n", wq->saved_max_active);
6992 }                                                5958 }
6993                                                  5959 
6994 static ssize_t max_active_store(struct device    5960 static ssize_t max_active_store(struct device *dev,
6995                                 struct device    5961                                 struct device_attribute *attr, const char *buf,
6996                                 size_t count)    5962                                 size_t count)
6997 {                                                5963 {
6998         struct workqueue_struct *wq = dev_to_    5964         struct workqueue_struct *wq = dev_to_wq(dev);
6999         int val;                                 5965         int val;
7000                                                  5966 
7001         if (sscanf(buf, "%d", &val) != 1 || v    5967         if (sscanf(buf, "%d", &val) != 1 || val <= 0)
7002                 return -EINVAL;                  5968                 return -EINVAL;
7003                                                  5969 
7004         workqueue_set_max_active(wq, val);       5970         workqueue_set_max_active(wq, val);
7005         return count;                            5971         return count;
7006 }                                                5972 }
7007 static DEVICE_ATTR_RW(max_active);               5973 static DEVICE_ATTR_RW(max_active);
7008                                                  5974 
7009 static struct attribute *wq_sysfs_attrs[] = {    5975 static struct attribute *wq_sysfs_attrs[] = {
7010         &dev_attr_per_cpu.attr,                  5976         &dev_attr_per_cpu.attr,
7011         &dev_attr_max_active.attr,               5977         &dev_attr_max_active.attr,
7012         NULL,                                    5978         NULL,
7013 };                                               5979 };
7014 ATTRIBUTE_GROUPS(wq_sysfs);                      5980 ATTRIBUTE_GROUPS(wq_sysfs);
7015                                                  5981 
7016 static ssize_t wq_nice_show(struct device *de    5982 static ssize_t wq_nice_show(struct device *dev, struct device_attribute *attr,
7017                             char *buf)           5983                             char *buf)
7018 {                                                5984 {
7019         struct workqueue_struct *wq = dev_to_    5985         struct workqueue_struct *wq = dev_to_wq(dev);
7020         int written;                             5986         int written;
7021                                                  5987 
7022         mutex_lock(&wq->mutex);                  5988         mutex_lock(&wq->mutex);
7023         written = scnprintf(buf, PAGE_SIZE, "    5989         written = scnprintf(buf, PAGE_SIZE, "%d\n", wq->unbound_attrs->nice);
7024         mutex_unlock(&wq->mutex);                5990         mutex_unlock(&wq->mutex);
7025                                                  5991 
7026         return written;                          5992         return written;
7027 }                                                5993 }
7028                                                  5994 
7029 /* prepare workqueue_attrs for sysfs store op    5995 /* prepare workqueue_attrs for sysfs store operations */
7030 static struct workqueue_attrs *wq_sysfs_prep_    5996 static struct workqueue_attrs *wq_sysfs_prep_attrs(struct workqueue_struct *wq)
7031 {                                                5997 {
7032         struct workqueue_attrs *attrs;           5998         struct workqueue_attrs *attrs;
7033                                                  5999 
7034         lockdep_assert_held(&wq_pool_mutex);     6000         lockdep_assert_held(&wq_pool_mutex);
7035                                                  6001 
7036         attrs = alloc_workqueue_attrs();         6002         attrs = alloc_workqueue_attrs();
7037         if (!attrs)                              6003         if (!attrs)
7038                 return NULL;                     6004                 return NULL;
7039                                                  6005 
7040         copy_workqueue_attrs(attrs, wq->unbou    6006         copy_workqueue_attrs(attrs, wq->unbound_attrs);
7041         return attrs;                            6007         return attrs;
7042 }                                                6008 }
7043                                                  6009 
7044 static ssize_t wq_nice_store(struct device *d    6010 static ssize_t wq_nice_store(struct device *dev, struct device_attribute *attr,
7045                              const char *buf,    6011                              const char *buf, size_t count)
7046 {                                                6012 {
7047         struct workqueue_struct *wq = dev_to_    6013         struct workqueue_struct *wq = dev_to_wq(dev);
7048         struct workqueue_attrs *attrs;           6014         struct workqueue_attrs *attrs;
7049         int ret = -ENOMEM;                       6015         int ret = -ENOMEM;
7050                                                  6016 
7051         apply_wqattrs_lock();                    6017         apply_wqattrs_lock();
7052                                                  6018 
7053         attrs = wq_sysfs_prep_attrs(wq);         6019         attrs = wq_sysfs_prep_attrs(wq);
7054         if (!attrs)                              6020         if (!attrs)
7055                 goto out_unlock;                 6021                 goto out_unlock;
7056                                                  6022 
7057         if (sscanf(buf, "%d", &attrs->nice) =    6023         if (sscanf(buf, "%d", &attrs->nice) == 1 &&
7058             attrs->nice >= MIN_NICE && attrs-    6024             attrs->nice >= MIN_NICE && attrs->nice <= MAX_NICE)
7059                 ret = apply_workqueue_attrs_l    6025                 ret = apply_workqueue_attrs_locked(wq, attrs);
7060         else                                     6026         else
7061                 ret = -EINVAL;                   6027                 ret = -EINVAL;
7062                                                  6028 
7063 out_unlock:                                      6029 out_unlock:
7064         apply_wqattrs_unlock();                  6030         apply_wqattrs_unlock();
7065         free_workqueue_attrs(attrs);             6031         free_workqueue_attrs(attrs);
7066         return ret ?: count;                     6032         return ret ?: count;
7067 }                                                6033 }
7068                                                  6034 
7069 static ssize_t wq_cpumask_show(struct device     6035 static ssize_t wq_cpumask_show(struct device *dev,
7070                                struct device_    6036                                struct device_attribute *attr, char *buf)
7071 {                                                6037 {
7072         struct workqueue_struct *wq = dev_to_    6038         struct workqueue_struct *wq = dev_to_wq(dev);
7073         int written;                             6039         int written;
7074                                                  6040 
7075         mutex_lock(&wq->mutex);                  6041         mutex_lock(&wq->mutex);
7076         written = scnprintf(buf, PAGE_SIZE, "    6042         written = scnprintf(buf, PAGE_SIZE, "%*pb\n",
7077                             cpumask_pr_args(w    6043                             cpumask_pr_args(wq->unbound_attrs->cpumask));
7078         mutex_unlock(&wq->mutex);                6044         mutex_unlock(&wq->mutex);
7079         return written;                          6045         return written;
7080 }                                                6046 }
7081                                                  6047 
7082 static ssize_t wq_cpumask_store(struct device    6048 static ssize_t wq_cpumask_store(struct device *dev,
7083                                 struct device    6049                                 struct device_attribute *attr,
7084                                 const char *b    6050                                 const char *buf, size_t count)
7085 {                                                6051 {
7086         struct workqueue_struct *wq = dev_to_    6052         struct workqueue_struct *wq = dev_to_wq(dev);
7087         struct workqueue_attrs *attrs;           6053         struct workqueue_attrs *attrs;
7088         int ret = -ENOMEM;                       6054         int ret = -ENOMEM;
7089                                                  6055 
7090         apply_wqattrs_lock();                    6056         apply_wqattrs_lock();
7091                                                  6057 
7092         attrs = wq_sysfs_prep_attrs(wq);         6058         attrs = wq_sysfs_prep_attrs(wq);
7093         if (!attrs)                              6059         if (!attrs)
7094                 goto out_unlock;                 6060                 goto out_unlock;
7095                                                  6061 
7096         ret = cpumask_parse(buf, attrs->cpuma    6062         ret = cpumask_parse(buf, attrs->cpumask);
7097         if (!ret)                                6063         if (!ret)
7098                 ret = apply_workqueue_attrs_l    6064                 ret = apply_workqueue_attrs_locked(wq, attrs);
7099                                                  6065 
7100 out_unlock:                                      6066 out_unlock:
7101         apply_wqattrs_unlock();                  6067         apply_wqattrs_unlock();
7102         free_workqueue_attrs(attrs);             6068         free_workqueue_attrs(attrs);
7103         return ret ?: count;                     6069         return ret ?: count;
7104 }                                                6070 }
7105                                                  6071 
7106 static ssize_t wq_affn_scope_show(struct devi    6072 static ssize_t wq_affn_scope_show(struct device *dev,
7107                                   struct devi    6073                                   struct device_attribute *attr, char *buf)
7108 {                                                6074 {
7109         struct workqueue_struct *wq = dev_to_    6075         struct workqueue_struct *wq = dev_to_wq(dev);
7110         int written;                             6076         int written;
7111                                                  6077 
7112         mutex_lock(&wq->mutex);                  6078         mutex_lock(&wq->mutex);
7113         if (wq->unbound_attrs->affn_scope ==     6079         if (wq->unbound_attrs->affn_scope == WQ_AFFN_DFL)
7114                 written = scnprintf(buf, PAGE    6080                 written = scnprintf(buf, PAGE_SIZE, "%s (%s)\n",
7115                                     wq_affn_n    6081                                     wq_affn_names[WQ_AFFN_DFL],
7116                                     wq_affn_n    6082                                     wq_affn_names[wq_affn_dfl]);
7117         else                                     6083         else
7118                 written = scnprintf(buf, PAGE    6084                 written = scnprintf(buf, PAGE_SIZE, "%s\n",
7119                                     wq_affn_n    6085                                     wq_affn_names[wq->unbound_attrs->affn_scope]);
7120         mutex_unlock(&wq->mutex);                6086         mutex_unlock(&wq->mutex);
7121                                                  6087 
7122         return written;                          6088         return written;
7123 }                                                6089 }
7124                                                  6090 
7125 static ssize_t wq_affn_scope_store(struct dev    6091 static ssize_t wq_affn_scope_store(struct device *dev,
7126                                    struct dev    6092                                    struct device_attribute *attr,
7127                                    const char    6093                                    const char *buf, size_t count)
7128 {                                                6094 {
7129         struct workqueue_struct *wq = dev_to_    6095         struct workqueue_struct *wq = dev_to_wq(dev);
7130         struct workqueue_attrs *attrs;           6096         struct workqueue_attrs *attrs;
7131         int affn, ret = -ENOMEM;                 6097         int affn, ret = -ENOMEM;
7132                                                  6098 
7133         affn = parse_affn_scope(buf);            6099         affn = parse_affn_scope(buf);
7134         if (affn < 0)                            6100         if (affn < 0)
7135                 return affn;                     6101                 return affn;
7136                                                  6102 
7137         apply_wqattrs_lock();                    6103         apply_wqattrs_lock();
7138         attrs = wq_sysfs_prep_attrs(wq);         6104         attrs = wq_sysfs_prep_attrs(wq);
7139         if (attrs) {                             6105         if (attrs) {
7140                 attrs->affn_scope = affn;        6106                 attrs->affn_scope = affn;
7141                 ret = apply_workqueue_attrs_l    6107                 ret = apply_workqueue_attrs_locked(wq, attrs);
7142         }                                        6108         }
7143         apply_wqattrs_unlock();                  6109         apply_wqattrs_unlock();
7144         free_workqueue_attrs(attrs);             6110         free_workqueue_attrs(attrs);
7145         return ret ?: count;                     6111         return ret ?: count;
7146 }                                                6112 }
7147                                                  6113 
7148 static ssize_t wq_affinity_strict_show(struct    6114 static ssize_t wq_affinity_strict_show(struct device *dev,
7149                                        struct    6115                                        struct device_attribute *attr, char *buf)
7150 {                                                6116 {
7151         struct workqueue_struct *wq = dev_to_    6117         struct workqueue_struct *wq = dev_to_wq(dev);
7152                                                  6118 
7153         return scnprintf(buf, PAGE_SIZE, "%d\    6119         return scnprintf(buf, PAGE_SIZE, "%d\n",
7154                          wq->unbound_attrs->a    6120                          wq->unbound_attrs->affn_strict);
7155 }                                                6121 }
7156                                                  6122 
7157 static ssize_t wq_affinity_strict_store(struc    6123 static ssize_t wq_affinity_strict_store(struct device *dev,
7158                                         struc    6124                                         struct device_attribute *attr,
7159                                         const    6125                                         const char *buf, size_t count)
7160 {                                                6126 {
7161         struct workqueue_struct *wq = dev_to_    6127         struct workqueue_struct *wq = dev_to_wq(dev);
7162         struct workqueue_attrs *attrs;           6128         struct workqueue_attrs *attrs;
7163         int v, ret = -ENOMEM;                    6129         int v, ret = -ENOMEM;
7164                                                  6130 
7165         if (sscanf(buf, "%d", &v) != 1)          6131         if (sscanf(buf, "%d", &v) != 1)
7166                 return -EINVAL;                  6132                 return -EINVAL;
7167                                                  6133 
7168         apply_wqattrs_lock();                    6134         apply_wqattrs_lock();
7169         attrs = wq_sysfs_prep_attrs(wq);         6135         attrs = wq_sysfs_prep_attrs(wq);
7170         if (attrs) {                             6136         if (attrs) {
7171                 attrs->affn_strict = (bool)v;    6137                 attrs->affn_strict = (bool)v;
7172                 ret = apply_workqueue_attrs_l    6138                 ret = apply_workqueue_attrs_locked(wq, attrs);
7173         }                                        6139         }
7174         apply_wqattrs_unlock();                  6140         apply_wqattrs_unlock();
7175         free_workqueue_attrs(attrs);             6141         free_workqueue_attrs(attrs);
7176         return ret ?: count;                     6142         return ret ?: count;
7177 }                                                6143 }
7178                                                  6144 
7179 static struct device_attribute wq_sysfs_unbou    6145 static struct device_attribute wq_sysfs_unbound_attrs[] = {
7180         __ATTR(nice, 0644, wq_nice_show, wq_n    6146         __ATTR(nice, 0644, wq_nice_show, wq_nice_store),
7181         __ATTR(cpumask, 0644, wq_cpumask_show    6147         __ATTR(cpumask, 0644, wq_cpumask_show, wq_cpumask_store),
7182         __ATTR(affinity_scope, 0644, wq_affn_    6148         __ATTR(affinity_scope, 0644, wq_affn_scope_show, wq_affn_scope_store),
7183         __ATTR(affinity_strict, 0644, wq_affi    6149         __ATTR(affinity_strict, 0644, wq_affinity_strict_show, wq_affinity_strict_store),
7184         __ATTR_NULL,                             6150         __ATTR_NULL,
7185 };                                               6151 };
7186                                                  6152 
7187 static const struct bus_type wq_subsys = {    !! 6153 static struct bus_type wq_subsys = {
7188         .name                           = "wo    6154         .name                           = "workqueue",
7189         .dev_groups                     = wq_    6155         .dev_groups                     = wq_sysfs_groups,
7190 };                                               6156 };
7191                                                  6157 
7192 /**                                           !! 6158 static ssize_t wq_unbound_cpumask_show(struct device *dev,
7193  *  workqueue_set_unbound_cpumask - Set the l !! 6159                 struct device_attribute *attr, char *buf)
7194  *  @cpumask: the cpumask to set              << 
7195  *                                            << 
7196  *  The low-level workqueues cpumask is a glo << 
7197  *  the affinity of all unbound workqueues.   << 
7198  *  and apply it to all unbound workqueues an << 
7199  *                                            << 
7200  *  Return:     0       - Success             << 
7201  *              -EINVAL - Invalid @cpumask    << 
7202  *              -ENOMEM - Failed to allocate  << 
7203  */                                           << 
7204 static int workqueue_set_unbound_cpumask(cpum << 
7205 {                                             << 
7206         int ret = -EINVAL;                    << 
7207                                               << 
7208         /*                                    << 
7209          * Not excluding isolated cpus on pur << 
7210          * If the user wishes to include them << 
7211          */                                   << 
7212         cpumask_and(cpumask, cpumask, cpu_pos << 
7213         if (!cpumask_empty(cpumask)) {        << 
7214                 ret = 0;                      << 
7215                 apply_wqattrs_lock();         << 
7216                 if (!cpumask_equal(cpumask, w << 
7217                         ret = workqueue_apply << 
7218                 if (!ret)                     << 
7219                         cpumask_copy(wq_reque << 
7220                 apply_wqattrs_unlock();       << 
7221         }                                     << 
7222                                               << 
7223         return ret;                           << 
7224 }                                             << 
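From userland this global clamp is reached by writing the workqueue subsystem's cpumask attribute (the store handler further below). A hedged userspace sketch, assuming the conventional /sys/devices/virtual/workqueue/cpumask path and a hex mask in the format cpumask_parse() accepts:

#include <stdio.h>

int main(void)
{
	/*
	 * Restrict all unbound workqueues to CPUs 0-3 ("f" as a hex mask).
	 * The path is an assumption based on the virtual workqueue subsystem.
	 */
	FILE *f = fopen("/sys/devices/virtual/workqueue/cpumask", "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	fputs("f\n", f);
	return fclose(f) ? 1 : 0;
}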
7225                                               << 
7226 static ssize_t __wq_cpumask_show(struct devic << 
7227                 struct device_attribute *attr << 
7228 {                                                6160 {
7229         int written;                             6161         int written;
7230                                                  6162 
7231         mutex_lock(&wq_pool_mutex);              6163         mutex_lock(&wq_pool_mutex);
7232         written = scnprintf(buf, PAGE_SIZE, " !! 6164         written = scnprintf(buf, PAGE_SIZE, "%*pb\n",
                                                   >> 6165                             cpumask_pr_args(wq_unbound_cpumask));
7233         mutex_unlock(&wq_pool_mutex);            6166         mutex_unlock(&wq_pool_mutex);
7234                                                  6167 
7235         return written;                          6168         return written;
7236 }                                                6169 }
7237                                                  6170 
7238 static ssize_t cpumask_requested_show(struct  !! 6171 static ssize_t wq_unbound_cpumask_store(struct device *dev,
7239                 struct device_attribute *attr << 
7240 {                                             << 
7241         return __wq_cpumask_show(dev, attr, b << 
7242 }                                             << 
7243 static DEVICE_ATTR_RO(cpumask_requested);     << 
7244                                               << 
7245 static ssize_t cpumask_isolated_show(struct d << 
7246                 struct device_attribute *attr << 
7247 {                                             << 
7248         return __wq_cpumask_show(dev, attr, b << 
7249 }                                             << 
7250 static DEVICE_ATTR_RO(cpumask_isolated);      << 
7251                                               << 
7252 static ssize_t cpumask_show(struct device *de << 
7253                 struct device_attribute *attr << 
7254 {                                             << 
7255         return __wq_cpumask_show(dev, attr, b << 
7256 }                                             << 
7257                                               << 
7258 static ssize_t cpumask_store(struct device *d << 
7259                 struct device_attribute *attr    6172                 struct device_attribute *attr, const char *buf, size_t count)
7260 {                                                6173 {
7261         cpumask_var_t cpumask;                   6174         cpumask_var_t cpumask;
7262         int ret;                                 6175         int ret;
7263                                                  6176 
7264         if (!zalloc_cpumask_var(&cpumask, GFP    6177         if (!zalloc_cpumask_var(&cpumask, GFP_KERNEL))
7265                 return -ENOMEM;                  6178                 return -ENOMEM;
7266                                                  6179 
7267         ret = cpumask_parse(buf, cpumask);       6180         ret = cpumask_parse(buf, cpumask);
7268         if (!ret)                                6181         if (!ret)
7269                 ret = workqueue_set_unbound_c    6182                 ret = workqueue_set_unbound_cpumask(cpumask);
7270                                                  6183 
7271         free_cpumask_var(cpumask);               6184         free_cpumask_var(cpumask);
7272         return ret ? ret : count;                6185         return ret ? ret : count;
7273 }                                                6186 }
7274 static DEVICE_ATTR_RW(cpumask);               << 
7275                                                  6187 
7276 static struct attribute *wq_sysfs_cpumask_att !! 6188 static struct device_attribute wq_sysfs_cpumask_attr =
7277         &dev_attr_cpumask.attr,               !! 6189         __ATTR(cpumask, 0644, wq_unbound_cpumask_show,
7278         &dev_attr_cpumask_requested.attr,     !! 6190                wq_unbound_cpumask_store);
7279         &dev_attr_cpumask_isolated.attr,      << 
7280         NULL,                                 << 
7281 };                                            << 
7282 ATTRIBUTE_GROUPS(wq_sysfs_cpumask);           << 
7283                                                  6191 
7284 static int __init wq_sysfs_init(void)            6192 static int __init wq_sysfs_init(void)
7285 {                                                6193 {
7286         return subsys_virtual_register(&wq_su !! 6194         struct device *dev_root;
                                                   >> 6195         int err;
                                                   >> 6196 
                                                   >> 6197         err = subsys_virtual_register(&wq_subsys, NULL);
                                                   >> 6198         if (err)
                                                   >> 6199                 return err;
                                                   >> 6200 
                                                   >> 6201         dev_root = bus_get_dev_root(&wq_subsys);
                                                   >> 6202         if (dev_root) {
                                                   >> 6203                 err = device_create_file(dev_root, &wq_sysfs_cpumask_attr);
                                                   >> 6204                 put_device(dev_root);
                                                   >> 6205         }
                                                   >> 6206         return err;
7287 }                                                6207 }
7288 core_initcall(wq_sysfs_init);                    6208 core_initcall(wq_sysfs_init);
7289                                                  6209 
7290 static void wq_device_release(struct device *    6210 static void wq_device_release(struct device *dev)
7291 {                                                6211 {
7292         struct wq_device *wq_dev = container_    6212         struct wq_device *wq_dev = container_of(dev, struct wq_device, dev);
7293                                                  6213 
7294         kfree(wq_dev);                           6214         kfree(wq_dev);
7295 }                                                6215 }
7296                                                  6216 
7297 /**                                              6217 /**
7298  * workqueue_sysfs_register - make a workqueu    6218  * workqueue_sysfs_register - make a workqueue visible in sysfs
7299  * @wq: the workqueue to register                6219  * @wq: the workqueue to register
7300  *                                               6220  *
7301  * Expose @wq in sysfs under /sys/bus/workque    6221  * Expose @wq in sysfs under /sys/bus/workqueue/devices.
7302  * alloc_workqueue*() automatically calls thi    6222  * alloc_workqueue*() automatically calls this function if WQ_SYSFS is set
7303  * which is the preferred method.                6223  * which is the preferred method.
7304  *                                               6224  *
7305  * Workqueue user should use this function di    6225  * Workqueue user should use this function directly iff it wants to apply
7306  * workqueue_attrs before making the workqueu    6226  * workqueue_attrs before making the workqueue visible in sysfs; otherwise,
7307  * apply_workqueue_attrs() may race against u    6227  * apply_workqueue_attrs() may race against userland updating the
7308  * attributes.                                   6228  * attributes.
7309  *                                               6229  *
7310  * Return: 0 on success, -errno on failure.      6230  * Return: 0 on success, -errno on failure.
7311  */                                              6231  */
7312 int workqueue_sysfs_register(struct workqueue    6232 int workqueue_sysfs_register(struct workqueue_struct *wq)
7313 {                                                6233 {
7314         struct wq_device *wq_dev;                6234         struct wq_device *wq_dev;
7315         int ret;                                 6235         int ret;
7316                                                  6236 
7317         /*                                       6237         /*
7318          * Adjusting max_active breaks orderi !! 6238          * Adjusting max_active or creating new pwqs by applying
7319          * ordered workqueues.                !! 6239          * attributes breaks ordering guarantee.  Disallow exposing ordered
                                                   >> 6240          * workqueues.
7320          */                                      6241          */
7321         if (WARN_ON(wq->flags & __WQ_ORDERED) !! 6242         if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
7322                 return -EINVAL;                  6243                 return -EINVAL;
7323                                                  6244 
7324         wq->wq_dev = wq_dev = kzalloc(sizeof(    6245         wq->wq_dev = wq_dev = kzalloc(sizeof(*wq_dev), GFP_KERNEL);
7325         if (!wq_dev)                             6246         if (!wq_dev)
7326                 return -ENOMEM;                  6247                 return -ENOMEM;
7327                                                  6248 
7328         wq_dev->wq = wq;                         6249         wq_dev->wq = wq;
7329         wq_dev->dev.bus = &wq_subsys;            6250         wq_dev->dev.bus = &wq_subsys;
7330         wq_dev->dev.release = wq_device_relea    6251         wq_dev->dev.release = wq_device_release;
7331         dev_set_name(&wq_dev->dev, "%s", wq->    6252         dev_set_name(&wq_dev->dev, "%s", wq->name);
7332                                                  6253 
7333         /*                                       6254         /*
7334          * unbound_attrs are created separate    6255          * unbound_attrs are created separately.  Suppress uevent until
7335          * everything is ready.                  6256          * everything is ready.
7336          */                                      6257          */
7337         dev_set_uevent_suppress(&wq_dev->dev,    6258         dev_set_uevent_suppress(&wq_dev->dev, true);
7338                                                  6259 
7339         ret = device_register(&wq_dev->dev);     6260         ret = device_register(&wq_dev->dev);
7340         if (ret) {                               6261         if (ret) {
7341                 put_device(&wq_dev->dev);        6262                 put_device(&wq_dev->dev);
7342                 wq->wq_dev = NULL;               6263                 wq->wq_dev = NULL;
7343                 return ret;                      6264                 return ret;
7344         }                                        6265         }
7345                                                  6266 
7346         if (wq->flags & WQ_UNBOUND) {            6267         if (wq->flags & WQ_UNBOUND) {
7347                 struct device_attribute *attr    6268                 struct device_attribute *attr;
7348                                                  6269 
7349                 for (attr = wq_sysfs_unbound_    6270                 for (attr = wq_sysfs_unbound_attrs; attr->attr.name; attr++) {
7350                         ret = device_create_f    6271                         ret = device_create_file(&wq_dev->dev, attr);
7351                         if (ret) {               6272                         if (ret) {
7352                                 device_unregi    6273                                 device_unregister(&wq_dev->dev);
7353                                 wq->wq_dev =     6274                                 wq->wq_dev = NULL;
7354                                 return ret;      6275                                 return ret;
7355                         }                        6276                         }
7356                 }                                6277                 }
7357         }                                        6278         }
7358                                                  6279 
7359         dev_set_uevent_suppress(&wq_dev->dev,    6280         dev_set_uevent_suppress(&wq_dev->dev, false);
7360         kobject_uevent(&wq_dev->dev.kobj, KOB    6281         kobject_uevent(&wq_dev->dev.kobj, KOBJ_ADD);
7361         return 0;                                6282         return 0;
7362 }                                                6283 }
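A hedged kernel-side sketch of the ordering the comment above asks for: apply workqueue_attrs first, then register the workqueue in sysfs so userland cannot race with the initial configuration. The workqueue name and nice value are illustrative, and whether apply_workqueue_attrs() and workqueue_sysfs_register() are reachable from a given caller (module vs. built-in) depends on the kernel configuration.

#include <linux/workqueue.h>

static struct workqueue_struct *my_wq;		/* illustrative */

static int my_wq_setup(void)
{
	struct workqueue_attrs *attrs;
	int ret;

	/* Deliberately no WQ_SYSFS: register manually below. */
	my_wq = alloc_workqueue("my_wq", WQ_UNBOUND, 0);
	if (!my_wq)
		return -ENOMEM;

	attrs = alloc_workqueue_attrs();
	if (!attrs) {
		ret = -ENOMEM;
		goto err;
	}
	attrs->nice = -10;			/* illustrative tuning */
	ret = apply_workqueue_attrs(my_wq, attrs);
	free_workqueue_attrs(attrs);
	if (ret)
		goto err;

	ret = workqueue_sysfs_register(my_wq);	/* only now expose it */
	if (ret)
		goto err;
	return 0;
err:
	destroy_workqueue(my_wq);
	return ret;
}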
7363                                                  6284 
7364 /**                                              6285 /**
7365  * workqueue_sysfs_unregister - undo workqueu    6286  * workqueue_sysfs_unregister - undo workqueue_sysfs_register()
7366  * @wq: the workqueue to unregister              6287  * @wq: the workqueue to unregister
7367  *                                               6288  *
7368  * If @wq is registered to sysfs by workqueue    6289  * If @wq is registered to sysfs by workqueue_sysfs_register(), unregister.
7369  */                                              6290  */
7370 static void workqueue_sysfs_unregister(struct    6291 static void workqueue_sysfs_unregister(struct workqueue_struct *wq)
7371 {                                                6292 {
7372         struct wq_device *wq_dev = wq->wq_dev    6293         struct wq_device *wq_dev = wq->wq_dev;
7373                                                  6294 
7374         if (!wq->wq_dev)                         6295         if (!wq->wq_dev)
7375                 return;                          6296                 return;
7376                                                  6297 
7377         wq->wq_dev = NULL;                       6298         wq->wq_dev = NULL;
7378         device_unregister(&wq_dev->dev);         6299         device_unregister(&wq_dev->dev);
7379 }                                                6300 }
7380 #else   /* CONFIG_SYSFS */                       6301 #else   /* CONFIG_SYSFS */
7381 static void workqueue_sysfs_unregister(struct    6302 static void workqueue_sysfs_unregister(struct workqueue_struct *wq)     { }
7382 #endif  /* CONFIG_SYSFS */                       6303 #endif  /* CONFIG_SYSFS */
7383                                                  6304 
7384 /*                                               6305 /*
7385  * Workqueue watchdog.                           6306  * Workqueue watchdog.
7386  *                                               6307  *
7387  * Stall may be caused by various bugs - miss    6308  * Stall may be caused by various bugs - missing WQ_MEM_RECLAIM, illegal
7388  * flush dependency, a concurrency managed wo    6309  * flush dependency, a concurrency managed work item which stays RUNNING
7389  * indefinitely.  Workqueue stalls can be ver    6310  * indefinitely.  Workqueue stalls can be very difficult to debug as the
7390  * usual warning mechanisms don't trigger and    6311  * usual warning mechanisms don't trigger and internal workqueue state is
7391  * largely opaque.                               6312  * largely opaque.
7392  *                                               6313  *
7393  * Workqueue watchdog monitors all worker poo    6314  * Workqueue watchdog monitors all worker pools periodically and dumps
7394  * state if some pools failed to make forward    6315  * state if some pools failed to make forward progress for a while where
7395  * forward progress is defined as the first i    6316  * forward progress is defined as the first item on ->worklist changing.
7396  *                                               6317  *
7397  * This mechanism is controlled through the k    6318  * This mechanism is controlled through the kernel parameter
7398  * "workqueue.watchdog_thresh" which can be u    6319  * "workqueue.watchdog_thresh" which can be updated at runtime through the
7399  * corresponding sysfs parameter file.           6320  * corresponding sysfs parameter file.
7400  */                                              6321  */
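A hedged userspace sketch of tuning that parameter at runtime; the same value can be given at boot as workqueue.watchdog_thresh=<seconds>, and the sysfs path below follows the usual module-parameter layout.

#include <stdio.h>

int main(void)
{
	/*
	 * Raise the stall threshold to 60 seconds; per
	 * wq_watchdog_set_thresh(), writing 0 disables the watchdog.
	 */
	FILE *f = fopen("/sys/module/workqueue/parameters/watchdog_thresh", "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	fputs("60\n", f);
	return fclose(f) ? 1 : 0;
}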
7401 #ifdef CONFIG_WQ_WATCHDOG                        6322 #ifdef CONFIG_WQ_WATCHDOG
7402                                                  6323 
7403 static unsigned long wq_watchdog_thresh = 30;    6324 static unsigned long wq_watchdog_thresh = 30;
7404 static struct timer_list wq_watchdog_timer;      6325 static struct timer_list wq_watchdog_timer;
7405                                                  6326 
7406 static unsigned long wq_watchdog_touched = IN    6327 static unsigned long wq_watchdog_touched = INITIAL_JIFFIES;
7407 static DEFINE_PER_CPU(unsigned long, wq_watch    6328 static DEFINE_PER_CPU(unsigned long, wq_watchdog_touched_cpu) = INITIAL_JIFFIES;
7408                                                  6329 
7409 /*                                               6330 /*
7410  * Show workers that might prevent the proces    6331  * Show workers that might prevent the processing of pending work items.
7411  * The only candidates are CPU-bound workers     6332  * The only candidates are CPU-bound workers in the running state.
7412  * Pending work items should be handled by an    6333  * Pending work items should be handled by another idle worker
7413  * in all other situations.                      6334  * in all other situations.
7414  */                                              6335  */
7415 static void show_cpu_pool_hog(struct worker_p    6336 static void show_cpu_pool_hog(struct worker_pool *pool)
7416 {                                                6337 {
7417         struct worker *worker;                   6338         struct worker *worker;
7418         unsigned long irq_flags;              !! 6339         unsigned long flags;
7419         int bkt;                                 6340         int bkt;
7420                                                  6341 
7421         raw_spin_lock_irqsave(&pool->lock, ir !! 6342         raw_spin_lock_irqsave(&pool->lock, flags);
7422                                                  6343 
7423         hash_for_each(pool->busy_hash, bkt, w    6344         hash_for_each(pool->busy_hash, bkt, worker, hentry) {
7424                 if (task_is_running(worker->t    6345                 if (task_is_running(worker->task)) {
7425                         /*                       6346                         /*
7426                          * Defer printing to     6347                          * Defer printing to avoid deadlocks in console
7427                          * drivers that queue    6348                          * drivers that queue work while holding locks
7428                          * also taken in thei    6349                          * also taken in their write paths.
7429                          */                      6350                          */
7430                         printk_deferred_enter    6351                         printk_deferred_enter();
7431                                                  6352 
7432                         pr_info("pool %d:\n",    6353                         pr_info("pool %d:\n", pool->id);
7433                         sched_show_task(worke    6354                         sched_show_task(worker->task);
7434                                                  6355 
7435                         printk_deferred_exit(    6356                         printk_deferred_exit();
7436                 }                                6357                 }
7437         }                                        6358         }
7438                                                  6359 
7439         raw_spin_unlock_irqrestore(&pool->loc !! 6360         raw_spin_unlock_irqrestore(&pool->lock, flags);
7440 }                                                6361 }
7441                                                  6362 
7442 static void show_cpu_pools_hogs(void)            6363 static void show_cpu_pools_hogs(void)
7443 {                                                6364 {
7444         struct worker_pool *pool;                6365         struct worker_pool *pool;
7445         int pi;                                  6366         int pi;
7446                                                  6367 
7447         pr_info("Showing backtraces of runnin    6368         pr_info("Showing backtraces of running workers in stalled CPU-bound worker pools:\n");
7448                                                  6369 
7449         rcu_read_lock();                         6370         rcu_read_lock();
7450                                                  6371 
7451         for_each_pool(pool, pi) {                6372         for_each_pool(pool, pi) {
7452                 if (pool->cpu_stall)             6373                 if (pool->cpu_stall)
7453                         show_cpu_pool_hog(poo    6374                         show_cpu_pool_hog(pool);
7454                                                  6375 
7455         }                                        6376         }
7456                                                  6377 
7457         rcu_read_unlock();                       6378         rcu_read_unlock();
7458 }                                                6379 }
7459                                                  6380 
7460 static void wq_watchdog_reset_touched(void)      6381 static void wq_watchdog_reset_touched(void)
7461 {                                                6382 {
7462         int cpu;                                 6383         int cpu;
7463                                                  6384 
7464         wq_watchdog_touched = jiffies;           6385         wq_watchdog_touched = jiffies;
7465         for_each_possible_cpu(cpu)               6386         for_each_possible_cpu(cpu)
7466                 per_cpu(wq_watchdog_touched_c    6387                 per_cpu(wq_watchdog_touched_cpu, cpu) = jiffies;
7467 }                                                6388 }
7468                                                  6389 
7469 static void wq_watchdog_timer_fn(struct timer    6390 static void wq_watchdog_timer_fn(struct timer_list *unused)
7470 {                                                6391 {
7471         unsigned long thresh = READ_ONCE(wq_w    6392         unsigned long thresh = READ_ONCE(wq_watchdog_thresh) * HZ;
7472         bool lockup_detected = false;            6393         bool lockup_detected = false;
7473         bool cpu_pool_stall = false;             6394         bool cpu_pool_stall = false;
7474         unsigned long now = jiffies;             6395         unsigned long now = jiffies;
7475         struct worker_pool *pool;                6396         struct worker_pool *pool;
7476         int pi;                                  6397         int pi;
7477                                                  6398 
7478         if (!thresh)                             6399         if (!thresh)
7479                 return;                          6400                 return;
7480                                                  6401 
7481         rcu_read_lock();                         6402         rcu_read_lock();
7482                                                  6403 
7483         for_each_pool(pool, pi) {                6404         for_each_pool(pool, pi) {
7484                 unsigned long pool_ts, touche    6405                 unsigned long pool_ts, touched, ts;
7485                                                  6406 
7486                 pool->cpu_stall = false;         6407                 pool->cpu_stall = false;
7487                 if (list_empty(&pool->worklis    6408                 if (list_empty(&pool->worklist))
7488                         continue;                6409                         continue;
7489                                                  6410 
7490                 /*                               6411                 /*
7491                  * If a virtual machine is st    6412                  * If a virtual machine is stopped by the host it can look to
7492                  * the watchdog like a stall.    6413                  * the watchdog like a stall.
7493                  */                              6414                  */
7494                 kvm_check_and_clear_guest_pau    6415                 kvm_check_and_clear_guest_paused();
7495                                                  6416 
7496                 /* get the latest of pool and    6417                 /* get the latest of pool and touched timestamps */
7497                 if (pool->cpu >= 0)              6418                 if (pool->cpu >= 0)
7498                         touched = READ_ONCE(p    6419                         touched = READ_ONCE(per_cpu(wq_watchdog_touched_cpu, pool->cpu));
7499                 else                             6420                 else
7500                         touched = READ_ONCE(w    6421                         touched = READ_ONCE(wq_watchdog_touched);
7501                 pool_ts = READ_ONCE(pool->wat    6422                 pool_ts = READ_ONCE(pool->watchdog_ts);
7502                                                  6423 
7503                 if (time_after(pool_ts, touch    6424                 if (time_after(pool_ts, touched))
7504                         ts = pool_ts;            6425                         ts = pool_ts;
7505                 else                             6426                 else
7506                         ts = touched;            6427                         ts = touched;
7507                                                  6428 
7508                 /* did we stall? */              6429                 /* did we stall? */
7509                 if (time_after(now, ts + thre    6430                 if (time_after(now, ts + thresh)) {
7510                         lockup_detected = tru    6431                         lockup_detected = true;
7511                         if (pool->cpu >= 0 && !! 6432                         if (pool->cpu >= 0) {
7512                                 pool->cpu_sta    6433                                 pool->cpu_stall = true;
7513                                 cpu_pool_stal    6434                                 cpu_pool_stall = true;
7514                         }                        6435                         }
7515                         pr_emerg("BUG: workqu    6436                         pr_emerg("BUG: workqueue lockup - pool");
7516                         pr_cont_pool_info(poo    6437                         pr_cont_pool_info(pool);
7517                         pr_cont(" stuck for %    6438                         pr_cont(" stuck for %us!\n",
7518                                 jiffies_to_ms    6439                                 jiffies_to_msecs(now - pool_ts) / 1000);
7519                 }                                6440                 }
7520                                                  6441 
7521                                                  6442 
7522         }                                        6443         }
7523                                                  6444 
7524         rcu_read_unlock();                       6445         rcu_read_unlock();
7525                                                  6446 
7526         if (lockup_detected)                     6447         if (lockup_detected)
7527                 show_all_workqueues();           6448                 show_all_workqueues();
7528                                                  6449 
7529         if (cpu_pool_stall)                      6450         if (cpu_pool_stall)
7530                 show_cpu_pools_hogs();           6451                 show_cpu_pools_hogs();
7531                                                  6452 
7532         wq_watchdog_reset_touched();             6453         wq_watchdog_reset_touched();
7533         mod_timer(&wq_watchdog_timer, jiffies    6454         mod_timer(&wq_watchdog_timer, jiffies + thresh);
7534 }                                                6455 }
7535                                                  6456 
7536 notrace void wq_watchdog_touch(int cpu)          6457 notrace void wq_watchdog_touch(int cpu)
7537 {                                                6458 {
7538         unsigned long thresh = READ_ONCE(wq_w    6459         unsigned long thresh = READ_ONCE(wq_watchdog_thresh) * HZ;
7539         unsigned long touch_ts = READ_ONCE(wq    6460         unsigned long touch_ts = READ_ONCE(wq_watchdog_touched);
7540         unsigned long now = jiffies;             6461         unsigned long now = jiffies;
7541                                                  6462 
7542         if (cpu >= 0)                            6463         if (cpu >= 0)
7543                 per_cpu(wq_watchdog_touched_c    6464                 per_cpu(wq_watchdog_touched_cpu, cpu) = now;
7544         else                                     6465         else
7545                 WARN_ONCE(1, "%s should be ca    6466                 WARN_ONCE(1, "%s should be called with valid CPU", __func__);
7546                                                  6467 
7547         /* Don't unnecessarily store to globa    6468         /* Don't unnecessarily store to global cacheline */
7548         if (time_after(now, touch_ts + thresh    6469         if (time_after(now, touch_ts + thresh / 4))
7549                 WRITE_ONCE(wq_watchdog_touche    6470                 WRITE_ONCE(wq_watchdog_touched, jiffies);
7550 }                                                6471 }
7551                                                  6472 
7552 static void wq_watchdog_set_thresh(unsigned l    6473 static void wq_watchdog_set_thresh(unsigned long thresh)
7553 {                                                6474 {
7554         wq_watchdog_thresh = 0;                  6475         wq_watchdog_thresh = 0;
7555         del_timer_sync(&wq_watchdog_timer);      6476         del_timer_sync(&wq_watchdog_timer);
7556                                                  6477 
7557         if (thresh) {                            6478         if (thresh) {
7558                 wq_watchdog_thresh = thresh;     6479                 wq_watchdog_thresh = thresh;
7559                 wq_watchdog_reset_touched();     6480                 wq_watchdog_reset_touched();
7560                 mod_timer(&wq_watchdog_timer,    6481                 mod_timer(&wq_watchdog_timer, jiffies + thresh * HZ);
7561         }                                        6482         }
7562 }                                                6483 }
7563                                                  6484 
7564 static int wq_watchdog_param_set_thresh(const    6485 static int wq_watchdog_param_set_thresh(const char *val,
7565                                         const    6486                                         const struct kernel_param *kp)
7566 {                                                6487 {
7567         unsigned long thresh;                    6488         unsigned long thresh;
7568         int ret;                                 6489         int ret;
7569                                                  6490 
7570         ret = kstrtoul(val, 0, &thresh);         6491         ret = kstrtoul(val, 0, &thresh);
7571         if (ret)                                 6492         if (ret)
7572                 return ret;                      6493                 return ret;
7573                                                  6494 
7574         if (system_wq)                           6495         if (system_wq)
7575                 wq_watchdog_set_thresh(thresh    6496                 wq_watchdog_set_thresh(thresh);
7576         else                                     6497         else
7577                 wq_watchdog_thresh = thresh;     6498                 wq_watchdog_thresh = thresh;
7578                                                  6499 
7579         return 0;                                6500         return 0;
7580 }                                                6501 }
7581                                                  6502 
7582 static const struct kernel_param_ops wq_watch    6503 static const struct kernel_param_ops wq_watchdog_thresh_ops = {
7583         .set    = wq_watchdog_param_set_thres    6504         .set    = wq_watchdog_param_set_thresh,
7584         .get    = param_get_ulong,               6505         .get    = param_get_ulong,
7585 };                                               6506 };
7586                                                  6507 
7587 module_param_cb(watchdog_thresh, &wq_watchdog    6508 module_param_cb(watchdog_thresh, &wq_watchdog_thresh_ops, &wq_watchdog_thresh,
7588                 0644);                           6509                 0644);
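The module_param_cb() call above is what exposes the threshold as workqueue.watchdog_thresh: it can be set on the kernel command line or, with mode 0644, at runtime through /sys/module/workqueue/parameters/watchdog_thresh, and the custom ->set hook lets wq_watchdog_set_thresh() re-arm the watchdog timer whenever the value changes. A minimal sketch of the same pattern in a hypothetical module (all demo_* names are invented):

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>

static unsigned long demo_thresh = 30;

static int demo_set_thresh(const char *val, const struct kernel_param *kp)
{
        unsigned long thresh;
        int ret;

        ret = kstrtoul(val, 0, &thresh);
        if (ret)
                return ret;

        /* apply any side effects (re-arm timers, etc.) before publishing */
        demo_thresh = thresh;
        return 0;
}

static const struct kernel_param_ops demo_thresh_ops = {
        .set    = demo_set_thresh,
        .get    = param_get_ulong,
};

/* visible as <modname>.demo_thresh on the command line and under
 * /sys/module/<modname>/parameters/demo_thresh */
module_param_cb(demo_thresh, &demo_thresh_ops, &demo_thresh, 0644);
MODULE_LICENSE("GPL");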
7589                                                  6510 
7590 static void wq_watchdog_init(void)               6511 static void wq_watchdog_init(void)
7591 {                                                6512 {
7592         timer_setup(&wq_watchdog_timer, wq_wa    6513         timer_setup(&wq_watchdog_timer, wq_watchdog_timer_fn, TIMER_DEFERRABLE);
7593         wq_watchdog_set_thresh(wq_watchdog_th    6514         wq_watchdog_set_thresh(wq_watchdog_thresh);
7594 }                                                6515 }
7595                                                  6516 
7596 #else   /* CONFIG_WQ_WATCHDOG */                 6517 #else   /* CONFIG_WQ_WATCHDOG */
7597                                                  6518 
7598 static inline void wq_watchdog_init(void) { }    6519 static inline void wq_watchdog_init(void) { }
7599                                                  6520 
7600 #endif  /* CONFIG_WQ_WATCHDOG */                 6521 #endif  /* CONFIG_WQ_WATCHDOG */
7601                                                  6522 
7602 static void bh_pool_kick_normal(struct irq_wo << 
7603 {                                             << 
7604         raise_softirq_irqoff(TASKLET_SOFTIRQ) << 
7605 }                                             << 
7606                                               << 
7607 static void bh_pool_kick_highpri(struct irq_w << 
7608 {                                             << 
7609         raise_softirq_irqoff(HI_SOFTIRQ);     << 
7610 }                                             << 
7611                                               << 
7612 static void __init restrict_unbound_cpumask(c    6523 static void __init restrict_unbound_cpumask(const char *name, const struct cpumask *mask)
7613 {                                                6524 {
7614         if (!cpumask_intersects(wq_unbound_cp    6525         if (!cpumask_intersects(wq_unbound_cpumask, mask)) {
7615                 pr_warn("workqueue: Restricti    6526                 pr_warn("workqueue: Restricting unbound_cpumask (%*pb) with %s (%*pb) leaves no CPU, ignoring\n",
7616                         cpumask_pr_args(wq_un    6527                         cpumask_pr_args(wq_unbound_cpumask), name, cpumask_pr_args(mask));
7617                 return;                          6528                 return;
7618         }                                        6529         }
7619                                                  6530 
7620         cpumask_and(wq_unbound_cpumask, wq_un    6531         cpumask_and(wq_unbound_cpumask, wq_unbound_cpumask, mask);
7621 }                                                6532 }
7622                                                  6533 
7623 static void __init init_cpu_worker_pool(struc << 
7624 {                                             << 
7625         BUG_ON(init_worker_pool(pool));       << 
7626         pool->cpu = cpu;                      << 
7627         cpumask_copy(pool->attrs->cpumask, cp << 
7628         cpumask_copy(pool->attrs->__pod_cpuma << 
7629         pool->attrs->nice = nice;             << 
7630         pool->attrs->affn_strict = true;      << 
7631         pool->node = cpu_to_node(cpu);        << 
7632                                               << 
7633         /* alloc pool ID */                   << 
7634         mutex_lock(&wq_pool_mutex);           << 
7635         BUG_ON(worker_pool_assign_id(pool));  << 
7636         mutex_unlock(&wq_pool_mutex);         << 
7637 }                                             << 
7638                                               << 
7639 /**                                              6534 /**
7640  * workqueue_init_early - early init for work    6535  * workqueue_init_early - early init for workqueue subsystem
7641  *                                               6536  *
7642  * This is the first step of three-staged wor    6537  * This is the first step of three-staged workqueue subsystem initialization and
7643  * invoked as soon as the bare basics - memor    6538  * invoked as soon as the bare basics - memory allocation, cpumasks and idr are
7644  * up. It sets up all the data structures and    6539  * up. It sets up all the data structures and system workqueues and allows early
7645  * boot code to create workqueues and queue/c    6540  * boot code to create workqueues and queue/cancel work items. Actual work item
7646  * execution starts only after kthreads can b    6541  * execution starts only after kthreads can be created and scheduled right
7647  * before early initcalls.                       6542  * before early initcalls.
7648  */                                              6543  */
7649 void __init workqueue_init_early(void)           6544 void __init workqueue_init_early(void)
7650 {                                                6545 {
7651         struct wq_pod_type *pt = &wq_pod_type    6546         struct wq_pod_type *pt = &wq_pod_types[WQ_AFFN_SYSTEM];
7652         int std_nice[NR_STD_WORKER_POOLS] = {    6547         int std_nice[NR_STD_WORKER_POOLS] = { 0, HIGHPRI_NICE_LEVEL };
7653         void (*irq_work_fns[2])(struct irq_wo << 
7654                                               << 
7655         int i, cpu;                              6548         int i, cpu;
7656                                                  6549 
7657         BUILD_BUG_ON(__alignof__(struct pool_    6550         BUILD_BUG_ON(__alignof__(struct pool_workqueue) < __alignof__(long long));
7658                                                  6551 
7659         BUG_ON(!alloc_cpumask_var(&wq_online_ << 
7660         BUG_ON(!alloc_cpumask_var(&wq_unbound    6552         BUG_ON(!alloc_cpumask_var(&wq_unbound_cpumask, GFP_KERNEL));
7661         BUG_ON(!alloc_cpumask_var(&wq_request << 
7662         BUG_ON(!zalloc_cpumask_var(&wq_isolat << 
7663                                               << 
7664         cpumask_copy(wq_online_cpumask, cpu_o << 
7665         cpumask_copy(wq_unbound_cpumask, cpu_    6553         cpumask_copy(wq_unbound_cpumask, cpu_possible_mask);
7666         restrict_unbound_cpumask("HK_TYPE_WQ"    6554         restrict_unbound_cpumask("HK_TYPE_WQ", housekeeping_cpumask(HK_TYPE_WQ));
7667         restrict_unbound_cpumask("HK_TYPE_DOM    6555         restrict_unbound_cpumask("HK_TYPE_DOMAIN", housekeeping_cpumask(HK_TYPE_DOMAIN));
7668         if (!cpumask_empty(&wq_cmdline_cpumas    6556         if (!cpumask_empty(&wq_cmdline_cpumask))
7669                 restrict_unbound_cpumask("wor    6557                 restrict_unbound_cpumask("workqueue.unbound_cpus", &wq_cmdline_cpumask);
7670                                                  6558 
7671         cpumask_copy(wq_requested_unbound_cpu << 
7672                                               << 
7673         pwq_cache = KMEM_CACHE(pool_workqueue    6559         pwq_cache = KMEM_CACHE(pool_workqueue, SLAB_PANIC);
7674                                                  6560 
7675         unbound_wq_update_pwq_attrs_buf = all !! 6561         wq_update_pod_attrs_buf = alloc_workqueue_attrs();
7676         BUG_ON(!unbound_wq_update_pwq_attrs_b !! 6562         BUG_ON(!wq_update_pod_attrs_buf);
7677                                               << 
7678         /*                                    << 
7679          * If nohz_full is enabled, set power << 
7680          * This allows workqueue items to be  << 
7681          */                                   << 
7682         if (housekeeping_enabled(HK_TYPE_TICK << 
7683                 wq_power_efficient = true;    << 
7684                                                  6563 
7685         /* initialize WQ_AFFN_SYSTEM pods */     6564         /* initialize WQ_AFFN_SYSTEM pods */
7686         pt->pod_cpus = kcalloc(1, sizeof(pt->    6565         pt->pod_cpus = kcalloc(1, sizeof(pt->pod_cpus[0]), GFP_KERNEL);
7687         pt->pod_node = kcalloc(1, sizeof(pt->    6566         pt->pod_node = kcalloc(1, sizeof(pt->pod_node[0]), GFP_KERNEL);
7688         pt->cpu_pod = kcalloc(nr_cpu_ids, siz    6567         pt->cpu_pod = kcalloc(nr_cpu_ids, sizeof(pt->cpu_pod[0]), GFP_KERNEL);
7689         BUG_ON(!pt->pod_cpus || !pt->pod_node    6568         BUG_ON(!pt->pod_cpus || !pt->pod_node || !pt->cpu_pod);
7690                                                  6569 
7691         BUG_ON(!zalloc_cpumask_var_node(&pt->    6570         BUG_ON(!zalloc_cpumask_var_node(&pt->pod_cpus[0], GFP_KERNEL, NUMA_NO_NODE));
7692                                                  6571 
7693         pt->nr_pods = 1;                         6572         pt->nr_pods = 1;
7694         cpumask_copy(pt->pod_cpus[0], cpu_pos    6573         cpumask_copy(pt->pod_cpus[0], cpu_possible_mask);
7695         pt->pod_node[0] = NUMA_NO_NODE;          6574         pt->pod_node[0] = NUMA_NO_NODE;
7696         pt->cpu_pod[0] = 0;                      6575         pt->cpu_pod[0] = 0;
7697                                                  6576 
7698         /* initialize BH and CPU pools */     !! 6577         /* initialize CPU pools */
7699         for_each_possible_cpu(cpu) {             6578         for_each_possible_cpu(cpu) {
7700                 struct worker_pool *pool;        6579                 struct worker_pool *pool;
7701                                                  6580 
7702                 i = 0;                           6581                 i = 0;
7703                 for_each_bh_worker_pool(pool, !! 6582                 for_each_cpu_worker_pool(pool, cpu) {
7704                         init_cpu_worker_pool( !! 6583                         BUG_ON(init_worker_pool(pool));
7705                         pool->flags |= POOL_B !! 6584                         pool->cpu = cpu;
7706                         init_irq_work(bh_pool !! 6585                         cpumask_copy(pool->attrs->cpumask, cpumask_of(cpu));
7707                         i++;                  !! 6586                         cpumask_copy(pool->attrs->__pod_cpumask, cpumask_of(cpu));
7708                 }                             !! 6587                         pool->attrs->nice = std_nice[i++];
                                                   >> 6588                         pool->attrs->affn_strict = true;
                                                   >> 6589                         pool->node = cpu_to_node(cpu);
7709                                                  6590 
7710                 i = 0;                        !! 6591                         /* alloc pool ID */
7711                 for_each_cpu_worker_pool(pool !! 6592                         mutex_lock(&wq_pool_mutex);
7712                         init_cpu_worker_pool( !! 6593                         BUG_ON(worker_pool_assign_id(pool));
                                                   >> 6594                         mutex_unlock(&wq_pool_mutex);
                                                   >> 6595                 }
7713         }                                        6596         }
7714                                                  6597 
7715         /* create default unbound and ordered    6598         /* create default unbound and ordered wq attrs */
7716         for (i = 0; i < NR_STD_WORKER_POOLS;     6599         for (i = 0; i < NR_STD_WORKER_POOLS; i++) {
7717                 struct workqueue_attrs *attrs    6600                 struct workqueue_attrs *attrs;
7718                                                  6601 
7719                 BUG_ON(!(attrs = alloc_workqu    6602                 BUG_ON(!(attrs = alloc_workqueue_attrs()));
7720                 attrs->nice = std_nice[i];       6603                 attrs->nice = std_nice[i];
7721                 unbound_std_wq_attrs[i] = att    6604                 unbound_std_wq_attrs[i] = attrs;
7722                                                  6605 
7723                 /*                               6606                 /*
7724                  * An ordered wq should have     6607                  * An ordered wq should have only one pwq as ordering is
7725                  * guaranteed by max_active w    6608                  * guaranteed by max_active which is enforced by pwqs.
7726                  */                              6609                  */
7727                 BUG_ON(!(attrs = alloc_workqu    6610                 BUG_ON(!(attrs = alloc_workqueue_attrs()));
7728                 attrs->nice = std_nice[i];       6611                 attrs->nice = std_nice[i];
7729                 attrs->ordered = true;           6612                 attrs->ordered = true;
7730                 ordered_wq_attrs[i] = attrs;     6613                 ordered_wq_attrs[i] = attrs;
7731         }                                        6614         }
7732                                                  6615 
7733         system_wq = alloc_workqueue("events",    6616         system_wq = alloc_workqueue("events", 0, 0);
7734         system_highpri_wq = alloc_workqueue("    6617         system_highpri_wq = alloc_workqueue("events_highpri", WQ_HIGHPRI, 0);
7735         system_long_wq = alloc_workqueue("eve    6618         system_long_wq = alloc_workqueue("events_long", 0, 0);
7736         system_unbound_wq = alloc_workqueue("    6619         system_unbound_wq = alloc_workqueue("events_unbound", WQ_UNBOUND,
7737                                             W    6620                                             WQ_MAX_ACTIVE);
7738         system_freezable_wq = alloc_workqueue    6621         system_freezable_wq = alloc_workqueue("events_freezable",
7739                                                  6622                                               WQ_FREEZABLE, 0);
7740         system_power_efficient_wq = alloc_wor    6623         system_power_efficient_wq = alloc_workqueue("events_power_efficient",
7741                                                  6624                                               WQ_POWER_EFFICIENT, 0);
7742         system_freezable_power_efficient_wq = !! 6625         system_freezable_power_efficient_wq = alloc_workqueue("events_freezable_power_efficient",
7743                                                  6626                                               WQ_FREEZABLE | WQ_POWER_EFFICIENT,
7744                                                  6627                                               0);
7745         system_bh_wq = alloc_workqueue("event << 
7746         system_bh_highpri_wq = alloc_workqueu << 
7747                                               << 
7748         BUG_ON(!system_wq || !system_highpri_    6628         BUG_ON(!system_wq || !system_highpri_wq || !system_long_wq ||
7749                !system_unbound_wq || !system_    6629                !system_unbound_wq || !system_freezable_wq ||
7750                !system_power_efficient_wq ||     6630                !system_power_efficient_wq ||
7751                !system_freezable_power_effici !! 6631                !system_freezable_power_efficient_wq);
7752                !system_bh_wq || !system_bh_hi << 
7753 }                                                6632 }
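As the kernel-doc above notes, once workqueue_init_early() has run, boot code may already create workqueues and queue or cancel work items; the callbacks simply sit on the pools until workqueue_init() creates the first kworkers shortly before early initcalls. A small hypothetical sketch of what that allows (demo_* names invented, not kernel code):

#include <linux/init.h>
#include <linux/printk.h>
#include <linux/workqueue.h>

static void demo_fn(struct work_struct *work)
{
        pr_info("executes only after workqueue_init() has created kworkers\n");
}
static DECLARE_WORK(demo_work, demo_fn);

void __init demo_early_boot_hook(void)
{
        /* legal right after workqueue_init_early(), even though no
         * worker threads exist yet; the item just stays queued */
        schedule_work(&demo_work);
}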
7754                                                  6633 
7755 static void __init wq_cpu_intensive_thresh_in    6634 static void __init wq_cpu_intensive_thresh_init(void)
7756 {                                                6635 {
7757         unsigned long thresh;                    6636         unsigned long thresh;
7758         unsigned long bogo;                      6637         unsigned long bogo;
7759                                                  6638 
7760         pwq_release_worker = kthread_create_w    6639         pwq_release_worker = kthread_create_worker(0, "pool_workqueue_release");
7761         BUG_ON(IS_ERR(pwq_release_worker));      6640         BUG_ON(IS_ERR(pwq_release_worker));
7762                                                  6641 
7763         /* if the user set it to a specific v    6642         /* if the user set it to a specific value, keep it */
7764         if (wq_cpu_intensive_thresh_us != ULO    6643         if (wq_cpu_intensive_thresh_us != ULONG_MAX)
7765                 return;                          6644                 return;
7766                                                  6645 
7767         /*                                       6646         /*
7768          * The default of 10ms is derived fro    6647          * The default of 10ms is derived from the fact that most modern (as of
7769          * 2023) processors can do a lot in 1    6648          * 2023) processors can do a lot in 10ms and that it's just below what
7770          * most consider human-perceivable. H    6649          * most consider human-perceivable. However, the kernel also runs on a
7771          * lot slower CPUs including microcon    6650          * lot slower CPUs including microcontrollers where the threshold is way
7772          * too low.                              6651          * too low.
7773          *                                       6652          *
7774          * Let's scale up the threshold up to     6653          * Let's scale up the threshold up to 1 second if BogoMIPS is below 4000.
7775          * This is by no means accurate but i    6654          * This is by no means accurate but it doesn't have to be. The mechanism
7776          * is still useful even when the thre    6655          * is still useful even when the threshold is fully scaled up. Also, as
7777          * the reports would usually be appli    6656          * the reports would usually be applicable to everyone, some machines
7778          * operating on longer thresholds won    6657          * operating on longer thresholds won't significantly diminish their
7779          * usefulness.                           6658          * usefulness.
7780          */                                      6659          */
7781         thresh = 10 * USEC_PER_MSEC;             6660         thresh = 10 * USEC_PER_MSEC;
7782                                                  6661 
7783         /* see init/calibrate.c for lpj -> Bo    6662         /* see init/calibrate.c for lpj -> BogoMIPS calculation */
7784         bogo = max_t(unsigned long, loops_per    6663         bogo = max_t(unsigned long, loops_per_jiffy / 500000 * HZ, 1);
7785         if (bogo < 4000)                         6664         if (bogo < 4000)
7786                 thresh = min_t(unsigned long,    6665                 thresh = min_t(unsigned long, thresh * 4000 / bogo, USEC_PER_SEC);
7787                                                  6666 
7788         pr_debug("wq_cpu_intensive_thresh: lp    6667         pr_debug("wq_cpu_intensive_thresh: lpj=%lu BogoMIPS=%lu thresh_us=%lu\n",
7789                  loops_per_jiffy, bogo, thres    6668                  loops_per_jiffy, bogo, thresh);
7790                                                  6669 
7791         wq_cpu_intensive_thresh_us = thresh;     6670         wq_cpu_intensive_thresh_us = thresh;
7792 }                                                6671 }
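To make the BogoMIPS scaling above concrete, here is the same arithmetic restated as a stand-alone helper plus a few worked values (the inputs are assumed, purely illustrative; this is not kernel code):

/* Stand-alone restatement of the scaling done in wq_cpu_intensive_thresh_init(). */
static unsigned long scaled_thresh_us(unsigned long bogo)
{
        unsigned long thresh = 10 * 1000;       /* 10ms default, in usecs */

        if (bogo < 4000) {
                thresh = thresh * 4000 / bogo;
                if (thresh > 1000000)           /* cap at 1 second */
                        thresh = 1000000;
        }
        return thresh;
}
/*
 * bogo >= 4000  ->   10000us  (10ms default kept)
 * bogo  = 2000  ->   20000us  (20ms)
 * bogo  =  500  ->   80000us  (80ms)
 * bogo  =   20  -> 1000000us  (capped at 1s)
 */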
7793                                                  6672 
7794 /**                                              6673 /**
7795  * workqueue_init - bring workqueue subsystem    6674  * workqueue_init - bring workqueue subsystem fully online
7796  *                                               6675  *
7797  * This is the second step of three-staged wo    6676  * This is the second step of three-staged workqueue subsystem initialization
7798  * and invoked as soon as kthreads can be cre    6677  * and invoked as soon as kthreads can be created and scheduled. Workqueues have
7799  * been created and work items queued on them    6678  * been created and work items queued on them, but there are no kworkers
7800  * executing the work items yet. Populate the    6679  * executing the work items yet. Populate the worker pools with the initial
7801  * workers and enable future kworker creation    6680  * workers and enable future kworker creations.
7802  */                                              6681  */
7803 void __init workqueue_init(void)                 6682 void __init workqueue_init(void)
7804 {                                                6683 {
7805         struct workqueue_struct *wq;             6684         struct workqueue_struct *wq;
7806         struct worker_pool *pool;                6685         struct worker_pool *pool;
7807         int cpu, bkt;                            6686         int cpu, bkt;
7808                                                  6687 
7809         wq_cpu_intensive_thresh_init();          6688         wq_cpu_intensive_thresh_init();
7810                                                  6689 
7811         mutex_lock(&wq_pool_mutex);              6690         mutex_lock(&wq_pool_mutex);
7812                                                  6691 
7813         /*                                       6692         /*
7814          * Per-cpu pools created earlier coul    6693          * Per-cpu pools created earlier could be missing node hint. Fix them
7815          * up. Also, create a rescuer for wor    6694          * up. Also, create a rescuer for workqueues that requested it.
7816          */                                      6695          */
7817         for_each_possible_cpu(cpu) {             6696         for_each_possible_cpu(cpu) {
7818                 for_each_bh_worker_pool(pool, !! 6697                 for_each_cpu_worker_pool(pool, cpu) {
7819                         pool->node = cpu_to_n << 
7820                 for_each_cpu_worker_pool(pool << 
7821                         pool->node = cpu_to_n    6698                         pool->node = cpu_to_node(cpu);
                                                   >> 6699                 }
7822         }                                        6700         }
7823                                                  6701 
7824         list_for_each_entry(wq, &workqueues,     6702         list_for_each_entry(wq, &workqueues, list) {
7825                 WARN(init_rescuer(wq),           6703                 WARN(init_rescuer(wq),
7826                      "workqueue: failed to cr    6704                      "workqueue: failed to create early rescuer for %s",
7827                      wq->name);                  6705                      wq->name);
7828         }                                        6706         }
7829                                                  6707 
7830         mutex_unlock(&wq_pool_mutex);            6708         mutex_unlock(&wq_pool_mutex);
7831                                                  6709 
7832         /*                                    !! 6710         /* create the initial workers */
7833          * Create the initial workers. A BH p << 
7834          * represents the shared BH execution << 
7835          * affected by hotplug events. Create << 
7836          * possible CPUs here.                << 
7837          */                                   << 
7838         for_each_possible_cpu(cpu)            << 
7839                 for_each_bh_worker_pool(pool, << 
7840                         BUG_ON(!create_worker << 
7841                                               << 
7842         for_each_online_cpu(cpu) {               6711         for_each_online_cpu(cpu) {
7843                 for_each_cpu_worker_pool(pool    6712                 for_each_cpu_worker_pool(pool, cpu) {
7844                         pool->flags &= ~POOL_    6713                         pool->flags &= ~POOL_DISASSOCIATED;
7845                         BUG_ON(!create_worker    6714                         BUG_ON(!create_worker(pool));
7846                 }                                6715                 }
7847         }                                        6716         }
7848                                                  6717 
7849         hash_for_each(unbound_pool_hash, bkt,    6718         hash_for_each(unbound_pool_hash, bkt, pool, hash_node)
7850                 BUG_ON(!create_worker(pool));    6719                 BUG_ON(!create_worker(pool));
7851                                                  6720 
7852         wq_online = true;                        6721         wq_online = true;
7853         wq_watchdog_init();                      6722         wq_watchdog_init();
7854 }                                                6723 }
7855                                                  6724 
7856 /*                                               6725 /*
7857  * Initialize @pt by first initializing @pt->    6726  * Initialize @pt by first initializing @pt->cpu_pod[] with pod IDs according to
7858  * @cpu_shares_pod(). Each subset of CPUs tha    6727  * @cpu_shares_pod(). Each subset of CPUs that share a pod is assigned a unique
7859  * and consecutive pod ID. The rest of @pt is    6728  * and consecutive pod ID. The rest of @pt is initialized accordingly.
7860  */                                              6729  */
7861 static void __init init_pod_type(struct wq_po    6730 static void __init init_pod_type(struct wq_pod_type *pt,
7862                                  bool (*cpus_    6731                                  bool (*cpus_share_pod)(int, int))
7863 {                                                6732 {
7864         int cur, pre, cpu, pod;                  6733         int cur, pre, cpu, pod;
7865                                                  6734 
7866         pt->nr_pods = 0;                         6735         pt->nr_pods = 0;
7867                                                  6736 
7868         /* init @pt->cpu_pod[] according to @    6737         /* init @pt->cpu_pod[] according to @cpus_share_pod() */
7869         pt->cpu_pod = kcalloc(nr_cpu_ids, siz    6738         pt->cpu_pod = kcalloc(nr_cpu_ids, sizeof(pt->cpu_pod[0]), GFP_KERNEL);
7870         BUG_ON(!pt->cpu_pod);                    6739         BUG_ON(!pt->cpu_pod);
7871                                                  6740 
7872         for_each_possible_cpu(cur) {             6741         for_each_possible_cpu(cur) {
7873                 for_each_possible_cpu(pre) {     6742                 for_each_possible_cpu(pre) {
7874                         if (pre >= cur) {        6743                         if (pre >= cur) {
7875                                 pt->cpu_pod[c    6744                                 pt->cpu_pod[cur] = pt->nr_pods++;
7876                                 break;           6745                                 break;
7877                         }                        6746                         }
7878                         if (cpus_share_pod(cu    6747                         if (cpus_share_pod(cur, pre)) {
7879                                 pt->cpu_pod[c    6748                                 pt->cpu_pod[cur] = pt->cpu_pod[pre];
7880                                 break;           6749                                 break;
7881                         }                        6750                         }
7882                 }                                6751                 }
7883         }                                        6752         }
7884                                                  6753 
7885         /* init the rest to match @pt->cpu_po    6754         /* init the rest to match @pt->cpu_pod[] */
7886         pt->pod_cpus = kcalloc(pt->nr_pods, s    6755         pt->pod_cpus = kcalloc(pt->nr_pods, sizeof(pt->pod_cpus[0]), GFP_KERNEL);
7887         pt->pod_node = kcalloc(pt->nr_pods, s    6756         pt->pod_node = kcalloc(pt->nr_pods, sizeof(pt->pod_node[0]), GFP_KERNEL);
7888         BUG_ON(!pt->pod_cpus || !pt->pod_node    6757         BUG_ON(!pt->pod_cpus || !pt->pod_node);
7889                                                  6758 
7890         for (pod = 0; pod < pt->nr_pods; pod+    6759         for (pod = 0; pod < pt->nr_pods; pod++)
7891                 BUG_ON(!zalloc_cpumask_var(&p    6760                 BUG_ON(!zalloc_cpumask_var(&pt->pod_cpus[pod], GFP_KERNEL));
7892                                                  6761 
7893         for_each_possible_cpu(cpu) {             6762         for_each_possible_cpu(cpu) {
7894                 cpumask_set_cpu(cpu, pt->pod_    6763                 cpumask_set_cpu(cpu, pt->pod_cpus[pt->cpu_pod[cpu]]);
7895                 pt->pod_node[pt->cpu_pod[cpu]    6764                 pt->pod_node[pt->cpu_pod[cpu]] = cpu_to_node(cpu);
7896         }                                        6765         }
7897 }                                                6766 }
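A worked example may help: on a hypothetical 4-CPU machine where CPUs 0/1 and CPUs 2/3 each share a pod, the double loop above yields nr_pods = 2 and cpu_pod[] = {0, 0, 1, 1}, after which pod_cpus[0] = {0,1} and pod_cpus[1] = {2,3}. The following stand-alone program (not kernel code) replays just the pod-ID assignment:

/* Illustration of init_pod_type()'s pod-ID assignment for an assumed
 * 4-CPU topology where CPU pairs (0,1) and (2,3) share a pod. */
#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 4

static bool cpus_share_pod(int a, int b)
{
        return a / 2 == b / 2;          /* 0,1 -> one pod; 2,3 -> another */
}

int main(void)
{
        int cpu_pod[NR_CPUS], nr_pods = 0;

        for (int cur = 0; cur < NR_CPUS; cur++) {
                for (int pre = 0; pre < NR_CPUS; pre++) {
                        if (pre >= cur) {
                                cpu_pod[cur] = nr_pods++;    /* first CPU of a new pod */
                                break;
                        }
                        if (cpus_share_pod(cur, pre)) {
                                cpu_pod[cur] = cpu_pod[pre]; /* join earlier CPU's pod */
                                break;
                        }
                }
        }

        /* prints: nr_pods=2 cpu_pod=[0 0 1 1] */
        printf("nr_pods=%d cpu_pod=[", nr_pods);
        for (int cpu = 0; cpu < NR_CPUS; cpu++)
                printf("%d%s", cpu_pod[cpu], cpu == NR_CPUS - 1 ? "]\n" : " ");
        return 0;
}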
7898                                                  6767 
7899 static bool __init cpus_dont_share(int cpu0,     6768 static bool __init cpus_dont_share(int cpu0, int cpu1)
7900 {                                                6769 {
7901         return false;                            6770         return false;
7902 }                                                6771 }
7903                                                  6772 
7904 static bool __init cpus_share_smt(int cpu0, i    6773 static bool __init cpus_share_smt(int cpu0, int cpu1)
7905 {                                                6774 {
7906 #ifdef CONFIG_SCHED_SMT                          6775 #ifdef CONFIG_SCHED_SMT
7907         return cpumask_test_cpu(cpu0, cpu_smt    6776         return cpumask_test_cpu(cpu0, cpu_smt_mask(cpu1));
7908 #else                                            6777 #else
7909         return false;                            6778         return false;
7910 #endif                                           6779 #endif
7911 }                                                6780 }
7912                                                  6781 
7913 static bool __init cpus_share_numa(int cpu0,     6782 static bool __init cpus_share_numa(int cpu0, int cpu1)
7914 {                                                6783 {
7915         return cpu_to_node(cpu0) == cpu_to_no    6784         return cpu_to_node(cpu0) == cpu_to_node(cpu1);
7916 }                                                6785 }
7917                                                  6786 
7918 /**                                              6787 /**
7919  * workqueue_init_topology - initialize CPU p    6788  * workqueue_init_topology - initialize CPU pods for unbound workqueues
7920  *                                               6789  *
7921  * This is the third step of three-staged wor !! 6790  * This is the third step of three-staged workqueue subsystem initialization and
7922  * invoked after SMP and topology information    6791  * invoked after SMP and topology information are fully initialized. It
7923  * initializes the unbound CPU pods according    6792  * initializes the unbound CPU pods accordingly.
7924  */                                              6793  */
7925 void __init workqueue_init_topology(void)        6794 void __init workqueue_init_topology(void)
7926 {                                                6795 {
7927         struct workqueue_struct *wq;             6796         struct workqueue_struct *wq;
7928         int cpu;                                 6797         int cpu;
7929                                                  6798 
7930         init_pod_type(&wq_pod_types[WQ_AFFN_C    6799         init_pod_type(&wq_pod_types[WQ_AFFN_CPU], cpus_dont_share);
7931         init_pod_type(&wq_pod_types[WQ_AFFN_S    6800         init_pod_type(&wq_pod_types[WQ_AFFN_SMT], cpus_share_smt);
7932         init_pod_type(&wq_pod_types[WQ_AFFN_C    6801         init_pod_type(&wq_pod_types[WQ_AFFN_CACHE], cpus_share_cache);
7933         init_pod_type(&wq_pod_types[WQ_AFFN_N    6802         init_pod_type(&wq_pod_types[WQ_AFFN_NUMA], cpus_share_numa);
7934                                                  6803 
7935         wq_topo_initialized = true;           << 
7936                                               << 
7937         mutex_lock(&wq_pool_mutex);              6804         mutex_lock(&wq_pool_mutex);
7938                                                  6805 
7939         /*                                       6806         /*
7940          * Workqueues allocated earlier would    6807          * Workqueues allocated earlier would have all CPUs sharing the default
7941          * worker pool. Explicitly call unbou !! 6808          * worker pool. Explicitly call wq_update_pod() on all workqueue and CPU
7942          * and CPU combinations to apply per- !! 6809          * combinations to apply per-pod sharing.
7943          */                                      6810          */
7944         list_for_each_entry(wq, &workqueues,     6811         list_for_each_entry(wq, &workqueues, list) {
7945                 for_each_online_cpu(cpu)      !! 6812                 for_each_online_cpu(cpu) {
7946                         unbound_wq_update_pwq !! 6813                         wq_update_pod(wq, cpu, cpu, true);
7947                 if (wq->flags & WQ_UNBOUND) { << 
7948                         mutex_lock(&wq->mutex << 
7949                         wq_update_node_max_ac << 
7950                         mutex_unlock(&wq->mut << 
7951                 }                                6814                 }
7952         }                                        6815         }
7953                                                  6816 
7954         mutex_unlock(&wq_pool_mutex);            6817         mutex_unlock(&wq_pool_mutex);
7955 }                                                6818 }
7956                                                  6819 
7957 void __warn_flushing_systemwide_wq(void)         6820 void __warn_flushing_systemwide_wq(void)
7958 {                                                6821 {
7959         pr_warn("WARNING: Flushing system-wid    6822         pr_warn("WARNING: Flushing system-wide workqueues will be prohibited in near future.\n");
7960         dump_stack();                            6823         dump_stack();
7961 }                                                6824 }
7962 EXPORT_SYMBOL(__warn_flushing_systemwide_wq);    6825 EXPORT_SYMBOL(__warn_flushing_systemwide_wq);
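The warning above targets callers that flush system_wq (or another system-wide workqueue) and thereby end up waiting on every unrelated user of that queue. The usual remedy, sketched below with hypothetical demo_* names, is to give the code its own workqueue and flush only that:

#include <linux/errno.h>
#include <linux/workqueue.h>

static struct workqueue_struct *demo_wq;

static void demo_fn(struct work_struct *work) { }
static DECLARE_WORK(demo_work, demo_fn);

static int demo_init(void)
{
        demo_wq = alloc_workqueue("demo_wq", 0, 0);
        if (!demo_wq)
                return -ENOMEM;
        queue_work(demo_wq, &demo_work);
        return 0;
}

static void demo_exit(void)
{
        /* waits only for this code's work items, not every system_wq user */
        flush_workqueue(demo_wq);
        destroy_workqueue(demo_wq);
}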
7963                                                  6826 
7964 static int __init workqueue_unbound_cpus_setu    6827 static int __init workqueue_unbound_cpus_setup(char *str)
7965 {                                                6828 {
7966         if (cpulist_parse(str, &wq_cmdline_cp    6829         if (cpulist_parse(str, &wq_cmdline_cpumask) < 0) {
7967                 cpumask_clear(&wq_cmdline_cpu    6830                 cpumask_clear(&wq_cmdline_cpumask);
7968                 pr_warn("workqueue.unbound_cp    6831                 pr_warn("workqueue.unbound_cpus: incorrect CPU range, using default\n");
7969         }                                        6832         }
7970                                                  6833 
7971         return 1;                                6834         return 1;
7972 }                                                6835 }
7973 __setup("workqueue.unbound_cpus=", workqueue_    6836 __setup("workqueue.unbound_cpus=", workqueue_unbound_cpus_setup);
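The __setup() hook above consumes the workqueue.unbound_cpus= boot parameter, which takes a cpulist and, via restrict_unbound_cpumask() during workqueue_init_early(), confines unbound workqueue workers to those CPUs. For example (an illustrative command-line fragment):

        workqueue.unbound_cpus=0-3,8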
7974                                                  6837 
