~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

TOMOYO Linux Cross Reference
Linux/kernel/workqueue.c

Version: ~ [ linux-6.11-rc3 ] ~ [ linux-6.10.4 ] ~ [ linux-6.9.12 ] ~ [ linux-6.8.12 ] ~ [ linux-6.7.12 ] ~ [ linux-6.6.45 ] ~ [ linux-6.5.13 ] ~ [ linux-6.4.16 ] ~ [ linux-6.3.13 ] ~ [ linux-6.2.16 ] ~ [ linux-6.1.104 ] ~ [ linux-6.0.19 ] ~ [ linux-5.19.17 ] ~ [ linux-5.18.19 ] ~ [ linux-5.17.15 ] ~ [ linux-5.16.20 ] ~ [ linux-5.15.164 ] ~ [ linux-5.14.21 ] ~ [ linux-5.13.19 ] ~ [ linux-5.12.19 ] ~ [ linux-5.11.22 ] ~ [ linux-5.10.223 ] ~ [ linux-5.9.16 ] ~ [ linux-5.8.18 ] ~ [ linux-5.7.19 ] ~ [ linux-5.6.19 ] ~ [ linux-5.5.19 ] ~ [ linux-5.4.281 ] ~ [ linux-5.3.18 ] ~ [ linux-5.2.21 ] ~ [ linux-5.1.21 ] ~ [ linux-5.0.21 ] ~ [ linux-4.20.17 ] ~ [ linux-4.19.319 ] ~ [ linux-4.18.20 ] ~ [ linux-4.17.19 ] ~ [ linux-4.16.18 ] ~ [ linux-4.15.18 ] ~ [ linux-4.14.336 ] ~ [ linux-4.13.16 ] ~ [ linux-4.12.14 ] ~ [ linux-4.11.12 ] ~ [ linux-4.10.17 ] ~ [ linux-4.9.337 ] ~ [ linux-4.4.302 ] ~ [ linux-3.10.108 ] ~ [ linux-2.6.32.71 ] ~ [ linux-2.6.0 ] ~ [ linux-2.4.37.11 ] ~ [ unix-v6-master ] ~ [ ccs-tools-1.8.9 ] ~ [ policy-sample ] ~
Architecture: ~ [ i386 ] ~ [ alpha ] ~ [ m68k ] ~ [ mips ] ~ [ ppc ] ~ [ sparc ] ~ [ sparc64 ] ~

  1 // SPDX-License-Identifier: GPL-2.0-only
  2 /*
  3  * kernel/workqueue.c - generic async execution with shared worker pool
  4  *
  5  * Copyright (C) 2002           Ingo Molnar
  6  *
  7  *   Derived from the taskqueue/keventd code by:
  8  *     David Woodhouse <dwmw2@infradead.org>
  9  *     Andrew Morton
 10  *     Kai Petzke <wpp@marie.physik.tu-berlin.de>
 11  *     Theodore Ts'o <tytso@mit.edu>
 12  *
 13  * Made to use alloc_percpu by Christoph Lameter.
 14  *
 15  * Copyright (C) 2010           SUSE Linux Products GmbH
 16  * Copyright (C) 2010           Tejun Heo <tj@kernel.org>
 17  *
 18  * This is the generic async execution mechanism.  Work items as are
 19  * executed in process context.  The worker pool is shared and
 20  * automatically managed.  There are two worker pools for each CPU (one for
 21  * normal work items and the other for high priority ones) and some extra
 22  * pools for workqueues which are not bound to any specific CPU - the
 23  * number of these backing pools is dynamic.
 24  *
 25  * Please read Documentation/core-api/workqueue.rst for details.
 26  */
 27 
 28 #include <linux/export.h>
 29 #include <linux/kernel.h>
 30 #include <linux/sched.h>
 31 #include <linux/init.h>
 32 #include <linux/interrupt.h>
 33 #include <linux/signal.h>
 34 #include <linux/completion.h>
 35 #include <linux/workqueue.h>
 36 #include <linux/slab.h>
 37 #include <linux/cpu.h>
 38 #include <linux/notifier.h>
 39 #include <linux/kthread.h>
 40 #include <linux/hardirq.h>
 41 #include <linux/mempolicy.h>
 42 #include <linux/freezer.h>
 43 #include <linux/debug_locks.h>
 44 #include <linux/lockdep.h>
 45 #include <linux/idr.h>
 46 #include <linux/jhash.h>
 47 #include <linux/hashtable.h>
 48 #include <linux/rculist.h>
 49 #include <linux/nodemask.h>
 50 #include <linux/moduleparam.h>
 51 #include <linux/uaccess.h>
 52 #include <linux/sched/isolation.h>
 53 #include <linux/sched/debug.h>
 54 #include <linux/nmi.h>
 55 #include <linux/kvm_para.h>
 56 #include <linux/delay.h>
 57 #include <linux/irq_work.h>
 58 
 59 #include "workqueue_internal.h"
 60 
 61 enum worker_pool_flags {
 62         /*
 63          * worker_pool flags
 64          *
 65          * A bound pool is either associated or disassociated with its CPU.
 66          * While associated (!DISASSOCIATED), all workers are bound to the
 67          * CPU and none has %WORKER_UNBOUND set and concurrency management
 68          * is in effect.
 69          *
 70          * While DISASSOCIATED, the cpu may be offline and all workers have
 71          * %WORKER_UNBOUND set and concurrency management disabled, and may
 72          * be executing on any CPU.  The pool behaves as an unbound one.
 73          *
 74          * Note that DISASSOCIATED should be flipped only while holding
 75          * wq_pool_attach_mutex to avoid changing binding state while
 76          * worker_attach_to_pool() is in progress.
 77          *
 78          * As there can only be one concurrent BH execution context per CPU, a
 79          * BH pool is per-CPU and always DISASSOCIATED.
 80          */
 81         POOL_BH                 = 1 << 0,       /* is a BH pool */
 82         POOL_MANAGER_ACTIVE     = 1 << 1,       /* being managed */
 83         POOL_DISASSOCIATED      = 1 << 2,       /* cpu can't serve workers */
 84         POOL_BH_DRAINING        = 1 << 3,       /* draining after CPU offline */
 85 };
 86 
 87 enum worker_flags {
 88         /* worker flags */
 89         WORKER_DIE              = 1 << 1,       /* die die die */
 90         WORKER_IDLE             = 1 << 2,       /* is idle */
 91         WORKER_PREP             = 1 << 3,       /* preparing to run works */
 92         WORKER_CPU_INTENSIVE    = 1 << 6,       /* cpu intensive */
 93         WORKER_UNBOUND          = 1 << 7,       /* worker is unbound */
 94         WORKER_REBOUND          = 1 << 8,       /* worker was rebound */
 95 
 96         WORKER_NOT_RUNNING      = WORKER_PREP | WORKER_CPU_INTENSIVE |
 97                                   WORKER_UNBOUND | WORKER_REBOUND,
 98 };
 99 
100 enum work_cancel_flags {
101         WORK_CANCEL_DELAYED     = 1 << 0,       /* canceling a delayed_work */
102         WORK_CANCEL_DISABLE     = 1 << 1,       /* canceling to disable */
103 };
104 
105 enum wq_internal_consts {
106         NR_STD_WORKER_POOLS     = 2,            /* # standard pools per cpu */
107 
108         UNBOUND_POOL_HASH_ORDER = 6,            /* hashed by pool->attrs */
109         BUSY_WORKER_HASH_ORDER  = 6,            /* 64 pointers */
110 
111         MAX_IDLE_WORKERS_RATIO  = 4,            /* 1/4 of busy can be idle */
112         IDLE_WORKER_TIMEOUT     = 300 * HZ,     /* keep idle ones for 5 mins */
113 
114         MAYDAY_INITIAL_TIMEOUT  = HZ / 100 >= 2 ? HZ / 100 : 2,
115                                                 /* call for help after 10ms
116                                                    (min two ticks) */
117         MAYDAY_INTERVAL         = HZ / 10,      /* and then every 100ms */
118         CREATE_COOLDOWN         = HZ,           /* time to breath after fail */
119 
120         /*
121          * Rescue workers are used only on emergencies and shared by
122          * all cpus.  Give MIN_NICE.
123          */
124         RESCUER_NICE_LEVEL      = MIN_NICE,
125         HIGHPRI_NICE_LEVEL      = MIN_NICE,
126 
127         WQ_NAME_LEN             = 32,
128         WORKER_ID_LEN           = 10 + WQ_NAME_LEN, /* "kworker/R-" + WQ_NAME_LEN */
129 };
130 
131 /*
132  * We don't want to trap softirq for too long. See MAX_SOFTIRQ_TIME and
133  * MAX_SOFTIRQ_RESTART in kernel/softirq.c. These are macros because
134  * msecs_to_jiffies() can't be an initializer.
135  */
136 #define BH_WORKER_JIFFIES       msecs_to_jiffies(2)
137 #define BH_WORKER_RESTARTS      10
138 
139 /*
140  * Structure fields follow one of the following exclusion rules.
141  *
142  * I: Modifiable by initialization/destruction paths and read-only for
143  *    everyone else.
144  *
145  * P: Preemption protected.  Disabling preemption is enough and should
146  *    only be modified and accessed from the local cpu.
147  *
148  * L: pool->lock protected.  Access with pool->lock held.
149  *
150  * LN: pool->lock and wq_node_nr_active->lock protected for writes. Either for
151  *     reads.
152  *
153  * K: Only modified by worker while holding pool->lock. Can be safely read by
154  *    self, while holding pool->lock or from IRQ context if %current is the
155  *    kworker.
156  *
157  * S: Only modified by worker self.
158  *
159  * A: wq_pool_attach_mutex protected.
160  *
161  * PL: wq_pool_mutex protected.
162  *
163  * PR: wq_pool_mutex protected for writes.  RCU protected for reads.
164  *
165  * PW: wq_pool_mutex and wq->mutex protected for writes.  Either for reads.
166  *
167  * PWR: wq_pool_mutex and wq->mutex protected for writes.  Either or
168  *      RCU for reads.
169  *
170  * WQ: wq->mutex protected.
171  *
172  * WR: wq->mutex protected for writes.  RCU protected for reads.
173  *
174  * WO: wq->mutex protected for writes. Updated with WRITE_ONCE() and can be read
175  *     with READ_ONCE() without locking.
176  *
177  * MD: wq_mayday_lock protected.
178  *
179  * WD: Used internally by the watchdog.
180  */
181 
182 /* struct worker is defined in workqueue_internal.h */
183 
184 struct worker_pool {
185         raw_spinlock_t          lock;           /* the pool lock */
186         int                     cpu;            /* I: the associated cpu */
187         int                     node;           /* I: the associated node ID */
188         int                     id;             /* I: pool ID */
189         unsigned int            flags;          /* L: flags */
190 
191         unsigned long           watchdog_ts;    /* L: watchdog timestamp */
192         bool                    cpu_stall;      /* WD: stalled cpu bound pool */
193 
194         /*
195          * The counter is incremented in a process context on the associated CPU
196          * w/ preemption disabled, and decremented or reset in the same context
197          * but w/ pool->lock held. The readers grab pool->lock and are
198          * guaranteed to see if the counter reached zero.
199          */
200         int                     nr_running;
201 
202         struct list_head        worklist;       /* L: list of pending works */
203 
204         int                     nr_workers;     /* L: total number of workers */
205         int                     nr_idle;        /* L: currently idle workers */
206 
207         struct list_head        idle_list;      /* L: list of idle workers */
208         struct timer_list       idle_timer;     /* L: worker idle timeout */
209         struct work_struct      idle_cull_work; /* L: worker idle cleanup */
210 
211         struct timer_list       mayday_timer;     /* L: SOS timer for workers */
212 
213         /* a workers is either on busy_hash or idle_list, or the manager */
214         DECLARE_HASHTABLE(busy_hash, BUSY_WORKER_HASH_ORDER);
215                                                 /* L: hash of busy workers */
216 
217         struct worker           *manager;       /* L: purely informational */
218         struct list_head        workers;        /* A: attached workers */
219 
220         struct ida              worker_ida;     /* worker IDs for task name */
221 
222         struct workqueue_attrs  *attrs;         /* I: worker attributes */
223         struct hlist_node       hash_node;      /* PL: unbound_pool_hash node */
224         int                     refcnt;         /* PL: refcnt for unbound pools */
225 
226         /*
227          * Destruction of pool is RCU protected to allow dereferences
228          * from get_work_pool().
229          */
230         struct rcu_head         rcu;
231 };
232 
233 /*
234  * Per-pool_workqueue statistics. These can be monitored using
235  * tools/workqueue/wq_monitor.py.
236  */
237 enum pool_workqueue_stats {
238         PWQ_STAT_STARTED,       /* work items started execution */
239         PWQ_STAT_COMPLETED,     /* work items completed execution */
240         PWQ_STAT_CPU_TIME,      /* total CPU time consumed */
241         PWQ_STAT_CPU_INTENSIVE, /* wq_cpu_intensive_thresh_us violations */
242         PWQ_STAT_CM_WAKEUP,     /* concurrency-management worker wakeups */
243         PWQ_STAT_REPATRIATED,   /* unbound workers brought back into scope */
244         PWQ_STAT_MAYDAY,        /* maydays to rescuer */
245         PWQ_STAT_RESCUED,       /* linked work items executed by rescuer */
246 
247         PWQ_NR_STATS,
248 };
249 
250 /*
251  * The per-pool workqueue.  While queued, bits below WORK_PWQ_SHIFT
252  * of work_struct->data are used for flags and the remaining high bits
253  * point to the pwq; thus, pwqs need to be aligned at two's power of the
254  * number of flag bits.
255  */
256 struct pool_workqueue {
257         struct worker_pool      *pool;          /* I: the associated pool */
258         struct workqueue_struct *wq;            /* I: the owning workqueue */
259         int                     work_color;     /* L: current color */
260         int                     flush_color;    /* L: flushing color */
261         int                     refcnt;         /* L: reference count */
262         int                     nr_in_flight[WORK_NR_COLORS];
263                                                 /* L: nr of in_flight works */
264         bool                    plugged;        /* L: execution suspended */
265 
266         /*
267          * nr_active management and WORK_STRUCT_INACTIVE:
268          *
269          * When pwq->nr_active >= max_active, new work item is queued to
270          * pwq->inactive_works instead of pool->worklist and marked with
271          * WORK_STRUCT_INACTIVE.
272          *
273          * All work items marked with WORK_STRUCT_INACTIVE do not participate in
274          * nr_active and all work items in pwq->inactive_works are marked with
275          * WORK_STRUCT_INACTIVE. But not all WORK_STRUCT_INACTIVE work items are
276          * in pwq->inactive_works. Some of them are ready to run in
277          * pool->worklist or worker->scheduled. Those work itmes are only struct
278          * wq_barrier which is used for flush_work() and should not participate
279          * in nr_active. For non-barrier work item, it is marked with
280          * WORK_STRUCT_INACTIVE iff it is in pwq->inactive_works.
281          */
282         int                     nr_active;      /* L: nr of active works */
283         struct list_head        inactive_works; /* L: inactive works */
284         struct list_head        pending_node;   /* LN: node on wq_node_nr_active->pending_pwqs */
285         struct list_head        pwqs_node;      /* WR: node on wq->pwqs */
286         struct list_head        mayday_node;    /* MD: node on wq->maydays */
287 
288         u64                     stats[PWQ_NR_STATS];
289 
290         /*
291          * Release of unbound pwq is punted to a kthread_worker. See put_pwq()
292          * and pwq_release_workfn() for details. pool_workqueue itself is also
293          * RCU protected so that the first pwq can be determined without
294          * grabbing wq->mutex.
295          */
296         struct kthread_work     release_work;
297         struct rcu_head         rcu;
298 } __aligned(1 << WORK_STRUCT_PWQ_SHIFT);
299 
300 /*
301  * Structure used to wait for workqueue flush.
302  */
303 struct wq_flusher {
304         struct list_head        list;           /* WQ: list of flushers */
305         int                     flush_color;    /* WQ: flush color waiting for */
306         struct completion       done;           /* flush completion */
307 };
308 
309 struct wq_device;
310 
311 /*
312  * Unlike in a per-cpu workqueue where max_active limits its concurrency level
313  * on each CPU, in an unbound workqueue, max_active applies to the whole system.
314  * As sharing a single nr_active across multiple sockets can be very expensive,
315  * the counting and enforcement is per NUMA node.
316  *
317  * The following struct is used to enforce per-node max_active. When a pwq wants
318  * to start executing a work item, it should increment ->nr using
319  * tryinc_node_nr_active(). If acquisition fails due to ->nr already being over
320  * ->max, the pwq is queued on ->pending_pwqs. As in-flight work items finish
321  * and decrement ->nr, node_activate_pending_pwq() activates the pending pwqs in
322  * round-robin order.
323  */
324 struct wq_node_nr_active {
325         int                     max;            /* per-node max_active */
326         atomic_t                nr;             /* per-node nr_active */
327         raw_spinlock_t          lock;           /* nests inside pool locks */
328         struct list_head        pending_pwqs;   /* LN: pwqs with inactive works */
329 };
330 
331 /*
332  * The externally visible workqueue.  It relays the issued work items to
333  * the appropriate worker_pool through its pool_workqueues.
334  */
335 struct workqueue_struct {
336         struct list_head        pwqs;           /* WR: all pwqs of this wq */
337         struct list_head        list;           /* PR: list of all workqueues */
338 
339         struct mutex            mutex;          /* protects this wq */
340         int                     work_color;     /* WQ: current work color */
341         int                     flush_color;    /* WQ: current flush color */
342         atomic_t                nr_pwqs_to_flush; /* flush in progress */
343         struct wq_flusher       *first_flusher; /* WQ: first flusher */
344         struct list_head        flusher_queue;  /* WQ: flush waiters */
345         struct list_head        flusher_overflow; /* WQ: flush overflow list */
346 
347         struct list_head        maydays;        /* MD: pwqs requesting rescue */
348         struct worker           *rescuer;       /* MD: rescue worker */
349 
350         int                     nr_drainers;    /* WQ: drain in progress */
351 
352         /* See alloc_workqueue() function comment for info on min/max_active */
353         int                     max_active;     /* WO: max active works */
354         int                     min_active;     /* WO: min active works */
355         int                     saved_max_active; /* WQ: saved max_active */
356         int                     saved_min_active; /* WQ: saved min_active */
357 
358         struct workqueue_attrs  *unbound_attrs; /* PW: only for unbound wqs */
359         struct pool_workqueue __rcu *dfl_pwq;   /* PW: only for unbound wqs */
360 
361 #ifdef CONFIG_SYSFS
362         struct wq_device        *wq_dev;        /* I: for sysfs interface */
363 #endif
364 #ifdef CONFIG_LOCKDEP
365         char                    *lock_name;
366         struct lock_class_key   key;
367         struct lockdep_map      lockdep_map;
368 #endif
369         char                    name[WQ_NAME_LEN]; /* I: workqueue name */
370 
371         /*
372          * Destruction of workqueue_struct is RCU protected to allow walking
373          * the workqueues list without grabbing wq_pool_mutex.
374          * This is used to dump all workqueues from sysrq.
375          */
376         struct rcu_head         rcu;
377 
378         /* hot fields used during command issue, aligned to cacheline */
379         unsigned int            flags ____cacheline_aligned; /* WQ: WQ_* flags */
380         struct pool_workqueue __percpu __rcu **cpu_pwq; /* I: per-cpu pwqs */
381         struct wq_node_nr_active *node_nr_active[]; /* I: per-node nr_active */
382 };
383 
384 /*
385  * Each pod type describes how CPUs should be grouped for unbound workqueues.
386  * See the comment above workqueue_attrs->affn_scope.
387  */
388 struct wq_pod_type {
389         int                     nr_pods;        /* number of pods */
390         cpumask_var_t           *pod_cpus;      /* pod -> cpus */
391         int                     *pod_node;      /* pod -> node */
392         int                     *cpu_pod;       /* cpu -> pod */
393 };
394 
395 struct work_offq_data {
396         u32                     pool_id;
397         u32                     disable;
398         u32                     flags;
399 };
400 
401 static const char *wq_affn_names[WQ_AFFN_NR_TYPES] = {
402         [WQ_AFFN_DFL]           = "default",
403         [WQ_AFFN_CPU]           = "cpu",
404         [WQ_AFFN_SMT]           = "smt",
405         [WQ_AFFN_CACHE]         = "cache",
406         [WQ_AFFN_NUMA]          = "numa",
407         [WQ_AFFN_SYSTEM]        = "system",
408 };
409 
410 /*
411  * Per-cpu work items which run for longer than the following threshold are
412  * automatically considered CPU intensive and excluded from concurrency
413  * management to prevent them from noticeably delaying other per-cpu work items.
414  * ULONG_MAX indicates that the user hasn't overridden it with a boot parameter.
415  * The actual value is initialized in wq_cpu_intensive_thresh_init().
416  */
417 static unsigned long wq_cpu_intensive_thresh_us = ULONG_MAX;
418 module_param_named(cpu_intensive_thresh_us, wq_cpu_intensive_thresh_us, ulong, 0644);
419 #ifdef CONFIG_WQ_CPU_INTENSIVE_REPORT
420 static unsigned int wq_cpu_intensive_warning_thresh = 4;
421 module_param_named(cpu_intensive_warning_thresh, wq_cpu_intensive_warning_thresh, uint, 0644);
422 #endif
423 
424 /* see the comment above the definition of WQ_POWER_EFFICIENT */
425 static bool wq_power_efficient = IS_ENABLED(CONFIG_WQ_POWER_EFFICIENT_DEFAULT);
426 module_param_named(power_efficient, wq_power_efficient, bool, 0444);
427 
428 static bool wq_online;                  /* can kworkers be created yet? */
429 static bool wq_topo_initialized __read_mostly = false;
430 
431 static struct kmem_cache *pwq_cache;
432 
433 static struct wq_pod_type wq_pod_types[WQ_AFFN_NR_TYPES];
434 static enum wq_affn_scope wq_affn_dfl = WQ_AFFN_CACHE;
435 
436 /* buf for wq_update_unbound_pod_attrs(), protected by CPU hotplug exclusion */
437 static struct workqueue_attrs *unbound_wq_update_pwq_attrs_buf;
438 
439 static DEFINE_MUTEX(wq_pool_mutex);     /* protects pools and workqueues list */
440 static DEFINE_MUTEX(wq_pool_attach_mutex); /* protects worker attach/detach */
441 static DEFINE_RAW_SPINLOCK(wq_mayday_lock);     /* protects wq->maydays list */
442 /* wait for manager to go away */
443 static struct rcuwait manager_wait = __RCUWAIT_INITIALIZER(manager_wait);
444 
445 static LIST_HEAD(workqueues);           /* PR: list of all workqueues */
446 static bool workqueue_freezing;         /* PL: have wqs started freezing? */
447 
448 /* PL: mirror the cpu_online_mask excluding the CPU in the midst of hotplugging */
449 static cpumask_var_t wq_online_cpumask;
450 
451 /* PL&A: allowable cpus for unbound wqs and work items */
452 static cpumask_var_t wq_unbound_cpumask;
453 
454 /* PL: user requested unbound cpumask via sysfs */
455 static cpumask_var_t wq_requested_unbound_cpumask;
456 
457 /* PL: isolated cpumask to be excluded from unbound cpumask */
458 static cpumask_var_t wq_isolated_cpumask;
459 
460 /* for further constrain wq_unbound_cpumask by cmdline parameter*/
461 static struct cpumask wq_cmdline_cpumask __initdata;
462 
463 /* CPU where unbound work was last round robin scheduled from this CPU */
464 static DEFINE_PER_CPU(int, wq_rr_cpu_last);
465 
466 /*
467  * Local execution of unbound work items is no longer guaranteed.  The
468  * following always forces round-robin CPU selection on unbound work items
469  * to uncover usages which depend on it.
470  */
471 #ifdef CONFIG_DEBUG_WQ_FORCE_RR_CPU
472 static bool wq_debug_force_rr_cpu = true;
473 #else
474 static bool wq_debug_force_rr_cpu = false;
475 #endif
476 module_param_named(debug_force_rr_cpu, wq_debug_force_rr_cpu, bool, 0644);
477 
478 /* to raise softirq for the BH worker pools on other CPUs */
479 static DEFINE_PER_CPU_SHARED_ALIGNED(struct irq_work [NR_STD_WORKER_POOLS],
480                                      bh_pool_irq_works);
481 
482 /* the BH worker pools */
483 static DEFINE_PER_CPU_SHARED_ALIGNED(struct worker_pool [NR_STD_WORKER_POOLS],
484                                      bh_worker_pools);
485 
486 /* the per-cpu worker pools */
487 static DEFINE_PER_CPU_SHARED_ALIGNED(struct worker_pool [NR_STD_WORKER_POOLS],
488                                      cpu_worker_pools);
489 
490 static DEFINE_IDR(worker_pool_idr);     /* PR: idr of all pools */
491 
492 /* PL: hash of all unbound pools keyed by pool->attrs */
493 static DEFINE_HASHTABLE(unbound_pool_hash, UNBOUND_POOL_HASH_ORDER);
494 
495 /* I: attributes used when instantiating standard unbound pools on demand */
496 static struct workqueue_attrs *unbound_std_wq_attrs[NR_STD_WORKER_POOLS];
497 
498 /* I: attributes used when instantiating ordered pools on demand */
499 static struct workqueue_attrs *ordered_wq_attrs[NR_STD_WORKER_POOLS];
500 
501 /*
502  * I: kthread_worker to release pwq's. pwq release needs to be bounced to a
503  * process context while holding a pool lock. Bounce to a dedicated kthread
504  * worker to avoid A-A deadlocks.
505  */
506 static struct kthread_worker *pwq_release_worker __ro_after_init;
507 
508 struct workqueue_struct *system_wq __ro_after_init;
509 EXPORT_SYMBOL(system_wq);
510 struct workqueue_struct *system_highpri_wq __ro_after_init;
511 EXPORT_SYMBOL_GPL(system_highpri_wq);
512 struct workqueue_struct *system_long_wq __ro_after_init;
513 EXPORT_SYMBOL_GPL(system_long_wq);
514 struct workqueue_struct *system_unbound_wq __ro_after_init;
515 EXPORT_SYMBOL_GPL(system_unbound_wq);
516 struct workqueue_struct *system_freezable_wq __ro_after_init;
517 EXPORT_SYMBOL_GPL(system_freezable_wq);
518 struct workqueue_struct *system_power_efficient_wq __ro_after_init;
519 EXPORT_SYMBOL_GPL(system_power_efficient_wq);
520 struct workqueue_struct *system_freezable_power_efficient_wq __ro_after_init;
521 EXPORT_SYMBOL_GPL(system_freezable_power_efficient_wq);
522 struct workqueue_struct *system_bh_wq;
523 EXPORT_SYMBOL_GPL(system_bh_wq);
524 struct workqueue_struct *system_bh_highpri_wq;
525 EXPORT_SYMBOL_GPL(system_bh_highpri_wq);
526 
527 static int worker_thread(void *__worker);
528 static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
529 static void show_pwq(struct pool_workqueue *pwq);
530 static void show_one_worker_pool(struct worker_pool *pool);
531 
532 #define CREATE_TRACE_POINTS
533 #include <trace/events/workqueue.h>
534 
535 #define assert_rcu_or_pool_mutex()                                      \
536         RCU_LOCKDEP_WARN(!rcu_read_lock_any_held() &&                   \
537                          !lockdep_is_held(&wq_pool_mutex),              \
538                          "RCU or wq_pool_mutex should be held")
539 
540 #define assert_rcu_or_wq_mutex_or_pool_mutex(wq)                        \
541         RCU_LOCKDEP_WARN(!rcu_read_lock_any_held() &&                   \
542                          !lockdep_is_held(&wq->mutex) &&                \
543                          !lockdep_is_held(&wq_pool_mutex),              \
544                          "RCU, wq->mutex or wq_pool_mutex should be held")
545 
546 #define for_each_bh_worker_pool(pool, cpu)                              \
547         for ((pool) = &per_cpu(bh_worker_pools, cpu)[0];                \
548              (pool) < &per_cpu(bh_worker_pools, cpu)[NR_STD_WORKER_POOLS]; \
549              (pool)++)
550 
551 #define for_each_cpu_worker_pool(pool, cpu)                             \
552         for ((pool) = &per_cpu(cpu_worker_pools, cpu)[0];               \
553              (pool) < &per_cpu(cpu_worker_pools, cpu)[NR_STD_WORKER_POOLS]; \
554              (pool)++)
555 
556 /**
557  * for_each_pool - iterate through all worker_pools in the system
558  * @pool: iteration cursor
559  * @pi: integer used for iteration
560  *
561  * This must be called either with wq_pool_mutex held or RCU read
562  * locked.  If the pool needs to be used beyond the locking in effect, the
563  * caller is responsible for guaranteeing that the pool stays online.
564  *
565  * The if/else clause exists only for the lockdep assertion and can be
566  * ignored.
567  */
568 #define for_each_pool(pool, pi)                                         \
569         idr_for_each_entry(&worker_pool_idr, pool, pi)                  \
570                 if (({ assert_rcu_or_pool_mutex(); false; })) { }       \
571                 else
572 
573 /**
574  * for_each_pool_worker - iterate through all workers of a worker_pool
575  * @worker: iteration cursor
576  * @pool: worker_pool to iterate workers of
577  *
578  * This must be called with wq_pool_attach_mutex.
579  *
580  * The if/else clause exists only for the lockdep assertion and can be
581  * ignored.
582  */
583 #define for_each_pool_worker(worker, pool)                              \
584         list_for_each_entry((worker), &(pool)->workers, node)           \
585                 if (({ lockdep_assert_held(&wq_pool_attach_mutex); false; })) { } \
586                 else
587 
588 /**
589  * for_each_pwq - iterate through all pool_workqueues of the specified workqueue
590  * @pwq: iteration cursor
591  * @wq: the target workqueue
592  *
593  * This must be called either with wq->mutex held or RCU read locked.
594  * If the pwq needs to be used beyond the locking in effect, the caller is
595  * responsible for guaranteeing that the pwq stays online.
596  *
597  * The if/else clause exists only for the lockdep assertion and can be
598  * ignored.
599  */
600 #define for_each_pwq(pwq, wq)                                           \
601         list_for_each_entry_rcu((pwq), &(wq)->pwqs, pwqs_node,          \
602                                  lockdep_is_held(&(wq->mutex)))
603 
604 #ifdef CONFIG_DEBUG_OBJECTS_WORK
605 
606 static const struct debug_obj_descr work_debug_descr;
607 
608 static void *work_debug_hint(void *addr)
609 {
610         return ((struct work_struct *) addr)->func;
611 }
612 
613 static bool work_is_static_object(void *addr)
614 {
615         struct work_struct *work = addr;
616 
617         return test_bit(WORK_STRUCT_STATIC_BIT, work_data_bits(work));
618 }
619 
620 /*
621  * fixup_init is called when:
622  * - an active object is initialized
623  */
624 static bool work_fixup_init(void *addr, enum debug_obj_state state)
625 {
626         struct work_struct *work = addr;
627 
628         switch (state) {
629         case ODEBUG_STATE_ACTIVE:
630                 cancel_work_sync(work);
631                 debug_object_init(work, &work_debug_descr);
632                 return true;
633         default:
634                 return false;
635         }
636 }
637 
638 /*
639  * fixup_free is called when:
640  * - an active object is freed
641  */
642 static bool work_fixup_free(void *addr, enum debug_obj_state state)
643 {
644         struct work_struct *work = addr;
645 
646         switch (state) {
647         case ODEBUG_STATE_ACTIVE:
648                 cancel_work_sync(work);
649                 debug_object_free(work, &work_debug_descr);
650                 return true;
651         default:
652                 return false;
653         }
654 }
655 
656 static const struct debug_obj_descr work_debug_descr = {
657         .name           = "work_struct",
658         .debug_hint     = work_debug_hint,
659         .is_static_object = work_is_static_object,
660         .fixup_init     = work_fixup_init,
661         .fixup_free     = work_fixup_free,
662 };
663 
664 static inline void debug_work_activate(struct work_struct *work)
665 {
666         debug_object_activate(work, &work_debug_descr);
667 }
668 
669 static inline void debug_work_deactivate(struct work_struct *work)
670 {
671         debug_object_deactivate(work, &work_debug_descr);
672 }
673 
674 void __init_work(struct work_struct *work, int onstack)
675 {
676         if (onstack)
677                 debug_object_init_on_stack(work, &work_debug_descr);
678         else
679                 debug_object_init(work, &work_debug_descr);
680 }
681 EXPORT_SYMBOL_GPL(__init_work);
682 
683 void destroy_work_on_stack(struct work_struct *work)
684 {
685         debug_object_free(work, &work_debug_descr);
686 }
687 EXPORT_SYMBOL_GPL(destroy_work_on_stack);
688 
689 void destroy_delayed_work_on_stack(struct delayed_work *work)
690 {
691         destroy_timer_on_stack(&work->timer);
692         debug_object_free(&work->work, &work_debug_descr);
693 }
694 EXPORT_SYMBOL_GPL(destroy_delayed_work_on_stack);
695 
696 #else
697 static inline void debug_work_activate(struct work_struct *work) { }
698 static inline void debug_work_deactivate(struct work_struct *work) { }
699 #endif
700 
701 /**
702  * worker_pool_assign_id - allocate ID and assign it to @pool
703  * @pool: the pool pointer of interest
704  *
705  * Returns 0 if ID in [0, WORK_OFFQ_POOL_NONE) is allocated and assigned
706  * successfully, -errno on failure.
707  */
708 static int worker_pool_assign_id(struct worker_pool *pool)
709 {
710         int ret;
711 
712         lockdep_assert_held(&wq_pool_mutex);
713 
714         ret = idr_alloc(&worker_pool_idr, pool, 0, WORK_OFFQ_POOL_NONE,
715                         GFP_KERNEL);
716         if (ret >= 0) {
717                 pool->id = ret;
718                 return 0;
719         }
720         return ret;
721 }
722 
723 static struct pool_workqueue __rcu **
724 unbound_pwq_slot(struct workqueue_struct *wq, int cpu)
725 {
726        if (cpu >= 0)
727                return per_cpu_ptr(wq->cpu_pwq, cpu);
728        else
729                return &wq->dfl_pwq;
730 }
731 
732 /* @cpu < 0 for dfl_pwq */
733 static struct pool_workqueue *unbound_pwq(struct workqueue_struct *wq, int cpu)
734 {
735         return rcu_dereference_check(*unbound_pwq_slot(wq, cpu),
736                                      lockdep_is_held(&wq_pool_mutex) ||
737                                      lockdep_is_held(&wq->mutex));
738 }
739 
740 /**
741  * unbound_effective_cpumask - effective cpumask of an unbound workqueue
742  * @wq: workqueue of interest
743  *
744  * @wq->unbound_attrs->cpumask contains the cpumask requested by the user which
745  * is masked with wq_unbound_cpumask to determine the effective cpumask. The
746  * default pwq is always mapped to the pool with the current effective cpumask.
747  */
748 static struct cpumask *unbound_effective_cpumask(struct workqueue_struct *wq)
749 {
750         return unbound_pwq(wq, -1)->pool->attrs->__pod_cpumask;
751 }
752 
753 static unsigned int work_color_to_flags(int color)
754 {
755         return color << WORK_STRUCT_COLOR_SHIFT;
756 }
757 
758 static int get_work_color(unsigned long work_data)
759 {
760         return (work_data >> WORK_STRUCT_COLOR_SHIFT) &
761                 ((1 << WORK_STRUCT_COLOR_BITS) - 1);
762 }
763 
764 static int work_next_color(int color)
765 {
766         return (color + 1) % WORK_NR_COLORS;
767 }
768 
769 static unsigned long pool_offq_flags(struct worker_pool *pool)
770 {
771         return (pool->flags & POOL_BH) ? WORK_OFFQ_BH : 0;
772 }
773 
774 /*
775  * While queued, %WORK_STRUCT_PWQ is set and non flag bits of a work's data
776  * contain the pointer to the queued pwq.  Once execution starts, the flag
777  * is cleared and the high bits contain OFFQ flags and pool ID.
778  *
779  * set_work_pwq(), set_work_pool_and_clear_pending() and mark_work_canceling()
780  * can be used to set the pwq, pool or clear work->data. These functions should
781  * only be called while the work is owned - ie. while the PENDING bit is set.
782  *
783  * get_work_pool() and get_work_pwq() can be used to obtain the pool or pwq
784  * corresponding to a work.  Pool is available once the work has been
785  * queued anywhere after initialization until it is sync canceled.  pwq is
786  * available only while the work item is queued.
787  */
788 static inline void set_work_data(struct work_struct *work, unsigned long data)
789 {
790         WARN_ON_ONCE(!work_pending(work));
791         atomic_long_set(&work->data, data | work_static(work));
792 }
793 
794 static void set_work_pwq(struct work_struct *work, struct pool_workqueue *pwq,
795                          unsigned long flags)
796 {
797         set_work_data(work, (unsigned long)pwq | WORK_STRUCT_PENDING |
798                       WORK_STRUCT_PWQ | flags);
799 }
800 
801 static void set_work_pool_and_keep_pending(struct work_struct *work,
802                                            int pool_id, unsigned long flags)
803 {
804         set_work_data(work, ((unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT) |
805                       WORK_STRUCT_PENDING | flags);
806 }
807 
808 static void set_work_pool_and_clear_pending(struct work_struct *work,
809                                             int pool_id, unsigned long flags)
810 {
811         /*
812          * The following wmb is paired with the implied mb in
813          * test_and_set_bit(PENDING) and ensures all updates to @work made
814          * here are visible to and precede any updates by the next PENDING
815          * owner.
816          */
817         smp_wmb();
818         set_work_data(work, ((unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT) |
819                       flags);
820         /*
821          * The following mb guarantees that previous clear of a PENDING bit
822          * will not be reordered with any speculative LOADS or STORES from
823          * work->current_func, which is executed afterwards.  This possible
824          * reordering can lead to a missed execution on attempt to queue
825          * the same @work.  E.g. consider this case:
826          *
827          *   CPU#0                         CPU#1
828          *   ----------------------------  --------------------------------
829          *
830          * 1  STORE event_indicated
831          * 2  queue_work_on() {
832          * 3    test_and_set_bit(PENDING)
833          * 4 }                             set_..._and_clear_pending() {
834          * 5                                 set_work_data() # clear bit
835          * 6                                 smp_mb()
836          * 7                               work->current_func() {
837          * 8                                  LOAD event_indicated
838          *                                 }
839          *
840          * Without an explicit full barrier speculative LOAD on line 8 can
841          * be executed before CPU#0 does STORE on line 1.  If that happens,
842          * CPU#0 observes the PENDING bit is still set and new execution of
843          * a @work is not queued in a hope, that CPU#1 will eventually
844          * finish the queued @work.  Meanwhile CPU#1 does not see
845          * event_indicated is set, because speculative LOAD was executed
846          * before actual STORE.
847          */
848         smp_mb();
849 }
850 
851 static inline struct pool_workqueue *work_struct_pwq(unsigned long data)
852 {
853         return (struct pool_workqueue *)(data & WORK_STRUCT_PWQ_MASK);
854 }
855 
856 static struct pool_workqueue *get_work_pwq(struct work_struct *work)
857 {
858         unsigned long data = atomic_long_read(&work->data);
859 
860         if (data & WORK_STRUCT_PWQ)
861                 return work_struct_pwq(data);
862         else
863                 return NULL;
864 }
865 
866 /**
867  * get_work_pool - return the worker_pool a given work was associated with
868  * @work: the work item of interest
869  *
870  * Pools are created and destroyed under wq_pool_mutex, and allows read
871  * access under RCU read lock.  As such, this function should be
872  * called under wq_pool_mutex or inside of a rcu_read_lock() region.
873  *
874  * All fields of the returned pool are accessible as long as the above
875  * mentioned locking is in effect.  If the returned pool needs to be used
876  * beyond the critical section, the caller is responsible for ensuring the
877  * returned pool is and stays online.
878  *
879  * Return: The worker_pool @work was last associated with.  %NULL if none.
880  */
881 static struct worker_pool *get_work_pool(struct work_struct *work)
882 {
883         unsigned long data = atomic_long_read(&work->data);
884         int pool_id;
885 
886         assert_rcu_or_pool_mutex();
887 
888         if (data & WORK_STRUCT_PWQ)
889                 return work_struct_pwq(data)->pool;
890 
891         pool_id = data >> WORK_OFFQ_POOL_SHIFT;
892         if (pool_id == WORK_OFFQ_POOL_NONE)
893                 return NULL;
894 
895         return idr_find(&worker_pool_idr, pool_id);
896 }
897 
898 static unsigned long shift_and_mask(unsigned long v, u32 shift, u32 bits)
899 {
900         return (v >> shift) & ((1 << bits) - 1);
901 }
902 
903 static void work_offqd_unpack(struct work_offq_data *offqd, unsigned long data)
904 {
905         WARN_ON_ONCE(data & WORK_STRUCT_PWQ);
906 
907         offqd->pool_id = shift_and_mask(data, WORK_OFFQ_POOL_SHIFT,
908                                         WORK_OFFQ_POOL_BITS);
909         offqd->disable = shift_and_mask(data, WORK_OFFQ_DISABLE_SHIFT,
910                                         WORK_OFFQ_DISABLE_BITS);
911         offqd->flags = data & WORK_OFFQ_FLAG_MASK;
912 }
913 
914 static unsigned long work_offqd_pack_flags(struct work_offq_data *offqd)
915 {
916         return ((unsigned long)offqd->disable << WORK_OFFQ_DISABLE_SHIFT) |
917                 ((unsigned long)offqd->flags);
918 }
919 
920 /*
921  * Policy functions.  These define the policies on how the global worker
922  * pools are managed.  Unless noted otherwise, these functions assume that
923  * they're being called with pool->lock held.
924  */
925 
926 /*
927  * Need to wake up a worker?  Called from anything but currently
928  * running workers.
929  *
930  * Note that, because unbound workers never contribute to nr_running, this
931  * function will always return %true for unbound pools as long as the
932  * worklist isn't empty.
933  */
934 static bool need_more_worker(struct worker_pool *pool)
935 {
936         return !list_empty(&pool->worklist) && !pool->nr_running;
937 }
938 
939 /* Can I start working?  Called from busy but !running workers. */
940 static bool may_start_working(struct worker_pool *pool)
941 {
942         return pool->nr_idle;
943 }
944 
945 /* Do I need to keep working?  Called from currently running workers. */
946 static bool keep_working(struct worker_pool *pool)
947 {
948         return !list_empty(&pool->worklist) && (pool->nr_running <= 1);
949 }
950 
951 /* Do we need a new worker?  Called from manager. */
952 static bool need_to_create_worker(struct worker_pool *pool)
953 {
954         return need_more_worker(pool) && !may_start_working(pool);
955 }
956 
957 /* Do we have too many workers and should some go away? */
958 static bool too_many_workers(struct worker_pool *pool)
959 {
960         bool managing = pool->flags & POOL_MANAGER_ACTIVE;
961         int nr_idle = pool->nr_idle + managing; /* manager is considered idle */
962         int nr_busy = pool->nr_workers - nr_idle;
963 
964         return nr_idle > 2 && (nr_idle - 2) * MAX_IDLE_WORKERS_RATIO >= nr_busy;
965 }
966 
967 /**
968  * worker_set_flags - set worker flags and adjust nr_running accordingly
969  * @worker: self
970  * @flags: flags to set
971  *
972  * Set @flags in @worker->flags and adjust nr_running accordingly.
973  */
974 static inline void worker_set_flags(struct worker *worker, unsigned int flags)
975 {
976         struct worker_pool *pool = worker->pool;
977 
978         lockdep_assert_held(&pool->lock);
979 
980         /* If transitioning into NOT_RUNNING, adjust nr_running. */
981         if ((flags & WORKER_NOT_RUNNING) &&
982             !(worker->flags & WORKER_NOT_RUNNING)) {
983                 pool->nr_running--;
984         }
985 
986         worker->flags |= flags;
987 }
988 
989 /**
990  * worker_clr_flags - clear worker flags and adjust nr_running accordingly
991  * @worker: self
992  * @flags: flags to clear
993  *
994  * Clear @flags in @worker->flags and adjust nr_running accordingly.
995  */
996 static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
997 {
998         struct worker_pool *pool = worker->pool;
999         unsigned int oflags = worker->flags;
1000 
1001         lockdep_assert_held(&pool->lock);
1002 
1003         worker->flags &= ~flags;
1004 
1005         /*
1006          * If transitioning out of NOT_RUNNING, increment nr_running.  Note
1007          * that the nested NOT_RUNNING is not a noop.  NOT_RUNNING is mask
1008          * of multiple flags, not a single flag.
1009          */
1010         if ((flags & WORKER_NOT_RUNNING) && (oflags & WORKER_NOT_RUNNING))
1011                 if (!(worker->flags & WORKER_NOT_RUNNING))
1012                         pool->nr_running++;
1013 }
1014 
1015 /* Return the first idle worker.  Called with pool->lock held. */
1016 static struct worker *first_idle_worker(struct worker_pool *pool)
1017 {
1018         if (unlikely(list_empty(&pool->idle_list)))
1019                 return NULL;
1020 
1021         return list_first_entry(&pool->idle_list, struct worker, entry);
1022 }
1023 
1024 /**
1025  * worker_enter_idle - enter idle state
1026  * @worker: worker which is entering idle state
1027  *
1028  * @worker is entering idle state.  Update stats and idle timer if
1029  * necessary.
1030  *
1031  * LOCKING:
1032  * raw_spin_lock_irq(pool->lock).
1033  */
1034 static void worker_enter_idle(struct worker *worker)
1035 {
1036         struct worker_pool *pool = worker->pool;
1037 
1038         if (WARN_ON_ONCE(worker->flags & WORKER_IDLE) ||
1039             WARN_ON_ONCE(!list_empty(&worker->entry) &&
1040                          (worker->hentry.next || worker->hentry.pprev)))
1041                 return;
1042 
1043         /* can't use worker_set_flags(), also called from create_worker() */
1044         worker->flags |= WORKER_IDLE;
1045         pool->nr_idle++;
1046         worker->last_active = jiffies;
1047 
1048         /* idle_list is LIFO */
1049         list_add(&worker->entry, &pool->idle_list);
1050 
1051         if (too_many_workers(pool) && !timer_pending(&pool->idle_timer))
1052                 mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT);
1053 
1054         /* Sanity check nr_running. */
1055         WARN_ON_ONCE(pool->nr_workers == pool->nr_idle && pool->nr_running);
1056 }
1057 
1058 /**
1059  * worker_leave_idle - leave idle state
1060  * @worker: worker which is leaving idle state
1061  *
1062  * @worker is leaving idle state.  Update stats.
1063  *
1064  * LOCKING:
1065  * raw_spin_lock_irq(pool->lock).
1066  */
1067 static void worker_leave_idle(struct worker *worker)
1068 {
1069         struct worker_pool *pool = worker->pool;
1070 
1071         if (WARN_ON_ONCE(!(worker->flags & WORKER_IDLE)))
1072                 return;
1073         worker_clr_flags(worker, WORKER_IDLE);
1074         pool->nr_idle--;
1075         list_del_init(&worker->entry);
1076 }
1077 
1078 /**
1079  * find_worker_executing_work - find worker which is executing a work
1080  * @pool: pool of interest
1081  * @work: work to find worker for
1082  *
1083  * Find a worker which is executing @work on @pool by searching
1084  * @pool->busy_hash which is keyed by the address of @work.  For a worker
1085  * to match, its current execution should match the address of @work and
1086  * its work function.  This is to avoid unwanted dependency between
1087  * unrelated work executions through a work item being recycled while still
1088  * being executed.
1089  *
1090  * This is a bit tricky.  A work item may be freed once its execution
1091  * starts and nothing prevents the freed area from being recycled for
1092  * another work item.  If the same work item address ends up being reused
1093  * before the original execution finishes, workqueue will identify the
1094  * recycled work item as currently executing and make it wait until the
1095  * current execution finishes, introducing an unwanted dependency.
1096  *
1097  * This function checks the work item address and work function to avoid
1098  * false positives.  Note that this isn't complete as one may construct a
1099  * work function which can introduce dependency onto itself through a
1100  * recycled work item.  Well, if somebody wants to shoot oneself in the
1101  * foot that badly, there's only so much we can do, and if such deadlock
1102  * actually occurs, it should be easy to locate the culprit work function.
1103  *
1104  * CONTEXT:
1105  * raw_spin_lock_irq(pool->lock).
1106  *
1107  * Return:
1108  * Pointer to worker which is executing @work if found, %NULL
1109  * otherwise.
1110  */
1111 static struct worker *find_worker_executing_work(struct worker_pool *pool,
1112                                                  struct work_struct *work)
1113 {
1114         struct worker *worker;
1115 
1116         hash_for_each_possible(pool->busy_hash, worker, hentry,
1117                                (unsigned long)work)
1118                 if (worker->current_work == work &&
1119                     worker->current_func == work->func)
1120                         return worker;
1121 
1122         return NULL;
1123 }
1124 
1125 /**
1126  * move_linked_works - move linked works to a list
1127  * @work: start of series of works to be scheduled
1128  * @head: target list to append @work to
1129  * @nextp: out parameter for nested worklist walking
1130  *
1131  * Schedule linked works starting from @work to @head. Work series to be
1132  * scheduled starts at @work and includes any consecutive work with
1133  * WORK_STRUCT_LINKED set in its predecessor. See assign_work() for details on
1134  * @nextp.
1135  *
1136  * CONTEXT:
1137  * raw_spin_lock_irq(pool->lock).
1138  */
1139 static void move_linked_works(struct work_struct *work, struct list_head *head,
1140                               struct work_struct **nextp)
1141 {
1142         struct work_struct *n;
1143 
1144         /*
1145          * Linked worklist will always end before the end of the list,
1146          * use NULL for list head.
1147          */
1148         list_for_each_entry_safe_from(work, n, NULL, entry) {
1149                 list_move_tail(&work->entry, head);
1150                 if (!(*work_data_bits(work) & WORK_STRUCT_LINKED))
1151                         break;
1152         }
1153 
1154         /*
1155          * If we're already inside safe list traversal and have moved
1156          * multiple works to the scheduled queue, the next position
1157          * needs to be updated.
1158          */
1159         if (nextp)
1160                 *nextp = n;
1161 }
1162 
1163 /**
1164  * assign_work - assign a work item and its linked work items to a worker
1165  * @work: work to assign
1166  * @worker: worker to assign to
1167  * @nextp: out parameter for nested worklist walking
1168  *
1169  * Assign @work and its linked work items to @worker. If @work is already being
1170  * executed by another worker in the same pool, it'll be punted there.
1171  *
1172  * If @nextp is not NULL, it's updated to point to the next work of the last
1173  * scheduled work. This allows assign_work() to be nested inside
1174  * list_for_each_entry_safe().
1175  *
1176  * Returns %true if @work was successfully assigned to @worker. %false if @work
1177  * was punted to another worker already executing it.
1178  */
1179 static bool assign_work(struct work_struct *work, struct worker *worker,
1180                         struct work_struct **nextp)
1181 {
1182         struct worker_pool *pool = worker->pool;
1183         struct worker *collision;
1184 
1185         lockdep_assert_held(&pool->lock);
1186 
1187         /*
1188          * A single work shouldn't be executed concurrently by multiple workers.
1189          * __queue_work() ensures that @work doesn't jump to a different pool
1190          * while still running in the previous pool. Here, we should ensure that
1191          * @work is not executed concurrently by multiple workers from the same
1192          * pool. Check whether anyone is already processing the work. If so,
1193          * defer the work to the currently executing one.
1194          */
1195         collision = find_worker_executing_work(pool, work);
1196         if (unlikely(collision)) {
1197                 move_linked_works(work, &collision->scheduled, nextp);
1198                 return false;
1199         }
1200 
1201         move_linked_works(work, &worker->scheduled, nextp);
1202         return true;
1203 }
1204 
1205 static struct irq_work *bh_pool_irq_work(struct worker_pool *pool)
1206 {
1207         int high = pool->attrs->nice == HIGHPRI_NICE_LEVEL ? 1 : 0;
1208 
1209         return &per_cpu(bh_pool_irq_works, pool->cpu)[high];
1210 }
1211 
1212 static void kick_bh_pool(struct worker_pool *pool)
1213 {
1214 #ifdef CONFIG_SMP
1215         /* see drain_dead_softirq_workfn() for BH_DRAINING */
1216         if (unlikely(pool->cpu != smp_processor_id() &&
1217                      !(pool->flags & POOL_BH_DRAINING))) {
1218                 irq_work_queue_on(bh_pool_irq_work(pool), pool->cpu);
1219                 return;
1220         }
1221 #endif
1222         if (pool->attrs->nice == HIGHPRI_NICE_LEVEL)
1223                 raise_softirq_irqoff(HI_SOFTIRQ);
1224         else
1225                 raise_softirq_irqoff(TASKLET_SOFTIRQ);
1226 }
1227 
1228 /**
1229  * kick_pool - wake up an idle worker if necessary
1230  * @pool: pool to kick
1231  *
1232  * @pool may have pending work items. Wake up worker if necessary. Returns
1233  * whether a worker was woken up.
1234  */
1235 static bool kick_pool(struct worker_pool *pool)
1236 {
1237         struct worker *worker = first_idle_worker(pool);
1238         struct task_struct *p;
1239 
1240         lockdep_assert_held(&pool->lock);
1241 
1242         if (!need_more_worker(pool) || !worker)
1243                 return false;
1244 
1245         if (pool->flags & POOL_BH) {
1246                 kick_bh_pool(pool);
1247                 return true;
1248         }
1249 
1250         p = worker->task;
1251 
1252 #ifdef CONFIG_SMP
1253         /*
1254          * Idle @worker is about to execute @work and waking up provides an
1255          * opportunity to migrate @worker at a lower cost by setting the task's
1256          * wake_cpu field. Let's see if we want to move @worker to improve
1257          * execution locality.
1258          *
1259          * We're waking the worker that went idle the latest and there's some
1260          * chance that @worker is marked idle but hasn't gone off CPU yet. If
1261          * so, setting the wake_cpu won't do anything. As this is a best-effort
1262          * optimization and the race window is narrow, let's leave as-is for
1263          * now. If this becomes pronounced, we can skip over workers which are
1264          * still on cpu when picking an idle worker.
1265          *
1266          * If @pool has non-strict affinity, @worker might have ended up outside
1267          * its affinity scope. Repatriate.
1268          */
1269         if (!pool->attrs->affn_strict &&
1270             !cpumask_test_cpu(p->wake_cpu, pool->attrs->__pod_cpumask)) {
1271                 struct work_struct *work = list_first_entry(&pool->worklist,
1272                                                 struct work_struct, entry);
1273                 int wake_cpu = cpumask_any_and_distribute(pool->attrs->__pod_cpumask,
1274                                                           cpu_online_mask);
1275                 if (wake_cpu < nr_cpu_ids) {
1276                         p->wake_cpu = wake_cpu;
1277                         get_work_pwq(work)->stats[PWQ_STAT_REPATRIATED]++;
1278                 }
1279         }
1280 #endif
1281         wake_up_process(p);
1282         return true;
1283 }
1284 
1285 #ifdef CONFIG_WQ_CPU_INTENSIVE_REPORT
1286 
1287 /*
1288  * Concurrency-managed per-cpu work items that hog CPU for longer than
1289  * wq_cpu_intensive_thresh_us trigger the automatic CPU_INTENSIVE mechanism,
1290  * which prevents them from stalling other concurrency-managed work items. If a
1291  * work function keeps triggering this mechanism, it's likely that the work item
1292  * should be using an unbound workqueue instead.
1293  *
1294  * wq_cpu_intensive_report() tracks work functions which trigger such conditions
1295  * and report them so that they can be examined and converted to use unbound
1296  * workqueues as appropriate. To avoid flooding the console, each violating work
1297  * function is tracked and reported with exponential backoff.
1298  */
1299 #define WCI_MAX_ENTS 128
1300 
1301 struct wci_ent {
1302         work_func_t             func;
1303         atomic64_t              cnt;
1304         struct hlist_node       hash_node;
1305 };
1306 
1307 static struct wci_ent wci_ents[WCI_MAX_ENTS];
1308 static int wci_nr_ents;
1309 static DEFINE_RAW_SPINLOCK(wci_lock);
1310 static DEFINE_HASHTABLE(wci_hash, ilog2(WCI_MAX_ENTS));
1311 
1312 static struct wci_ent *wci_find_ent(work_func_t func)
1313 {
1314         struct wci_ent *ent;
1315 
1316         hash_for_each_possible_rcu(wci_hash, ent, hash_node,
1317                                    (unsigned long)func) {
1318                 if (ent->func == func)
1319                         return ent;
1320         }
1321         return NULL;
1322 }
1323 
1324 static void wq_cpu_intensive_report(work_func_t func)
1325 {
1326         struct wci_ent *ent;
1327 
1328 restart:
1329         ent = wci_find_ent(func);
1330         if (ent) {
1331                 u64 cnt;
1332 
1333                 /*
1334                  * Start reporting from the warning_thresh and back off
1335                  * exponentially.
1336                  */
1337                 cnt = atomic64_inc_return_relaxed(&ent->cnt);
1338                 if (wq_cpu_intensive_warning_thresh &&
1339                     cnt >= wq_cpu_intensive_warning_thresh &&
1340                     is_power_of_2(cnt + 1 - wq_cpu_intensive_warning_thresh))
1341                         printk_deferred(KERN_WARNING "workqueue: %ps hogged CPU for >%luus %llu times, consider switching to WQ_UNBOUND\n",
1342                                         ent->func, wq_cpu_intensive_thresh_us,
1343                                         atomic64_read(&ent->cnt));
1344                 return;
1345         }
1346 
1347         /*
1348          * @func is a new violation. Allocate a new entry for it. If wcn_ents[]
1349          * is exhausted, something went really wrong and we probably made enough
1350          * noise already.
1351          */
1352         if (wci_nr_ents >= WCI_MAX_ENTS)
1353                 return;
1354 
1355         raw_spin_lock(&wci_lock);
1356 
1357         if (wci_nr_ents >= WCI_MAX_ENTS) {
1358                 raw_spin_unlock(&wci_lock);
1359                 return;
1360         }
1361 
1362         if (wci_find_ent(func)) {
1363                 raw_spin_unlock(&wci_lock);
1364                 goto restart;
1365         }
1366 
1367         ent = &wci_ents[wci_nr_ents++];
1368         ent->func = func;
1369         atomic64_set(&ent->cnt, 0);
1370         hash_add_rcu(wci_hash, &ent->hash_node, (unsigned long)func);
1371 
1372         raw_spin_unlock(&wci_lock);
1373 
1374         goto restart;
1375 }
1376 
1377 #else   /* CONFIG_WQ_CPU_INTENSIVE_REPORT */
1378 static void wq_cpu_intensive_report(work_func_t func) {}
1379 #endif  /* CONFIG_WQ_CPU_INTENSIVE_REPORT */
1380 
1381 /**
1382  * wq_worker_running - a worker is running again
1383  * @task: task waking up
1384  *
1385  * This function is called when a worker returns from schedule()
1386  */
1387 void wq_worker_running(struct task_struct *task)
1388 {
1389         struct worker *worker = kthread_data(task);
1390 
1391         if (!READ_ONCE(worker->sleeping))
1392                 return;
1393 
1394         /*
1395          * If preempted by unbind_workers() between the WORKER_NOT_RUNNING check
1396          * and the nr_running increment below, we may ruin the nr_running reset
1397          * and leave with an unexpected pool->nr_running == 1 on the newly unbound
1398          * pool. Protect against such race.
1399          */
1400         preempt_disable();
1401         if (!(worker->flags & WORKER_NOT_RUNNING))
1402                 worker->pool->nr_running++;
1403         preempt_enable();
1404 
1405         /*
1406          * CPU intensive auto-detection cares about how long a work item hogged
1407          * CPU without sleeping. Reset the starting timestamp on wakeup.
1408          */
1409         worker->current_at = worker->task->se.sum_exec_runtime;
1410 
1411         WRITE_ONCE(worker->sleeping, 0);
1412 }
1413 
1414 /**
1415  * wq_worker_sleeping - a worker is going to sleep
1416  * @task: task going to sleep
1417  *
1418  * This function is called from schedule() when a busy worker is
1419  * going to sleep.
1420  */
1421 void wq_worker_sleeping(struct task_struct *task)
1422 {
1423         struct worker *worker = kthread_data(task);
1424         struct worker_pool *pool;
1425 
1426         /*
1427          * Rescuers, which may not have all the fields set up like normal
1428          * workers, also reach here, let's not access anything before
1429          * checking NOT_RUNNING.
1430          */
1431         if (worker->flags & WORKER_NOT_RUNNING)
1432                 return;
1433 
1434         pool = worker->pool;
1435 
1436         /* Return if preempted before wq_worker_running() was reached */
1437         if (READ_ONCE(worker->sleeping))
1438                 return;
1439 
1440         WRITE_ONCE(worker->sleeping, 1);
1441         raw_spin_lock_irq(&pool->lock);
1442 
1443         /*
1444          * Recheck in case unbind_workers() preempted us. We don't
1445          * want to decrement nr_running after the worker is unbound
1446          * and nr_running has been reset.
1447          */
1448         if (worker->flags & WORKER_NOT_RUNNING) {
1449                 raw_spin_unlock_irq(&pool->lock);
1450                 return;
1451         }
1452 
1453         pool->nr_running--;
1454         if (kick_pool(pool))
1455                 worker->current_pwq->stats[PWQ_STAT_CM_WAKEUP]++;
1456 
1457         raw_spin_unlock_irq(&pool->lock);
1458 }
1459 
1460 /**
1461  * wq_worker_tick - a scheduler tick occurred while a kworker is running
1462  * @task: task currently running
1463  *
1464  * Called from sched_tick(). We're in the IRQ context and the current
1465  * worker's fields which follow the 'K' locking rule can be accessed safely.
1466  */
1467 void wq_worker_tick(struct task_struct *task)
1468 {
1469         struct worker *worker = kthread_data(task);
1470         struct pool_workqueue *pwq = worker->current_pwq;
1471         struct worker_pool *pool = worker->pool;
1472 
1473         if (!pwq)
1474                 return;
1475 
1476         pwq->stats[PWQ_STAT_CPU_TIME] += TICK_USEC;
1477 
1478         if (!wq_cpu_intensive_thresh_us)
1479                 return;
1480 
1481         /*
1482          * If the current worker is concurrency managed and hogged the CPU for
1483          * longer than wq_cpu_intensive_thresh_us, it's automatically marked
1484          * CPU_INTENSIVE to avoid stalling other concurrency-managed work items.
1485          *
1486          * Set @worker->sleeping means that @worker is in the process of
1487          * switching out voluntarily and won't be contributing to
1488          * @pool->nr_running until it wakes up. As wq_worker_sleeping() also
1489          * decrements ->nr_running, setting CPU_INTENSIVE here can lead to
1490          * double decrements. The task is releasing the CPU anyway. Let's skip.
1491          * We probably want to make this prettier in the future.
1492          */
1493         if ((worker->flags & WORKER_NOT_RUNNING) || READ_ONCE(worker->sleeping) ||
1494             worker->task->se.sum_exec_runtime - worker->current_at <
1495             wq_cpu_intensive_thresh_us * NSEC_PER_USEC)
1496                 return;
1497 
1498         raw_spin_lock(&pool->lock);
1499 
1500         worker_set_flags(worker, WORKER_CPU_INTENSIVE);
1501         wq_cpu_intensive_report(worker->current_func);
1502         pwq->stats[PWQ_STAT_CPU_INTENSIVE]++;
1503 
1504         if (kick_pool(pool))
1505                 pwq->stats[PWQ_STAT_CM_WAKEUP]++;
1506 
1507         raw_spin_unlock(&pool->lock);
1508 }
1509 
1510 /**
1511  * wq_worker_last_func - retrieve worker's last work function
1512  * @task: Task to retrieve last work function of.
1513  *
1514  * Determine the last function a worker executed. This is called from
1515  * the scheduler to get a worker's last known identity.
1516  *
1517  * CONTEXT:
1518  * raw_spin_lock_irq(rq->lock)
1519  *
1520  * This function is called during schedule() when a kworker is going
1521  * to sleep. It's used by psi to identify aggregation workers during
1522  * dequeuing, to allow periodic aggregation to shut-off when that
1523  * worker is the last task in the system or cgroup to go to sleep.
1524  *
1525  * As this function doesn't involve any workqueue-related locking, it
1526  * only returns stable values when called from inside the scheduler's
1527  * queuing and dequeuing paths, when @task, which must be a kworker,
1528  * is guaranteed to not be processing any works.
1529  *
1530  * Return:
1531  * The last work function %current executed as a worker, NULL if it
1532  * hasn't executed any work yet.
1533  */
1534 work_func_t wq_worker_last_func(struct task_struct *task)
1535 {
1536         struct worker *worker = kthread_data(task);
1537 
1538         return worker->last_func;
1539 }
1540 
1541 /**
1542  * wq_node_nr_active - Determine wq_node_nr_active to use
1543  * @wq: workqueue of interest
1544  * @node: NUMA node, can be %NUMA_NO_NODE
1545  *
1546  * Determine wq_node_nr_active to use for @wq on @node. Returns:
1547  *
1548  * - %NULL for per-cpu workqueues as they don't need to use shared nr_active.
1549  *
1550  * - node_nr_active[nr_node_ids] if @node is %NUMA_NO_NODE.
1551  *
1552  * - Otherwise, node_nr_active[@node].
1553  */
1554 static struct wq_node_nr_active *wq_node_nr_active(struct workqueue_struct *wq,
1555                                                    int node)
1556 {
1557         if (!(wq->flags & WQ_UNBOUND))
1558                 return NULL;
1559 
1560         if (node == NUMA_NO_NODE)
1561                 node = nr_node_ids;
1562 
1563         return wq->node_nr_active[node];
1564 }
1565 
1566 /**
1567  * wq_update_node_max_active - Update per-node max_actives to use
1568  * @wq: workqueue to update
1569  * @off_cpu: CPU that's going down, -1 if a CPU is not going down
1570  *
1571  * Update @wq->node_nr_active[]->max. @wq must be unbound. max_active is
1572  * distributed among nodes according to the proportions of numbers of online
1573  * cpus. The result is always between @wq->min_active and max_active.
1574  */
1575 static void wq_update_node_max_active(struct workqueue_struct *wq, int off_cpu)
1576 {
1577         struct cpumask *effective = unbound_effective_cpumask(wq);
1578         int min_active = READ_ONCE(wq->min_active);
1579         int max_active = READ_ONCE(wq->max_active);
1580         int total_cpus, node;
1581 
1582         lockdep_assert_held(&wq->mutex);
1583 
1584         if (!wq_topo_initialized)
1585                 return;
1586 
1587         if (off_cpu >= 0 && !cpumask_test_cpu(off_cpu, effective))
1588                 off_cpu = -1;
1589 
1590         total_cpus = cpumask_weight_and(effective, cpu_online_mask);
1591         if (off_cpu >= 0)
1592                 total_cpus--;
1593 
1594         /* If all CPUs of the wq get offline, use the default values */
1595         if (unlikely(!total_cpus)) {
1596                 for_each_node(node)
1597                         wq_node_nr_active(wq, node)->max = min_active;
1598 
1599                 wq_node_nr_active(wq, NUMA_NO_NODE)->max = max_active;
1600                 return;
1601         }
1602 
1603         for_each_node(node) {
1604                 int node_cpus;
1605 
1606                 node_cpus = cpumask_weight_and(effective, cpumask_of_node(node));
1607                 if (off_cpu >= 0 && cpu_to_node(off_cpu) == node)
1608                         node_cpus--;
1609 
1610                 wq_node_nr_active(wq, node)->max =
1611                         clamp(DIV_ROUND_UP(max_active * node_cpus, total_cpus),
1612                               min_active, max_active);
1613         }
1614 
1615         wq_node_nr_active(wq, NUMA_NO_NODE)->max = max_active;
1616 }
1617 
1618 /**
1619  * get_pwq - get an extra reference on the specified pool_workqueue
1620  * @pwq: pool_workqueue to get
1621  *
1622  * Obtain an extra reference on @pwq.  The caller should guarantee that
1623  * @pwq has positive refcnt and be holding the matching pool->lock.
1624  */
1625 static void get_pwq(struct pool_workqueue *pwq)
1626 {
1627         lockdep_assert_held(&pwq->pool->lock);
1628         WARN_ON_ONCE(pwq->refcnt <= 0);
1629         pwq->refcnt++;
1630 }
1631 
1632 /**
1633  * put_pwq - put a pool_workqueue reference
1634  * @pwq: pool_workqueue to put
1635  *
1636  * Drop a reference of @pwq.  If its refcnt reaches zero, schedule its
1637  * destruction.  The caller should be holding the matching pool->lock.
1638  */
1639 static void put_pwq(struct pool_workqueue *pwq)
1640 {
1641         lockdep_assert_held(&pwq->pool->lock);
1642         if (likely(--pwq->refcnt))
1643                 return;
1644         /*
1645          * @pwq can't be released under pool->lock, bounce to a dedicated
1646          * kthread_worker to avoid A-A deadlocks.
1647          */
1648         kthread_queue_work(pwq_release_worker, &pwq->release_work);
1649 }
1650 
1651 /**
1652  * put_pwq_unlocked - put_pwq() with surrounding pool lock/unlock
1653  * @pwq: pool_workqueue to put (can be %NULL)
1654  *
1655  * put_pwq() with locking.  This function also allows %NULL @pwq.
1656  */
1657 static void put_pwq_unlocked(struct pool_workqueue *pwq)
1658 {
1659         if (pwq) {
1660                 /*
1661                  * As both pwqs and pools are RCU protected, the
1662                  * following lock operations are safe.
1663                  */
1664                 raw_spin_lock_irq(&pwq->pool->lock);
1665                 put_pwq(pwq);
1666                 raw_spin_unlock_irq(&pwq->pool->lock);
1667         }
1668 }
1669 
1670 static bool pwq_is_empty(struct pool_workqueue *pwq)
1671 {
1672         return !pwq->nr_active && list_empty(&pwq->inactive_works);
1673 }
1674 
1675 static void __pwq_activate_work(struct pool_workqueue *pwq,
1676                                 struct work_struct *work)
1677 {
1678         unsigned long *wdb = work_data_bits(work);
1679 
1680         WARN_ON_ONCE(!(*wdb & WORK_STRUCT_INACTIVE));
1681         trace_workqueue_activate_work(work);
1682         if (list_empty(&pwq->pool->worklist))
1683                 pwq->pool->watchdog_ts = jiffies;
1684         move_linked_works(work, &pwq->pool->worklist, NULL);
1685         __clear_bit(WORK_STRUCT_INACTIVE_BIT, wdb);
1686 }
1687 
1688 static bool tryinc_node_nr_active(struct wq_node_nr_active *nna)
1689 {
1690         int max = READ_ONCE(nna->max);
1691 
1692         while (true) {
1693                 int old, tmp;
1694 
1695                 old = atomic_read(&nna->nr);
1696                 if (old >= max)
1697                         return false;
1698                 tmp = atomic_cmpxchg_relaxed(&nna->nr, old, old + 1);
1699                 if (tmp == old)
1700                         return true;
1701         }
1702 }
1703 
1704 /**
1705  * pwq_tryinc_nr_active - Try to increment nr_active for a pwq
1706  * @pwq: pool_workqueue of interest
1707  * @fill: max_active may have increased, try to increase concurrency level
1708  *
1709  * Try to increment nr_active for @pwq. Returns %true if an nr_active count is
1710  * successfully obtained. %false otherwise.
1711  */
1712 static bool pwq_tryinc_nr_active(struct pool_workqueue *pwq, bool fill)
1713 {
1714         struct workqueue_struct *wq = pwq->wq;
1715         struct worker_pool *pool = pwq->pool;
1716         struct wq_node_nr_active *nna = wq_node_nr_active(wq, pool->node);
1717         bool obtained = false;
1718 
1719         lockdep_assert_held(&pool->lock);
1720 
1721         if (!nna) {
1722                 /* BH or per-cpu workqueue, pwq->nr_active is sufficient */
1723                 obtained = pwq->nr_active < READ_ONCE(wq->max_active);
1724                 goto out;
1725         }
1726 
1727         if (unlikely(pwq->plugged))
1728                 return false;
1729 
1730         /*
1731          * Unbound workqueue uses per-node shared nr_active $nna. If @pwq is
1732          * already waiting on $nna, pwq_dec_nr_active() will maintain the
1733          * concurrency level. Don't jump the line.
1734          *
1735          * We need to ignore the pending test after max_active has increased as
1736          * pwq_dec_nr_active() can only maintain the concurrency level but not
1737          * increase it. This is indicated by @fill.
1738          */
1739         if (!list_empty(&pwq->pending_node) && likely(!fill))
1740                 goto out;
1741 
1742         obtained = tryinc_node_nr_active(nna);
1743         if (obtained)
1744                 goto out;
1745 
1746         /*
1747          * Lockless acquisition failed. Lock, add ourself to $nna->pending_pwqs
1748          * and try again. The smp_mb() is paired with the implied memory barrier
1749          * of atomic_dec_return() in pwq_dec_nr_active() to ensure that either
1750          * we see the decremented $nna->nr or they see non-empty
1751          * $nna->pending_pwqs.
1752          */
1753         raw_spin_lock(&nna->lock);
1754 
1755         if (list_empty(&pwq->pending_node))
1756                 list_add_tail(&pwq->pending_node, &nna->pending_pwqs);
1757         else if (likely(!fill))
1758                 goto out_unlock;
1759 
1760         smp_mb();
1761 
1762         obtained = tryinc_node_nr_active(nna);
1763 
1764         /*
1765          * If @fill, @pwq might have already been pending. Being spuriously
1766          * pending in cold paths doesn't affect anything. Let's leave it be.
1767          */
1768         if (obtained && likely(!fill))
1769                 list_del_init(&pwq->pending_node);
1770 
1771 out_unlock:
1772         raw_spin_unlock(&nna->lock);
1773 out:
1774         if (obtained)
1775                 pwq->nr_active++;
1776         return obtained;
1777 }
1778 
1779 /**
1780  * pwq_activate_first_inactive - Activate the first inactive work item on a pwq
1781  * @pwq: pool_workqueue of interest
1782  * @fill: max_active may have increased, try to increase concurrency level
1783  *
1784  * Activate the first inactive work item of @pwq if available and allowed by
1785  * max_active limit.
1786  *
1787  * Returns %true if an inactive work item has been activated. %false if no
1788  * inactive work item is found or max_active limit is reached.
1789  */
1790 static bool pwq_activate_first_inactive(struct pool_workqueue *pwq, bool fill)
1791 {
1792         struct work_struct *work =
1793                 list_first_entry_or_null(&pwq->inactive_works,
1794                                          struct work_struct, entry);
1795 
1796         if (work && pwq_tryinc_nr_active(pwq, fill)) {
1797                 __pwq_activate_work(pwq, work);
1798                 return true;
1799         } else {
1800                 return false;
1801         }
1802 }
1803 
1804 /**
1805  * unplug_oldest_pwq - unplug the oldest pool_workqueue
1806  * @wq: workqueue_struct where its oldest pwq is to be unplugged
1807  *
1808  * This function should only be called for ordered workqueues where only the
1809  * oldest pwq is unplugged, the others are plugged to suspend execution to
1810  * ensure proper work item ordering::
1811  *
1812  *    dfl_pwq --------------+     [P] - plugged
1813  *                          |
1814  *                          v
1815  *    pwqs -> A -> B [P] -> C [P] (newest)
1816  *            |    |        |
1817  *            1    3        5
1818  *            |    |        |
1819  *            2    4        6
1820  *
1821  * When the oldest pwq is drained and removed, this function should be called
1822  * to unplug the next oldest one to start its work item execution. Note that
1823  * pwq's are linked into wq->pwqs with the oldest first, so the first one in
1824  * the list is the oldest.
1825  */
1826 static void unplug_oldest_pwq(struct workqueue_struct *wq)
1827 {
1828         struct pool_workqueue *pwq;
1829 
1830         lockdep_assert_held(&wq->mutex);
1831 
1832         /* Caller should make sure that pwqs isn't empty before calling */
1833         pwq = list_first_entry_or_null(&wq->pwqs, struct pool_workqueue,
1834                                        pwqs_node);
1835         raw_spin_lock_irq(&pwq->pool->lock);
1836         if (pwq->plugged) {
1837                 pwq->plugged = false;
1838                 if (pwq_activate_first_inactive(pwq, true))
1839                         kick_pool(pwq->pool);
1840         }
1841         raw_spin_unlock_irq(&pwq->pool->lock);
1842 }
1843 
1844 /**
1845  * node_activate_pending_pwq - Activate a pending pwq on a wq_node_nr_active
1846  * @nna: wq_node_nr_active to activate a pending pwq for
1847  * @caller_pool: worker_pool the caller is locking
1848  *
1849  * Activate a pwq in @nna->pending_pwqs. Called with @caller_pool locked.
1850  * @caller_pool may be unlocked and relocked to lock other worker_pools.
1851  */
1852 static void node_activate_pending_pwq(struct wq_node_nr_active *nna,
1853                                       struct worker_pool *caller_pool)
1854 {
1855         struct worker_pool *locked_pool = caller_pool;
1856         struct pool_workqueue *pwq;
1857         struct work_struct *work;
1858 
1859         lockdep_assert_held(&caller_pool->lock);
1860 
1861         raw_spin_lock(&nna->lock);
1862 retry:
1863         pwq = list_first_entry_or_null(&nna->pending_pwqs,
1864                                        struct pool_workqueue, pending_node);
1865         if (!pwq)
1866                 goto out_unlock;
1867 
1868         /*
1869          * If @pwq is for a different pool than @locked_pool, we need to lock
1870          * @pwq->pool->lock. Let's trylock first. If unsuccessful, do the unlock
1871          * / lock dance. For that, we also need to release @nna->lock as it's
1872          * nested inside pool locks.
1873          */
1874         if (pwq->pool != locked_pool) {
1875                 raw_spin_unlock(&locked_pool->lock);
1876                 locked_pool = pwq->pool;
1877                 if (!raw_spin_trylock(&locked_pool->lock)) {
1878                         raw_spin_unlock(&nna->lock);
1879                         raw_spin_lock(&locked_pool->lock);
1880                         raw_spin_lock(&nna->lock);
1881                         goto retry;
1882                 }
1883         }
1884 
1885         /*
1886          * $pwq may not have any inactive work items due to e.g. cancellations.
1887          * Drop it from pending_pwqs and see if there's another one.
1888          */
1889         work = list_first_entry_or_null(&pwq->inactive_works,
1890                                         struct work_struct, entry);
1891         if (!work) {
1892                 list_del_init(&pwq->pending_node);
1893                 goto retry;
1894         }
1895 
1896         /*
1897          * Acquire an nr_active count and activate the inactive work item. If
1898          * $pwq still has inactive work items, rotate it to the end of the
1899          * pending_pwqs so that we round-robin through them. This means that
1900          * inactive work items are not activated in queueing order which is fine
1901          * given that there has never been any ordering across different pwqs.
1902          */
1903         if (likely(tryinc_node_nr_active(nna))) {
1904                 pwq->nr_active++;
1905                 __pwq_activate_work(pwq, work);
1906 
1907                 if (list_empty(&pwq->inactive_works))
1908                         list_del_init(&pwq->pending_node);
1909                 else
1910                         list_move_tail(&pwq->pending_node, &nna->pending_pwqs);
1911 
1912                 /* if activating a foreign pool, make sure it's running */
1913                 if (pwq->pool != caller_pool)
1914                         kick_pool(pwq->pool);
1915         }
1916 
1917 out_unlock:
1918         raw_spin_unlock(&nna->lock);
1919         if (locked_pool != caller_pool) {
1920                 raw_spin_unlock(&locked_pool->lock);
1921                 raw_spin_lock(&caller_pool->lock);
1922         }
1923 }
1924 
1925 /**
1926  * pwq_dec_nr_active - Retire an active count
1927  * @pwq: pool_workqueue of interest
1928  *
1929  * Decrement @pwq's nr_active and try to activate the first inactive work item.
1930  * For unbound workqueues, this function may temporarily drop @pwq->pool->lock.
1931  */
1932 static void pwq_dec_nr_active(struct pool_workqueue *pwq)
1933 {
1934         struct worker_pool *pool = pwq->pool;
1935         struct wq_node_nr_active *nna = wq_node_nr_active(pwq->wq, pool->node);
1936 
1937         lockdep_assert_held(&pool->lock);
1938 
1939         /*
1940          * @pwq->nr_active should be decremented for both percpu and unbound
1941          * workqueues.
1942          */
1943         pwq->nr_active--;
1944 
1945         /*
1946          * For a percpu workqueue, it's simple. Just need to kick the first
1947          * inactive work item on @pwq itself.
1948          */
1949         if (!nna) {
1950                 pwq_activate_first_inactive(pwq, false);
1951                 return;
1952         }
1953 
1954         /*
1955          * If @pwq is for an unbound workqueue, it's more complicated because
1956          * multiple pwqs and pools may be sharing the nr_active count. When a
1957          * pwq needs to wait for an nr_active count, it puts itself on
1958          * $nna->pending_pwqs. The following atomic_dec_return()'s implied
1959          * memory barrier is paired with smp_mb() in pwq_tryinc_nr_active() to
1960          * guarantee that either we see non-empty pending_pwqs or they see
1961          * decremented $nna->nr.
1962          *
1963          * $nna->max may change as CPUs come online/offline and @pwq->wq's
1964          * max_active gets updated. However, it is guaranteed to be equal to or
1965          * larger than @pwq->wq->min_active which is above zero unless freezing.
1966          * This maintains the forward progress guarantee.
1967          */
1968         if (atomic_dec_return(&nna->nr) >= READ_ONCE(nna->max))
1969                 return;
1970 
1971         if (!list_empty(&nna->pending_pwqs))
1972                 node_activate_pending_pwq(nna, pool);
1973 }
1974 
1975 /**
1976  * pwq_dec_nr_in_flight - decrement pwq's nr_in_flight
1977  * @pwq: pwq of interest
1978  * @work_data: work_data of work which left the queue
1979  *
1980  * A work either has completed or is removed from pending queue,
1981  * decrement nr_in_flight of its pwq and handle workqueue flushing.
1982  *
1983  * NOTE:
1984  * For unbound workqueues, this function may temporarily drop @pwq->pool->lock
1985  * and thus should be called after all other state updates for the in-flight
1986  * work item is complete.
1987  *
1988  * CONTEXT:
1989  * raw_spin_lock_irq(pool->lock).
1990  */
1991 static void pwq_dec_nr_in_flight(struct pool_workqueue *pwq, unsigned long work_data)
1992 {
1993         int color = get_work_color(work_data);
1994 
1995         if (!(work_data & WORK_STRUCT_INACTIVE))
1996                 pwq_dec_nr_active(pwq);
1997 
1998         pwq->nr_in_flight[color]--;
1999 
2000         /* is flush in progress and are we at the flushing tip? */
2001         if (likely(pwq->flush_color != color))
2002                 goto out_put;
2003 
2004         /* are there still in-flight works? */
2005         if (pwq->nr_in_flight[color])
2006                 goto out_put;
2007 
2008         /* this pwq is done, clear flush_color */
2009         pwq->flush_color = -1;
2010 
2011         /*
2012          * If this was the last pwq, wake up the first flusher.  It
2013          * will handle the rest.
2014          */
2015         if (atomic_dec_and_test(&pwq->wq->nr_pwqs_to_flush))
2016                 complete(&pwq->wq->first_flusher->done);
2017 out_put:
2018         put_pwq(pwq);
2019 }
2020 
2021 /**
2022  * try_to_grab_pending - steal work item from worklist and disable irq
2023  * @work: work item to steal
2024  * @cflags: %WORK_CANCEL_ flags
2025  * @irq_flags: place to store irq state
2026  *
2027  * Try to grab PENDING bit of @work.  This function can handle @work in any
2028  * stable state - idle, on timer or on worklist.
2029  *
2030  * Return:
2031  *
2032  *  ========    ================================================================
2033  *  1           if @work was pending and we successfully stole PENDING
2034  *  0           if @work was idle and we claimed PENDING
2035  *  -EAGAIN     if PENDING couldn't be grabbed at the moment, safe to busy-retry
2036  *  ========    ================================================================
2037  *
2038  * Note:
2039  * On >= 0 return, the caller owns @work's PENDING bit.  To avoid getting
2040  * interrupted while holding PENDING and @work off queue, irq must be
2041  * disabled on entry.  This, combined with delayed_work->timer being
2042  * irqsafe, ensures that we return -EAGAIN for finite short period of time.
2043  *
2044  * On successful return, >= 0, irq is disabled and the caller is
2045  * responsible for releasing it using local_irq_restore(*@irq_flags).
2046  *
2047  * This function is safe to call from any context including IRQ handler.
2048  */
2049 static int try_to_grab_pending(struct work_struct *work, u32 cflags,
2050                                unsigned long *irq_flags)
2051 {
2052         struct worker_pool *pool;
2053         struct pool_workqueue *pwq;
2054 
2055         local_irq_save(*irq_flags);
2056 
2057         /* try to steal the timer if it exists */
2058         if (cflags & WORK_CANCEL_DELAYED) {
2059                 struct delayed_work *dwork = to_delayed_work(work);
2060 
2061                 /*
2062                  * dwork->timer is irqsafe.  If del_timer() fails, it's
2063                  * guaranteed that the timer is not queued anywhere and not
2064                  * running on the local CPU.
2065                  */
2066                 if (likely(del_timer(&dwork->timer)))
2067                         return 1;
2068         }
2069 
2070         /* try to claim PENDING the normal way */
2071         if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
2072                 return 0;
2073 
2074         rcu_read_lock();
2075         /*
2076          * The queueing is in progress, or it is already queued. Try to
2077          * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
2078          */
2079         pool = get_work_pool(work);
2080         if (!pool)
2081                 goto fail;
2082 
2083         raw_spin_lock(&pool->lock);
2084         /*
2085          * work->data is guaranteed to point to pwq only while the work
2086          * item is queued on pwq->wq, and both updating work->data to point
2087          * to pwq on queueing and to pool on dequeueing are done under
2088          * pwq->pool->lock.  This in turn guarantees that, if work->data
2089          * points to pwq which is associated with a locked pool, the work
2090          * item is currently queued on that pool.
2091          */
2092         pwq = get_work_pwq(work);
2093         if (pwq && pwq->pool == pool) {
2094                 unsigned long work_data = *work_data_bits(work);
2095 
2096                 debug_work_deactivate(work);
2097 
2098                 /*
2099                  * A cancelable inactive work item must be in the
2100                  * pwq->inactive_works since a queued barrier can't be
2101                  * canceled (see the comments in insert_wq_barrier()).
2102                  *
2103                  * An inactive work item cannot be deleted directly because
2104                  * it might have linked barrier work items which, if left
2105                  * on the inactive_works list, will confuse pwq->nr_active
2106                  * management later on and cause stall.  Move the linked
2107                  * barrier work items to the worklist when deleting the grabbed
2108                  * item. Also keep WORK_STRUCT_INACTIVE in work_data, so that
2109                  * it doesn't participate in nr_active management in later
2110                  * pwq_dec_nr_in_flight().
2111                  */
2112                 if (work_data & WORK_STRUCT_INACTIVE)
2113                         move_linked_works(work, &pwq->pool->worklist, NULL);
2114 
2115                 list_del_init(&work->entry);
2116 
2117                 /*
2118                  * work->data points to pwq iff queued. Let's point to pool. As
2119                  * this destroys work->data needed by the next step, stash it.
2120                  */
2121                 set_work_pool_and_keep_pending(work, pool->id,
2122                                                pool_offq_flags(pool));
2123 
2124                 /* must be the last step, see the function comment */
2125                 pwq_dec_nr_in_flight(pwq, work_data);
2126 
2127                 raw_spin_unlock(&pool->lock);
2128                 rcu_read_unlock();
2129                 return 1;
2130         }
2131         raw_spin_unlock(&pool->lock);
2132 fail:
2133         rcu_read_unlock();
2134         local_irq_restore(*irq_flags);
2135         return -EAGAIN;
2136 }
2137 
2138 /**
2139  * work_grab_pending - steal work item from worklist and disable irq
2140  * @work: work item to steal
2141  * @cflags: %WORK_CANCEL_ flags
2142  * @irq_flags: place to store IRQ state
2143  *
2144  * Grab PENDING bit of @work. @work can be in any stable state - idle, on timer
2145  * or on worklist.
2146  *
2147  * Can be called from any context. IRQ is disabled on return with IRQ state
2148  * stored in *@irq_flags. The caller is responsible for re-enabling it using
2149  * local_irq_restore().
2150  *
2151  * Returns %true if @work was pending. %false if idle.
2152  */
2153 static bool work_grab_pending(struct work_struct *work, u32 cflags,
2154                               unsigned long *irq_flags)
2155 {
2156         int ret;
2157 
2158         while (true) {
2159                 ret = try_to_grab_pending(work, cflags, irq_flags);
2160                 if (ret >= 0)
2161                         return ret;
2162                 cpu_relax();
2163         }
2164 }
2165 
2166 /**
2167  * insert_work - insert a work into a pool
2168  * @pwq: pwq @work belongs to
2169  * @work: work to insert
2170  * @head: insertion point
2171  * @extra_flags: extra WORK_STRUCT_* flags to set
2172  *
2173  * Insert @work which belongs to @pwq after @head.  @extra_flags is or'd to
2174  * work_struct flags.
2175  *
2176  * CONTEXT:
2177  * raw_spin_lock_irq(pool->lock).
2178  */
2179 static void insert_work(struct pool_workqueue *pwq, struct work_struct *work,
2180                         struct list_head *head, unsigned int extra_flags)
2181 {
2182         debug_work_activate(work);
2183 
2184         /* record the work call stack in order to print it in KASAN reports */
2185         kasan_record_aux_stack_noalloc(work);
2186 
2187         /* we own @work, set data and link */
2188         set_work_pwq(work, pwq, extra_flags);
2189         list_add_tail(&work->entry, head);
2190         get_pwq(pwq);
2191 }
2192 
2193 /*
2194  * Test whether @work is being queued from another work executing on the
2195  * same workqueue.
2196  */
2197 static bool is_chained_work(struct workqueue_struct *wq)
2198 {
2199         struct worker *worker;
2200 
2201         worker = current_wq_worker();
2202         /*
2203          * Return %true iff I'm a worker executing a work item on @wq.  If
2204          * I'm @worker, it's safe to dereference it without locking.
2205          */
2206         return worker && worker->current_pwq->wq == wq;
2207 }
2208 
2209 /*
2210  * When queueing an unbound work item to a wq, prefer local CPU if allowed
2211  * by wq_unbound_cpumask.  Otherwise, round robin among the allowed ones to
2212  * avoid perturbing sensitive tasks.
2213  */
2214 static int wq_select_unbound_cpu(int cpu)
2215 {
2216         int new_cpu;
2217 
2218         if (likely(!wq_debug_force_rr_cpu)) {
2219                 if (cpumask_test_cpu(cpu, wq_unbound_cpumask))
2220                         return cpu;
2221         } else {
2222                 pr_warn_once("workqueue: round-robin CPU selection forced, expect performance impact\n");
2223         }
2224 
2225         new_cpu = __this_cpu_read(wq_rr_cpu_last);
2226         new_cpu = cpumask_next_and(new_cpu, wq_unbound_cpumask, cpu_online_mask);
2227         if (unlikely(new_cpu >= nr_cpu_ids)) {
2228                 new_cpu = cpumask_first_and(wq_unbound_cpumask, cpu_online_mask);
2229                 if (unlikely(new_cpu >= nr_cpu_ids))
2230                         return cpu;
2231         }
2232         __this_cpu_write(wq_rr_cpu_last, new_cpu);
2233 
2234         return new_cpu;
2235 }
2236 
2237 static void __queue_work(int cpu, struct workqueue_struct *wq,
2238                          struct work_struct *work)
2239 {
2240         struct pool_workqueue *pwq;
2241         struct worker_pool *last_pool, *pool;
2242         unsigned int work_flags;
2243         unsigned int req_cpu = cpu;
2244 
2245         /*
2246          * While a work item is PENDING && off queue, a task trying to
2247          * steal the PENDING will busy-loop waiting for it to either get
2248          * queued or lose PENDING.  Grabbing PENDING and queueing should
2249          * happen with IRQ disabled.
2250          */
2251         lockdep_assert_irqs_disabled();
2252 
2253         /*
2254          * For a draining wq, only works from the same workqueue are
2255          * allowed. The __WQ_DESTROYING helps to spot the issue that
2256          * queues a new work item to a wq after destroy_workqueue(wq).
2257          */
2258         if (unlikely(wq->flags & (__WQ_DESTROYING | __WQ_DRAINING) &&
2259                      WARN_ON_ONCE(!is_chained_work(wq))))
2260                 return;
2261         rcu_read_lock();
2262 retry:
2263         /* pwq which will be used unless @work is executing elsewhere */
2264         if (req_cpu == WORK_CPU_UNBOUND) {
2265                 if (wq->flags & WQ_UNBOUND)
2266                         cpu = wq_select_unbound_cpu(raw_smp_processor_id());
2267                 else
2268                         cpu = raw_smp_processor_id();
2269         }
2270 
2271         pwq = rcu_dereference(*per_cpu_ptr(wq->cpu_pwq, cpu));
2272         pool = pwq->pool;
2273 
2274         /*
2275          * If @work was previously on a different pool, it might still be
2276          * running there, in which case the work needs to be queued on that
2277          * pool to guarantee non-reentrancy.
2278          *
2279          * For ordered workqueue, work items must be queued on the newest pwq
2280          * for accurate order management.  Guaranteed order also guarantees
2281          * non-reentrancy.  See the comments above unplug_oldest_pwq().
2282          */
2283         last_pool = get_work_pool(work);
2284         if (last_pool && last_pool != pool && !(wq->flags & __WQ_ORDERED)) {
2285                 struct worker *worker;
2286 
2287                 raw_spin_lock(&last_pool->lock);
2288 
2289                 worker = find_worker_executing_work(last_pool, work);
2290 
2291                 if (worker && worker->current_pwq->wq == wq) {
2292                         pwq = worker->current_pwq;
2293                         pool = pwq->pool;
2294                         WARN_ON_ONCE(pool != last_pool);
2295                 } else {
2296                         /* meh... not running there, queue here */
2297                         raw_spin_unlock(&last_pool->lock);
2298                         raw_spin_lock(&pool->lock);
2299                 }
2300         } else {
2301                 raw_spin_lock(&pool->lock);
2302         }
2303 
2304         /*
2305          * pwq is determined and locked. For unbound pools, we could have raced
2306          * with pwq release and it could already be dead. If its refcnt is zero,
2307          * repeat pwq selection. Note that unbound pwqs never die without
2308          * another pwq replacing it in cpu_pwq or while work items are executing
2309          * on it, so the retrying is guaranteed to make forward-progress.
2310          */
2311         if (unlikely(!pwq->refcnt)) {
2312                 if (wq->flags & WQ_UNBOUND) {
2313                         raw_spin_unlock(&pool->lock);
2314                         cpu_relax();
2315                         goto retry;
2316                 }
2317                 /* oops */
2318                 WARN_ONCE(true, "workqueue: per-cpu pwq for %s on cpu%d has 0 refcnt",
2319                           wq->name, cpu);
2320         }
2321 
2322         /* pwq determined, queue */
2323         trace_workqueue_queue_work(req_cpu, pwq, work);
2324 
2325         if (WARN_ON(!list_empty(&work->entry)))
2326                 goto out;
2327 
2328         pwq->nr_in_flight[pwq->work_color]++;
2329         work_flags = work_color_to_flags(pwq->work_color);
2330 
2331         /*
2332          * Limit the number of concurrently active work items to max_active.
2333          * @work must also queue behind existing inactive work items to maintain
2334          * ordering when max_active changes. See wq_adjust_max_active().
2335          */
2336         if (list_empty(&pwq->inactive_works) && pwq_tryinc_nr_active(pwq, false)) {
2337                 if (list_empty(&pool->worklist))
2338                         pool->watchdog_ts = jiffies;
2339 
2340                 trace_workqueue_activate_work(work);
2341                 insert_work(pwq, work, &pool->worklist, work_flags);
2342                 kick_pool(pool);
2343         } else {
2344                 work_flags |= WORK_STRUCT_INACTIVE;
2345                 insert_work(pwq, work, &pwq->inactive_works, work_flags);
2346         }
2347 
2348 out:
2349         raw_spin_unlock(&pool->lock);
2350         rcu_read_unlock();
2351 }
2352 
2353 static bool clear_pending_if_disabled(struct work_struct *work)
2354 {
2355         unsigned long data = *work_data_bits(work);
2356         struct work_offq_data offqd;
2357 
2358         if (likely((data & WORK_STRUCT_PWQ) ||
2359                    !(data & WORK_OFFQ_DISABLE_MASK)))
2360                 return false;
2361 
2362         work_offqd_unpack(&offqd, data);
2363         set_work_pool_and_clear_pending(work, offqd.pool_id,
2364                                         work_offqd_pack_flags(&offqd));
2365         return true;
2366 }
2367 
2368 /**
2369  * queue_work_on - queue work on specific cpu
2370  * @cpu: CPU number to execute work on
2371  * @wq: workqueue to use
2372  * @work: work to queue
2373  *
2374  * We queue the work to a specific CPU, the caller must ensure it
2375  * can't go away.  Callers that fail to ensure that the specified
2376  * CPU cannot go away will execute on a randomly chosen CPU.
2377  * But note well that callers specifying a CPU that never has been
2378  * online will get a splat.
2379  *
2380  * Return: %false if @work was already on a queue, %true otherwise.
2381  */
2382 bool queue_work_on(int cpu, struct workqueue_struct *wq,
2383                    struct work_struct *work)
2384 {
2385         bool ret = false;
2386         unsigned long irq_flags;
2387 
2388         local_irq_save(irq_flags);
2389 
2390         if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)) &&
2391             !clear_pending_if_disabled(work)) {
2392                 __queue_work(cpu, wq, work);
2393                 ret = true;
2394         }
2395 
2396         local_irq_restore(irq_flags);
2397         return ret;
2398 }
2399 EXPORT_SYMBOL(queue_work_on);
2400 
2401 /**
2402  * select_numa_node_cpu - Select a CPU based on NUMA node
2403  * @node: NUMA node ID that we want to select a CPU from
2404  *
2405  * This function will attempt to find a "random" cpu available on a given
2406  * node. If there are no CPUs available on the given node it will return
2407  * WORK_CPU_UNBOUND indicating that we should just schedule to any
2408  * available CPU if we need to schedule this work.
2409  */
2410 static int select_numa_node_cpu(int node)
2411 {
2412         int cpu;
2413 
2414         /* Delay binding to CPU if node is not valid or online */
2415         if (node < 0 || node >= MAX_NUMNODES || !node_online(node))
2416                 return WORK_CPU_UNBOUND;
2417 
2418         /* Use local node/cpu if we are already there */
2419         cpu = raw_smp_processor_id();
2420         if (node == cpu_to_node(cpu))
2421                 return cpu;
2422 
2423         /* Use "random" otherwise know as "first" online CPU of node */
2424         cpu = cpumask_any_and(cpumask_of_node(node), cpu_online_mask);
2425 
2426         /* If CPU is valid return that, otherwise just defer */
2427         return cpu < nr_cpu_ids ? cpu : WORK_CPU_UNBOUND;
2428 }
2429 
2430 /**
2431  * queue_work_node - queue work on a "random" cpu for a given NUMA node
2432  * @node: NUMA node that we are targeting the work for
2433  * @wq: workqueue to use
2434  * @work: work to queue
2435  *
2436  * We queue the work to a "random" CPU within a given NUMA node. The basic
2437  * idea here is to provide a way to somehow associate work with a given
2438  * NUMA node.
2439  *
2440  * This function will only make a best effort attempt at getting this onto
2441  * the right NUMA node. If no node is requested or the requested node is
2442  * offline then we just fall back to standard queue_work behavior.
2443  *
2444  * Currently the "random" CPU ends up being the first available CPU in the
2445  * intersection of cpu_online_mask and the cpumask of the node, unless we
2446  * are running on the node. In that case we just use the current CPU.
2447  *
2448  * Return: %false if @work was already on a queue, %true otherwise.
2449  */
2450 bool queue_work_node(int node, struct workqueue_struct *wq,
2451                      struct work_struct *work)
2452 {
2453         unsigned long irq_flags;
2454         bool ret = false;
2455 
2456         /*
2457          * This current implementation is specific to unbound workqueues.
2458          * Specifically we only return the first available CPU for a given
2459          * node instead of cycling through individual CPUs within the node.
2460          *
2461          * If this is used with a per-cpu workqueue then the logic in
2462          * workqueue_select_cpu_near would need to be updated to allow for
2463          * some round robin type logic.
2464          */
2465         WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND));
2466 
2467         local_irq_save(irq_flags);
2468 
2469         if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)) &&
2470             !clear_pending_if_disabled(work)) {
2471                 int cpu = select_numa_node_cpu(node);
2472 
2473                 __queue_work(cpu, wq, work);
2474                 ret = true;
2475         }
2476 
2477         local_irq_restore(irq_flags);
2478         return ret;
2479 }
2480 EXPORT_SYMBOL_GPL(queue_work_node);
2481 
2482 void delayed_work_timer_fn(struct timer_list *t)
2483 {
2484         struct delayed_work *dwork = from_timer(dwork, t, timer);
2485 
2486         /* should have been called from irqsafe timer with irq already off */
2487         __queue_work(dwork->cpu, dwork->wq, &dwork->work);
2488 }
2489 EXPORT_SYMBOL(delayed_work_timer_fn);
2490 
2491 static void __queue_delayed_work(int cpu, struct workqueue_struct *wq,
2492                                 struct delayed_work *dwork, unsigned long delay)
2493 {
2494         struct timer_list *timer = &dwork->timer;
2495         struct work_struct *work = &dwork->work;
2496 
2497         WARN_ON_ONCE(!wq);
2498         WARN_ON_ONCE(timer->function != delayed_work_timer_fn);
2499         WARN_ON_ONCE(timer_pending(timer));
2500         WARN_ON_ONCE(!list_empty(&work->entry));
2501 
2502         /*
2503          * If @delay is 0, queue @dwork->work immediately.  This is for
2504          * both optimization and correctness.  The earliest @timer can
2505          * expire is on the closest next tick and delayed_work users depend
2506          * on that there's no such delay when @delay is 0.
2507          */
2508         if (!delay) {
2509                 __queue_work(cpu, wq, &dwork->work);
2510                 return;
2511         }
2512 
2513         dwork->wq = wq;
2514         dwork->cpu = cpu;
2515         timer->expires = jiffies + delay;
2516 
2517         if (housekeeping_enabled(HK_TYPE_TIMER)) {
2518                 /* If the current cpu is a housekeeping cpu, use it. */
2519                 cpu = smp_processor_id();
2520                 if (!housekeeping_test_cpu(cpu, HK_TYPE_TIMER))
2521                         cpu = housekeeping_any_cpu(HK_TYPE_TIMER);
2522                 add_timer_on(timer, cpu);
2523         } else {
2524                 if (likely(cpu == WORK_CPU_UNBOUND))
2525                         add_timer_global(timer);
2526                 else
2527                         add_timer_on(timer, cpu);
2528         }
2529 }
2530 
2531 /**
2532  * queue_delayed_work_on - queue work on specific CPU after delay
2533  * @cpu: CPU number to execute work on
2534  * @wq: workqueue to use
2535  * @dwork: work to queue
2536  * @delay: number of jiffies to wait before queueing
2537  *
2538  * Return: %false if @work was already on a queue, %true otherwise.  If
2539  * @delay is zero and @dwork is idle, it will be scheduled for immediate
2540  * execution.
2541  */
2542 bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
2543                            struct delayed_work *dwork, unsigned long delay)
2544 {
2545         struct work_struct *work = &dwork->work;
2546         bool ret = false;
2547         unsigned long irq_flags;
2548 
2549         /* read the comment in __queue_work() */
2550         local_irq_save(irq_flags);
2551 
2552         if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)) &&
2553             !clear_pending_if_disabled(work)) {
2554                 __queue_delayed_work(cpu, wq, dwork, delay);
2555                 ret = true;
2556         }
2557 
2558         local_irq_restore(irq_flags);
2559         return ret;
2560 }
2561 EXPORT_SYMBOL(queue_delayed_work_on);
2562 
2563 /**
2564  * mod_delayed_work_on - modify delay of or queue a delayed work on specific CPU
2565  * @cpu: CPU number to execute work on
2566  * @wq: workqueue to use
2567  * @dwork: work to queue
2568  * @delay: number of jiffies to wait before queueing
2569  *
2570  * If @dwork is idle, equivalent to queue_delayed_work_on(); otherwise,
2571  * modify @dwork's timer so that it expires after @delay.  If @delay is
2572  * zero, @work is guaranteed to be scheduled immediately regardless of its
2573  * current state.
2574  *
2575  * Return: %false if @dwork was idle and queued, %true if @dwork was
2576  * pending and its timer was modified.
2577  *
2578  * This function is safe to call from any context including IRQ handler.
2579  * See try_to_grab_pending() for details.
2580  */
2581 bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
2582                          struct delayed_work *dwork, unsigned long delay)
2583 {
2584         unsigned long irq_flags;
2585         bool ret;
2586 
2587         ret = work_grab_pending(&dwork->work, WORK_CANCEL_DELAYED, &irq_flags);
2588 
2589         if (!clear_pending_if_disabled(&dwork->work))
2590                 __queue_delayed_work(cpu, wq, dwork, delay);
2591 
2592         local_irq_restore(irq_flags);
2593         return ret;
2594 }
2595 EXPORT_SYMBOL_GPL(mod_delayed_work_on);
2596 
2597 static void rcu_work_rcufn(struct rcu_head *rcu)
2598 {
2599         struct rcu_work *rwork = container_of(rcu, struct rcu_work, rcu);
2600 
2601         /* read the comment in __queue_work() */
2602         local_irq_disable();
2603         __queue_work(WORK_CPU_UNBOUND, rwork->wq, &rwork->work);
2604         local_irq_enable();
2605 }
2606 
2607 /**
2608  * queue_rcu_work - queue work after a RCU grace period
2609  * @wq: workqueue to use
2610  * @rwork: work to queue
2611  *
2612  * Return: %false if @rwork was already pending, %true otherwise.  Note
2613  * that a full RCU grace period is guaranteed only after a %true return.
2614  * While @rwork is guaranteed to be executed after a %false return, the
2615  * execution may happen before a full RCU grace period has passed.
2616  */
2617 bool queue_rcu_work(struct workqueue_struct *wq, struct rcu_work *rwork)
2618 {
2619         struct work_struct *work = &rwork->work;
2620 
2621         /*
2622          * rcu_work can't be canceled or disabled. Warn if the user reached
2623          * inside @rwork and disabled the inner work.
2624          */
2625         if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)) &&
2626             !WARN_ON_ONCE(clear_pending_if_disabled(work))) {
2627                 rwork->wq = wq;
2628                 call_rcu_hurry(&rwork->rcu, rcu_work_rcufn);
2629                 return true;
2630         }
2631 
2632         return false;
2633 }
2634 EXPORT_SYMBOL(queue_rcu_work);
2635 
2636 static struct worker *alloc_worker(int node)
2637 {
2638         struct worker *worker;
2639 
2640         worker = kzalloc_node(sizeof(*worker), GFP_KERNEL, node);
2641         if (worker) {
2642                 INIT_LIST_HEAD(&worker->entry);
2643                 INIT_LIST_HEAD(&worker->scheduled);
2644                 INIT_LIST_HEAD(&worker->node);
2645                 /* on creation a worker is in !idle && prep state */
2646                 worker->flags = WORKER_PREP;
2647         }
2648         return worker;
2649 }
2650 
2651 static cpumask_t *pool_allowed_cpus(struct worker_pool *pool)
2652 {
2653         if (pool->cpu < 0 && pool->attrs->affn_strict)
2654                 return pool->attrs->__pod_cpumask;
2655         else
2656                 return pool->attrs->cpumask;
2657 }
2658 
2659 /**
2660  * worker_attach_to_pool() - attach a worker to a pool
2661  * @worker: worker to be attached
2662  * @pool: the target pool
2663  *
2664  * Attach @worker to @pool.  Once attached, the %WORKER_UNBOUND flag and
2665  * cpu-binding of @worker are kept coordinated with the pool across
2666  * cpu-[un]hotplugs.
2667  */
2668 static void worker_attach_to_pool(struct worker *worker,
2669                                   struct worker_pool *pool)
2670 {
2671         mutex_lock(&wq_pool_attach_mutex);
2672 
2673         /*
2674          * The wq_pool_attach_mutex ensures %POOL_DISASSOCIATED remains stable
2675          * across this function. See the comments above the flag definition for
2676          * details. BH workers are, while per-CPU, always DISASSOCIATED.
2677          */
2678         if (pool->flags & POOL_DISASSOCIATED) {
2679                 worker->flags |= WORKER_UNBOUND;
2680         } else {
2681                 WARN_ON_ONCE(pool->flags & POOL_BH);
2682                 kthread_set_per_cpu(worker->task, pool->cpu);
2683         }
2684 
2685         if (worker->rescue_wq)
2686                 set_cpus_allowed_ptr(worker->task, pool_allowed_cpus(pool));
2687 
2688         list_add_tail(&worker->node, &pool->workers);
2689         worker->pool = pool;
2690 
2691         mutex_unlock(&wq_pool_attach_mutex);
2692 }
2693 
2694 static void unbind_worker(struct worker *worker)
2695 {
2696         lockdep_assert_held(&wq_pool_attach_mutex);
2697 
2698         kthread_set_per_cpu(worker->task, -1);
2699         if (cpumask_intersects(wq_unbound_cpumask, cpu_active_mask))
2700                 WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, wq_unbound_cpumask) < 0);
2701         else
2702                 WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, cpu_possible_mask) < 0);
2703 }
2704 
2705 
2706 static void detach_worker(struct worker *worker)
2707 {
2708         lockdep_assert_held(&wq_pool_attach_mutex);
2709 
2710         unbind_worker(worker);
2711         list_del(&worker->node);
2712         worker->pool = NULL;
2713 }
2714 
2715 /**
2716  * worker_detach_from_pool() - detach a worker from its pool
2717  * @worker: worker which is attached to its pool
2718  *
2719  * Undo the attaching which had been done in worker_attach_to_pool().  The
2720  * caller worker shouldn't access to the pool after detached except it has
2721  * other reference to the pool.
2722  */
2723 static void worker_detach_from_pool(struct worker *worker)
2724 {
2725         struct worker_pool *pool = worker->pool;
2726 
2727         /* there is one permanent BH worker per CPU which should never detach */
2728         WARN_ON_ONCE(pool->flags & POOL_BH);
2729 
2730         mutex_lock(&wq_pool_attach_mutex);
2731         detach_worker(worker);
2732         mutex_unlock(&wq_pool_attach_mutex);
2733 
2734         /* clear leftover flags without pool->lock after it is detached */
2735         worker->flags &= ~(WORKER_UNBOUND | WORKER_REBOUND);
2736 }
2737 
2738 static int format_worker_id(char *buf, size_t size, struct worker *worker,
2739                             struct worker_pool *pool)
2740 {
2741         if (worker->rescue_wq)
2742                 return scnprintf(buf, size, "kworker/R-%s",
2743                                  worker->rescue_wq->name);
2744 
2745         if (pool) {
2746                 if (pool->cpu >= 0)
2747                         return scnprintf(buf, size, "kworker/%d:%d%s",
2748                                          pool->cpu, worker->id,
2749                                          pool->attrs->nice < 0  ? "H" : "");
2750                 else
2751                         return scnprintf(buf, size, "kworker/u%d:%d",
2752                                          pool->id, worker->id);
2753         } else {
2754                 return scnprintf(buf, size, "kworker/dying");
2755         }
2756 }
2757 
2758 /**
2759  * create_worker - create a new workqueue worker
2760  * @pool: pool the new worker will belong to
2761  *
2762  * Create and start a new worker which is attached to @pool.
2763  *
2764  * CONTEXT:
2765  * Might sleep.  Does GFP_KERNEL allocations.
2766  *
2767  * Return:
2768  * Pointer to the newly created worker.
2769  */
2770 static struct worker *create_worker(struct worker_pool *pool)
2771 {
2772         struct worker *worker;
2773         int id;
2774 
2775         /* ID is needed to determine kthread name */
2776         id = ida_alloc(&pool->worker_ida, GFP_KERNEL);
2777         if (id < 0) {
2778                 pr_err_once("workqueue: Failed to allocate a worker ID: %pe\n",
2779                             ERR_PTR(id));
2780                 return NULL;
2781         }
2782 
2783         worker = alloc_worker(pool->node);
2784         if (!worker) {
2785                 pr_err_once("workqueue: Failed to allocate a worker\n");
2786                 goto fail;
2787         }
2788 
2789         worker->id = id;
2790 
2791         if (!(pool->flags & POOL_BH)) {
2792                 char id_buf[WORKER_ID_LEN];
2793 
2794                 format_worker_id(id_buf, sizeof(id_buf), worker, pool);
2795                 worker->task = kthread_create_on_node(worker_thread, worker,
2796                                                       pool->node, "%s", id_buf);
2797                 if (IS_ERR(worker->task)) {
2798                         if (PTR_ERR(worker->task) == -EINTR) {
2799                                 pr_err("workqueue: Interrupted when creating a worker thread \"%s\"\n",
2800                                        id_buf);
2801                         } else {
2802                                 pr_err_once("workqueue: Failed to create a worker thread: %pe",
2803                                             worker->task);
2804                         }
2805                         goto fail;
2806                 }
2807 
2808                 set_user_nice(worker->task, pool->attrs->nice);
2809                 kthread_bind_mask(worker->task, pool_allowed_cpus(pool));
2810         }
2811 
2812         /* successful, attach the worker to the pool */
2813         worker_attach_to_pool(worker, pool);
2814 
2815         /* start the newly created worker */
2816         raw_spin_lock_irq(&pool->lock);
2817 
2818         worker->pool->nr_workers++;
2819         worker_enter_idle(worker);
2820 
2821         /*
2822          * @worker is waiting on a completion in kthread() and will trigger hung
2823          * check if not woken up soon. As kick_pool() is noop if @pool is empty,
2824          * wake it up explicitly.
2825          */
2826         if (worker->task)
2827                 wake_up_process(worker->task);
2828 
2829         raw_spin_unlock_irq(&pool->lock);
2830 
2831         return worker;
2832 
2833 fail:
2834         ida_free(&pool->worker_ida, id);
2835         kfree(worker);
2836         return NULL;
2837 }
2838 
2839 static void detach_dying_workers(struct list_head *cull_list)
2840 {
2841         struct worker *worker;
2842 
2843         list_for_each_entry(worker, cull_list, entry)
2844                 detach_worker(worker);
2845 }
2846 
2847 static void reap_dying_workers(struct list_head *cull_list)
2848 {
2849         struct worker *worker, *tmp;
2850 
2851         list_for_each_entry_safe(worker, tmp, cull_list, entry) {
2852                 list_del_init(&worker->entry);
2853                 kthread_stop_put(worker->task);
2854                 kfree(worker);
2855         }
2856 }
2857 
2858 /**
2859  * set_worker_dying - Tag a worker for destruction
2860  * @worker: worker to be destroyed
2861  * @list: transfer worker away from its pool->idle_list and into list
2862  *
2863  * Tag @worker for destruction and adjust @pool stats accordingly.  The worker
2864  * should be idle.
2865  *
2866  * CONTEXT:
2867  * raw_spin_lock_irq(pool->lock).
2868  */
2869 static void set_worker_dying(struct worker *worker, struct list_head *list)
2870 {
2871         struct worker_pool *pool = worker->pool;
2872 
2873         lockdep_assert_held(&pool->lock);
2874         lockdep_assert_held(&wq_pool_attach_mutex);
2875 
2876         /* sanity check frenzy */
2877         if (WARN_ON(worker->current_work) ||
2878             WARN_ON(!list_empty(&worker->scheduled)) ||
2879             WARN_ON(!(worker->flags & WORKER_IDLE)))
2880                 return;
2881 
2882         pool->nr_workers--;
2883         pool->nr_idle--;
2884 
2885         worker->flags |= WORKER_DIE;
2886 
2887         list_move(&worker->entry, list);
2888 
2889         /* get an extra task struct reference for later kthread_stop_put() */
2890         get_task_struct(worker->task);
2891 }
2892 
2893 /**
2894  * idle_worker_timeout - check if some idle workers can now be deleted.
2895  * @t: The pool's idle_timer that just expired
2896  *
2897  * The timer is armed in worker_enter_idle(). Note that it isn't disarmed in
2898  * worker_leave_idle(), as a worker flicking between idle and active while its
2899  * pool is at the too_many_workers() tipping point would cause too much timer
2900  * housekeeping overhead. Since IDLE_WORKER_TIMEOUT is long enough, we just let
2901  * it expire and re-evaluate things from there.
2902  */
2903 static void idle_worker_timeout(struct timer_list *t)
2904 {
2905         struct worker_pool *pool = from_timer(pool, t, idle_timer);
2906         bool do_cull = false;
2907 
2908         if (work_pending(&pool->idle_cull_work))
2909                 return;
2910 
2911         raw_spin_lock_irq(&pool->lock);
2912 
2913         if (too_many_workers(pool)) {
2914                 struct worker *worker;
2915                 unsigned long expires;
2916 
2917                 /* idle_list is kept in LIFO order, check the last one */
2918                 worker = list_last_entry(&pool->idle_list, struct worker, entry);
2919                 expires = worker->last_active + IDLE_WORKER_TIMEOUT;
2920                 do_cull = !time_before(jiffies, expires);
2921 
2922                 if (!do_cull)
2923                         mod_timer(&pool->idle_timer, expires);
2924         }
2925         raw_spin_unlock_irq(&pool->lock);
2926 
2927         if (do_cull)
2928                 queue_work(system_unbound_wq, &pool->idle_cull_work);
2929 }
2930 
2931 /**
2932  * idle_cull_fn - cull workers that have been idle for too long.
2933  * @work: the pool's work for handling these idle workers
2934  *
2935  * This goes through a pool's idle workers and gets rid of those that have been
2936  * idle for at least IDLE_WORKER_TIMEOUT seconds.
2937  *
2938  * We don't want to disturb isolated CPUs because of a pcpu kworker being
2939  * culled, so this also resets worker affinity. This requires a sleepable
2940  * context, hence the split between timer callback and work item.
2941  */
2942 static void idle_cull_fn(struct work_struct *work)
2943 {
2944         struct worker_pool *pool = container_of(work, struct worker_pool, idle_cull_work);
2945         LIST_HEAD(cull_list);
2946 
2947         /*
2948          * Grabbing wq_pool_attach_mutex here ensures an already-running worker
2949          * cannot proceed beyong set_pf_worker() in its self-destruct path.
2950          * This is required as a previously-preempted worker could run after
2951          * set_worker_dying() has happened but before detach_dying_workers() did.
2952          */
2953         mutex_lock(&wq_pool_attach_mutex);
2954         raw_spin_lock_irq(&pool->lock);
2955 
2956         while (too_many_workers(pool)) {
2957                 struct worker *worker;
2958                 unsigned long expires;
2959 
2960                 worker = list_last_entry(&pool->idle_list, struct worker, entry);
2961                 expires = worker->last_active + IDLE_WORKER_TIMEOUT;
2962 
2963                 if (time_before(jiffies, expires)) {
2964                         mod_timer(&pool->idle_timer, expires);
2965                         break;
2966                 }
2967 
2968                 set_worker_dying(worker, &cull_list);
2969         }
2970 
2971         raw_spin_unlock_irq(&pool->lock);
2972         detach_dying_workers(&cull_list);
2973         mutex_unlock(&wq_pool_attach_mutex);
2974 
2975         reap_dying_workers(&cull_list);
2976 }
2977 
2978 static void send_mayday(struct work_struct *work)
2979 {
2980         struct pool_workqueue *pwq = get_work_pwq(work);
2981         struct workqueue_struct *wq = pwq->wq;
2982 
2983         lockdep_assert_held(&wq_mayday_lock);
2984 
2985         if (!wq->rescuer)
2986                 return;
2987 
2988         /* mayday mayday mayday */
2989         if (list_empty(&pwq->mayday_node)) {
2990                 /*
2991                  * If @pwq is for an unbound wq, its base ref may be put at
2992                  * any time due to an attribute change.  Pin @pwq until the
2993                  * rescuer is done with it.
2994                  */
2995                 get_pwq(pwq);
2996                 list_add_tail(&pwq->mayday_node, &wq->maydays);
2997                 wake_up_process(wq->rescuer->task);
2998                 pwq->stats[PWQ_STAT_MAYDAY]++;
2999         }
3000 }
3001 
3002 static void pool_mayday_timeout(struct timer_list *t)
3003 {
3004         struct worker_pool *pool = from_timer(pool, t, mayday_timer);
3005         struct work_struct *work;
3006 
3007         raw_spin_lock_irq(&pool->lock);
3008         raw_spin_lock(&wq_mayday_lock);         /* for wq->maydays */
3009 
3010         if (need_to_create_worker(pool)) {
3011                 /*
3012                  * We've been trying to create a new worker but
3013                  * haven't been successful.  We might be hitting an
3014                  * allocation deadlock.  Send distress signals to
3015                  * rescuers.
3016                  */
3017                 list_for_each_entry(work, &pool->worklist, entry)
3018                         send_mayday(work);
3019         }
3020 
3021         raw_spin_unlock(&wq_mayday_lock);
3022         raw_spin_unlock_irq(&pool->lock);
3023 
3024         mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INTERVAL);
3025 }
3026 
3027 /**
3028  * maybe_create_worker - create a new worker if necessary
3029  * @pool: pool to create a new worker for
3030  *
3031  * Create a new worker for @pool if necessary.  @pool is guaranteed to
3032  * have at least one idle worker on return from this function.  If
3033  * creating a new worker takes longer than MAYDAY_INTERVAL, mayday is
3034  * sent to all rescuers with works scheduled on @pool to resolve
3035  * possible allocation deadlock.
3036  *
3037  * On return, need_to_create_worker() is guaranteed to be %false and
3038  * may_start_working() %true.
3039  *
3040  * LOCKING:
3041  * raw_spin_lock_irq(pool->lock) which may be released and regrabbed
3042  * multiple times.  Does GFP_KERNEL allocations.  Called only from
3043  * manager.
3044  */
3045 static void maybe_create_worker(struct worker_pool *pool)
3046 __releases(&pool->lock)
3047 __acquires(&pool->lock)
3048 {
3049 restart:
3050         raw_spin_unlock_irq(&pool->lock);
3051 
3052         /* if we don't make progress in MAYDAY_INITIAL_TIMEOUT, call for help */
3053         mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT);
3054 
3055         while (true) {
3056                 if (create_worker(pool) || !need_to_create_worker(pool))
3057                         break;
3058 
3059                 schedule_timeout_interruptible(CREATE_COOLDOWN);
3060 
3061                 if (!need_to_create_worker(pool))
3062                         break;
3063         }
3064 
3065         del_timer_sync(&pool->mayday_timer);
3066         raw_spin_lock_irq(&pool->lock);
3067         /*
3068          * This is necessary even after a new worker was just successfully
3069          * created as @pool->lock was dropped and the new worker might have
3070          * already become busy.
3071          */
3072         if (need_to_create_worker(pool))
3073                 goto restart;
3074 }
3075 
3076 /**
3077  * manage_workers - manage worker pool
3078  * @worker: self
3079  *
3080  * Assume the manager role and manage the worker pool @worker belongs
3081  * to.  At any given time, there can be only zero or one manager per
3082  * pool.  The exclusion is handled automatically by this function.
3083  *
3084  * The caller can safely start processing works on false return.  On
3085  * true return, it's guaranteed that need_to_create_worker() is false
3086  * and may_start_working() is true.
3087  *
3088  * CONTEXT:
3089  * raw_spin_lock_irq(pool->lock) which may be released and regrabbed
3090  * multiple times.  Does GFP_KERNEL allocations.
3091  *
3092  * Return:
3093  * %false if the pool doesn't need management and the caller can safely
3094  * start processing works, %true if management function was performed and
3095  * the conditions that the caller verified before calling the function may
3096  * no longer be true.
3097  */
3098 static bool manage_workers(struct worker *worker)
3099 {
3100         struct worker_pool *pool = worker->pool;
3101 
3102         if (pool->flags & POOL_MANAGER_ACTIVE)
3103                 return false;
3104 
3105         pool->flags |= POOL_MANAGER_ACTIVE;
3106         pool->manager = worker;
3107 
3108         maybe_create_worker(pool);
3109 
3110         pool->manager = NULL;
3111         pool->flags &= ~POOL_MANAGER_ACTIVE;
3112         rcuwait_wake_up(&manager_wait);
3113         return true;
3114 }
3115 
3116 /**
3117  * process_one_work - process single work
3118  * @worker: self
3119  * @work: work to process
3120  *
3121  * Process @work.  This function contains all the logics necessary to
3122  * process a single work including synchronization against and
3123  * interaction with other workers on the same cpu, queueing and
3124  * flushing.  As long as context requirement is met, any worker can
3125  * call this function to process a work.
3126  *
3127  * CONTEXT:
3128  * raw_spin_lock_irq(pool->lock) which is released and regrabbed.
3129  */
3130 static void process_one_work(struct worker *worker, struct work_struct *work)
3131 __releases(&pool->lock)
3132 __acquires(&pool->lock)
3133 {
3134         struct pool_workqueue *pwq = get_work_pwq(work);
3135         struct worker_pool *pool = worker->pool;
3136         unsigned long work_data;
3137         int lockdep_start_depth, rcu_start_depth;
3138         bool bh_draining = pool->flags & POOL_BH_DRAINING;
3139 #ifdef CONFIG_LOCKDEP
3140         /*
3141          * It is permissible to free the struct work_struct from
3142          * inside the function that is called from it, this we need to
3143          * take into account for lockdep too.  To avoid bogus "held
3144          * lock freed" warnings as well as problems when looking into
3145          * work->lockdep_map, make a copy and use that here.
3146          */
3147         struct lockdep_map lockdep_map;
3148 
3149         lockdep_copy_map(&lockdep_map, &work->lockdep_map);
3150 #endif
3151         /* ensure we're on the correct CPU */
3152         WARN_ON_ONCE(!(pool->flags & POOL_DISASSOCIATED) &&
3153                      raw_smp_processor_id() != pool->cpu);
3154 
3155         /* claim and dequeue */
3156         debug_work_deactivate(work);
3157         hash_add(pool->busy_hash, &worker->hentry, (unsigned long)work);
3158         worker->current_work = work;
3159         worker->current_func = work->func;
3160         worker->current_pwq = pwq;
3161         if (worker->task)
3162                 worker->current_at = worker->task->se.sum_exec_runtime;
3163         work_data = *work_data_bits(work);
3164         worker->current_color = get_work_color(work_data);
3165 
3166         /*
3167          * Record wq name for cmdline and debug reporting, may get
3168          * overridden through set_worker_desc().
3169          */
3170         strscpy(worker->desc, pwq->wq->name, WORKER_DESC_LEN);
3171 
3172         list_del_init(&work->entry);
3173 
3174         /*
3175          * CPU intensive works don't participate in concurrency management.
3176          * They're the scheduler's responsibility.  This takes @worker out
3177          * of concurrency management and the next code block will chain
3178          * execution of the pending work items.
3179          */
3180         if (unlikely(pwq->wq->flags & WQ_CPU_INTENSIVE))
3181                 worker_set_flags(worker, WORKER_CPU_INTENSIVE);
3182 
3183         /*
3184          * Kick @pool if necessary. It's always noop for per-cpu worker pools
3185          * since nr_running would always be >= 1 at this point. This is used to
3186          * chain execution of the pending work items for WORKER_NOT_RUNNING
3187          * workers such as the UNBOUND and CPU_INTENSIVE ones.
3188          */
3189         kick_pool(pool);
3190 
3191         /*
3192          * Record the last pool and clear PENDING which should be the last
3193          * update to @work.  Also, do this inside @pool->lock so that
3194          * PENDING and queued state changes happen together while IRQ is
3195          * disabled.
3196          */
3197         set_work_pool_and_clear_pending(work, pool->id, pool_offq_flags(pool));
3198 
3199         pwq->stats[PWQ_STAT_STARTED]++;
3200         raw_spin_unlock_irq(&pool->lock);
3201 
3202         rcu_start_depth = rcu_preempt_depth();
3203         lockdep_start_depth = lockdep_depth(current);
3204         /* see drain_dead_softirq_workfn() */
3205         if (!bh_draining)
3206                 lock_map_acquire(&pwq->wq->lockdep_map);
3207         lock_map_acquire(&lockdep_map);
3208         /*
3209          * Strictly speaking we should mark the invariant state without holding
3210          * any locks, that is, before these two lock_map_acquire()'s.
3211          *
3212          * However, that would result in:
3213          *
3214          *   A(W1)
3215          *   WFC(C)
3216          *              A(W1)
3217          *              C(C)
3218          *
3219          * Which would create W1->C->W1 dependencies, even though there is no
3220          * actual deadlock possible. There are two solutions, using a
3221          * read-recursive acquire on the work(queue) 'locks', but this will then
3222          * hit the lockdep limitation on recursive locks, or simply discard
3223          * these locks.
3224          *
3225          * AFAICT there is no possible deadlock scenario between the
3226          * flush_work() and complete() primitives (except for single-threaded
3227          * workqueues), so hiding them isn't a problem.
3228          */
3229         lockdep_invariant_state(true);
3230         trace_workqueue_execute_start(work);
3231         worker->current_func(work);
3232         /*
3233          * While we must be careful to not use "work" after this, the trace
3234          * point will only record its address.
3235          */
3236         trace_workqueue_execute_end(work, worker->current_func);
3237         pwq->stats[PWQ_STAT_COMPLETED]++;
3238         lock_map_release(&lockdep_map);
3239         if (!bh_draining)
3240                 lock_map_release(&pwq->wq->lockdep_map);
3241 
3242         if (unlikely((worker->task && in_atomic()) ||
3243                      lockdep_depth(current) != lockdep_start_depth ||
3244                      rcu_preempt_depth() != rcu_start_depth)) {
3245                 pr_err("BUG: workqueue leaked atomic, lock or RCU: %s[%d]\n"
3246                        "     preempt=0x%08x lock=%d->%d RCU=%d->%d workfn=%ps\n",
3247                        current->comm, task_pid_nr(current), preempt_count(),
3248                        lockdep_start_depth, lockdep_depth(current),
3249                        rcu_start_depth, rcu_preempt_depth(),
3250                        worker->current_func);
3251                 debug_show_held_locks(current);
3252                 dump_stack();
3253         }
3254 
3255         /*
3256          * The following prevents a kworker from hogging CPU on !PREEMPTION
3257          * kernels, where a requeueing work item waiting for something to
3258          * happen could deadlock with stop_machine as such work item could
3259          * indefinitely requeue itself while all other CPUs are trapped in
3260          * stop_machine. At the same time, report a quiescent RCU state so
3261          * the same condition doesn't freeze RCU.
3262          */
3263         if (worker->task)
3264                 cond_resched();
3265 
3266         raw_spin_lock_irq(&pool->lock);
3267 
3268         /*
3269          * In addition to %WQ_CPU_INTENSIVE, @worker may also have been marked
3270          * CPU intensive by wq_worker_tick() if @work hogged CPU longer than
3271          * wq_cpu_intensive_thresh_us. Clear it.
3272          */
3273         worker_clr_flags(worker, WORKER_CPU_INTENSIVE);
3274 
3275         /* tag the worker for identification in schedule() */
3276         worker->last_func = worker->current_func;
3277 
3278         /* we're done with it, release */
3279         hash_del(&worker->hentry);
3280         worker->current_work = NULL;
3281         worker->current_func = NULL;
3282         worker->current_pwq = NULL;
3283         worker->current_color = INT_MAX;
3284 
3285         /* must be the last step, see the function comment */
3286         pwq_dec_nr_in_flight(pwq, work_data);
3287 }
3288 
3289 /**
3290  * process_scheduled_works - process scheduled works
3291  * @worker: self
3292  *
3293  * Process all scheduled works.  Please note that the scheduled list
3294  * may change while processing a work, so this function repeatedly
3295  * fetches a work from the top and executes it.
3296  *
3297  * CONTEXT:
3298  * raw_spin_lock_irq(pool->lock) which may be released and regrabbed
3299  * multiple times.
3300  */
3301 static void process_scheduled_works(struct worker *worker)
3302 {
3303         struct work_struct *work;
3304         bool first = true;
3305 
3306         while ((work = list_first_entry_or_null(&worker->scheduled,
3307                                                 struct work_struct, entry))) {
3308                 if (first) {
3309                         worker->pool->watchdog_ts = jiffies;
3310                         first = false;
3311                 }
3312                 process_one_work(worker, work);
3313         }
3314 }
3315 
3316 static void set_pf_worker(bool val)
3317 {
3318         mutex_lock(&wq_pool_attach_mutex);
3319         if (val)
3320                 current->flags |= PF_WQ_WORKER;
3321         else
3322                 current->flags &= ~PF_WQ_WORKER;
3323         mutex_unlock(&wq_pool_attach_mutex);
3324 }
3325 
3326 /**
3327  * worker_thread - the worker thread function
3328  * @__worker: self
3329  *
3330  * The worker thread function.  All workers belong to a worker_pool -
3331  * either a per-cpu one or dynamic unbound one.  These workers process all
3332  * work items regardless of their specific target workqueue.  The only
3333  * exception is work items which belong to workqueues with a rescuer which
3334  * will be explained in rescuer_thread().
3335  *
3336  * Return: 0
3337  */
3338 static int worker_thread(void *__worker)
3339 {
3340         struct worker *worker = __worker;
3341         struct worker_pool *pool = worker->pool;
3342 
3343         /* tell the scheduler that this is a workqueue worker */
3344         set_pf_worker(true);
3345 woke_up:
3346         raw_spin_lock_irq(&pool->lock);
3347 
3348         /* am I supposed to die? */
3349         if (unlikely(worker->flags & WORKER_DIE)) {
3350                 raw_spin_unlock_irq(&pool->lock);
3351                 set_pf_worker(false);
3352 
3353                 ida_free(&pool->worker_ida, worker->id);
3354                 WARN_ON_ONCE(!list_empty(&worker->entry));
3355                 return 0;
3356         }
3357 
3358         worker_leave_idle(worker);
3359 recheck:
3360         /* no more worker necessary? */
3361         if (!need_more_worker(pool))
3362                 goto sleep;
3363 
3364         /* do we need to manage? */
3365         if (unlikely(!may_start_working(pool)) && manage_workers(worker))
3366                 goto recheck;
3367 
3368         /*
3369          * ->scheduled list can only be filled while a worker is
3370          * preparing to process a work or actually processing it.
3371          * Make sure nobody diddled with it while I was sleeping.
3372          */
3373         WARN_ON_ONCE(!list_empty(&worker->scheduled));
3374 
3375         /*
3376          * Finish PREP stage.  We're guaranteed to have at least one idle
3377          * worker or that someone else has already assumed the manager
3378          * role.  This is where @worker starts participating in concurrency
3379          * management if applicable and concurrency management is restored
3380          * after being rebound.  See rebind_workers() for details.
3381          */
3382         worker_clr_flags(worker, WORKER_PREP | WORKER_REBOUND);
3383 
3384         do {
3385                 struct work_struct *work =
3386                         list_first_entry(&pool->worklist,
3387                                          struct work_struct, entry);
3388 
3389                 if (assign_work(work, worker, NULL))
3390                         process_scheduled_works(worker);
3391         } while (keep_working(pool));
3392 
3393         worker_set_flags(worker, WORKER_PREP);
3394 sleep:
3395         /*
3396          * pool->lock is held and there's no work to process and no need to
3397          * manage, sleep.  Workers are woken up only while holding
3398          * pool->lock or from local cpu, so setting the current state
3399          * before releasing pool->lock is enough to prevent losing any
3400          * event.
3401          */
3402         worker_enter_idle(worker);
3403         __set_current_state(TASK_IDLE);
3404         raw_spin_unlock_irq(&pool->lock);
3405         schedule();
3406         goto woke_up;
3407 }
3408 
3409 /**
3410  * rescuer_thread - the rescuer thread function
3411  * @__rescuer: self
3412  *
3413  * Workqueue rescuer thread function.  There's one rescuer for each
3414  * workqueue which has WQ_MEM_RECLAIM set.
3415  *
3416  * Regular work processing on a pool may block trying to create a new
3417  * worker which uses GFP_KERNEL allocation which has slight chance of
3418  * developing into deadlock if some works currently on the same queue
3419  * need to be processed to satisfy the GFP_KERNEL allocation.  This is
3420  * the problem rescuer solves.
3421  *
3422  * When such condition is possible, the pool summons rescuers of all
3423  * workqueues which have works queued on the pool and let them process
3424  * those works so that forward progress can be guaranteed.
3425  *
3426  * This should happen rarely.
3427  *
3428  * Return: 0
3429  */
3430 static int rescuer_thread(void *__rescuer)
3431 {
3432         struct worker *rescuer = __rescuer;
3433         struct workqueue_struct *wq = rescuer->rescue_wq;
3434         bool should_stop;
3435 
3436         set_user_nice(current, RESCUER_NICE_LEVEL);
3437 
3438         /*
3439          * Mark rescuer as worker too.  As WORKER_PREP is never cleared, it
3440          * doesn't participate in concurrency management.
3441          */
3442         set_pf_worker(true);
3443 repeat:
3444         set_current_state(TASK_IDLE);
3445 
3446         /*
3447          * By the time the rescuer is requested to stop, the workqueue
3448          * shouldn't have any work pending, but @wq->maydays may still have
3449          * pwq(s) queued.  This can happen by non-rescuer workers consuming
3450          * all the work items before the rescuer got to them.  Go through
3451          * @wq->maydays processing before acting on should_stop so that the
3452          * list is always empty on exit.
3453          */
3454         should_stop = kthread_should_stop();
3455 
3456         /* see whether any pwq is asking for help */
3457         raw_spin_lock_irq(&wq_mayday_lock);
3458 
3459         while (!list_empty(&wq->maydays)) {
3460                 struct pool_workqueue *pwq = list_first_entry(&wq->maydays,
3461                                         struct pool_workqueue, mayday_node);
3462                 struct worker_pool *pool = pwq->pool;
3463                 struct work_struct *work, *n;
3464 
3465                 __set_current_state(TASK_RUNNING);
3466                 list_del_init(&pwq->mayday_node);
3467 
3468                 raw_spin_unlock_irq(&wq_mayday_lock);
3469 
3470                 worker_attach_to_pool(rescuer, pool);
3471 
3472                 raw_spin_lock_irq(&pool->lock);
3473 
3474                 /*
3475                  * Slurp in all works issued via this workqueue and
3476                  * process'em.
3477                  */
3478                 WARN_ON_ONCE(!list_empty(&rescuer->scheduled));
3479                 list_for_each_entry_safe(work, n, &pool->worklist, entry) {
3480                         if (get_work_pwq(work) == pwq &&
3481                             assign_work(work, rescuer, &n))
3482                                 pwq->stats[PWQ_STAT_RESCUED]++;
3483                 }
3484 
3485                 if (!list_empty(&rescuer->scheduled)) {
3486                         process_scheduled_works(rescuer);
3487 
3488                         /*
3489                          * The above execution of rescued work items could
3490                          * have created more to rescue through
3491                          * pwq_activate_first_inactive() or chained
3492                          * queueing.  Let's put @pwq back on mayday list so
3493                          * that such back-to-back work items, which may be
3494                          * being used to relieve memory pressure, don't
3495                          * incur MAYDAY_INTERVAL delay inbetween.
3496                          */
3497                         if (pwq->nr_active && need_to_create_worker(pool)) {
3498                                 raw_spin_lock(&wq_mayday_lock);
3499                                 /*
3500                                  * Queue iff we aren't racing destruction
3501                                  * and somebody else hasn't queued it already.
3502                                  */
3503                                 if (wq->rescuer && list_empty(&pwq->mayday_node)) {
3504                                         get_pwq(pwq);
3505                                         list_add_tail(&pwq->mayday_node, &wq->maydays);
3506                                 }
3507                                 raw_spin_unlock(&wq_mayday_lock);
3508                         }
3509                 }
3510 
3511                 /*
3512                  * Put the reference grabbed by send_mayday().  @pool won't
3513                  * go away while we're still attached to it.
3514                  */
3515                 put_pwq(pwq);
3516 
3517                 /*
3518                  * Leave this pool. Notify regular workers; otherwise, we end up
3519                  * with 0 concurrency and stalling the execution.
3520                  */
3521                 kick_pool(pool);
3522 
3523                 raw_spin_unlock_irq(&pool->lock);
3524 
3525                 worker_detach_from_pool(rescuer);
3526 
3527                 raw_spin_lock_irq(&wq_mayday_lock);
3528         }
3529 
3530         raw_spin_unlock_irq(&wq_mayday_lock);
3531 
3532         if (should_stop) {
3533                 __set_current_state(TASK_RUNNING);
3534                 set_pf_worker(false);
3535                 return 0;
3536         }
3537 
3538         /* rescuers should never participate in concurrency management */
3539         WARN_ON_ONCE(!(rescuer->flags & WORKER_NOT_RUNNING));
3540         schedule();
3541         goto repeat;
3542 }
3543 
3544 static void bh_worker(struct worker *worker)
3545 {
3546         struct worker_pool *pool = worker->pool;
3547         int nr_restarts = BH_WORKER_RESTARTS;
3548         unsigned long end = jiffies + BH_WORKER_JIFFIES;
3549 
3550         raw_spin_lock_irq(&pool->lock);
3551         worker_leave_idle(worker);
3552 
3553         /*
3554          * This function follows the structure of worker_thread(). See there for
3555          * explanations on each step.
3556          */
3557         if (!need_more_worker(pool))
3558                 goto done;
3559 
3560         WARN_ON_ONCE(!list_empty(&worker->scheduled));
3561         worker_clr_flags(worker, WORKER_PREP | WORKER_REBOUND);
3562 
3563         do {
3564                 struct work_struct *work =
3565                         list_first_entry(&pool->worklist,
3566                                          struct work_struct, entry);
3567 
3568                 if (assign_work(work, worker, NULL))
3569                         process_scheduled_works(worker);
3570         } while (keep_working(pool) &&
3571                  --nr_restarts && time_before(jiffies, end));
3572 
3573         worker_set_flags(worker, WORKER_PREP);
3574 done:
3575         worker_enter_idle(worker);
3576         kick_pool(pool);
3577         raw_spin_unlock_irq(&pool->lock);
3578 }
3579 
3580 /*
3581  * TODO: Convert all tasklet users to workqueue and use softirq directly.
3582  *
3583  * This is currently called from tasklet[_hi]action() and thus is also called
3584  * whenever there are tasklets to run. Let's do an early exit if there's nothing
3585  * queued. Once conversion from tasklet is complete, the need_more_worker() test
3586  * can be dropped.
3587  *
3588  * After full conversion, we'll add worker->softirq_action, directly use the
3589  * softirq action and obtain the worker pointer from the softirq_action pointer.
3590  */
3591 void workqueue_softirq_action(bool highpri)
3592 {
3593         struct worker_pool *pool =
3594                 &per_cpu(bh_worker_pools, smp_processor_id())[highpri];
3595         if (need_more_worker(pool))
3596                 bh_worker(list_first_entry(&pool->workers, struct worker, node));
3597 }
3598 
3599 struct wq_drain_dead_softirq_work {
3600         struct work_struct      work;
3601         struct worker_pool      *pool;
3602         struct completion       done;
3603 };
3604 
3605 static void drain_dead_softirq_workfn(struct work_struct *work)
3606 {
3607         struct wq_drain_dead_softirq_work *dead_work =
3608                 container_of(work, struct wq_drain_dead_softirq_work, work);
3609         struct worker_pool *pool = dead_work->pool;
3610         bool repeat;
3611 
3612         /*
3613          * @pool's CPU is dead and we want to execute its still pending work
3614          * items from this BH work item which is running on a different CPU. As
3615          * its CPU is dead, @pool can't be kicked and, as work execution path
3616          * will be nested, a lockdep annotation needs to be suppressed. Mark
3617          * @pool with %POOL_BH_DRAINING for the special treatments.
3618          */
3619         raw_spin_lock_irq(&pool->lock);
3620         pool->flags |= POOL_BH_DRAINING;
3621         raw_spin_unlock_irq(&pool->lock);
3622 
3623         bh_worker(list_first_entry(&pool->workers, struct worker, node));
3624 
3625         raw_spin_lock_irq(&pool->lock);
3626         pool->flags &= ~POOL_BH_DRAINING;
3627         repeat = need_more_worker(pool);
3628         raw_spin_unlock_irq(&pool->lock);
3629 
3630         /*
3631          * bh_worker() might hit consecutive execution limit and bail. If there
3632          * still are pending work items, reschedule self and return so that we
3633          * don't hog this CPU's BH.
3634          */
3635         if (repeat) {
3636                 if (pool->attrs->nice == HIGHPRI_NICE_LEVEL)
3637                         queue_work(system_bh_highpri_wq, work);
3638                 else
3639                         queue_work(system_bh_wq, work);
3640         } else {
3641                 complete(&dead_work->done);
3642         }
3643 }
3644 
3645 /*
3646  * @cpu is dead. Drain the remaining BH work items on the current CPU. It's
3647  * possible to allocate dead_work per CPU and avoid flushing. However, then we
3648  * have to worry about draining overlapping with CPU coming back online or
3649  * nesting (one CPU's dead_work queued on another CPU which is also dead and so
3650  * on). Let's keep it simple and drain them synchronously. These are BH work
3651  * items which shouldn't be requeued on the same pool. Shouldn't take long.
3652  */
3653 void workqueue_softirq_dead(unsigned int cpu)
3654 {
3655         int i;
3656 
3657         for (i = 0; i < NR_STD_WORKER_POOLS; i++) {
3658                 struct worker_pool *pool = &per_cpu(bh_worker_pools, cpu)[i];
3659                 struct wq_drain_dead_softirq_work dead_work;
3660 
3661                 if (!need_more_worker(pool))
3662                         continue;
3663 
3664                 INIT_WORK_ONSTACK(&dead_work.work, drain_dead_softirq_workfn);
3665                 dead_work.pool = pool;
3666                 init_completion(&dead_work.done);
3667 
3668                 if (pool->attrs->nice == HIGHPRI_NICE_LEVEL)
3669                         queue_work(system_bh_highpri_wq, &dead_work.work);
3670                 else
3671                         queue_work(system_bh_wq, &dead_work.work);
3672 
3673                 wait_for_completion(&dead_work.done);
3674                 destroy_work_on_stack(&dead_work.work);
3675         }
3676 }
3677 
3678 /**
3679  * check_flush_dependency - check for flush dependency sanity
3680  * @target_wq: workqueue being flushed
3681  * @target_work: work item being flushed (NULL for workqueue flushes)
3682  *
3683  * %current is trying to flush the whole @target_wq or @target_work on it.
3684  * If @target_wq doesn't have %WQ_MEM_RECLAIM, verify that %current is not
3685  * reclaiming memory or running on a workqueue which doesn't have
3686  * %WQ_MEM_RECLAIM as that can break forward-progress guarantee leading to
3687  * a deadlock.
3688  */
3689 static void check_flush_dependency(struct workqueue_struct *target_wq,
3690                                    struct work_struct *target_work)
3691 {
3692         work_func_t target_func = target_work ? target_work->func : NULL;
3693         struct worker *worker;
3694 
3695         if (target_wq->flags & WQ_MEM_RECLAIM)
3696                 return;
3697 
3698         worker = current_wq_worker();
3699 
3700         WARN_ONCE(current->flags & PF_MEMALLOC,
3701                   "workqueue: PF_MEMALLOC task %d(%s) is flushing !WQ_MEM_RECLAIM %s:%ps",
3702                   current->pid, current->comm, target_wq->name, target_func);
3703         WARN_ONCE(worker && ((worker->current_pwq->wq->flags &
3704                               (WQ_MEM_RECLAIM | __WQ_LEGACY)) == WQ_MEM_RECLAIM),
3705                   "workqueue: WQ_MEM_RECLAIM %s:%ps is flushing !WQ_MEM_RECLAIM %s:%ps",
3706                   worker->current_pwq->wq->name, worker->current_func,
3707                   target_wq->name, target_func);
3708 }
3709 
3710 struct wq_barrier {
3711         struct work_struct      work;
3712         struct completion       done;
3713         struct task_struct      *task;  /* purely informational */
3714 };
3715 
3716 static void wq_barrier_func(struct work_struct *work)
3717 {
3718         struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
3719         complete(&barr->done);
3720 }
3721 
3722 /**
3723  * insert_wq_barrier - insert a barrier work
3724  * @pwq: pwq to insert barrier into
3725  * @barr: wq_barrier to insert
3726  * @target: target work to attach @barr to
3727  * @worker: worker currently executing @target, NULL if @target is not executing
3728  *
3729  * @barr is linked to @target such that @barr is completed only after
3730  * @target finishes execution.  Please note that the ordering
3731  * guarantee is observed only with respect to @target and on the local
3732  * cpu.
3733  *
3734  * Currently, a queued barrier can't be canceled.  This is because
3735  * try_to_grab_pending() can't determine whether the work to be
3736  * grabbed is at the head of the queue and thus can't clear LINKED
3737  * flag of the previous work while there must be a valid next work
3738  * after a work with LINKED flag set.
3739  *
3740  * Note that when @worker is non-NULL, @target may be modified
3741  * underneath us, so we can't reliably determine pwq from @target.
3742  *
3743  * CONTEXT:
3744  * raw_spin_lock_irq(pool->lock).
3745  */
3746 static void insert_wq_barrier(struct pool_workqueue *pwq,
3747                               struct wq_barrier *barr,
3748                               struct work_struct *target, struct worker *worker)
3749 {
3750         static __maybe_unused struct lock_class_key bh_key, thr_key;
3751         unsigned int work_flags = 0;
3752         unsigned int work_color;
3753         struct list_head *head;
3754 
3755         /*
3756          * debugobject calls are safe here even with pool->lock locked
3757          * as we know for sure that this will not trigger any of the
3758          * checks and call back into the fixup functions where we
3759          * might deadlock.
3760          *
3761          * BH and threaded workqueues need separate lockdep keys to avoid
3762          * spuriously triggering "inconsistent {SOFTIRQ-ON-W} -> {IN-SOFTIRQ-W}
3763          * usage".
3764          */
3765         INIT_WORK_ONSTACK_KEY(&barr->work, wq_barrier_func,
3766                               (pwq->wq->flags & WQ_BH) ? &bh_key : &thr_key);
3767         __set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work));
3768 
3769         init_completion_map(&barr->done, &target->lockdep_map);
3770 
3771         barr->task = current;
3772 
3773         /* The barrier work item does not participate in nr_active. */
3774         work_flags |= WORK_STRUCT_INACTIVE;
3775 
3776         /*
3777          * If @target is currently being executed, schedule the
3778          * barrier to the worker; otherwise, put it after @target.
3779          */
3780         if (worker) {
3781                 head = worker->scheduled.next;
3782                 work_color = worker->current_color;
3783         } else {
3784                 unsigned long *bits = work_data_bits(target);
3785 
3786                 head = target->entry.next;
3787                 /* there can already be other linked works, inherit and set */
3788                 work_flags |= *bits & WORK_STRUCT_LINKED;
3789                 work_color = get_work_color(*bits);
3790                 __set_bit(WORK_STRUCT_LINKED_BIT, bits);
3791         }
3792 
3793         pwq->nr_in_flight[work_color]++;
3794         work_flags |= work_color_to_flags(work_color);
3795 
3796         insert_work(pwq, &barr->work, head, work_flags);
3797 }
3798 
3799 /**
3800  * flush_workqueue_prep_pwqs - prepare pwqs for workqueue flushing
3801  * @wq: workqueue being flushed
3802  * @flush_color: new flush color, < 0 for no-op
3803  * @work_color: new work color, < 0 for no-op
3804  *
3805  * Prepare pwqs for workqueue flushing.
3806  *
3807  * If @flush_color is non-negative, flush_color on all pwqs should be
3808  * -1.  If no pwq has in-flight commands at the specified color, all
3809  * pwq->flush_color's stay at -1 and %false is returned.  If any pwq
3810  * has in flight commands, its pwq->flush_color is set to
3811  * @flush_color, @wq->nr_pwqs_to_flush is updated accordingly, pwq
3812  * wakeup logic is armed and %true is returned.
3813  *
3814  * The caller should have initialized @wq->first_flusher prior to
3815  * calling this function with non-negative @flush_color.  If
3816  * @flush_color is negative, no flush color update is done and %false
3817  * is returned.
3818  *
3819  * If @work_color is non-negative, all pwqs should have the same
3820  * work_color which is previous to @work_color and all will be
3821  * advanced to @work_color.
3822  *
3823  * CONTEXT:
3824  * mutex_lock(wq->mutex).
3825  *
3826  * Return:
3827  * %true if @flush_color >= 0 and there's something to flush.  %false
3828  * otherwise.
3829  */
3830 static bool flush_workqueue_prep_pwqs(struct workqueue_struct *wq,
3831                                       int flush_color, int work_color)
3832 {
3833         bool wait = false;
3834         struct pool_workqueue *pwq;
3835 
3836         if (flush_color >= 0) {
3837                 WARN_ON_ONCE(atomic_read(&wq->nr_pwqs_to_flush));
3838                 atomic_set(&wq->nr_pwqs_to_flush, 1);
3839         }
3840 
3841         for_each_pwq(pwq, wq) {
3842                 struct worker_pool *pool = pwq->pool;
3843 
3844                 raw_spin_lock_irq(&pool->lock);
3845 
3846                 if (flush_color >= 0) {
3847                         WARN_ON_ONCE(pwq->flush_color != -1);
3848 
3849                         if (pwq->nr_in_flight[flush_color]) {
3850                                 pwq->flush_color = flush_color;
3851                                 atomic_inc(&wq->nr_pwqs_to_flush);
3852                                 wait = true;
3853                         }
3854                 }
3855 
3856                 if (work_color >= 0) {
3857                         WARN_ON_ONCE(work_color != work_next_color(pwq->work_color));
3858                         pwq->work_color = work_color;
3859                 }
3860 
3861                 raw_spin_unlock_irq(&pool->lock);
3862         }
3863 
3864         if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_pwqs_to_flush))
3865                 complete(&wq->first_flusher->done);
3866 
3867         return wait;
3868 }
3869 
3870 static void touch_wq_lockdep_map(struct workqueue_struct *wq)
3871 {
3872 #ifdef CONFIG_LOCKDEP
3873         if (wq->flags & WQ_BH)
3874                 local_bh_disable();
3875 
3876         lock_map_acquire(&wq->lockdep_map);
3877         lock_map_release(&wq->lockdep_map);
3878 
3879         if (wq->flags & WQ_BH)
3880                 local_bh_enable();
3881 #endif
3882 }
3883 
3884 static void touch_work_lockdep_map(struct work_struct *work,
3885                                    struct workqueue_struct *wq)
3886 {
3887 #ifdef CONFIG_LOCKDEP
3888         if (wq->flags & WQ_BH)
3889                 local_bh_disable();
3890 
3891         lock_map_acquire(&work->lockdep_map);
3892         lock_map_release(&work->lockdep_map);
3893 
3894         if (wq->flags & WQ_BH)
3895                 local_bh_enable();
3896 #endif
3897 }
3898 
3899 /**
3900  * __flush_workqueue - ensure that any scheduled work has run to completion.
3901  * @wq: workqueue to flush
3902  *
3903  * This function sleeps until all work items which were queued on entry
3904  * have finished execution, but it is not livelocked by new incoming ones.
3905  */
3906 void __flush_workqueue(struct workqueue_struct *wq)
3907 {
3908         struct wq_flusher this_flusher = {
3909                 .list = LIST_HEAD_INIT(this_flusher.list),
3910                 .flush_color = -1,
3911                 .done = COMPLETION_INITIALIZER_ONSTACK_MAP(this_flusher.done, wq->lockdep_map),
3912         };
3913         int next_color;
3914 
3915         if (WARN_ON(!wq_online))
3916                 return;
3917 
3918         touch_wq_lockdep_map(wq);
3919 
3920         mutex_lock(&wq->mutex);
3921 
3922         /*
3923          * Start-to-wait phase
3924          */
3925         next_color = work_next_color(wq->work_color);
3926 
3927         if (next_color != wq->flush_color) {
3928                 /*
3929                  * Color space is not full.  The current work_color
3930                  * becomes our flush_color and work_color is advanced
3931                  * by one.
3932                  */
3933                 WARN_ON_ONCE(!list_empty(&wq->flusher_overflow));
3934                 this_flusher.flush_color = wq->work_color;
3935                 wq->work_color = next_color;
3936 
3937                 if (!wq->first_flusher) {
3938                         /* no flush in progress, become the first flusher */
3939                         WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color);
3940 
3941                         wq->first_flusher = &this_flusher;
3942 
3943                         if (!flush_workqueue_prep_pwqs(wq, wq->flush_color,
3944                                                        wq->work_color)) {
3945                                 /* nothing to flush, done */
3946                                 wq->flush_color = next_color;
3947                                 wq->first_flusher = NULL;
3948                                 goto out_unlock;
3949                         }
3950                 } else {
3951                         /* wait in queue */
3952                         WARN_ON_ONCE(wq->flush_color == this_flusher.flush_color);
3953                         list_add_tail(&this_flusher.list, &wq->flusher_queue);
3954                         flush_workqueue_prep_pwqs(wq, -1, wq->work_color);
3955                 }
3956         } else {
3957                 /*
3958                  * Oops, color space is full, wait on overflow queue.
3959                  * The next flush completion will assign us
3960                  * flush_color and transfer to flusher_queue.
3961                  */
3962                 list_add_tail(&this_flusher.list, &wq->flusher_overflow);
3963         }
3964 
3965         check_flush_dependency(wq, NULL);
3966 
3967         mutex_unlock(&wq->mutex);
3968 
3969         wait_for_completion(&this_flusher.done);
3970 
3971         /*
3972          * Wake-up-and-cascade phase
3973          *
3974          * First flushers are responsible for cascading flushes and
3975          * handling overflow.  Non-first flushers can simply return.
3976          */
3977         if (READ_ONCE(wq->first_flusher) != &this_flusher)
3978                 return;
3979 
3980         mutex_lock(&wq->mutex);
3981 
3982         /* we might have raced, check again with mutex held */
3983         if (wq->first_flusher != &this_flusher)
3984                 goto out_unlock;
3985 
3986         WRITE_ONCE(wq->first_flusher, NULL);
3987 
3988         WARN_ON_ONCE(!list_empty(&this_flusher.list));
3989         WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color);
3990 
3991         while (true) {
3992                 struct wq_flusher *next, *tmp;
3993 
3994                 /* complete all the flushers sharing the current flush color */
3995                 list_for_each_entry_safe(next, tmp, &wq->flusher_queue, list) {
3996                         if (next->flush_color != wq->flush_color)
3997                                 break;
3998                         list_del_init(&next->list);
3999                         complete(&next->done);
4000                 }
4001 
4002                 WARN_ON_ONCE(!list_empty(&wq->flusher_overflow) &&
4003                              wq->flush_color != work_next_color(wq->work_color));
4004 
4005                 /* this flush_color is finished, advance by one */
4006                 wq->flush_color = work_next_color(wq->flush_color);
4007 
4008                 /* one color has been freed, handle overflow queue */
4009                 if (!list_empty(&wq->flusher_overflow)) {
4010                         /*
4011                          * Assign the same color to all overflowed
4012                          * flushers, advance work_color and append to
4013                          * flusher_queue.  This is the start-to-wait
4014                          * phase for these overflowed flushers.
4015                          */
4016                         list_for_each_entry(tmp, &wq->flusher_overflow, list)
4017                                 tmp->flush_color = wq->work_color;
4018 
4019                         wq->work_color = work_next_color(wq->work_color);
4020 
4021                         list_splice_tail_init(&wq->flusher_overflow,
4022                                               &wq->flusher_queue);
4023                         flush_workqueue_prep_pwqs(wq, -1, wq->work_color);
4024                 }
4025 
4026                 if (list_empty(&wq->flusher_queue)) {
4027                         WARN_ON_ONCE(wq->flush_color != wq->work_color);
4028                         break;
4029                 }
4030 
4031                 /*
4032                  * Need to flush more colors.  Make the next flusher
4033                  * the new first flusher and arm pwqs.
4034                  */
4035                 WARN_ON_ONCE(wq->flush_color == wq->work_color);
4036                 WARN_ON_ONCE(wq->flush_color != next->flush_color);
4037 
4038                 list_del_init(&next->list);
4039                 wq->first_flusher = next;
4040 
4041                 if (flush_workqueue_prep_pwqs(wq, wq->flush_color, -1))
4042                         break;
4043 
4044                 /*
4045                  * Meh... this color is already done, clear first
4046                  * flusher and repeat cascading.
4047                  */
4048                 wq->first_flusher = NULL;
4049         }
4050 
4051 out_unlock:
4052         mutex_unlock(&wq->mutex);
4053 }
4054 EXPORT_SYMBOL(__flush_workqueue);
4055 
4056 /**
4057  * drain_workqueue - drain a workqueue
4058  * @wq: workqueue to drain
4059  *
4060  * Wait until the workqueue becomes empty.  While draining is in progress,
4061  * only chain queueing is allowed.  IOW, only currently pending or running
4062  * work items on @wq can queue further work items on it.  @wq is flushed
4063  * repeatedly until it becomes empty.  The number of flushing is determined
4064  * by the depth of chaining and should be relatively short.  Whine if it
4065  * takes too long.
4066  */
4067 void drain_workqueue(struct workqueue_struct *wq)
4068 {
4069         unsigned int flush_cnt = 0;
4070         struct pool_workqueue *pwq;
4071 
4072         /*
4073          * __queue_work() needs to test whether there are drainers, is much
4074          * hotter than drain_workqueue() and already looks at @wq->flags.
4075          * Use __WQ_DRAINING so that queue doesn't have to check nr_drainers.
4076          */
4077         mutex_lock(&wq->mutex);
4078         if (!wq->nr_drainers++)
4079                 wq->flags |= __WQ_DRAINING;
4080         mutex_unlock(&wq->mutex);
4081 reflush:
4082         __flush_workqueue(wq);
4083 
4084         mutex_lock(&wq->mutex);
4085 
4086         for_each_pwq(pwq, wq) {
4087                 bool drained;
4088 
4089                 raw_spin_lock_irq(&pwq->pool->lock);
4090                 drained = pwq_is_empty(pwq);
4091                 raw_spin_unlock_irq(&pwq->pool->lock);
4092 
4093                 if (drained)
4094                         continue;
4095 
4096                 if (++flush_cnt == 10 ||
4097                     (flush_cnt % 100 == 0 && flush_cnt <= 1000))
4098                         pr_warn("workqueue %s: %s() isn't complete after %u tries\n",
4099                                 wq->name, __func__, flush_cnt);
4100 
4101                 mutex_unlock(&wq->mutex);
4102                 goto reflush;
4103         }
4104 
4105         if (!--wq->nr_drainers)
4106                 wq->flags &= ~__WQ_DRAINING;
4107         mutex_unlock(&wq->mutex);
4108 }
4109 EXPORT_SYMBOL_GPL(drain_workqueue);
4110 
4111 static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
4112                              bool from_cancel)
4113 {
4114         struct worker *worker = NULL;
4115         struct worker_pool *pool;
4116         struct pool_workqueue *pwq;
4117         struct workqueue_struct *wq;
4118 
4119         rcu_read_lock();
4120         pool = get_work_pool(work);
4121         if (!pool) {
4122                 rcu_read_unlock();
4123                 return false;
4124         }
4125 
4126         raw_spin_lock_irq(&pool->lock);
4127         /* see the comment in try_to_grab_pending() with the same code */
4128         pwq = get_work_pwq(work);
4129         if (pwq) {
4130                 if (unlikely(pwq->pool != pool))
4131                         goto already_gone;
4132         } else {
4133                 worker = find_worker_executing_work(pool, work);
4134                 if (!worker)
4135                         goto already_gone;
4136                 pwq = worker->current_pwq;
4137         }
4138 
4139         wq = pwq->wq;
4140         check_flush_dependency(wq, work);
4141 
4142         insert_wq_barrier(pwq, barr, work, worker);
4143         raw_spin_unlock_irq(&pool->lock);
4144 
4145         touch_work_lockdep_map(work, wq);
4146 
4147         /*
4148          * Force a lock recursion deadlock when using flush_work() inside a
4149          * single-threaded or rescuer equipped workqueue.
4150          *
4151          * For single threaded workqueues the deadlock happens when the work
4152          * is after the work issuing the flush_work(). For rescuer equipped
4153          * workqueues the deadlock happens when the rescuer stalls, blocking
4154          * forward progress.
4155          */
4156         if (!from_cancel && (wq->saved_max_active == 1 || wq->rescuer))
4157                 touch_wq_lockdep_map(wq);
4158 
4159         rcu_read_unlock();
4160         return true;
4161 already_gone:
4162         raw_spin_unlock_irq(&pool->lock);
4163         rcu_read_unlock();
4164         return false;
4165 }
4166 
4167 static bool __flush_work(struct work_struct *work, bool from_cancel)
4168 {
4169         struct wq_barrier barr;
4170         unsigned long data;
4171 
4172         if (WARN_ON(!wq_online))
4173                 return false;
4174 
4175         if (WARN_ON(!work->func))
4176                 return false;
4177 
4178         if (!start_flush_work(work, &barr, from_cancel))
4179                 return false;
4180 
4181         /*
4182          * start_flush_work() returned %true. If @from_cancel is set, we know
4183          * that @work must have been executing during start_flush_work() and
4184          * can't currently be queued. Its data must contain OFFQ bits. If @work
4185          * was queued on a BH workqueue, we also know that it was running in the
4186          * BH context and thus can be busy-waited.
4187          */
4188         data = *work_data_bits(work);
4189         if (from_cancel &&
4190             !WARN_ON_ONCE(data & WORK_STRUCT_PWQ) && (data & WORK_OFFQ_BH)) {
4191                 /*
4192                  * On RT, prevent a live lock when %current preempted soft
4193                  * interrupt processing or prevents ksoftirqd from running by
4194                  * keeping flipping BH. If the BH work item runs on a different
4195                  * CPU then this has no effect other than doing the BH
4196                  * disable/enable dance for nothing. This is copied from
4197                  * kernel/softirq.c::tasklet_unlock_spin_wait().
4198                  */
4199                 while (!try_wait_for_completion(&barr.done)) {
4200                         if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
4201                                 local_bh_disable();
4202                                 local_bh_enable();
4203                         } else {
4204                                 cpu_relax();
4205                         }
4206                 }
4207         } else {
4208                 wait_for_completion(&barr.done);
4209         }
4210 
4211         destroy_work_on_stack(&barr.work);
4212         return true;
4213 }
4214 
4215 /**
4216  * flush_work - wait for a work to finish executing the last queueing instance
4217  * @work: the work to flush
4218  *
4219  * Wait until @work has finished execution.  @work is guaranteed to be idle
4220  * on return if it hasn't been requeued since flush started.
4221  *
4222  * Return:
4223  * %true if flush_work() waited for the work to finish execution,
4224  * %false if it was already idle.
4225  */
4226 bool flush_work(struct work_struct *work)
4227 {
4228         might_sleep();
4229         return __flush_work(work, false);
4230 }
4231 EXPORT_SYMBOL_GPL(flush_work);
4232 
4233 /**
4234  * flush_delayed_work - wait for a dwork to finish executing the last queueing
4235  * @dwork: the delayed work to flush
4236  *
4237  * Delayed timer is cancelled and the pending work is queued for
4238  * immediate execution.  Like flush_work(), this function only
4239  * considers the last queueing instance of @dwork.
4240  *
4241  * Return:
4242  * %true if flush_work() waited for the work to finish execution,
4243  * %false if it was already idle.
4244  */
4245 bool flush_delayed_work(struct delayed_work *dwork)
4246 {
4247         local_irq_disable();
4248         if (del_timer_sync(&dwork->timer))
4249                 __queue_work(dwork->cpu, dwork->wq, &dwork->work);
4250         local_irq_enable();
4251         return flush_work(&dwork->work);
4252 }
4253 EXPORT_SYMBOL(flush_delayed_work);
4254 
4255 /**
4256  * flush_rcu_work - wait for a rwork to finish executing the last queueing
4257  * @rwork: the rcu work to flush
4258  *
4259  * Return:
4260  * %true if flush_rcu_work() waited for the work to finish execution,
4261  * %false if it was already idle.
4262  */
4263 bool flush_rcu_work(struct rcu_work *rwork)
4264 {
4265         if (test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&rwork->work))) {
4266                 rcu_barrier();
4267                 flush_work(&rwork->work);
4268                 return true;
4269         } else {
4270                 return flush_work(&rwork->work);
4271         }
4272 }
4273 EXPORT_SYMBOL(flush_rcu_work);
4274 
4275 static void work_offqd_disable(struct work_offq_data *offqd)
4276 {
4277         const unsigned long max = (1lu << WORK_OFFQ_DISABLE_BITS) - 1;
4278 
4279         if (likely(offqd->disable < max))
4280                 offqd->disable++;
4281         else
4282                 WARN_ONCE(true, "workqueue: work disable count overflowed\n");
4283 }
4284 
4285 static void work_offqd_enable(struct work_offq_data *offqd)
4286 {
4287         if (likely(offqd->disable > 0))
4288                 offqd->disable--;
4289         else
4290                 WARN_ONCE(true, "workqueue: work disable count underflowed\n");
4291 }
4292 
4293 static bool __cancel_work(struct work_struct *work, u32 cflags)
4294 {
4295         struct work_offq_data offqd;
4296         unsigned long irq_flags;
4297         int ret;
4298 
4299         ret = work_grab_pending(work, cflags, &irq_flags);
4300 
4301         work_offqd_unpack(&offqd, *work_data_bits(work));
4302 
4303         if (cflags & WORK_CANCEL_DISABLE)
4304                 work_offqd_disable(&offqd);
4305 
4306         set_work_pool_and_clear_pending(work, offqd.pool_id,
4307                                         work_offqd_pack_flags(&offqd));
4308         local_irq_restore(irq_flags);
4309         return ret;
4310 }
4311 
4312 static bool __cancel_work_sync(struct work_struct *work, u32 cflags)
4313 {
4314         bool ret;
4315 
4316         ret = __cancel_work(work, cflags | WORK_CANCEL_DISABLE);
4317 
4318         if (*work_data_bits(work) & WORK_OFFQ_BH)
4319                 WARN_ON_ONCE(in_hardirq());
4320         else
4321                 might_sleep();
4322 
4323         /*
4324          * Skip __flush_work() during early boot when we know that @work isn't
4325          * executing. This allows canceling during early boot.
4326          */
4327         if (wq_online)
4328                 __flush_work(work, true);
4329 
4330         if (!(cflags & WORK_CANCEL_DISABLE))
4331                 enable_work(work);
4332 
4333         return ret;
4334 }
4335 
4336 /*
4337  * See cancel_delayed_work()
4338  */
4339 bool cancel_work(struct work_struct *work)
4340 {
4341         return __cancel_work(work, 0);
4342 }
4343 EXPORT_SYMBOL(cancel_work);
4344 
4345 /**
4346  * cancel_work_sync - cancel a work and wait for it to finish
4347  * @work: the work to cancel
4348  *
4349  * Cancel @work and wait for its execution to finish. This function can be used
4350  * even if the work re-queues itself or migrates to another workqueue. On return
4351  * from this function, @work is guaranteed to be not pending or executing on any
4352  * CPU as long as there aren't racing enqueues.
4353  *
4354  * cancel_work_sync(&delayed_work->work) must not be used for delayed_work's.
4355  * Use cancel_delayed_work_sync() instead.
4356  *
4357  * Must be called from a sleepable context if @work was last queued on a non-BH
4358  * workqueue. Can also be called from non-hardirq atomic contexts including BH
4359  * if @work was last queued on a BH workqueue.
4360  *
4361  * Returns %true if @work was pending, %false otherwise.
4362  */
4363 bool cancel_work_sync(struct work_struct *work)
4364 {
4365         return __cancel_work_sync(work, 0);
4366 }
4367 EXPORT_SYMBOL_GPL(cancel_work_sync);
4368 
4369 /**
4370  * cancel_delayed_work - cancel a delayed work
4371  * @dwork: delayed_work to cancel
4372  *
4373  * Kill off a pending delayed_work.
4374  *
4375  * Return: %true if @dwork was pending and canceled; %false if it wasn't
4376  * pending.
4377  *
4378  * Note:
4379  * The work callback function may still be running on return, unless
4380  * it returns %true and the work doesn't re-arm itself.  Explicitly flush or
4381  * use cancel_delayed_work_sync() to wait on it.
4382  *
4383  * This function is safe to call from any context including IRQ handler.
4384  */
4385 bool cancel_delayed_work(struct delayed_work *dwork)
4386 {
4387         return __cancel_work(&dwork->work, WORK_CANCEL_DELAYED);
4388 }
4389 EXPORT_SYMBOL(cancel_delayed_work);
4390 
4391 /**
4392  * cancel_delayed_work_sync - cancel a delayed work and wait for it to finish
4393  * @dwork: the delayed work cancel
4394  *
4395  * This is cancel_work_sync() for delayed works.
4396  *
4397  * Return:
4398  * %true if @dwork was pending, %false otherwise.
4399  */
4400 bool cancel_delayed_work_sync(struct delayed_work *dwork)
4401 {
4402         return __cancel_work_sync(&dwork->work, WORK_CANCEL_DELAYED);
4403 }
4404 EXPORT_SYMBOL(cancel_delayed_work_sync);
4405 
4406 /**
4407  * disable_work - Disable and cancel a work item
4408  * @work: work item to disable
4409  *
4410  * Disable @work by incrementing its disable count and cancel it if currently
4411  * pending. As long as the disable count is non-zero, any attempt to queue @work
4412  * will fail and return %false. The maximum supported disable depth is 2 to the
4413  * power of %WORK_OFFQ_DISABLE_BITS, currently 65536.
4414  *
4415  * Can be called from any context. Returns %true if @work was pending, %false
4416  * otherwise.
4417  */
4418 bool disable_work(struct work_struct *work)
4419 {
4420         return __cancel_work(work, WORK_CANCEL_DISABLE);
4421 }
4422 EXPORT_SYMBOL_GPL(disable_work);
4423 
4424 /**
4425  * disable_work_sync - Disable, cancel and drain a work item
4426  * @work: work item to disable
4427  *
4428  * Similar to disable_work() but also wait for @work to finish if currently
4429  * executing.
4430  *
4431  * Must be called from a sleepable context if @work was last queued on a non-BH
4432  * workqueue. Can also be called from non-hardirq atomic contexts including BH
4433  * if @work was last queued on a BH workqueue.
4434  *
4435  * Returns %true if @work was pending, %false otherwise.
4436  */
4437 bool disable_work_sync(struct work_struct *work)
4438 {
4439         return __cancel_work_sync(work, WORK_CANCEL_DISABLE);
4440 }
4441 EXPORT_SYMBOL_GPL(disable_work_sync);
4442 
4443 /**
4444  * enable_work - Enable a work item
4445  * @work: work item to enable
4446  *
4447  * Undo disable_work[_sync]() by decrementing @work's disable count. @work can
4448  * only be queued if its disable count is 0.
4449  *
4450  * Can be called from any context. Returns %true if the disable count reached 0.
4451  * Otherwise, %false.
4452  */
4453 bool enable_work(struct work_struct *work)
4454 {
4455         struct work_offq_data offqd;
4456         unsigned long irq_flags;
4457 
4458         work_grab_pending(work, 0, &irq_flags);
4459 
4460         work_offqd_unpack(&offqd, *work_data_bits(work));
4461         work_offqd_enable(&offqd);
4462         set_work_pool_and_clear_pending(work, offqd.pool_id,
4463                                         work_offqd_pack_flags(&offqd));
4464         local_irq_restore(irq_flags);
4465 
4466         return !offqd.disable;
4467 }
4468 EXPORT_SYMBOL_GPL(enable_work);
4469 
4470 /**
4471  * disable_delayed_work - Disable and cancel a delayed work item
4472  * @dwork: delayed work item to disable
4473  *
4474  * disable_work() for delayed work items.
4475  */
4476 bool disable_delayed_work(struct delayed_work *dwork)
4477 {
4478         return __cancel_work(&dwork->work,
4479                              WORK_CANCEL_DELAYED | WORK_CANCEL_DISABLE);
4480 }
4481 EXPORT_SYMBOL_GPL(disable_delayed_work);
4482 
4483 /**
4484  * disable_delayed_work_sync - Disable, cancel and drain a delayed work item
4485  * @dwork: delayed work item to disable
4486  *
4487  * disable_work_sync() for delayed work items.
4488  */
4489 bool disable_delayed_work_sync(struct delayed_work *dwork)
4490 {
4491         return __cancel_work_sync(&dwork->work,
4492                                   WORK_CANCEL_DELAYED | WORK_CANCEL_DISABLE);
4493 }
4494 EXPORT_SYMBOL_GPL(disable_delayed_work_sync);
4495 
4496 /**
4497  * enable_delayed_work - Enable a delayed work item
4498  * @dwork: delayed work item to enable
4499  *
4500  * enable_work() for delayed work items.
4501  */
4502 bool enable_delayed_work(struct delayed_work *dwork)
4503 {
4504         return enable_work(&dwork->work);
4505 }
4506 EXPORT_SYMBOL_GPL(enable_delayed_work);
4507 
4508 /**
4509  * schedule_on_each_cpu - execute a function synchronously on each online CPU
4510  * @func: the function to call
4511  *
4512  * schedule_on_each_cpu() executes @func on each online CPU using the
4513  * system workqueue and blocks until all CPUs have completed.
4514  * schedule_on_each_cpu() is very slow.
4515  *
4516  * Return:
4517  * 0 on success, -errno on failure.
4518  */
4519 int schedule_on_each_cpu(work_func_t func)
4520 {
4521         int cpu;
4522         struct work_struct __percpu *works;
4523 
4524         works = alloc_percpu(struct work_struct);
4525         if (!works)
4526                 return -ENOMEM;
4527 
4528         cpus_read_lock();
4529 
4530         for_each_online_cpu(cpu) {
4531                 struct work_struct *work = per_cpu_ptr(works, cpu);
4532 
4533                 INIT_WORK(work, func);
4534                 schedule_work_on(cpu, work);
4535         }
4536 
4537         for_each_online_cpu(cpu)
4538                 flush_work(per_cpu_ptr(works, cpu));
4539 
4540         cpus_read_unlock();
4541         free_percpu(works);
4542         return 0;
4543 }
4544 
4545 /**
4546  * execute_in_process_context - reliably execute the routine with user context
4547  * @fn:         the function to execute
4548  * @ew:         guaranteed storage for the execute work structure (must
4549  *              be available when the work executes)
4550  *
4551  * Executes the function immediately if process context is available,
4552  * otherwise schedules the function for delayed execution.
4553  *
4554  * Return:      0 - function was executed
4555  *              1 - function was scheduled for execution
4556  */
4557 int execute_in_process_context(work_func_t fn, struct execute_work *ew)
4558 {
4559         if (!in_interrupt()) {
4560                 fn(&ew->work);
4561                 return 0;
4562         }
4563 
4564         INIT_WORK(&ew->work, fn);
4565         schedule_work(&ew->work);
4566 
4567         return 1;
4568 }
4569 EXPORT_SYMBOL_GPL(execute_in_process_context);
4570 
4571 /**
4572  * free_workqueue_attrs - free a workqueue_attrs
4573  * @attrs: workqueue_attrs to free
4574  *
4575  * Undo alloc_workqueue_attrs().
4576  */
4577 void free_workqueue_attrs(struct workqueue_attrs *attrs)
4578 {
4579         if (attrs) {
4580                 free_cpumask_var(attrs->cpumask);
4581                 free_cpumask_var(attrs->__pod_cpumask);
4582                 kfree(attrs);
4583         }
4584 }
4585 
4586 /**
4587  * alloc_workqueue_attrs - allocate a workqueue_attrs
4588  *
4589  * Allocate a new workqueue_attrs, initialize with default settings and
4590  * return it.
4591  *
4592  * Return: The allocated new workqueue_attr on success. %NULL on failure.
4593  */
4594 struct workqueue_attrs *alloc_workqueue_attrs(void)
4595 {
4596         struct workqueue_attrs *attrs;
4597 
4598         attrs = kzalloc(sizeof(*attrs), GFP_KERNEL);
4599         if (!attrs)
4600                 goto fail;
4601         if (!alloc_cpumask_var(&attrs->cpumask, GFP_KERNEL))
4602                 goto fail;
4603         if (!alloc_cpumask_var(&attrs->__pod_cpumask, GFP_KERNEL))
4604                 goto fail;
4605 
4606         cpumask_copy(attrs->cpumask, cpu_possible_mask);
4607         attrs->affn_scope = WQ_AFFN_DFL;
4608         return attrs;
4609 fail:
4610         free_workqueue_attrs(attrs);
4611         return NULL;
4612 }
4613 
4614 static void copy_workqueue_attrs(struct workqueue_attrs *to,
4615                                  const struct workqueue_attrs *from)
4616 {
4617         to->nice = from->nice;
4618         cpumask_copy(to->cpumask, from->cpumask);
4619         cpumask_copy(to->__pod_cpumask, from->__pod_cpumask);
4620         to->affn_strict = from->affn_strict;
4621 
4622         /*
4623          * Unlike hash and equality test, copying shouldn't ignore wq-only
4624          * fields as copying is used for both pool and wq attrs. Instead,
4625          * get_unbound_pool() explicitly clears the fields.
4626          */
4627         to->affn_scope = from->affn_scope;
4628         to->ordered = from->ordered;
4629 }
4630 
4631 /*
4632  * Some attrs fields are workqueue-only. Clear them for worker_pool's. See the
4633  * comments in 'struct workqueue_attrs' definition.
4634  */
4635 static void wqattrs_clear_for_pool(struct workqueue_attrs *attrs)
4636 {
4637         attrs->affn_scope = WQ_AFFN_NR_TYPES;
4638         attrs->ordered = false;
4639         if (attrs->affn_strict)
4640                 cpumask_copy(attrs->cpumask, cpu_possible_mask);
4641 }
4642 
4643 /* hash value of the content of @attr */
4644 static u32 wqattrs_hash(const struct workqueue_attrs *attrs)
4645 {
4646         u32 hash = 0;
4647 
4648         hash = jhash_1word(attrs->nice, hash);
4649         hash = jhash_1word(attrs->affn_strict, hash);
4650         hash = jhash(cpumask_bits(attrs->__pod_cpumask),
4651                      BITS_TO_LONGS(nr_cpumask_bits) * sizeof(long), hash);
4652         if (!attrs->affn_strict)
4653                 hash = jhash(cpumask_bits(attrs->cpumask),
4654                              BITS_TO_LONGS(nr_cpumask_bits) * sizeof(long), hash);
4655         return hash;
4656 }
4657 
4658 /* content equality test */
4659 static bool wqattrs_equal(const struct workqueue_attrs *a,
4660                           const struct workqueue_attrs *b)
4661 {
4662         if (a->nice != b->nice)
4663                 return false;
4664         if (a->affn_strict != b->affn_strict)
4665                 return false;
4666         if (!cpumask_equal(a->__pod_cpumask, b->__pod_cpumask))
4667                 return false;
4668         if (!a->affn_strict && !cpumask_equal(a->cpumask, b->cpumask))
4669                 return false;
4670         return true;
4671 }
4672 
4673 /* Update @attrs with actually available CPUs */
4674 static void wqattrs_actualize_cpumask(struct workqueue_attrs *attrs,
4675                                       const cpumask_t *unbound_cpumask)
4676 {
4677         /*
4678          * Calculate the effective CPU mask of @attrs given @unbound_cpumask. If
4679          * @attrs->cpumask doesn't overlap with @unbound_cpumask, we fallback to
4680          * @unbound_cpumask.
4681          */
4682         cpumask_and(attrs->cpumask, attrs->cpumask, unbound_cpumask);
4683         if (unlikely(cpumask_empty(attrs->cpumask)))
4684                 cpumask_copy(attrs->cpumask, unbound_cpumask);
4685 }
4686 
4687 /* find wq_pod_type to use for @attrs */
4688 static const struct wq_pod_type *
4689 wqattrs_pod_type(const struct workqueue_attrs *attrs)
4690 {
4691         enum wq_affn_scope scope;
4692         struct wq_pod_type *pt;
4693 
4694         /* to synchronize access to wq_affn_dfl */
4695         lockdep_assert_held(&wq_pool_mutex);
4696 
4697         if (attrs->affn_scope == WQ_AFFN_DFL)
4698                 scope = wq_affn_dfl;
4699         else
4700                 scope = attrs->affn_scope;
4701 
4702         pt = &wq_pod_types[scope];
4703 
4704         if (!WARN_ON_ONCE(attrs->affn_scope == WQ_AFFN_NR_TYPES) &&
4705             likely(pt->nr_pods))
4706                 return pt;
4707 
4708         /*
4709          * Before workqueue_init_topology(), only SYSTEM is available which is
4710          * initialized in workqueue_init_early().
4711          */
4712         pt = &wq_pod_types[WQ_AFFN_SYSTEM];
4713         BUG_ON(!pt->nr_pods);
4714         return pt;
4715 }
4716 
4717 /**
4718  * init_worker_pool - initialize a newly zalloc'd worker_pool
4719  * @pool: worker_pool to initialize
4720  *
4721  * Initialize a newly zalloc'd @pool.  It also allocates @pool->attrs.
4722  *
4723  * Return: 0 on success, -errno on failure.  Even on failure, all fields
4724  * inside @pool proper are initialized and put_unbound_pool() can be called
4725  * on @pool safely to release it.
4726  */
4727 static int init_worker_pool(struct worker_pool *pool)
4728 {
4729         raw_spin_lock_init(&pool->lock);
4730         pool->id = -1;
4731         pool->cpu = -1;
4732         pool->node = NUMA_NO_NODE;
4733         pool->flags |= POOL_DISASSOCIATED;
4734         pool->watchdog_ts = jiffies;
4735         INIT_LIST_HEAD(&pool->worklist);
4736         INIT_LIST_HEAD(&pool->idle_list);
4737         hash_init(pool->busy_hash);
4738 
4739         timer_setup(&pool->idle_timer, idle_worker_timeout, TIMER_DEFERRABLE);
4740         INIT_WORK(&pool->idle_cull_work, idle_cull_fn);
4741 
4742         timer_setup(&pool->mayday_timer, pool_mayday_timeout, 0);
4743 
4744         INIT_LIST_HEAD(&pool->workers);
4745 
4746         ida_init(&pool->worker_ida);
4747         INIT_HLIST_NODE(&pool->hash_node);
4748         pool->refcnt = 1;
4749 
4750         /* shouldn't fail above this point */
4751         pool->attrs = alloc_workqueue_attrs();
4752         if (!pool->attrs)
4753                 return -ENOMEM;
4754 
4755         wqattrs_clear_for_pool(pool->attrs);
4756 
4757         return 0;
4758 }
4759 
4760 #ifdef CONFIG_LOCKDEP
4761 static void wq_init_lockdep(struct workqueue_struct *wq)
4762 {
4763         char *lock_name;
4764 
4765         lockdep_register_key(&wq->key);
4766         lock_name = kasprintf(GFP_KERNEL, "%s%s", "(wq_completion)", wq->name);
4767         if (!lock_name)
4768                 lock_name = wq->name;
4769 
4770         wq->lock_name = lock_name;
4771         lockdep_init_map(&wq->lockdep_map, lock_name, &wq->key, 0);
4772 }
4773 
4774 static void wq_unregister_lockdep(struct workqueue_struct *wq)
4775 {
4776         lockdep_unregister_key(&wq->key);
4777 }
4778 
4779 static void wq_free_lockdep(struct workqueue_struct *wq)
4780 {
4781         if (wq->lock_name != wq->name)
4782                 kfree(wq->lock_name);
4783 }
4784 #else
4785 static void wq_init_lockdep(struct workqueue_struct *wq)
4786 {
4787 }
4788 
4789 static void wq_unregister_lockdep(struct workqueue_struct *wq)
4790 {
4791 }
4792 
4793 static void wq_free_lockdep(struct workqueue_struct *wq)
4794 {
4795 }
4796 #endif
4797 
4798 static void free_node_nr_active(struct wq_node_nr_active **nna_ar)
4799 {
4800         int node;
4801 
4802         for_each_node(node) {
4803                 kfree(nna_ar[node]);
4804                 nna_ar[node] = NULL;
4805         }
4806 
4807         kfree(nna_ar[nr_node_ids]);
4808         nna_ar[nr_node_ids] = NULL;
4809 }
4810 
4811 static void init_node_nr_active(struct wq_node_nr_active *nna)
4812 {
4813         nna->max = WQ_DFL_MIN_ACTIVE;
4814         atomic_set(&nna->nr, 0);
4815         raw_spin_lock_init(&nna->lock);
4816         INIT_LIST_HEAD(&nna->pending_pwqs);
4817 }
4818 
4819 /*
4820  * Each node's nr_active counter will be accessed mostly from its own node and
4821  * should be allocated in the node.
4822  */
4823 static int alloc_node_nr_active(struct wq_node_nr_active **nna_ar)
4824 {
4825         struct wq_node_nr_active *nna;
4826         int node;
4827 
4828         for_each_node(node) {
4829                 nna = kzalloc_node(sizeof(*nna), GFP_KERNEL, node);
4830                 if (!nna)
4831                         goto err_free;
4832                 init_node_nr_active(nna);
4833                 nna_ar[node] = nna;
4834         }
4835 
4836         /* [nr_node_ids] is used as the fallback */
4837         nna = kzalloc_node(sizeof(*nna), GFP_KERNEL, NUMA_NO_NODE);
4838         if (!nna)
4839                 goto err_free;
4840         init_node_nr_active(nna);
4841         nna_ar[nr_node_ids] = nna;
4842 
4843         return 0;
4844 
4845 err_free:
4846         free_node_nr_active(nna_ar);
4847         return -ENOMEM;
4848 }
4849 
4850 static void rcu_free_wq(struct rcu_head *rcu)
4851 {
4852         struct workqueue_struct *wq =
4853                 container_of(rcu, struct workqueue_struct, rcu);
4854 
4855         if (wq->flags & WQ_UNBOUND)
4856                 free_node_nr_active(wq->node_nr_active);
4857 
4858         wq_free_lockdep(wq);
4859         free_percpu(wq->cpu_pwq);
4860         free_workqueue_attrs(wq->unbound_attrs);
4861         kfree(wq);
4862 }
4863 
4864 static void rcu_free_pool(struct rcu_head *rcu)
4865 {
4866         struct worker_pool *pool = container_of(rcu, struct worker_pool, rcu);
4867 
4868         ida_destroy(&pool->worker_ida);
4869         free_workqueue_attrs(pool->attrs);
4870         kfree(pool);
4871 }
4872 
4873 /**
4874  * put_unbound_pool - put a worker_pool
4875  * @pool: worker_pool to put
4876  *
4877  * Put @pool.  If its refcnt reaches zero, it gets destroyed in RCU
4878  * safe manner.  get_unbound_pool() calls this function on its failure path
4879  * and this function should be able to release pools which went through,
4880  * successfully or not, init_worker_pool().
4881  *
4882  * Should be called with wq_pool_mutex held.
4883  */
4884 static void put_unbound_pool(struct worker_pool *pool)
4885 {
4886         struct worker *worker;
4887         LIST_HEAD(cull_list);
4888 
4889         lockdep_assert_held(&wq_pool_mutex);
4890 
4891         if (--pool->refcnt)
4892                 return;
4893 
4894         /* sanity checks */
4895         if (WARN_ON(!(pool->cpu < 0)) ||
4896             WARN_ON(!list_empty(&pool->worklist)))
4897                 return;
4898 
4899         /* release id and unhash */
4900         if (pool->id >= 0)
4901                 idr_remove(&worker_pool_idr, pool->id);
4902         hash_del(&pool->hash_node);
4903 
4904         /*
4905          * Become the manager and destroy all workers.  This prevents
4906          * @pool's workers from blocking on attach_mutex.  We're the last
4907          * manager and @pool gets freed with the flag set.
4908          *
4909          * Having a concurrent manager is quite unlikely to happen as we can
4910          * only get here with
4911          *   pwq->refcnt == pool->refcnt == 0
4912          * which implies no work queued to the pool, which implies no worker can
4913          * become the manager. However a worker could have taken the role of
4914          * manager before the refcnts dropped to 0, since maybe_create_worker()
4915          * drops pool->lock
4916          */
4917         while (true) {
4918                 rcuwait_wait_event(&manager_wait,
4919                                    !(pool->flags & POOL_MANAGER_ACTIVE),
4920                                    TASK_UNINTERRUPTIBLE);
4921 
4922                 mutex_lock(&wq_pool_attach_mutex);
4923                 raw_spin_lock_irq(&pool->lock);
4924                 if (!(pool->flags & POOL_MANAGER_ACTIVE)) {
4925                         pool->flags |= POOL_MANAGER_ACTIVE;
4926                         break;
4927                 }
4928                 raw_spin_unlock_irq(&pool->lock);
4929                 mutex_unlock(&wq_pool_attach_mutex);
4930         }
4931 
4932         while ((worker = first_idle_worker(pool)))
4933                 set_worker_dying(worker, &cull_list);
4934         WARN_ON(pool->nr_workers || pool->nr_idle);
4935         raw_spin_unlock_irq(&pool->lock);
4936 
4937         detach_dying_workers(&cull_list);
4938 
4939         mutex_unlock(&wq_pool_attach_mutex);
4940 
4941         reap_dying_workers(&cull_list);
4942 
4943         /* shut down the timers */
4944         del_timer_sync(&pool->idle_timer);
4945         cancel_work_sync(&pool->idle_cull_work);
4946         del_timer_sync(&pool->mayday_timer);
4947 
4948         /* RCU protected to allow dereferences from get_work_pool() */
4949         call_rcu(&pool->rcu, rcu_free_pool);
4950 }
4951 
4952 /**
4953  * get_unbound_pool - get a worker_pool with the specified attributes
4954  * @attrs: the attributes of the worker_pool to get
4955  *
4956  * Obtain a worker_pool which has the same attributes as @attrs, bump the
4957  * reference count and return it.  If there already is a matching
4958  * worker_pool, it will be used; otherwise, this function attempts to
4959  * create a new one.
4960  *
4961  * Should be called with wq_pool_mutex held.
4962  *
4963  * Return: On success, a worker_pool with the same attributes as @attrs.
4964  * On failure, %NULL.
4965  */
4966 static struct worker_pool *get_unbound_pool(const struct workqueue_attrs *attrs)
4967 {
4968         struct wq_pod_type *pt = &wq_pod_types[WQ_AFFN_NUMA];
4969         u32 hash = wqattrs_hash(attrs);
4970         struct worker_pool *pool;
4971         int pod, node = NUMA_NO_NODE;
4972 
4973         lockdep_assert_held(&wq_pool_mutex);
4974 
4975         /* do we already have a matching pool? */
4976         hash_for_each_possible(unbound_pool_hash, pool, hash_node, hash) {
4977                 if (wqattrs_equal(pool->attrs, attrs)) {
4978                         pool->refcnt++;
4979                         return pool;
4980                 }
4981         }
4982 
4983         /* If __pod_cpumask is contained inside a NUMA pod, that's our node */
4984         for (pod = 0; pod < pt->nr_pods; pod++) {
4985                 if (cpumask_subset(attrs->__pod_cpumask, pt->pod_cpus[pod])) {
4986                         node = pt->pod_node[pod];
4987                         break;
4988                 }
4989         }
4990 
4991         /* nope, create a new one */
4992         pool = kzalloc_node(sizeof(*pool), GFP_KERNEL, node);
4993         if (!pool || init_worker_pool(pool) < 0)
4994                 goto fail;
4995 
4996         pool->node = node;
4997         copy_workqueue_attrs(pool->attrs, attrs);
4998         wqattrs_clear_for_pool(pool->attrs);
4999 
5000         if (worker_pool_assign_id(pool) < 0)
5001                 goto fail;
5002 
5003         /* create and start the initial worker */
5004         if (wq_online && !create_worker(pool))
5005                 goto fail;
5006 
5007         /* install */
5008         hash_add(unbound_pool_hash, &pool->hash_node, hash);
5009 
5010         return pool;
5011 fail:
5012         if (pool)
5013                 put_unbound_pool(pool);
5014         return NULL;
5015 }
5016 
5017 /*
5018  * Scheduled on pwq_release_worker by put_pwq() when an unbound pwq hits zero
5019  * refcnt and needs to be destroyed.
5020  */
5021 static void pwq_release_workfn(struct kthread_work *work)
5022 {
5023         struct pool_workqueue *pwq = container_of(work, struct pool_workqueue,
5024                                                   release_work);
5025         struct workqueue_struct *wq = pwq->wq;
5026         struct worker_pool *pool = pwq->pool;
5027         bool is_last = false;
5028 
5029         /*
5030          * When @pwq is not linked, it doesn't hold any reference to the
5031          * @wq, and @wq is invalid to access.
5032          */
5033         if (!list_empty(&pwq->pwqs_node)) {
5034                 mutex_lock(&wq->mutex);
5035                 list_del_rcu(&pwq->pwqs_node);
5036                 is_last = list_empty(&wq->pwqs);
5037 
5038                 /*
5039                  * For ordered workqueue with a plugged dfl_pwq, restart it now.
5040                  */
5041                 if (!is_last && (wq->flags & __WQ_ORDERED))
5042                         unplug_oldest_pwq(wq);
5043 
5044                 mutex_unlock(&wq->mutex);
5045         }
5046 
5047         if (wq->flags & WQ_UNBOUND) {
5048                 mutex_lock(&wq_pool_mutex);
5049                 put_unbound_pool(pool);
5050                 mutex_unlock(&wq_pool_mutex);
5051         }
5052 
5053         if (!list_empty(&pwq->pending_node)) {
5054                 struct wq_node_nr_active *nna =
5055                         wq_node_nr_active(pwq->wq, pwq->pool->node);
5056 
5057                 raw_spin_lock_irq(&nna->lock);
5058                 list_del_init(&pwq->pending_node);
5059                 raw_spin_unlock_irq(&nna->lock);
5060         }
5061 
5062         kfree_rcu(pwq, rcu);
5063 
5064         /*
5065          * If we're the last pwq going away, @wq is already dead and no one
5066          * is gonna access it anymore.  Schedule RCU free.
5067          */
5068         if (is_last) {
5069                 wq_unregister_lockdep(wq);
5070                 call_rcu(&wq->rcu, rcu_free_wq);
5071         }
5072 }
5073 
5074 /* initialize newly allocated @pwq which is associated with @wq and @pool */
5075 static void init_pwq(struct pool_workqueue *pwq, struct workqueue_struct *wq,
5076                      struct worker_pool *pool)
5077 {
5078         BUG_ON((unsigned long)pwq & ~WORK_STRUCT_PWQ_MASK);
5079 
5080         memset(pwq, 0, sizeof(*pwq));
5081 
5082         pwq->pool = pool;
5083         pwq->wq = wq;
5084         pwq->flush_color = -1;
5085         pwq->refcnt = 1;
5086         INIT_LIST_HEAD(&pwq->inactive_works);
5087         INIT_LIST_HEAD(&pwq->pending_node);
5088         INIT_LIST_HEAD(&pwq->pwqs_node);
5089         INIT_LIST_HEAD(&pwq->mayday_node);
5090         kthread_init_work(&pwq->release_work, pwq_release_workfn);
5091 }
5092 
5093 /* sync @pwq with the current state of its associated wq and link it */
5094 static void link_pwq(struct pool_workqueue *pwq)
5095 {
5096         struct workqueue_struct *wq = pwq->wq;
5097 
5098         lockdep_assert_held(&wq->mutex);
5099 
5100         /* may be called multiple times, ignore if already linked */
5101         if (!list_empty(&pwq->pwqs_node))
5102                 return;
5103 
5104         /* set the matching work_color */
5105         pwq->work_color = wq->work_color;
5106 
5107         /* link in @pwq */
5108         list_add_tail_rcu(&pwq->pwqs_node, &wq->pwqs);
5109 }
5110 
5111 /* obtain a pool matching @attr and create a pwq associating the pool and @wq */
5112 static struct pool_workqueue *alloc_unbound_pwq(struct workqueue_struct *wq,
5113                                         const struct workqueue_attrs *attrs)
5114 {
5115         struct worker_pool *pool;
5116         struct pool_workqueue *pwq;
5117 
5118         lockdep_assert_held(&wq_pool_mutex);
5119 
5120         pool = get_unbound_pool(attrs);
5121         if (!pool)
5122                 return NULL;
5123 
5124         pwq = kmem_cache_alloc_node(pwq_cache, GFP_KERNEL, pool->node);
5125         if (!pwq) {
5126                 put_unbound_pool(pool);
5127                 return NULL;
5128         }
5129 
5130         init_pwq(pwq, wq, pool);
5131         return pwq;
5132 }
5133 
5134 static void apply_wqattrs_lock(void)
5135 {
5136         mutex_lock(&wq_pool_mutex);
5137 }
5138 
5139 static void apply_wqattrs_unlock(void)
5140 {
5141         mutex_unlock(&wq_pool_mutex);
5142 }
5143 
5144 /**
5145  * wq_calc_pod_cpumask - calculate a wq_attrs' cpumask for a pod
5146  * @attrs: the wq_attrs of the default pwq of the target workqueue
5147  * @cpu: the target CPU
5148  *
5149  * Calculate the cpumask a workqueue with @attrs should use on @pod.
5150  * The result is stored in @attrs->__pod_cpumask.
5151  *
5152  * If pod affinity is not enabled, @attrs->cpumask is always used. If enabled
5153  * and @pod has online CPUs requested by @attrs, the returned cpumask is the
5154  * intersection of the possible CPUs of @pod and @attrs->cpumask.
5155  *
5156  * The caller is responsible for ensuring that the cpumask of @pod stays stable.
5157  */
5158 static void wq_calc_pod_cpumask(struct workqueue_attrs *attrs, int cpu)
5159 {
5160         const struct wq_pod_type *pt = wqattrs_pod_type(attrs);
5161         int pod = pt->cpu_pod[cpu];
5162 
5163         /* calculate possible CPUs in @pod that @attrs wants */
5164         cpumask_and(attrs->__pod_cpumask, pt->pod_cpus[pod], attrs->cpumask);
5165         /* does @pod have any online CPUs @attrs wants? */
5166         if (!cpumask_intersects(attrs->__pod_cpumask, wq_online_cpumask)) {
5167                 cpumask_copy(attrs->__pod_cpumask, attrs->cpumask);
5168                 return;
5169         }
5170 }
5171 
5172 /* install @pwq into @wq and return the old pwq, @cpu < 0 for dfl_pwq */
5173 static struct pool_workqueue *install_unbound_pwq(struct workqueue_struct *wq,
5174                                         int cpu, struct pool_workqueue *pwq)
5175 {
5176         struct pool_workqueue __rcu **slot = unbound_pwq_slot(wq, cpu);
5177         struct pool_workqueue *old_pwq;
5178 
5179         lockdep_assert_held(&wq_pool_mutex);
5180         lockdep_assert_held(&wq->mutex);
5181 
5182         /* link_pwq() can handle duplicate calls */
5183         link_pwq(pwq);
5184 
5185         old_pwq = rcu_access_pointer(*slot);
5186         rcu_assign_pointer(*slot, pwq);
5187         return old_pwq;
5188 }
5189 
5190 /* context to store the prepared attrs & pwqs before applying */
5191 struct apply_wqattrs_ctx {
5192         struct workqueue_struct *wq;            /* target workqueue */
5193         struct workqueue_attrs  *attrs;         /* attrs to apply */
5194         struct list_head        list;           /* queued for batching commit */
5195         struct pool_workqueue   *dfl_pwq;
5196         struct pool_workqueue   *pwq_tbl[];
5197 };
5198 
5199 /* free the resources after success or abort */
5200 static void apply_wqattrs_cleanup(struct apply_wqattrs_ctx *ctx)
5201 {
5202         if (ctx) {
5203                 int cpu;
5204 
5205                 for_each_possible_cpu(cpu)
5206                         put_pwq_unlocked(ctx->pwq_tbl[cpu]);
5207                 put_pwq_unlocked(ctx->dfl_pwq);
5208 
5209                 free_workqueue_attrs(ctx->attrs);
5210 
5211                 kfree(ctx);
5212         }
5213 }
5214 
5215 /* allocate the attrs and pwqs for later installation */
5216 static struct apply_wqattrs_ctx *
5217 apply_wqattrs_prepare(struct workqueue_struct *wq,
5218                       const struct workqueue_attrs *attrs,
5219                       const cpumask_var_t unbound_cpumask)
5220 {
5221         struct apply_wqattrs_ctx *ctx;
5222         struct workqueue_attrs *new_attrs;
5223         int cpu;
5224 
5225         lockdep_assert_held(&wq_pool_mutex);
5226 
5227         if (WARN_ON(attrs->affn_scope < 0 ||
5228                     attrs->affn_scope >= WQ_AFFN_NR_TYPES))
5229                 return ERR_PTR(-EINVAL);
5230 
5231         ctx = kzalloc(struct_size(ctx, pwq_tbl, nr_cpu_ids), GFP_KERNEL);
5232 
5233         new_attrs = alloc_workqueue_attrs();
5234         if (!ctx || !new_attrs)
5235                 goto out_free;
5236 
5237         /*
5238          * If something goes wrong during CPU up/down, we'll fall back to
5239          * the default pwq covering whole @attrs->cpumask.  Always create
5240          * it even if we don't use it immediately.
5241          */
5242         copy_workqueue_attrs(new_attrs, attrs);
5243         wqattrs_actualize_cpumask(new_attrs, unbound_cpumask);
5244         cpumask_copy(new_attrs->__pod_cpumask, new_attrs->cpumask);
5245         ctx->dfl_pwq = alloc_unbound_pwq(wq, new_attrs);
5246         if (!ctx->dfl_pwq)
5247                 goto out_free;
5248 
5249         for_each_possible_cpu(cpu) {
5250                 if (new_attrs->ordered) {
5251                         ctx->dfl_pwq->refcnt++;
5252                         ctx->pwq_tbl[cpu] = ctx->dfl_pwq;
5253                 } else {
5254                         wq_calc_pod_cpumask(new_attrs, cpu);
5255                         ctx->pwq_tbl[cpu] = alloc_unbound_pwq(wq, new_attrs);
5256                         if (!ctx->pwq_tbl[cpu])
5257                                 goto out_free;
5258                 }
5259         }
5260 
5261         /* save the user configured attrs and sanitize it. */
5262         copy_workqueue_attrs(new_attrs, attrs);
5263         cpumask_and(new_attrs->cpumask, new_attrs->cpumask, cpu_possible_mask);
5264         cpumask_copy(new_attrs->__pod_cpumask, new_attrs->cpumask);
5265         ctx->attrs = new_attrs;
5266 
5267         /*
5268          * For initialized ordered workqueues, there should only be one pwq
5269          * (dfl_pwq). Set the plugged flag of ctx->dfl_pwq to suspend execution
5270          * of newly queued work items until execution of older work items in
5271          * the old pwq's have completed.
5272          */
5273         if ((wq->flags & __WQ_ORDERED) && !list_empty(&wq->pwqs))
5274                 ctx->dfl_pwq->plugged = true;
5275 
5276         ctx->wq = wq;
5277         return ctx;
5278 
5279 out_free:
5280         free_workqueue_attrs(new_attrs);
5281         apply_wqattrs_cleanup(ctx);
5282         return ERR_PTR(-ENOMEM);
5283 }
5284 
5285 /* set attrs and install prepared pwqs, @ctx points to old pwqs on return */
5286 static void apply_wqattrs_commit(struct apply_wqattrs_ctx *ctx)
5287 {
5288         int cpu;
5289 
5290         /* all pwqs have been created successfully, let's install'em */
5291         mutex_lock(&ctx->wq->mutex);
5292 
5293         copy_workqueue_attrs(ctx->wq->unbound_attrs, ctx->attrs);
5294 
5295         /* save the previous pwqs and install the new ones */
5296         for_each_possible_cpu(cpu)
5297                 ctx->pwq_tbl[cpu] = install_unbound_pwq(ctx->wq, cpu,
5298                                                         ctx->pwq_tbl[cpu]);
5299         ctx->dfl_pwq = install_unbound_pwq(ctx->wq, -1, ctx->dfl_pwq);
5300 
5301         /* update node_nr_active->max */
5302         wq_update_node_max_active(ctx->wq, -1);
5303 
5304         /* rescuer needs to respect wq cpumask changes */
5305         if (ctx->wq->rescuer)
5306                 set_cpus_allowed_ptr(ctx->wq->rescuer->task,
5307                                      unbound_effective_cpumask(ctx->wq));
5308 
5309         mutex_unlock(&ctx->wq->mutex);
5310 }
5311 
5312 static int apply_workqueue_attrs_locked(struct workqueue_struct *wq,
5313                                         const struct workqueue_attrs *attrs)
5314 {
5315         struct apply_wqattrs_ctx *ctx;
5316 
5317         /* only unbound workqueues can change attributes */
5318         if (WARN_ON(!(wq->flags & WQ_UNBOUND)))
5319                 return -EINVAL;
5320 
5321         ctx = apply_wqattrs_prepare(wq, attrs, wq_unbound_cpumask);
5322         if (IS_ERR(ctx))
5323                 return PTR_ERR(ctx);
5324 
5325         /* the ctx has been prepared successfully, let's commit it */
5326         apply_wqattrs_commit(ctx);
5327         apply_wqattrs_cleanup(ctx);
5328 
5329         return 0;
5330 }
5331 
5332 /**
5333  * apply_workqueue_attrs - apply new workqueue_attrs to an unbound workqueue
5334  * @wq: the target workqueue
5335  * @attrs: the workqueue_attrs to apply, allocated with alloc_workqueue_attrs()
5336  *
5337  * Apply @attrs to an unbound workqueue @wq. Unless disabled, this function maps
5338  * a separate pwq to each CPU pod with possibles CPUs in @attrs->cpumask so that
5339  * work items are affine to the pod it was issued on. Older pwqs are released as
5340  * in-flight work items finish. Note that a work item which repeatedly requeues
5341  * itself back-to-back will stay on its current pwq.
5342  *
5343  * Performs GFP_KERNEL allocations.
5344  *
5345  * Return: 0 on success and -errno on failure.
5346  */
5347 int apply_workqueue_attrs(struct workqueue_struct *wq,
5348                           const struct workqueue_attrs *attrs)
5349 {
5350         int ret;
5351 
5352         mutex_lock(&wq_pool_mutex);
5353         ret = apply_workqueue_attrs_locked(wq, attrs);
5354         mutex_unlock(&wq_pool_mutex);
5355 
5356         return ret;
5357 }
5358 
5359 /**
5360  * unbound_wq_update_pwq - update a pwq slot for CPU hot[un]plug
5361  * @wq: the target workqueue
5362  * @cpu: the CPU to update the pwq slot for
5363  *
5364  * This function is to be called from %CPU_DOWN_PREPARE, %CPU_ONLINE and
5365  * %CPU_DOWN_FAILED.  @cpu is in the same pod of the CPU being hot[un]plugged.
5366  *
5367  *
5368  * If pod affinity can't be adjusted due to memory allocation failure, it falls
5369  * back to @wq->dfl_pwq which may not be optimal but is always correct.
5370  *
5371  * Note that when the last allowed CPU of a pod goes offline for a workqueue
5372  * with a cpumask spanning multiple pods, the workers which were already
5373  * executing the work items for the workqueue will lose their CPU affinity and
5374  * may execute on any CPU. This is similar to how per-cpu workqueues behave on
5375  * CPU_DOWN. If a workqueue user wants strict affinity, it's the user's
5376  * responsibility to flush the work item from CPU_DOWN_PREPARE.
5377  */
5378 static void unbound_wq_update_pwq(struct workqueue_struct *wq, int cpu)
5379 {
5380         struct pool_workqueue *old_pwq = NULL, *pwq;
5381         struct workqueue_attrs *target_attrs;
5382 
5383         lockdep_assert_held(&wq_pool_mutex);
5384 
5385         if (!(wq->flags & WQ_UNBOUND) || wq->unbound_attrs->ordered)
5386                 return;
5387 
5388         /*
5389          * We don't wanna alloc/free wq_attrs for each wq for each CPU.
5390          * Let's use a preallocated one.  The following buf is protected by
5391          * CPU hotplug exclusion.
5392          */
5393         target_attrs = unbound_wq_update_pwq_attrs_buf;
5394 
5395         copy_workqueue_attrs(target_attrs, wq->unbound_attrs);
5396         wqattrs_actualize_cpumask(target_attrs, wq_unbound_cpumask);
5397 
5398         /* nothing to do if the target cpumask matches the current pwq */
5399         wq_calc_pod_cpumask(target_attrs, cpu);
5400         if (wqattrs_equal(target_attrs, unbound_pwq(wq, cpu)->pool->attrs))
5401                 return;
5402 
5403         /* create a new pwq */
5404         pwq = alloc_unbound_pwq(wq, target_attrs);
5405         if (!pwq) {
5406                 pr_warn("workqueue: allocation failed while updating CPU pod affinity of \"%s\"\n",
5407                         wq->name);
5408                 goto use_dfl_pwq;
5409         }
5410 
5411         /* Install the new pwq. */
5412         mutex_lock(&wq->mutex);
5413         old_pwq = install_unbound_pwq(wq, cpu, pwq);
5414         goto out_unlock;
5415 
5416 use_dfl_pwq:
5417         mutex_lock(&wq->mutex);
5418         pwq = unbound_pwq(wq, -1);
5419         raw_spin_lock_irq(&pwq->pool->lock);
5420         get_pwq(pwq);
5421         raw_spin_unlock_irq(&pwq->pool->lock);
5422         old_pwq = install_unbound_pwq(wq, cpu, pwq);
5423 out_unlock:
5424         mutex_unlock(&wq->mutex);
5425         put_pwq_unlocked(old_pwq);
5426 }
5427 
5428 static int alloc_and_link_pwqs(struct workqueue_struct *wq)
5429 {
5430         bool highpri = wq->flags & WQ_HIGHPRI;
5431         int cpu, ret;
5432 
5433         lockdep_assert_held(&wq_pool_mutex);
5434 
5435         wq->cpu_pwq = alloc_percpu(struct pool_workqueue *);
5436         if (!wq->cpu_pwq)
5437                 goto enomem;
5438 
5439         if (!(wq->flags & WQ_UNBOUND)) {
5440                 struct worker_pool __percpu *pools;
5441 
5442                 if (wq->flags & WQ_BH)
5443                         pools = bh_worker_pools;
5444                 else
5445                         pools = cpu_worker_pools;
5446 
5447                 for_each_possible_cpu(cpu) {
5448                         struct pool_workqueue **pwq_p;
5449                         struct worker_pool *pool;
5450 
5451                         pool = &(per_cpu_ptr(pools, cpu)[highpri]);
5452                         pwq_p = per_cpu_ptr(wq->cpu_pwq, cpu);
5453 
5454                         *pwq_p = kmem_cache_alloc_node(pwq_cache, GFP_KERNEL,
5455                                                        pool->node);
5456                         if (!*pwq_p)
5457                                 goto enomem;
5458 
5459                         init_pwq(*pwq_p, wq, pool);
5460 
5461                         mutex_lock(&wq->mutex);
5462                         link_pwq(*pwq_p);
5463                         mutex_unlock(&wq->mutex);
5464                 }
5465                 return 0;
5466         }
5467 
5468         if (wq->flags & __WQ_ORDERED) {
5469                 struct pool_workqueue *dfl_pwq;
5470 
5471                 ret = apply_workqueue_attrs_locked(wq, ordered_wq_attrs[highpri]);
5472                 /* there should only be single pwq for ordering guarantee */
5473                 dfl_pwq = rcu_access_pointer(wq->dfl_pwq);
5474                 WARN(!ret && (wq->pwqs.next != &dfl_pwq->pwqs_node ||
5475                               wq->pwqs.prev != &dfl_pwq->pwqs_node),
5476                      "ordering guarantee broken for workqueue %s\n", wq->name);
5477         } else {
5478                 ret = apply_workqueue_attrs_locked(wq, unbound_std_wq_attrs[highpri]);
5479         }
5480 
5481         return ret;
5482 
5483 enomem:
5484         if (wq->cpu_pwq) {
5485                 for_each_possible_cpu(cpu) {
5486                         struct pool_workqueue *pwq = *per_cpu_ptr(wq->cpu_pwq, cpu);
5487 
5488                         if (pwq)
5489                                 kmem_cache_free(pwq_cache, pwq);
5490                 }
5491                 free_percpu(wq->cpu_pwq);
5492                 wq->cpu_pwq = NULL;
5493         }
5494         return -ENOMEM;
5495 }
5496 
5497 static int wq_clamp_max_active(int max_active, unsigned int flags,
5498                                const char *name)
5499 {
5500         if (max_active < 1 || max_active > WQ_MAX_ACTIVE)
5501                 pr_warn("workqueue: max_active %d requested for %s is out of range, clamping between %d and %d\n",
5502                         max_active, name, 1, WQ_MAX_ACTIVE);
5503 
5504         return clamp_val(max_active, 1, WQ_MAX_ACTIVE);
5505 }
5506 
5507 /*
5508  * Workqueues which may be used during memory reclaim should have a rescuer
5509  * to guarantee forward progress.
5510  */
5511 static int init_rescuer(struct workqueue_struct *wq)
5512 {
5513         struct worker *rescuer;
5514         char id_buf[WORKER_ID_LEN];
5515         int ret;
5516 
5517         lockdep_assert_held(&wq_pool_mutex);
5518 
5519         if (!(wq->flags & WQ_MEM_RECLAIM))
5520                 return 0;
5521 
5522         rescuer = alloc_worker(NUMA_NO_NODE);
5523         if (!rescuer) {
5524                 pr_err("workqueue: Failed to allocate a rescuer for wq \"%s\"\n",
5525                        wq->name);
5526                 return -ENOMEM;
5527         }
5528 
5529         rescuer->rescue_wq = wq;
5530         format_worker_id(id_buf, sizeof(id_buf), rescuer, NULL);
5531 
5532         rescuer->task = kthread_create(rescuer_thread, rescuer, "%s", id_buf);
5533         if (IS_ERR(rescuer->task)) {
5534                 ret = PTR_ERR(rescuer->task);
5535                 pr_err("workqueue: Failed to create a rescuer kthread for wq \"%s\": %pe",
5536                        wq->name, ERR_PTR(ret));
5537                 kfree(rescuer);
5538                 return ret;
5539         }
5540 
5541         wq->rescuer = rescuer;
5542         if (wq->flags & WQ_UNBOUND)
5543                 kthread_bind_mask(rescuer->task, unbound_effective_cpumask(wq));
5544         else
5545                 kthread_bind_mask(rescuer->task, cpu_possible_mask);
5546         wake_up_process(rescuer->task);
5547 
5548         return 0;
5549 }
5550 
5551 /**
5552  * wq_adjust_max_active - update a wq's max_active to the current setting
5553  * @wq: target workqueue
5554  *
5555  * If @wq isn't freezing, set @wq->max_active to the saved_max_active and
5556  * activate inactive work items accordingly. If @wq is freezing, clear
5557  * @wq->max_active to zero.
5558  */
5559 static void wq_adjust_max_active(struct workqueue_struct *wq)
5560 {
5561         bool activated;
5562         int new_max, new_min;
5563 
5564         lockdep_assert_held(&wq->mutex);
5565 
5566         if ((wq->flags & WQ_FREEZABLE) && workqueue_freezing) {
5567                 new_max = 0;
5568                 new_min = 0;
5569         } else {
5570                 new_max = wq->saved_max_active;
5571                 new_min = wq->saved_min_active;
5572         }
5573 
5574         if (wq->max_active == new_max && wq->min_active == new_min)
5575                 return;
5576 
5577         /*
5578          * Update @wq->max/min_active and then kick inactive work items if more
5579          * active work items are allowed. This doesn't break work item ordering
5580          * because new work items are always queued behind existing inactive
5581          * work items if there are any.
5582          */
5583         WRITE_ONCE(wq->max_active, new_max);
5584         WRITE_ONCE(wq->min_active, new_min);
5585 
5586         if (wq->flags & WQ_UNBOUND)
5587                 wq_update_node_max_active(wq, -1);
5588 
5589         if (new_max == 0)
5590                 return;
5591 
5592         /*
5593          * Round-robin through pwq's activating the first inactive work item
5594          * until max_active is filled.
5595          */
5596         do {
5597                 struct pool_workqueue *pwq;
5598 
5599                 activated = false;
5600                 for_each_pwq(pwq, wq) {
5601                         unsigned long irq_flags;
5602 
5603                         /* can be called during early boot w/ irq disabled */
5604                         raw_spin_lock_irqsave(&pwq->pool->lock, irq_flags);
5605                         if (pwq_activate_first_inactive(pwq, true)) {
5606                                 activated = true;
5607                                 kick_pool(pwq->pool);
5608                         }
5609                         raw_spin_unlock_irqrestore(&pwq->pool->lock, irq_flags);
5610                 }
5611         } while (activated);
5612 }
5613 
5614 __printf(1, 4)
5615 struct workqueue_struct *alloc_workqueue(const char *fmt,
5616                                          unsigned int flags,
5617                                          int max_active, ...)
5618 {
5619         va_list args;
5620         struct workqueue_struct *wq;
5621         size_t wq_size;
5622         int name_len;
5623 
5624         if (flags & WQ_BH) {
5625                 if (WARN_ON_ONCE(flags & ~__WQ_BH_ALLOWS))
5626                         return NULL;
5627                 if (WARN_ON_ONCE(max_active))
5628                         return NULL;
5629         }
5630 
5631         /* see the comment above the definition of WQ_POWER_EFFICIENT */
5632         if ((flags & WQ_POWER_EFFICIENT) && wq_power_efficient)
5633                 flags |= WQ_UNBOUND;
5634 
5635         /* allocate wq and format name */
5636         if (flags & WQ_UNBOUND)
5637                 wq_size = struct_size(wq, node_nr_active, nr_node_ids + 1);
5638         else
5639                 wq_size = sizeof(*wq);
5640 
5641         wq = kzalloc(wq_size, GFP_KERNEL);
5642         if (!wq)
5643                 return NULL;
5644 
5645         if (flags & WQ_UNBOUND) {
5646                 wq->unbound_attrs = alloc_workqueue_attrs();
5647                 if (!wq->unbound_attrs)
5648                         goto err_free_wq;
5649         }
5650 
5651         va_start(args, max_active);
5652         name_len = vsnprintf(wq->name, sizeof(wq->name), fmt, args);
5653         va_end(args);
5654 
5655         if (name_len >= WQ_NAME_LEN)
5656                 pr_warn_once("workqueue: name exceeds WQ_NAME_LEN. Truncating to: %s\n",
5657                              wq->name);
5658 
5659         if (flags & WQ_BH) {
5660                 /*
5661                  * BH workqueues always share a single execution context per CPU
5662                  * and don't impose any max_active limit.
5663                  */
5664                 max_active = INT_MAX;
5665         } else {
5666                 max_active = max_active ?: WQ_DFL_ACTIVE;
5667                 max_active = wq_clamp_max_active(max_active, flags, wq->name);
5668         }
5669 
5670         /* init wq */
5671         wq->flags = flags;
5672         wq->max_active = max_active;
5673         wq->min_active = min(max_active, WQ_DFL_MIN_ACTIVE);
5674         wq->saved_max_active = wq->max_active;
5675         wq->saved_min_active = wq->min_active;
5676         mutex_init(&wq->mutex);
5677         atomic_set(&wq->nr_pwqs_to_flush, 0);
5678         INIT_LIST_HEAD(&wq->pwqs);
5679         INIT_LIST_HEAD(&wq->flusher_queue);
5680         INIT_LIST_HEAD(&wq->flusher_overflow);
5681         INIT_LIST_HEAD(&wq->maydays);
5682 
5683         wq_init_lockdep(wq);
5684         INIT_LIST_HEAD(&wq->list);
5685 
5686         if (flags & WQ_UNBOUND) {
5687                 if (alloc_node_nr_active(wq->node_nr_active) < 0)
5688                         goto err_unreg_lockdep;
5689         }
5690 
5691         /*
5692          * wq_pool_mutex protects the workqueues list, allocations of PWQs,
5693          * and the global freeze state.
5694          */
5695         apply_wqattrs_lock();
5696 
5697         if (alloc_and_link_pwqs(wq) < 0)
5698                 goto err_unlock_free_node_nr_active;
5699 
5700         mutex_lock(&wq->mutex);
5701         wq_adjust_max_active(wq);
5702         mutex_unlock(&wq->mutex);
5703 
5704         list_add_tail_rcu(&wq->list, &workqueues);
5705 
5706         if (wq_online && init_rescuer(wq) < 0)
5707                 goto err_unlock_destroy;
5708 
5709         apply_wqattrs_unlock();
5710 
5711         if ((wq->flags & WQ_SYSFS) && workqueue_sysfs_register(wq))
5712                 goto err_destroy;
5713 
5714         return wq;
5715 
5716 err_unlock_free_node_nr_active:
5717         apply_wqattrs_unlock();
5718         /*
5719          * Failed alloc_and_link_pwqs() may leave pending pwq->release_work,
5720          * flushing the pwq_release_worker ensures that the pwq_release_workfn()
5721          * completes before calling kfree(wq).
5722          */
5723         if (wq->flags & WQ_UNBOUND) {
5724                 kthread_flush_worker(pwq_release_worker);
5725                 free_node_nr_active(wq->node_nr_active);
5726         }
5727 err_unreg_lockdep:
5728         wq_unregister_lockdep(wq);
5729         wq_free_lockdep(wq);
5730 err_free_wq:
5731         free_workqueue_attrs(wq->unbound_attrs);
5732         kfree(wq);
5733         return NULL;
5734 err_unlock_destroy:
5735         apply_wqattrs_unlock();
5736 err_destroy:
5737         destroy_workqueue(wq);
5738         return NULL;
5739 }
5740 EXPORT_SYMBOL_GPL(alloc_workqueue);
5741 
5742 static bool pwq_busy(struct pool_workqueue *pwq)
5743 {
5744         int i;
5745 
5746         for (i = 0; i < WORK_NR_COLORS; i++)
5747                 if (pwq->nr_in_flight[i])
5748                         return true;
5749 
5750         if ((pwq != rcu_access_pointer(pwq->wq->dfl_pwq)) && (pwq->refcnt > 1))
5751                 return true;
5752         if (!pwq_is_empty(pwq))
5753                 return true;
5754 
5755         return false;
5756 }
5757 
5758 /**
5759  * destroy_workqueue - safely terminate a workqueue
5760  * @wq: target workqueue
5761  *
5762  * Safely destroy a workqueue. All work currently pending will be done first.
5763  */
5764 void destroy_workqueue(struct workqueue_struct *wq)
5765 {
5766         struct pool_workqueue *pwq;
5767         int cpu;
5768 
5769         /*
5770          * Remove it from sysfs first so that sanity check failure doesn't
5771          * lead to sysfs name conflicts.
5772          */
5773         workqueue_sysfs_unregister(wq);
5774 
5775         /* mark the workqueue destruction is in progress */
5776         mutex_lock(&wq->mutex);
5777         wq->flags |= __WQ_DESTROYING;
5778         mutex_unlock(&wq->mutex);
5779 
5780         /* drain it before proceeding with destruction */
5781         drain_workqueue(wq);
5782 
5783         /* kill rescuer, if sanity checks fail, leave it w/o rescuer */
5784         if (wq->rescuer) {
5785                 struct worker *rescuer = wq->rescuer;
5786 
5787                 /* this prevents new queueing */
5788                 raw_spin_lock_irq(&wq_mayday_lock);
5789                 wq->rescuer = NULL;
5790                 raw_spin_unlock_irq(&wq_mayday_lock);
5791 
5792                 /* rescuer will empty maydays list before exiting */
5793                 kthread_stop(rescuer->task);
5794                 kfree(rescuer);
5795         }
5796 
5797         /*
5798          * Sanity checks - grab all the locks so that we wait for all
5799          * in-flight operations which may do put_pwq().
5800          */
5801         mutex_lock(&wq_pool_mutex);
5802         mutex_lock(&wq->mutex);
5803         for_each_pwq(pwq, wq) {
5804                 raw_spin_lock_irq(&pwq->pool->lock);
5805                 if (WARN_ON(pwq_busy(pwq))) {
5806                         pr_warn("%s: %s has the following busy pwq\n",
5807                                 __func__, wq->name);
5808                         show_pwq(pwq);
5809                         raw_spin_unlock_irq(&pwq->pool->lock);
5810                         mutex_unlock(&wq->mutex);
5811                         mutex_unlock(&wq_pool_mutex);
5812                         show_one_workqueue(wq);
5813                         return;
5814                 }
5815                 raw_spin_unlock_irq(&pwq->pool->lock);
5816         }
5817         mutex_unlock(&wq->mutex);
5818 
5819         /*
5820          * wq list is used to freeze wq, remove from list after
5821          * flushing is complete in case freeze races us.
5822          */
5823         list_del_rcu(&wq->list);
5824         mutex_unlock(&wq_pool_mutex);
5825 
5826         /*
5827          * We're the sole accessor of @wq. Directly access cpu_pwq and dfl_pwq
5828          * to put the base refs. @wq will be auto-destroyed from the last
5829          * pwq_put. RCU read lock prevents @wq from going away from under us.
5830          */
5831         rcu_read_lock();
5832 
5833         for_each_possible_cpu(cpu) {
5834                 put_pwq_unlocked(unbound_pwq(wq, cpu));
5835                 RCU_INIT_POINTER(*unbound_pwq_slot(wq, cpu), NULL);
5836         }
5837 
5838         put_pwq_unlocked(unbound_pwq(wq, -1));
5839         RCU_INIT_POINTER(*unbound_pwq_slot(wq, -1), NULL);
5840 
5841         rcu_read_unlock();
5842 }
5843 EXPORT_SYMBOL_GPL(destroy_workqueue);
5844 
5845 /**
5846  * workqueue_set_max_active - adjust max_active of a workqueue
5847  * @wq: target workqueue
5848  * @max_active: new max_active value.
5849  *
5850  * Set max_active of @wq to @max_active. See the alloc_workqueue() function
5851  * comment.
5852  *
5853  * CONTEXT:
5854  * Don't call from IRQ context.
5855  */
5856 void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
5857 {
5858         /* max_active doesn't mean anything for BH workqueues */
5859         if (WARN_ON(wq->flags & WQ_BH))
5860                 return;
5861         /* disallow meddling with max_active for ordered workqueues */
5862         if (WARN_ON(wq->flags & __WQ_ORDERED))
5863                 return;
5864 
5865         max_active = wq_clamp_max_active(max_active, wq->flags, wq->name);
5866 
5867         mutex_lock(&wq->mutex);
5868 
5869         wq->saved_max_active = max_active;
5870         if (wq->flags & WQ_UNBOUND)
5871                 wq->saved_min_active = min(wq->saved_min_active, max_active);
5872 
5873         wq_adjust_max_active(wq);
5874 
5875         mutex_unlock(&wq->mutex);
5876 }
5877 EXPORT_SYMBOL_GPL(workqueue_set_max_active);
5878 
5879 /**
5880  * workqueue_set_min_active - adjust min_active of an unbound workqueue
5881  * @wq: target unbound workqueue
5882  * @min_active: new min_active value
5883  *
5884  * Set min_active of an unbound workqueue. Unlike other types of workqueues, an
5885  * unbound workqueue is not guaranteed to be able to process max_active
5886  * interdependent work items. Instead, an unbound workqueue is guaranteed to be
5887  * able to process min_active number of interdependent work items which is
5888  * %WQ_DFL_MIN_ACTIVE by default.
5889  *
5890  * Use this function to adjust the min_active value between 0 and the current
5891  * max_active.
5892  */
5893 void workqueue_set_min_active(struct workqueue_struct *wq, int min_active)
5894 {
5895         /* min_active is only meaningful for non-ordered unbound workqueues */
5896         if (WARN_ON((wq->flags & (WQ_BH | WQ_UNBOUND | __WQ_ORDERED)) !=
5897                     WQ_UNBOUND))
5898                 return;
5899 
5900         mutex_lock(&wq->mutex);
5901         wq->saved_min_active = clamp(min_active, 0, wq->saved_max_active);
5902         wq_adjust_max_active(wq);
5903         mutex_unlock(&wq->mutex);
5904 }
5905 
5906 /**
5907  * current_work - retrieve %current task's work struct
5908  *
5909  * Determine if %current task is a workqueue worker and what it's working on.
5910  * Useful to find out the context that the %current task is running in.
5911  *
5912  * Return: work struct if %current task is a workqueue worker, %NULL otherwise.
5913  */
5914 struct work_struct *current_work(void)
5915 {
5916         struct worker *worker = current_wq_worker();
5917 
5918         return worker ? worker->current_work : NULL;
5919 }
5920 EXPORT_SYMBOL(current_work);
5921 
5922 /**
5923  * current_is_workqueue_rescuer - is %current workqueue rescuer?
5924  *
5925  * Determine whether %current is a workqueue rescuer.  Can be used from
5926  * work functions to determine whether it's being run off the rescuer task.
5927  *
5928  * Return: %true if %current is a workqueue rescuer. %false otherwise.
5929  */
5930 bool current_is_workqueue_rescuer(void)
5931 {
5932         struct worker *worker = current_wq_worker();
5933 
5934         return worker && worker->rescue_wq;
5935 }
5936 
5937 /**
5938  * workqueue_congested - test whether a workqueue is congested
5939  * @cpu: CPU in question
5940  * @wq: target workqueue
5941  *
5942  * Test whether @wq's cpu workqueue for @cpu is congested.  There is
5943  * no synchronization around this function and the test result is
5944  * unreliable and only useful as advisory hints or for debugging.
5945  *
5946  * If @cpu is WORK_CPU_UNBOUND, the test is performed on the local CPU.
5947  *
5948  * With the exception of ordered workqueues, all workqueues have per-cpu
5949  * pool_workqueues, each with its own congested state. A workqueue being
5950  * congested on one CPU doesn't mean that the workqueue is contested on any
5951  * other CPUs.
5952  *
5953  * Return:
5954  * %true if congested, %false otherwise.
5955  */
5956 bool workqueue_congested(int cpu, struct workqueue_struct *wq)
5957 {
5958         struct pool_workqueue *pwq;
5959         bool ret;
5960 
5961         rcu_read_lock();
5962         preempt_disable();
5963 
5964         if (cpu == WORK_CPU_UNBOUND)
5965                 cpu = smp_processor_id();
5966 
5967         pwq = *per_cpu_ptr(wq->cpu_pwq, cpu);
5968         ret = !list_empty(&pwq->inactive_works);
5969 
5970         preempt_enable();
5971         rcu_read_unlock();
5972 
5973         return ret;
5974 }
5975 EXPORT_SYMBOL_GPL(workqueue_congested);
5976 
5977 /**
5978  * work_busy - test whether a work is currently pending or running
5979  * @work: the work to be tested
5980  *
5981  * Test whether @work is currently pending or running.  There is no
5982  * synchronization around this function and the test result is
5983  * unreliable and only useful as advisory hints or for debugging.
5984  *
5985  * Return:
5986  * OR'd bitmask of WORK_BUSY_* bits.
5987  */
5988 unsigned int work_busy(struct work_struct *work)
5989 {
5990         struct worker_pool *pool;
5991         unsigned long irq_flags;
5992         unsigned int ret = 0;
5993 
5994         if (work_pending(work))
5995                 ret |= WORK_BUSY_PENDING;
5996 
5997         rcu_read_lock();
5998         pool = get_work_pool(work);
5999         if (pool) {
6000                 raw_spin_lock_irqsave(&pool->lock, irq_flags);
6001                 if (find_worker_executing_work(pool, work))
6002                         ret |= WORK_BUSY_RUNNING;
6003                 raw_spin_unlock_irqrestore(&pool->lock, irq_flags);
6004         }
6005         rcu_read_unlock();
6006 
6007         return ret;
6008 }
6009 EXPORT_SYMBOL_GPL(work_busy);
6010 
6011 /**
6012  * set_worker_desc - set description for the current work item
6013  * @fmt: printf-style format string
6014  * @...: arguments for the format string
6015  *
6016  * This function can be called by a running work function to describe what
6017  * the work item is about.  If the worker task gets dumped, this
6018  * information will be printed out together to help debugging.  The
6019  * description can be at most WORKER_DESC_LEN including the trailing '\0'.
6020  */
6021 void set_worker_desc(const char *fmt, ...)
6022 {
6023         struct worker *worker = current_wq_worker();
6024         va_list args;
6025 
6026         if (worker) {
6027                 va_start(args, fmt);
6028                 vsnprintf(worker->desc, sizeof(worker->desc), fmt, args);
6029                 va_end(args);
6030         }
6031 }
6032 EXPORT_SYMBOL_GPL(set_worker_desc);
6033 
6034 /**
6035  * print_worker_info - print out worker information and description
6036  * @log_lvl: the log level to use when printing
6037  * @task: target task
6038  *
6039  * If @task is a worker and currently executing a work item, print out the
6040  * name of the workqueue being serviced and worker description set with
6041  * set_worker_desc() by the currently executing work item.
6042  *
6043  * This function can be safely called on any task as long as the
6044  * task_struct itself is accessible.  While safe, this function isn't
6045  * synchronized and may print out mixups or garbages of limited length.
6046  */
6047 void print_worker_info(const char *log_lvl, struct task_struct *task)
6048 {
6049         work_func_t *fn = NULL;
6050         char name[WQ_NAME_LEN] = { };
6051         char desc[WORKER_DESC_LEN] = { };
6052         struct pool_workqueue *pwq = NULL;
6053         struct workqueue_struct *wq = NULL;
6054         struct worker *worker;
6055 
6056         if (!(task->flags & PF_WQ_WORKER))
6057                 return;
6058 
6059         /*
6060          * This function is called without any synchronization and @task
6061          * could be in any state.  Be careful with dereferences.
6062          */
6063         worker = kthread_probe_data(task);
6064 
6065         /*
6066          * Carefully copy the associated workqueue's workfn, name and desc.
6067          * Keep the original last '\0' in case the original is garbage.
6068          */
6069         copy_from_kernel_nofault(&fn, &worker->current_func, sizeof(fn));
6070         copy_from_kernel_nofault(&pwq, &worker->current_pwq, sizeof(pwq));
6071         copy_from_kernel_nofault(&wq, &pwq->wq, sizeof(wq));
6072         copy_from_kernel_nofault(name, wq->name, sizeof(name) - 1);
6073         copy_from_kernel_nofault(desc, worker->desc, sizeof(desc) - 1);
6074 
6075         if (fn || name[0] || desc[0]) {
6076                 printk("%sWorkqueue: %s %ps", log_lvl, name, fn);
6077                 if (strcmp(name, desc))
6078                         pr_cont(" (%s)", desc);
6079                 pr_cont("\n");
6080         }
6081 }
6082 
6083 static void pr_cont_pool_info(struct worker_pool *pool)
6084 {
6085         pr_cont(" cpus=%*pbl", nr_cpumask_bits, pool->attrs->cpumask);
6086         if (pool->node != NUMA_NO_NODE)
6087                 pr_cont(" node=%d", pool->node);
6088         pr_cont(" flags=0x%x", pool->flags);
6089         if (pool->flags & POOL_BH)
6090                 pr_cont(" bh%s",
6091                         pool->attrs->nice == HIGHPRI_NICE_LEVEL ? "-hi" : "");
6092         else
6093                 pr_cont(" nice=%d", pool->attrs->nice);
6094 }
6095 
6096 static void pr_cont_worker_id(struct worker *worker)
6097 {
6098         struct worker_pool *pool = worker->pool;
6099 
6100         if (pool->flags & WQ_BH)
6101                 pr_cont("bh%s",
6102                         pool->attrs->nice == HIGHPRI_NICE_LEVEL ? "-hi" : "");
6103         else
6104                 pr_cont("%d%s", task_pid_nr(worker->task),
6105                         worker->rescue_wq ? "(RESCUER)" : "");
6106 }
6107 
6108 struct pr_cont_work_struct {
6109         bool comma;
6110         work_func_t func;
6111         long ctr;
6112 };
6113 
6114 static void pr_cont_work_flush(bool comma, work_func_t func, struct pr_cont_work_struct *pcwsp)
6115 {
6116         if (!pcwsp->ctr)
6117                 goto out_record;
6118         if (func == pcwsp->func) {
6119                 pcwsp->ctr++;
6120                 return;
6121         }
6122         if (pcwsp->ctr == 1)
6123                 pr_cont("%s %ps", pcwsp->comma ? "," : "", pcwsp->func);
6124         else
6125                 pr_cont("%s %ld*%ps", pcwsp->comma ? "," : "", pcwsp->ctr, pcwsp->func);
6126         pcwsp->ctr = 0;
6127 out_record:
6128         if ((long)func == -1L)
6129                 return;
6130         pcwsp->comma = comma;
6131         pcwsp->func = func;
6132         pcwsp->ctr = 1;
6133 }
6134 
6135 static void pr_cont_work(bool comma, struct work_struct *work, struct pr_cont_work_struct *pcwsp)
6136 {
6137         if (work->func == wq_barrier_func) {
6138                 struct wq_barrier *barr;
6139 
6140                 barr = container_of(work, struct wq_barrier, work);
6141 
6142                 pr_cont_work_flush(comma, (work_func_t)-1, pcwsp);
6143                 pr_cont("%s BAR(%d)", comma ? "," : "",
6144                         task_pid_nr(barr->task));
6145         } else {
6146                 if (!comma)
6147                         pr_cont_work_flush(comma, (work_func_t)-1, pcwsp);
6148                 pr_cont_work_flush(comma, work->func, pcwsp);
6149         }
6150 }
6151 
6152 static void show_pwq(struct pool_workqueue *pwq)
6153 {
6154         struct pr_cont_work_struct pcws = { .ctr = 0, };
6155         struct worker_pool *pool = pwq->pool;
6156         struct work_struct *work;
6157         struct worker *worker;
6158         bool has_in_flight = false, has_pending = false;
6159         int bkt;
6160 
6161         pr_info("  pwq %d:", pool->id);
6162         pr_cont_pool_info(pool);
6163 
6164         pr_cont(" active=%d refcnt=%d%s\n",
6165                 pwq->nr_active, pwq->refcnt,
6166                 !list_empty(&pwq->mayday_node) ? " MAYDAY" : "");
6167 
6168         hash_for_each(pool->busy_hash, bkt, worker, hentry) {
6169                 if (worker->current_pwq == pwq) {
6170                         has_in_flight = true;
6171                         break;
6172                 }
6173         }
6174         if (has_in_flight) {
6175                 bool comma = false;
6176 
6177                 pr_info("    in-flight:");
6178                 hash_for_each(pool->busy_hash, bkt, worker, hentry) {
6179                         if (worker->current_pwq != pwq)
6180                                 continue;
6181 
6182                         pr_cont(" %s", comma ? "," : "");
6183                         pr_cont_worker_id(worker);
6184                         pr_cont(":%ps", worker->current_func);
6185                         list_for_each_entry(work, &worker->scheduled, entry)
6186                                 pr_cont_work(false, work, &pcws);
6187                         pr_cont_work_flush(comma, (work_func_t)-1L, &pcws);
6188                         comma = true;
6189                 }
6190                 pr_cont("\n");
6191         }
6192 
6193         list_for_each_entry(work, &pool->worklist, entry) {
6194                 if (get_work_pwq(work) == pwq) {
6195                         has_pending = true;
6196                         break;
6197                 }
6198         }
6199         if (has_pending) {
6200                 bool comma = false;
6201 
6202                 pr_info("    pending:");
6203                 list_for_each_entry(work, &pool->worklist, entry) {
6204                         if (get_work_pwq(work) != pwq)
6205                                 continue;
6206 
6207                         pr_cont_work(comma, work, &pcws);
6208                         comma = !(*work_data_bits(work) & WORK_STRUCT_LINKED);
6209                 }
6210                 pr_cont_work_flush(comma, (work_func_t)-1L, &pcws);
6211                 pr_cont("\n");
6212         }
6213 
6214         if (!list_empty(&pwq->inactive_works)) {
6215                 bool comma = false;
6216 
6217                 pr_info("    inactive:");
6218                 list_for_each_entry(work, &pwq->inactive_works, entry) {
6219                         pr_cont_work(comma, work, &pcws);
6220                         comma = !(*work_data_bits(work) & WORK_STRUCT_LINKED);
6221                 }
6222                 pr_cont_work_flush(comma, (work_func_t)-1L, &pcws);
6223                 pr_cont("\n");
6224         }
6225 }
6226 
6227 /**
6228  * show_one_workqueue - dump state of specified workqueue
6229  * @wq: workqueue whose state will be printed
6230  */
6231 void show_one_workqueue(struct workqueue_struct *wq)
6232 {
6233         struct pool_workqueue *pwq;
6234         bool idle = true;
6235         unsigned long irq_flags;
6236 
6237         for_each_pwq(pwq, wq) {
6238                 if (!pwq_is_empty(pwq)) {
6239                         idle = false;
6240                         break;
6241                 }
6242         }
6243         if (idle) /* Nothing to print for idle workqueue */
6244                 return;
6245 
6246         pr_info("workqueue %s: flags=0x%x\n", wq->name, wq->flags);
6247 
6248         for_each_pwq(pwq, wq) {
6249                 raw_spin_lock_irqsave(&pwq->pool->lock, irq_flags);
6250                 if (!pwq_is_empty(pwq)) {
6251                         /*
6252                          * Defer printing to avoid deadlocks in console
6253                          * drivers that queue work while holding locks
6254                          * also taken in their write paths.
6255                          */
6256                         printk_deferred_enter();
6257                         show_pwq(pwq);
6258                         printk_deferred_exit();
6259                 }
6260                 raw_spin_unlock_irqrestore(&pwq->pool->lock, irq_flags);
6261                 /*
6262                  * We could be printing a lot from atomic context, e.g.
6263                  * sysrq-t -> show_all_workqueues(). Avoid triggering
6264                  * hard lockup.
6265                  */
6266                 touch_nmi_watchdog();
6267         }
6268 
6269 }
6270 
6271 /**
6272  * show_one_worker_pool - dump state of specified worker pool
6273  * @pool: worker pool whose state will be printed
6274  */
6275 static void show_one_worker_pool(struct worker_pool *pool)
6276 {
6277         struct worker *worker;
6278         bool first = true;
6279         unsigned long irq_flags;
6280         unsigned long hung = 0;
6281 
6282         raw_spin_lock_irqsave(&pool->lock, irq_flags);
6283         if (pool->nr_workers == pool->nr_idle)
6284                 goto next_pool;
6285 
6286         /* How long the first pending work is waiting for a worker. */
6287         if (!list_empty(&pool->worklist))
6288                 hung = jiffies_to_msecs(jiffies - pool->watchdog_ts) / 1000;
6289 
6290         /*
6291          * Defer printing to avoid deadlocks in console drivers that
6292          * queue work while holding locks also taken in their write
6293          * paths.
6294          */
6295         printk_deferred_enter();
6296         pr_info("pool %d:", pool->id);
6297         pr_cont_pool_info(pool);
6298         pr_cont(" hung=%lus workers=%d", hung, pool->nr_workers);
6299         if (pool->manager)
6300                 pr_cont(" manager: %d",
6301                         task_pid_nr(pool->manager->task));
6302         list_for_each_entry(worker, &pool->idle_list, entry) {
6303                 pr_cont(" %s", first ? "idle: " : "");
6304                 pr_cont_worker_id(worker);
6305                 first = false;
6306         }
6307         pr_cont("\n");
6308         printk_deferred_exit();
6309 next_pool:
6310         raw_spin_unlock_irqrestore(&pool->lock, irq_flags);
6311         /*
6312          * We could be printing a lot from atomic context, e.g.
6313          * sysrq-t -> show_all_workqueues(). Avoid triggering
6314          * hard lockup.
6315          */
6316         touch_nmi_watchdog();
6317 
6318 }
6319 
6320 /**
6321  * show_all_workqueues - dump workqueue state
6322  *
6323  * Called from a sysrq handler and prints out all busy workqueues and pools.
6324  */
6325 void show_all_workqueues(void)
6326 {
6327         struct workqueue_struct *wq;
6328         struct worker_pool *pool;
6329         int pi;
6330 
6331         rcu_read_lock();
6332 
6333         pr_info("Showing busy workqueues and worker pools:\n");
6334 
6335         list_for_each_entry_rcu(wq, &workqueues, list)
6336                 show_one_workqueue(wq);
6337 
6338         for_each_pool(pool, pi)
6339                 show_one_worker_pool(pool);
6340 
6341         rcu_read_unlock();
6342 }
6343 
6344 /**
6345  * show_freezable_workqueues - dump freezable workqueue state
6346  *
6347  * Called from try_to_freeze_tasks() and prints out all freezable workqueues
6348  * still busy.
6349  */
6350 void show_freezable_workqueues(void)
6351 {
6352         struct workqueue_struct *wq;
6353 
6354         rcu_read_lock();
6355 
6356         pr_info("Showing freezable workqueues that are still busy:\n");
6357 
6358         list_for_each_entry_rcu(wq, &workqueues, list) {
6359                 if (!(wq->flags & WQ_FREEZABLE))
6360                         continue;
6361                 show_one_workqueue(wq);
6362         }
6363 
6364         rcu_read_unlock();
6365 }
6366 
6367 /* used to show worker information through /proc/PID/{comm,stat,status} */
6368 void wq_worker_comm(char *buf, size_t size, struct task_struct *task)
6369 {
6370         /* stabilize PF_WQ_WORKER and worker pool association */
6371         mutex_lock(&wq_pool_attach_mutex);
6372 
6373         if (task->flags & PF_WQ_WORKER) {
6374                 struct worker *worker = kthread_data(task);
6375                 struct worker_pool *pool = worker->pool;
6376                 int off;
6377 
6378                 off = format_worker_id(buf, size, worker, pool);
6379 
6380                 if (pool) {
6381                         raw_spin_lock_irq(&pool->lock);
6382                         /*
6383                          * ->desc tracks information (wq name or
6384                          * set_worker_desc()) for the latest execution.  If
6385                          * current, prepend '+', otherwise '-'.
6386                          */
6387                         if (worker->desc[0] != '\0') {
6388                                 if (worker->current_work)
6389                                         scnprintf(buf + off, size - off, "+%s",
6390                                                   worker->desc);
6391                                 else
6392                                         scnprintf(buf + off, size - off, "-%s",
6393                                                   worker->desc);
6394                         }
6395                         raw_spin_unlock_irq(&pool->lock);
6396                 }
6397         } else {
6398                 strscpy(buf, task->comm, size);
6399         }
6400 
6401         mutex_unlock(&wq_pool_attach_mutex);
6402 }
6403 
6404 #ifdef CONFIG_SMP
6405 
6406 /*
6407  * CPU hotplug.
6408  *
6409  * There are two challenges in supporting CPU hotplug.  Firstly, there
6410  * are a lot of assumptions on strong associations among work, pwq and
6411  * pool which make migrating pending and scheduled works very
6412  * difficult to implement without impacting hot paths.  Secondly,
6413  * worker pools serve mix of short, long and very long running works making
6414  * blocked draining impractical.
6415  *
6416  * This is solved by allowing the pools to be disassociated from the CPU
6417  * running as an unbound one and allowing it to be reattached later if the
6418  * cpu comes back online.
6419  */
6420 
6421 static void unbind_workers(int cpu)
6422 {
6423         struct worker_pool *pool;
6424         struct worker *worker;
6425 
6426         for_each_cpu_worker_pool(pool, cpu) {
6427                 mutex_lock(&wq_pool_attach_mutex);
6428                 raw_spin_lock_irq(&pool->lock);
6429 
6430                 /*
6431                  * We've blocked all attach/detach operations. Make all workers
6432                  * unbound and set DISASSOCIATED.  Before this, all workers
6433                  * must be on the cpu.  After this, they may become diasporas.
6434                  * And the preemption disabled section in their sched callbacks
6435                  * are guaranteed to see WORKER_UNBOUND since the code here
6436                  * is on the same cpu.
6437                  */
6438                 for_each_pool_worker(worker, pool)
6439                         worker->flags |= WORKER_UNBOUND;
6440 
6441                 pool->flags |= POOL_DISASSOCIATED;
6442 
6443                 /*
6444                  * The handling of nr_running in sched callbacks are disabled
6445                  * now.  Zap nr_running.  After this, nr_running stays zero and
6446                  * need_more_worker() and keep_working() are always true as
6447                  * long as the worklist is not empty.  This pool now behaves as
6448                  * an unbound (in terms of concurrency management) pool which
6449                  * are served by workers tied to the pool.
6450                  */
6451                 pool->nr_running = 0;
6452 
6453                 /*
6454                  * With concurrency management just turned off, a busy
6455                  * worker blocking could lead to lengthy stalls.  Kick off
6456                  * unbound chain execution of currently pending work items.
6457                  */
6458                 kick_pool(pool);
6459 
6460                 raw_spin_unlock_irq(&pool->lock);
6461 
6462                 for_each_pool_worker(worker, pool)
6463                         unbind_worker(worker);
6464 
6465                 mutex_unlock(&wq_pool_attach_mutex);
6466         }
6467 }
6468 
6469 /**
6470  * rebind_workers - rebind all workers of a pool to the associated CPU
6471  * @pool: pool of interest
6472  *
6473  * @pool->cpu is coming online.  Rebind all workers to the CPU.
6474  */
6475 static void rebind_workers(struct worker_pool *pool)
6476 {
6477         struct worker *worker;
6478 
6479         lockdep_assert_held(&wq_pool_attach_mutex);
6480 
6481         /*
6482          * Restore CPU affinity of all workers.  As all idle workers should
6483          * be on the run-queue of the associated CPU before any local
6484          * wake-ups for concurrency management happen, restore CPU affinity
6485          * of all workers first and then clear UNBOUND.  As we're called
6486          * from CPU_ONLINE, the following shouldn't fail.
6487          */
6488         for_each_pool_worker(worker, pool) {
6489                 kthread_set_per_cpu(worker->task, pool->cpu);
6490                 WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task,
6491                                                   pool_allowed_cpus(pool)) < 0);
6492         }
6493 
6494         raw_spin_lock_irq(&pool->lock);
6495 
6496         pool->flags &= ~POOL_DISASSOCIATED;
6497 
6498         for_each_pool_worker(worker, pool) {
6499                 unsigned int worker_flags = worker->flags;
6500 
6501                 /*
6502                  * We want to clear UNBOUND but can't directly call
6503                  * worker_clr_flags() or adjust nr_running.  Atomically
6504                  * replace UNBOUND with another NOT_RUNNING flag REBOUND.
6505                  * @worker will clear REBOUND using worker_clr_flags() when
6506                  * it initiates the next execution cycle thus restoring
6507                  * concurrency management.  Note that when or whether
6508                  * @worker clears REBOUND doesn't affect correctness.
6509                  *
6510                  * WRITE_ONCE() is necessary because @worker->flags may be
6511                  * tested without holding any lock in
6512                  * wq_worker_running().  Without it, NOT_RUNNING test may
6513                  * fail incorrectly leading to premature concurrency
6514                  * management operations.
6515                  */
6516                 WARN_ON_ONCE(!(worker_flags & WORKER_UNBOUND));
6517                 worker_flags |= WORKER_REBOUND;
6518                 worker_flags &= ~WORKER_UNBOUND;
6519                 WRITE_ONCE(worker->flags, worker_flags);
6520         }
6521 
6522         raw_spin_unlock_irq(&pool->lock);
6523 }
6524 
6525 /**
6526  * restore_unbound_workers_cpumask - restore cpumask of unbound workers
6527  * @pool: unbound pool of interest
6528  * @cpu: the CPU which is coming up
6529  *
6530  * An unbound pool may end up with a cpumask which doesn't have any online
6531  * CPUs.  When a worker of such pool get scheduled, the scheduler resets
6532  * its cpus_allowed.  If @cpu is in @pool's cpumask which didn't have any
6533  * online CPU before, cpus_allowed of all its workers should be restored.
6534  */
6535 static void restore_unbound_workers_cpumask(struct worker_pool *pool, int cpu)
6536 {
6537         static cpumask_t cpumask;
6538         struct worker *worker;
6539 
6540         lockdep_assert_held(&wq_pool_attach_mutex);
6541 
6542         /* is @cpu allowed for @pool? */
6543         if (!cpumask_test_cpu(cpu, pool->attrs->cpumask))
6544                 return;
6545 
6546         cpumask_and(&cpumask, pool->attrs->cpumask, cpu_online_mask);
6547 
6548         /* as we're called from CPU_ONLINE, the following shouldn't fail */
6549         for_each_pool_worker(worker, pool)
6550                 WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, &cpumask) < 0);
6551 }
6552 
6553 int workqueue_prepare_cpu(unsigned int cpu)
6554 {
6555         struct worker_pool *pool;
6556 
6557         for_each_cpu_worker_pool(pool, cpu) {
6558                 if (pool->nr_workers)
6559                         continue;
6560                 if (!create_worker(pool))
6561                         return -ENOMEM;
6562         }
6563         return 0;
6564 }
6565 
6566 int workqueue_online_cpu(unsigned int cpu)
6567 {
6568         struct worker_pool *pool;
6569         struct workqueue_struct *wq;
6570         int pi;
6571 
6572         mutex_lock(&wq_pool_mutex);
6573 
6574         cpumask_set_cpu(cpu, wq_online_cpumask);
6575 
6576         for_each_pool(pool, pi) {
6577                 /* BH pools aren't affected by hotplug */
6578                 if (pool->flags & POOL_BH)
6579                         continue;
6580 
6581                 mutex_lock(&wq_pool_attach_mutex);
6582                 if (pool->cpu == cpu)
6583                         rebind_workers(pool);
6584                 else if (pool->cpu < 0)
6585                         restore_unbound_workers_cpumask(pool, cpu);
6586                 mutex_unlock(&wq_pool_attach_mutex);
6587         }
6588 
6589         /* update pod affinity of unbound workqueues */
6590         list_for_each_entry(wq, &workqueues, list) {
6591                 struct workqueue_attrs *attrs = wq->unbound_attrs;
6592 
6593                 if (attrs) {
6594                         const struct wq_pod_type *pt = wqattrs_pod_type(attrs);
6595                         int tcpu;
6596 
6597                         for_each_cpu(tcpu, pt->pod_cpus[pt->cpu_pod[cpu]])
6598                                 unbound_wq_update_pwq(wq, tcpu);
6599 
6600                         mutex_lock(&wq->mutex);
6601                         wq_update_node_max_active(wq, -1);
6602                         mutex_unlock(&wq->mutex);
6603                 }
6604         }
6605 
6606         mutex_unlock(&wq_pool_mutex);
6607         return 0;
6608 }
6609 
6610 int workqueue_offline_cpu(unsigned int cpu)
6611 {
6612         struct workqueue_struct *wq;
6613 
6614         /* unbinding per-cpu workers should happen on the local CPU */
6615         if (WARN_ON(cpu != smp_processor_id()))
6616                 return -1;
6617 
6618         unbind_workers(cpu);
6619 
6620         /* update pod affinity of unbound workqueues */
6621         mutex_lock(&wq_pool_mutex);
6622 
6623         cpumask_clear_cpu(cpu, wq_online_cpumask);
6624 
6625         list_for_each_entry(wq, &workqueues, list) {
6626                 struct workqueue_attrs *attrs = wq->unbound_attrs;
6627 
6628                 if (attrs) {
6629                         const struct wq_pod_type *pt = wqattrs_pod_type(attrs);
6630                         int tcpu;
6631 
6632                         for_each_cpu(tcpu, pt->pod_cpus[pt->cpu_pod[cpu]])
6633                                 unbound_wq_update_pwq(wq, tcpu);
6634 
6635                         mutex_lock(&wq->mutex);
6636                         wq_update_node_max_active(wq, cpu);
6637                         mutex_unlock(&wq->mutex);
6638                 }
6639         }
6640         mutex_unlock(&wq_pool_mutex);
6641 
6642         return 0;
6643 }
6644 
6645 struct work_for_cpu {
6646         struct work_struct work;
6647         long (*fn)(void *);
6648         void *arg;
6649         long ret;
6650 };
6651 
6652 static void work_for_cpu_fn(struct work_struct *work)
6653 {
6654         struct work_for_cpu *wfc = container_of(work, struct work_for_cpu, work);
6655 
6656         wfc->ret = wfc->fn(wfc->arg);
6657 }
6658 
6659 /**
6660  * work_on_cpu_key - run a function in thread context on a particular cpu
6661  * @cpu: the cpu to run on
6662  * @fn: the function to run
6663  * @arg: the function arg
6664  * @key: The lock class key for lock debugging purposes
6665  *
6666  * It is up to the caller to ensure that the cpu doesn't go offline.
6667  * The caller must not hold any locks which would prevent @fn from completing.
6668  *
6669  * Return: The value @fn returns.
6670  */
6671 long work_on_cpu_key(int cpu, long (*fn)(void *),
6672                      void *arg, struct lock_class_key *key)
6673 {
6674         struct work_for_cpu wfc = { .fn = fn, .arg = arg };
6675 
6676         INIT_WORK_ONSTACK_KEY(&wfc.work, work_for_cpu_fn, key);
6677         schedule_work_on(cpu, &wfc.work);
6678         flush_work(&wfc.work);
6679         destroy_work_on_stack(&wfc.work);
6680         return wfc.ret;
6681 }
6682 EXPORT_SYMBOL_GPL(work_on_cpu_key);
6683 
6684 /**
6685  * work_on_cpu_safe_key - run a function in thread context on a particular cpu
6686  * @cpu: the cpu to run on
6687  * @fn:  the function to run
6688  * @arg: the function argument
6689  * @key: The lock class key for lock debugging purposes
6690  *
6691  * Disables CPU hotplug and calls work_on_cpu(). The caller must not hold
6692  * any locks which would prevent @fn from completing.
6693  *
6694  * Return: The value @fn returns.
6695  */
6696 long work_on_cpu_safe_key(int cpu, long (*fn)(void *),
6697                           void *arg, struct lock_class_key *key)
6698 {
6699         long ret = -ENODEV;
6700 
6701         cpus_read_lock();
6702         if (cpu_online(cpu))
6703                 ret = work_on_cpu_key(cpu, fn, arg, key);
6704         cpus_read_unlock();
6705         return ret;
6706 }
6707 EXPORT_SYMBOL_GPL(work_on_cpu_safe_key);
6708 #endif /* CONFIG_SMP */
6709 
6710 #ifdef CONFIG_FREEZER
6711 
6712 /**
6713  * freeze_workqueues_begin - begin freezing workqueues
6714  *
6715  * Start freezing workqueues.  After this function returns, all freezable
6716  * workqueues will queue new works to their inactive_works list instead of
6717  * pool->worklist.
6718  *
6719  * CONTEXT:
6720  * Grabs and releases wq_pool_mutex, wq->mutex and pool->lock's.
6721  */
6722 void freeze_workqueues_begin(void)
6723 {
6724         struct workqueue_struct *wq;
6725 
6726         mutex_lock(&wq_pool_mutex);
6727 
6728         WARN_ON_ONCE(workqueue_freezing);
6729         workqueue_freezing = true;
6730 
6731         list_for_each_entry(wq, &workqueues, list) {
6732                 mutex_lock(&wq->mutex);
6733                 wq_adjust_max_active(wq);
6734                 mutex_unlock(&wq->mutex);
6735         }
6736 
6737         mutex_unlock(&wq_pool_mutex);
6738 }
6739 
6740 /**
6741  * freeze_workqueues_busy - are freezable workqueues still busy?
6742  *
6743  * Check whether freezing is complete.  This function must be called
6744  * between freeze_workqueues_begin() and thaw_workqueues().
6745  *
6746  * CONTEXT:
6747  * Grabs and releases wq_pool_mutex.
6748  *
6749  * Return:
6750  * %true if some freezable workqueues are still busy.  %false if freezing
6751  * is complete.
6752  */
6753 bool freeze_workqueues_busy(void)
6754 {
6755         bool busy = false;
6756         struct workqueue_struct *wq;
6757         struct pool_workqueue *pwq;
6758 
6759         mutex_lock(&wq_pool_mutex);
6760 
6761         WARN_ON_ONCE(!workqueue_freezing);
6762 
6763         list_for_each_entry(wq, &workqueues, list) {
6764                 if (!(wq->flags & WQ_FREEZABLE))
6765                         continue;
6766                 /*
6767                  * nr_active is monotonically decreasing.  It's safe
6768                  * to peek without lock.
6769                  */
6770                 rcu_read_lock();
6771                 for_each_pwq(pwq, wq) {
6772                         WARN_ON_ONCE(pwq->nr_active < 0);
6773                         if (pwq->nr_active) {
6774                                 busy = true;
6775                                 rcu_read_unlock();
6776                                 goto out_unlock;
6777                         }
6778                 }
6779                 rcu_read_unlock();
6780         }
6781 out_unlock:
6782         mutex_unlock(&wq_pool_mutex);
6783         return busy;
6784 }
6785 
6786 /**
6787  * thaw_workqueues - thaw workqueues
6788  *
6789  * Thaw workqueues.  Normal queueing is restored and all collected
6790  * frozen works are transferred to their respective pool worklists.
6791  *
6792  * CONTEXT:
6793  * Grabs and releases wq_pool_mutex, wq->mutex and pool->lock's.
6794  */
6795 void thaw_workqueues(void)
6796 {
6797         struct workqueue_struct *wq;
6798 
6799         mutex_lock(&wq_pool_mutex);
6800 
6801         if (!workqueue_freezing)
6802                 goto out_unlock;
6803 
6804         workqueue_freezing = false;
6805 
6806         /* restore max_active and repopulate worklist */
6807         list_for_each_entry(wq, &workqueues, list) {
6808                 mutex_lock(&wq->mutex);
6809                 wq_adjust_max_active(wq);
6810                 mutex_unlock(&wq->mutex);
6811         }
6812 
6813 out_unlock:
6814         mutex_unlock(&wq_pool_mutex);
6815 }
6816 #endif /* CONFIG_FREEZER */
6817 
6818 static int workqueue_apply_unbound_cpumask(const cpumask_var_t unbound_cpumask)
6819 {
6820         LIST_HEAD(ctxs);
6821         int ret = 0;
6822         struct workqueue_struct *wq;
6823         struct apply_wqattrs_ctx *ctx, *n;
6824 
6825         lockdep_assert_held(&wq_pool_mutex);
6826 
6827         list_for_each_entry(wq, &workqueues, list) {
6828                 if (!(wq->flags & WQ_UNBOUND) || (wq->flags & __WQ_DESTROYING))
6829                         continue;
6830 
6831                 ctx = apply_wqattrs_prepare(wq, wq->unbound_attrs, unbound_cpumask);
6832                 if (IS_ERR(ctx)) {
6833                         ret = PTR_ERR(ctx);
6834                         break;
6835                 }
6836 
6837                 list_add_tail(&ctx->list, &ctxs);
6838         }
6839 
6840         list_for_each_entry_safe(ctx, n, &ctxs, list) {
6841                 if (!ret)
6842                         apply_wqattrs_commit(ctx);
6843                 apply_wqattrs_cleanup(ctx);
6844         }
6845 
6846         if (!ret) {
6847                 mutex_lock(&wq_pool_attach_mutex);
6848                 cpumask_copy(wq_unbound_cpumask, unbound_cpumask);
6849                 mutex_unlock(&wq_pool_attach_mutex);
6850         }
6851         return ret;
6852 }
6853 
6854 /**
6855  * workqueue_unbound_exclude_cpumask - Exclude given CPUs from unbound cpumask
6856  * @exclude_cpumask: the cpumask to be excluded from wq_unbound_cpumask
6857  *
6858  * This function can be called from cpuset code to provide a set of isolated
6859  * CPUs that should be excluded from wq_unbound_cpumask.
6860  */
6861 int workqueue_unbound_exclude_cpumask(cpumask_var_t exclude_cpumask)
6862 {
6863         cpumask_var_t cpumask;
6864         int ret = 0;
6865 
6866         if (!zalloc_cpumask_var(&cpumask, GFP_KERNEL))
6867                 return -ENOMEM;
6868 
6869         mutex_lock(&wq_pool_mutex);
6870 
6871         /*
6872          * If the operation fails, it will fall back to
6873          * wq_requested_unbound_cpumask which is initially set to
6874          * (HK_TYPE_WQ ∩ HK_TYPE_DOMAIN) house keeping mask and rewritten
6875          * by any subsequent write to workqueue/cpumask sysfs file.
6876          */
6877         if (!cpumask_andnot(cpumask, wq_requested_unbound_cpumask, exclude_cpumask))
6878                 cpumask_copy(cpumask, wq_requested_unbound_cpumask);
6879         if (!cpumask_equal(cpumask, wq_unbound_cpumask))
6880                 ret = workqueue_apply_unbound_cpumask(cpumask);
6881 
6882         /* Save the current isolated cpumask & export it via sysfs */
6883         if (!ret)
6884                 cpumask_copy(wq_isolated_cpumask, exclude_cpumask);
6885 
6886         mutex_unlock(&wq_pool_mutex);
6887         free_cpumask_var(cpumask);
6888         return ret;
6889 }
6890 
6891 static int parse_affn_scope(const char *val)
6892 {
6893         int i;
6894 
6895         for (i = 0; i < ARRAY_SIZE(wq_affn_names); i++) {
6896                 if (!strncasecmp(val, wq_affn_names[i], strlen(wq_affn_names[i])))
6897                         return i;
6898         }
6899         return -EINVAL;
6900 }
6901 
6902 static int wq_affn_dfl_set(const char *val, const struct kernel_param *kp)
6903 {
6904         struct workqueue_struct *wq;
6905         int affn, cpu;
6906 
6907         affn = parse_affn_scope(val);
6908         if (affn < 0)
6909                 return affn;
6910         if (affn == WQ_AFFN_DFL)
6911                 return -EINVAL;
6912 
6913         cpus_read_lock();
6914         mutex_lock(&wq_pool_mutex);
6915 
6916         wq_affn_dfl = affn;
6917 
6918         list_for_each_entry(wq, &workqueues, list) {
6919                 for_each_online_cpu(cpu)
6920                         unbound_wq_update_pwq(wq, cpu);
6921         }
6922 
6923         mutex_unlock(&wq_pool_mutex);
6924         cpus_read_unlock();
6925 
6926         return 0;
6927 }
6928 
6929 static int wq_affn_dfl_get(char *buffer, const struct kernel_param *kp)
6930 {
6931         return scnprintf(buffer, PAGE_SIZE, "%s\n", wq_affn_names[wq_affn_dfl]);
6932 }
6933 
6934 static const struct kernel_param_ops wq_affn_dfl_ops = {
6935         .set    = wq_affn_dfl_set,
6936         .get    = wq_affn_dfl_get,
6937 };
6938 
6939 module_param_cb(default_affinity_scope, &wq_affn_dfl_ops, NULL, 0644);
6940 
6941 #ifdef CONFIG_SYSFS
6942 /*
6943  * Workqueues with WQ_SYSFS flag set is visible to userland via
6944  * /sys/bus/workqueue/devices/WQ_NAME.  All visible workqueues have the
6945  * following attributes.
6946  *
6947  *  per_cpu             RO bool : whether the workqueue is per-cpu or unbound
6948  *  max_active          RW int  : maximum number of in-flight work items
6949  *
6950  * Unbound workqueues have the following extra attributes.
6951  *
6952  *  nice                RW int  : nice value of the workers
6953  *  cpumask             RW mask : bitmask of allowed CPUs for the workers
6954  *  affinity_scope      RW str  : worker CPU affinity scope (cache, numa, none)
6955  *  affinity_strict     RW bool : worker CPU affinity is strict
6956  */
6957 struct wq_device {
6958         struct workqueue_struct         *wq;
6959         struct device                   dev;
6960 };
6961 
6962 static struct workqueue_struct *dev_to_wq(struct device *dev)
6963 {
6964         struct wq_device *wq_dev = container_of(dev, struct wq_device, dev);
6965 
6966         return wq_dev->wq;
6967 }
6968 
6969 static ssize_t per_cpu_show(struct device *dev, struct device_attribute *attr,
6970                             char *buf)
6971 {
6972         struct workqueue_struct *wq = dev_to_wq(dev);
6973 
6974         return scnprintf(buf, PAGE_SIZE, "%d\n", (bool)!(wq->flags & WQ_UNBOUND));
6975 }
6976 static DEVICE_ATTR_RO(per_cpu);
6977 
6978 static ssize_t max_active_show(struct device *dev,
6979                                struct device_attribute *attr, char *buf)
6980 {
6981         struct workqueue_struct *wq = dev_to_wq(dev);
6982 
6983         return scnprintf(buf, PAGE_SIZE, "%d\n", wq->saved_max_active);
6984 }
6985 
6986 static ssize_t max_active_store(struct device *dev,
6987                                 struct device_attribute *attr, const char *buf,
6988                                 size_t count)
6989 {
6990         struct workqueue_struct *wq = dev_to_wq(dev);
6991         int val;
6992 
6993         if (sscanf(buf, "%d", &val) != 1 || val <= 0)
6994                 return -EINVAL;
6995 
6996         workqueue_set_max_active(wq, val);
6997         return count;
6998 }
6999 static DEVICE_ATTR_RW(max_active);
7000 
7001 static struct attribute *wq_sysfs_attrs[] = {
7002         &dev_attr_per_cpu.attr,
7003         &dev_attr_max_active.attr,
7004         NULL,
7005 };
7006 ATTRIBUTE_GROUPS(wq_sysfs);
7007 
7008 static ssize_t wq_nice_show(struct device *dev, struct device_attribute *attr,
7009                             char *buf)
7010 {
7011         struct workqueue_struct *wq = dev_to_wq(dev);
7012         int written;
7013 
7014         mutex_lock(&wq->mutex);
7015         written = scnprintf(buf, PAGE_SIZE, "%d\n", wq->unbound_attrs->nice);
7016         mutex_unlock(&wq->mutex);
7017 
7018         return written;
7019 }
7020 
7021 /* prepare workqueue_attrs for sysfs store operations */
7022 static struct workqueue_attrs *wq_sysfs_prep_attrs(struct workqueue_struct *wq)
7023 {
7024         struct workqueue_attrs *attrs;
7025 
7026         lockdep_assert_held(&wq_pool_mutex);
7027 
7028         attrs = alloc_workqueue_attrs();
7029         if (!attrs)
7030                 return NULL;
7031 
7032         copy_workqueue_attrs(attrs, wq->unbound_attrs);
7033         return attrs;
7034 }
7035 
7036 static ssize_t wq_nice_store(struct device *dev, struct device_attribute *attr,
7037                              const char *buf, size_t count)
7038 {
7039         struct workqueue_struct *wq = dev_to_wq(dev);
7040         struct workqueue_attrs *attrs;
7041         int ret = -ENOMEM;
7042 
7043         apply_wqattrs_lock();
7044 
7045         attrs = wq_sysfs_prep_attrs(wq);
7046         if (!attrs)
7047                 goto out_unlock;
7048 
7049         if (sscanf(buf, "%d", &attrs->nice) == 1 &&
7050             attrs->nice >= MIN_NICE && attrs->nice <= MAX_NICE)
7051                 ret = apply_workqueue_attrs_locked(wq, attrs);
7052         else
7053                 ret = -EINVAL;
7054 
7055 out_unlock:
7056         apply_wqattrs_unlock();
7057         free_workqueue_attrs(attrs);
7058         return ret ?: count;
7059 }
7060 
7061 static ssize_t wq_cpumask_show(struct device *dev,
7062                                struct device_attribute *attr, char *buf)
7063 {
7064         struct workqueue_struct *wq = dev_to_wq(dev);
7065         int written;
7066 
7067         mutex_lock(&wq->mutex);
7068         written = scnprintf(buf, PAGE_SIZE, "%*pb\n",
7069                             cpumask_pr_args(wq->unbound_attrs->cpumask));
7070         mutex_unlock(&wq->mutex);
7071         return written;
7072 }
7073 
7074 static ssize_t wq_cpumask_store(struct device *dev,
7075                                 struct device_attribute *attr,
7076                                 const char *buf, size_t count)
7077 {
7078         struct workqueue_struct *wq = dev_to_wq(dev);
7079         struct workqueue_attrs *attrs;
7080         int ret = -ENOMEM;
7081 
7082         apply_wqattrs_lock();
7083 
7084         attrs = wq_sysfs_prep_attrs(wq);
7085         if (!attrs)
7086                 goto out_unlock;
7087 
7088         ret = cpumask_parse(buf, attrs->cpumask);
7089         if (!ret)
7090                 ret = apply_workqueue_attrs_locked(wq, attrs);
7091 
7092 out_unlock:
7093         apply_wqattrs_unlock();
7094         free_workqueue_attrs(attrs);
7095         return ret ?: count;
7096 }
7097 
7098 static ssize_t wq_affn_scope_show(struct device *dev,
7099                                   struct device_attribute *attr, char *buf)
7100 {
7101         struct workqueue_struct *wq = dev_to_wq(dev);
7102         int written;
7103 
7104         mutex_lock(&wq->mutex);
7105         if (wq->unbound_attrs->affn_scope == WQ_AFFN_DFL)
7106                 written = scnprintf(buf, PAGE_SIZE, "%s (%s)\n",
7107                                     wq_affn_names[WQ_AFFN_DFL],
7108                                     wq_affn_names[wq_affn_dfl]);
7109         else
7110                 written = scnprintf(buf, PAGE_SIZE, "%s\n",
7111                                     wq_affn_names[wq->unbound_attrs->affn_scope]);
7112         mutex_unlock(&wq->mutex);
7113 
7114         return written;
7115 }
7116 
7117 static ssize_t wq_affn_scope_store(struct device *dev,
7118                                    struct device_attribute *attr,
7119                                    const char *buf, size_t count)
7120 {
7121         struct workqueue_struct *wq = dev_to_wq(dev);
7122         struct workqueue_attrs *attrs;
7123         int affn, ret = -ENOMEM;
7124 
7125         affn = parse_affn_scope(buf);
7126         if (affn < 0)
7127                 return affn;
7128 
7129         apply_wqattrs_lock();
7130         attrs = wq_sysfs_prep_attrs(wq);
7131         if (attrs) {
7132                 attrs->affn_scope = affn;
7133                 ret = apply_workqueue_attrs_locked(wq, attrs);
7134         }
7135         apply_wqattrs_unlock();
7136         free_workqueue_attrs(attrs);
7137         return ret ?: count;
7138 }
7139 
7140 static ssize_t wq_affinity_strict_show(struct device *dev,
7141                                        struct device_attribute *attr, char *buf)
7142 {
7143         struct workqueue_struct *wq = dev_to_wq(dev);
7144 
7145         return scnprintf(buf, PAGE_SIZE, "%d\n",
7146                          wq->unbound_attrs->affn_strict);
7147 }
7148 
7149 static ssize_t wq_affinity_strict_store(struct device *dev,
7150                                         struct device_attribute *attr,
7151                                         const char *buf, size_t count)
7152 {
7153         struct workqueue_struct *wq = dev_to_wq(dev);
7154         struct workqueue_attrs *attrs;
7155         int v, ret = -ENOMEM;
7156 
7157         if (sscanf(buf, "%d", &v) != 1)
7158                 return -EINVAL;
7159 
7160         apply_wqattrs_lock();
7161         attrs = wq_sysfs_prep_attrs(wq);
7162         if (attrs) {
7163                 attrs->affn_strict = (bool)v;
7164                 ret = apply_workqueue_attrs_locked(wq, attrs);
7165         }
7166         apply_wqattrs_unlock();
7167         free_workqueue_attrs(attrs);
7168         return ret ?: count;
7169 }
7170 
7171 static struct device_attribute wq_sysfs_unbound_attrs[] = {
7172         __ATTR(nice, 0644, wq_nice_show, wq_nice_store),
7173         __ATTR(cpumask, 0644, wq_cpumask_show, wq_cpumask_store),
7174         __ATTR(affinity_scope, 0644, wq_affn_scope_show, wq_affn_scope_store),
7175         __ATTR(affinity_strict, 0644, wq_affinity_strict_show, wq_affinity_strict_store),
7176         __ATTR_NULL,
7177 };
7178 
7179 static const struct bus_type wq_subsys = {
7180         .name                           = "workqueue",
7181         .dev_groups                     = wq_sysfs_groups,
7182 };
7183 
7184 /**
7185  *  workqueue_set_unbound_cpumask - Set the low-level unbound cpumask
7186  *  @cpumask: the cpumask to set
7187  *
7188  *  The low-level workqueues cpumask is a global cpumask that limits
7189  *  the affinity of all unbound workqueues.  This function check the @cpumask
7190  *  and apply it to all unbound workqueues and updates all pwqs of them.
7191  *
7192  *  Return:     0       - Success
7193  *              -EINVAL - Invalid @cpumask
7194  *              -ENOMEM - Failed to allocate memory for attrs or pwqs.
7195  */
7196 static int workqueue_set_unbound_cpumask(cpumask_var_t cpumask)
7197 {
7198         int ret = -EINVAL;
7199 
7200         /*
7201          * Not excluding isolated cpus on purpose.
7202          * If the user wishes to include them, we allow that.
7203          */
7204         cpumask_and(cpumask, cpumask, cpu_possible_mask);
7205         if (!cpumask_empty(cpumask)) {
7206                 ret = 0;
7207                 apply_wqattrs_lock();
7208                 if (!cpumask_equal(cpumask, wq_unbound_cpumask))
7209                         ret = workqueue_apply_unbound_cpumask(cpumask);
7210                 if (!ret)
7211                         cpumask_copy(wq_requested_unbound_cpumask, cpumask);
7212                 apply_wqattrs_unlock();
7213         }
7214 
7215         return ret;
7216 }
7217 
7218 static ssize_t __wq_cpumask_show(struct device *dev,
7219                 struct device_attribute *attr, char *buf, cpumask_var_t mask)
7220 {
7221         int written;
7222 
7223         mutex_lock(&wq_pool_mutex);
7224         written = scnprintf(buf, PAGE_SIZE, "%*pb\n", cpumask_pr_args(mask));
7225         mutex_unlock(&wq_pool_mutex);
7226 
7227         return written;
7228 }
7229 
7230 static ssize_t cpumask_requested_show(struct device *dev,
7231                 struct device_attribute *attr, char *buf)
7232 {
7233         return __wq_cpumask_show(dev, attr, buf, wq_requested_unbound_cpumask);
7234 }
7235 static DEVICE_ATTR_RO(cpumask_requested);
7236 
7237 static ssize_t cpumask_isolated_show(struct device *dev,
7238                 struct device_attribute *attr, char *buf)
7239 {
7240         return __wq_cpumask_show(dev, attr, buf, wq_isolated_cpumask);
7241 }
7242 static DEVICE_ATTR_RO(cpumask_isolated);
7243 
7244 static ssize_t cpumask_show(struct device *dev,
7245                 struct device_attribute *attr, char *buf)
7246 {
7247         return __wq_cpumask_show(dev, attr, buf, wq_unbound_cpumask);
7248 }
7249 
7250 static ssize_t cpumask_store(struct device *dev,
7251                 struct device_attribute *attr, const char *buf, size_t count)
7252 {
7253         cpumask_var_t cpumask;
7254         int ret;
7255 
7256         if (!zalloc_cpumask_var(&cpumask, GFP_KERNEL))
7257                 return -ENOMEM;
7258 
7259         ret = cpumask_parse(buf, cpumask);
7260         if (!ret)
7261                 ret = workqueue_set_unbound_cpumask(cpumask);
7262 
7263         free_cpumask_var(cpumask);
7264         return ret ? ret : count;
7265 }
7266 static DEVICE_ATTR_RW(cpumask);
7267 
7268 static struct attribute *wq_sysfs_cpumask_attrs[] = {
7269         &dev_attr_cpumask.attr,
7270         &dev_attr_cpumask_requested.attr,
7271         &dev_attr_cpumask_isolated.attr,
7272         NULL,
7273 };
7274 ATTRIBUTE_GROUPS(wq_sysfs_cpumask);
7275 
7276 static int __init wq_sysfs_init(void)
7277 {
7278         return subsys_virtual_register(&wq_subsys, wq_sysfs_cpumask_groups);
7279 }
7280 core_initcall(wq_sysfs_init);
7281 
7282 static void wq_device_release(struct device *dev)
7283 {
7284         struct wq_device *wq_dev = container_of(dev, struct wq_device, dev);
7285 
7286         kfree(wq_dev);
7287 }
7288 
7289 /**
7290  * workqueue_sysfs_register - make a workqueue visible in sysfs
7291  * @wq: the workqueue to register
7292  *
7293  * Expose @wq in sysfs under /sys/bus/workqueue/devices.
7294  * alloc_workqueue*() automatically calls this function if WQ_SYSFS is set
7295  * which is the preferred method.
7296  *
7297  * Workqueue user should use this function directly iff it wants to apply
7298  * workqueue_attrs before making the workqueue visible in sysfs; otherwise,
7299  * apply_workqueue_attrs() may race against userland updating the
7300  * attributes.
7301  *
7302  * Return: 0 on success, -errno on failure.
7303  */
7304 int workqueue_sysfs_register(struct workqueue_struct *wq)
7305 {
7306         struct wq_device *wq_dev;
7307         int ret;
7308 
7309         /*
7310          * Adjusting max_active breaks ordering guarantee.  Disallow exposing
7311          * ordered workqueues.
7312          */
7313         if (WARN_ON(wq->flags & __WQ_ORDERED))
7314                 return -EINVAL;
7315 
7316         wq->wq_dev = wq_dev = kzalloc(sizeof(*wq_dev), GFP_KERNEL);
7317         if (!wq_dev)
7318                 return -ENOMEM;
7319 
7320         wq_dev->wq = wq;
7321         wq_dev->dev.bus = &wq_subsys;
7322         wq_dev->dev.release = wq_device_release;
7323         dev_set_name(&wq_dev->dev, "%s", wq->name);
7324 
7325         /*
7326          * unbound_attrs are created separately.  Suppress uevent until
7327          * everything is ready.
7328          */
7329         dev_set_uevent_suppress(&wq_dev->dev, true);
7330 
7331         ret = device_register(&wq_dev->dev);
7332         if (ret) {
7333                 put_device(&wq_dev->dev);
7334                 wq->wq_dev = NULL;
7335                 return ret;
7336         }
7337 
7338         if (wq->flags & WQ_UNBOUND) {
7339                 struct device_attribute *attr;
7340 
7341                 for (attr = wq_sysfs_unbound_attrs; attr->attr.name; attr++) {
7342                         ret = device_create_file(&wq_dev->dev, attr);
7343                         if (ret) {
7344                                 device_unregister(&wq_dev->dev);
7345                                 wq->wq_dev = NULL;
7346                                 return ret;
7347                         }
7348                 }
7349         }
7350 
7351         dev_set_uevent_suppress(&wq_dev->dev, false);
7352         kobject_uevent(&wq_dev->dev.kobj, KOBJ_ADD);
7353         return 0;
7354 }
7355 
7356 /**
7357  * workqueue_sysfs_unregister - undo workqueue_sysfs_register()
7358  * @wq: the workqueue to unregister
7359  *
7360  * If @wq is registered to sysfs by workqueue_sysfs_register(), unregister.
7361  */
7362 static void workqueue_sysfs_unregister(struct workqueue_struct *wq)
7363 {
7364         struct wq_device *wq_dev = wq->wq_dev;
7365 
7366         if (!wq->wq_dev)
7367                 return;
7368 
7369         wq->wq_dev = NULL;
7370         device_unregister(&wq_dev->dev);
7371 }
7372 #else   /* CONFIG_SYSFS */
7373 static void workqueue_sysfs_unregister(struct workqueue_struct *wq)     { }
7374 #endif  /* CONFIG_SYSFS */
7375 
7376 /*
7377  * Workqueue watchdog.
7378  *
7379  * Stall may be caused by various bugs - missing WQ_MEM_RECLAIM, illegal
7380  * flush dependency, a concurrency managed work item which stays RUNNING
7381  * indefinitely.  Workqueue stalls can be very difficult to debug as the
7382  * usual warning mechanisms don't trigger and internal workqueue state is
7383  * largely opaque.
7384  *
7385  * Workqueue watchdog monitors all worker pools periodically and dumps
7386  * state if some pools failed to make forward progress for a while where
7387  * forward progress is defined as the first item on ->worklist changing.
7388  *
7389  * This mechanism is controlled through the kernel parameter
7390  * "workqueue.watchdog_thresh" which can be updated at runtime through the
7391  * corresponding sysfs parameter file.
7392  */
7393 #ifdef CONFIG_WQ_WATCHDOG
7394 
7395 static unsigned long wq_watchdog_thresh = 30;
7396 static struct timer_list wq_watchdog_timer;
7397 
7398 static unsigned long wq_watchdog_touched = INITIAL_JIFFIES;
7399 static DEFINE_PER_CPU(unsigned long, wq_watchdog_touched_cpu) = INITIAL_JIFFIES;
7400 
7401 /*
7402  * Show workers that might prevent the processing of pending work items.
7403  * The only candidates are CPU-bound workers in the running state.
7404  * Pending work items should be handled by another idle worker
7405  * in all other situations.
7406  */
7407 static void show_cpu_pool_hog(struct worker_pool *pool)
7408 {
7409         struct worker *worker;
7410         unsigned long irq_flags;
7411         int bkt;
7412 
7413         raw_spin_lock_irqsave(&pool->lock, irq_flags);
7414 
7415         hash_for_each(pool->busy_hash, bkt, worker, hentry) {
7416                 if (task_is_running(worker->task)) {
7417                         /*
7418                          * Defer printing to avoid deadlocks in console
7419                          * drivers that queue work while holding locks
7420                          * also taken in their write paths.
7421                          */
7422                         printk_deferred_enter();
7423 
7424                         pr_info("pool %d:\n", pool->id);
7425                         sched_show_task(worker->task);
7426 
7427                         printk_deferred_exit();
7428                 }
7429         }
7430 
7431         raw_spin_unlock_irqrestore(&pool->lock, irq_flags);
7432 }
7433 
7434 static void show_cpu_pools_hogs(void)
7435 {
7436         struct worker_pool *pool;
7437         int pi;
7438 
7439         pr_info("Showing backtraces of running workers in stalled CPU-bound worker pools:\n");
7440 
7441         rcu_read_lock();
7442 
7443         for_each_pool(pool, pi) {
7444                 if (pool->cpu_stall)
7445                         show_cpu_pool_hog(pool);
7446 
7447         }
7448 
7449         rcu_read_unlock();
7450 }
7451 
7452 static void wq_watchdog_reset_touched(void)
7453 {
7454         int cpu;
7455 
7456         wq_watchdog_touched = jiffies;
7457         for_each_possible_cpu(cpu)
7458                 per_cpu(wq_watchdog_touched_cpu, cpu) = jiffies;
7459 }
7460 
7461 static void wq_watchdog_timer_fn(struct timer_list *unused)
7462 {
7463         unsigned long thresh = READ_ONCE(wq_watchdog_thresh) * HZ;
7464         bool lockup_detected = false;
7465         bool cpu_pool_stall = false;
7466         unsigned long now = jiffies;
7467         struct worker_pool *pool;
7468         int pi;
7469 
7470         if (!thresh)
7471                 return;
7472 
7473         rcu_read_lock();
7474 
7475         for_each_pool(pool, pi) {
7476                 unsigned long pool_ts, touched, ts;
7477 
7478                 pool->cpu_stall = false;
7479                 if (list_empty(&pool->worklist))
7480                         continue;
7481 
7482                 /*
7483                  * If a virtual machine is stopped by the host it can look to
7484                  * the watchdog like a stall.
7485                  */
7486                 kvm_check_and_clear_guest_paused();
7487 
7488                 /* get the latest of pool and touched timestamps */
7489                 if (pool->cpu >= 0)
7490                         touched = READ_ONCE(per_cpu(wq_watchdog_touched_cpu, pool->cpu));
7491                 else
7492                         touched = READ_ONCE(wq_watchdog_touched);
7493                 pool_ts = READ_ONCE(pool->watchdog_ts);
7494 
7495                 if (time_after(pool_ts, touched))
7496                         ts = pool_ts;
7497                 else
7498                         ts = touched;
7499 
7500                 /* did we stall? */
7501                 if (time_after(now, ts + thresh)) {
7502                         lockup_detected = true;
7503                         if (pool->cpu >= 0 && !(pool->flags & POOL_BH)) {
7504                                 pool->cpu_stall = true;
7505                                 cpu_pool_stall = true;
7506                         }
7507                         pr_emerg("BUG: workqueue lockup - pool");
7508                         pr_cont_pool_info(pool);
7509                         pr_cont(" stuck for %us!\n",
7510                                 jiffies_to_msecs(now - pool_ts) / 1000);
7511                 }
7512 
7513 
7514         }
7515 
7516         rcu_read_unlock();
7517 
7518         if (lockup_detected)
7519                 show_all_workqueues();
7520 
7521         if (cpu_pool_stall)
7522                 show_cpu_pools_hogs();
7523 
7524         wq_watchdog_reset_touched();
7525         mod_timer(&wq_watchdog_timer, jiffies + thresh);
7526 }
7527 
7528 notrace void wq_watchdog_touch(int cpu)
7529 {
7530         unsigned long thresh = READ_ONCE(wq_watchdog_thresh) * HZ;
7531         unsigned long touch_ts = READ_ONCE(wq_watchdog_touched);
7532         unsigned long now = jiffies;
7533 
7534         if (cpu >= 0)
7535                 per_cpu(wq_watchdog_touched_cpu, cpu) = now;
7536         else
7537                 WARN_ONCE(1, "%s should be called with valid CPU", __func__);
7538 
7539         /* Don't unnecessarily store to global cacheline */
7540         if (time_after(now, touch_ts + thresh / 4))
7541                 WRITE_ONCE(wq_watchdog_touched, jiffies);
7542 }
7543 
7544 static void wq_watchdog_set_thresh(unsigned long thresh)
7545 {
7546         wq_watchdog_thresh = 0;
7547         del_timer_sync(&wq_watchdog_timer);
7548 
7549         if (thresh) {
7550                 wq_watchdog_thresh = thresh;
7551                 wq_watchdog_reset_touched();
7552                 mod_timer(&wq_watchdog_timer, jiffies + thresh * HZ);
7553         }
7554 }
7555 
7556 static int wq_watchdog_param_set_thresh(const char *val,
7557                                         const struct kernel_param *kp)
7558 {
7559         unsigned long thresh;
7560         int ret;
7561 
7562         ret = kstrtoul(val, 0, &thresh);
7563         if (ret)
7564                 return ret;
7565 
7566         if (system_wq)
7567                 wq_watchdog_set_thresh(thresh);
7568         else
7569                 wq_watchdog_thresh = thresh;
7570 
7571         return 0;
7572 }
7573 
7574 static const struct kernel_param_ops wq_watchdog_thresh_ops = {
7575         .set    = wq_watchdog_param_set_thresh,
7576         .get    = param_get_ulong,
7577 };
7578 
7579 module_param_cb(watchdog_thresh, &wq_watchdog_thresh_ops, &wq_watchdog_thresh,
7580                 0644);
7581 
7582 static void wq_watchdog_init(void)
7583 {
7584         timer_setup(&wq_watchdog_timer, wq_watchdog_timer_fn, TIMER_DEFERRABLE);
7585         wq_watchdog_set_thresh(wq_watchdog_thresh);
7586 }
7587 
7588 #else   /* CONFIG_WQ_WATCHDOG */
7589 
7590 static inline void wq_watchdog_init(void) { }
7591 
7592 #endif  /* CONFIG_WQ_WATCHDOG */
7593 
7594 static void bh_pool_kick_normal(struct irq_work *irq_work)
7595 {
7596         raise_softirq_irqoff(TASKLET_SOFTIRQ);
7597 }
7598 
7599 static void bh_pool_kick_highpri(struct irq_work *irq_work)
7600 {
7601         raise_softirq_irqoff(HI_SOFTIRQ);
7602 }
7603 
7604 static void __init restrict_unbound_cpumask(const char *name, const struct cpumask *mask)
7605 {
7606         if (!cpumask_intersects(wq_unbound_cpumask, mask)) {
7607                 pr_warn("workqueue: Restricting unbound_cpumask (%*pb) with %s (%*pb) leaves no CPU, ignoring\n",
7608                         cpumask_pr_args(wq_unbound_cpumask), name, cpumask_pr_args(mask));
7609                 return;
7610         }
7611 
7612         cpumask_and(wq_unbound_cpumask, wq_unbound_cpumask, mask);
7613 }
7614 
7615 static void __init init_cpu_worker_pool(struct worker_pool *pool, int cpu, int nice)
7616 {
7617         BUG_ON(init_worker_pool(pool));
7618         pool->cpu = cpu;
7619         cpumask_copy(pool->attrs->cpumask, cpumask_of(cpu));
7620         cpumask_copy(pool->attrs->__pod_cpumask, cpumask_of(cpu));
7621         pool->attrs->nice = nice;
7622         pool->attrs->affn_strict = true;
7623         pool->node = cpu_to_node(cpu);
7624 
7625         /* alloc pool ID */
7626         mutex_lock(&wq_pool_mutex);
7627         BUG_ON(worker_pool_assign_id(pool));
7628         mutex_unlock(&wq_pool_mutex);
7629 }
7630 
7631 /**
7632  * workqueue_init_early - early init for workqueue subsystem
7633  *
7634  * This is the first step of three-staged workqueue subsystem initialization and
7635  * invoked as soon as the bare basics - memory allocation, cpumasks and idr are
7636  * up. It sets up all the data structures and system workqueues and allows early
7637  * boot code to create workqueues and queue/cancel work items. Actual work item
7638  * execution starts only after kthreads can be created and scheduled right
7639  * before early initcalls.
7640  */
7641 void __init workqueue_init_early(void)
7642 {
7643         struct wq_pod_type *pt = &wq_pod_types[WQ_AFFN_SYSTEM];
7644         int std_nice[NR_STD_WORKER_POOLS] = { 0, HIGHPRI_NICE_LEVEL };
7645         void (*irq_work_fns[2])(struct irq_work *) = { bh_pool_kick_normal,
7646                                                        bh_pool_kick_highpri };
7647         int i, cpu;
7648 
7649         BUILD_BUG_ON(__alignof__(struct pool_workqueue) < __alignof__(long long));
7650 
7651         BUG_ON(!alloc_cpumask_var(&wq_online_cpumask, GFP_KERNEL));
7652         BUG_ON(!alloc_cpumask_var(&wq_unbound_cpumask, GFP_KERNEL));
7653         BUG_ON(!alloc_cpumask_var(&wq_requested_unbound_cpumask, GFP_KERNEL));
7654         BUG_ON(!zalloc_cpumask_var(&wq_isolated_cpumask, GFP_KERNEL));
7655 
7656         cpumask_copy(wq_online_cpumask, cpu_online_mask);
7657         cpumask_copy(wq_unbound_cpumask, cpu_possible_mask);
7658         restrict_unbound_cpumask("HK_TYPE_WQ", housekeeping_cpumask(HK_TYPE_WQ));
7659         restrict_unbound_cpumask("HK_TYPE_DOMAIN", housekeeping_cpumask(HK_TYPE_DOMAIN));
7660         if (!cpumask_empty(&wq_cmdline_cpumask))
7661                 restrict_unbound_cpumask("workqueue.unbound_cpus", &wq_cmdline_cpumask);
7662 
7663         cpumask_copy(wq_requested_unbound_cpumask, wq_unbound_cpumask);
7664 
7665         pwq_cache = KMEM_CACHE(pool_workqueue, SLAB_PANIC);
7666 
7667         unbound_wq_update_pwq_attrs_buf = alloc_workqueue_attrs();
7668         BUG_ON(!unbound_wq_update_pwq_attrs_buf);
7669 
7670         /*
7671          * If nohz_full is enabled, set power efficient workqueue as unbound.
7672          * This allows workqueue items to be moved to HK CPUs.
7673          */
7674         if (housekeeping_enabled(HK_TYPE_TICK))
7675                 wq_power_efficient = true;
7676 
7677         /* initialize WQ_AFFN_SYSTEM pods */
7678         pt->pod_cpus = kcalloc(1, sizeof(pt->pod_cpus[0]), GFP_KERNEL);
7679         pt->pod_node = kcalloc(1, sizeof(pt->pod_node[0]), GFP_KERNEL);
7680         pt->cpu_pod = kcalloc(nr_cpu_ids, sizeof(pt->cpu_pod[0]), GFP_KERNEL);
7681         BUG_ON(!pt->pod_cpus || !pt->pod_node || !pt->cpu_pod);
7682 
7683         BUG_ON(!zalloc_cpumask_var_node(&pt->pod_cpus[0], GFP_KERNEL, NUMA_NO_NODE));
7684 
7685         pt->nr_pods = 1;
7686         cpumask_copy(pt->pod_cpus[0], cpu_possible_mask);
7687         pt->pod_node[0] = NUMA_NO_NODE;
7688         pt->cpu_pod[0] = 0;
7689 
7690         /* initialize BH and CPU pools */
7691         for_each_possible_cpu(cpu) {
7692                 struct worker_pool *pool;
7693 
7694                 i = 0;
7695                 for_each_bh_worker_pool(pool, cpu) {
7696                         init_cpu_worker_pool(pool, cpu, std_nice[i]);
7697                         pool->flags |= POOL_BH;
7698                         init_irq_work(bh_pool_irq_work(pool), irq_work_fns[i]);
7699                         i++;
7700                 }
7701 
7702                 i = 0;
7703                 for_each_cpu_worker_pool(pool, cpu)
7704                         init_cpu_worker_pool(pool, cpu, std_nice[i++]);
7705         }
7706 
7707         /* create default unbound and ordered wq attrs */
7708         for (i = 0; i < NR_STD_WORKER_POOLS; i++) {
7709                 struct workqueue_attrs *attrs;
7710 
7711                 BUG_ON(!(attrs = alloc_workqueue_attrs()));
7712                 attrs->nice = std_nice[i];
7713                 unbound_std_wq_attrs[i] = attrs;
7714 
7715                 /*
7716                  * An ordered wq should have only one pwq as ordering is
7717                  * guaranteed by max_active which is enforced by pwqs.
7718                  */
7719                 BUG_ON(!(attrs = alloc_workqueue_attrs()));
7720                 attrs->nice = std_nice[i];
7721                 attrs->ordered = true;
7722                 ordered_wq_attrs[i] = attrs;
7723         }
7724 
7725         system_wq = alloc_workqueue("events", 0, 0);
7726         system_highpri_wq = alloc_workqueue("events_highpri", WQ_HIGHPRI, 0);
7727         system_long_wq = alloc_workqueue("events_long", 0, 0);
7728         system_unbound_wq = alloc_workqueue("events_unbound", WQ_UNBOUND,
7729                                             WQ_MAX_ACTIVE);
7730         system_freezable_wq = alloc_workqueue("events_freezable",
7731                                               WQ_FREEZABLE, 0);
7732         system_power_efficient_wq = alloc_workqueue("events_power_efficient",
7733                                               WQ_POWER_EFFICIENT, 0);
7734         system_freezable_power_efficient_wq = alloc_workqueue("events_freezable_pwr_efficient",
7735                                               WQ_FREEZABLE | WQ_POWER_EFFICIENT,
7736                                               0);
7737         system_bh_wq = alloc_workqueue("events_bh", WQ_BH, 0);
7738         system_bh_highpri_wq = alloc_workqueue("events_bh_highpri",
7739                                                WQ_BH | WQ_HIGHPRI, 0);
7740         BUG_ON(!system_wq || !system_highpri_wq || !system_long_wq ||
7741                !system_unbound_wq || !system_freezable_wq ||
7742                !system_power_efficient_wq ||
7743                !system_freezable_power_efficient_wq ||
7744                !system_bh_wq || !system_bh_highpri_wq);
7745 }
7746 
7747 static void __init wq_cpu_intensive_thresh_init(void)
7748 {
7749         unsigned long thresh;
7750         unsigned long bogo;
7751 
7752         pwq_release_worker = kthread_create_worker(0, "pool_workqueue_release");
7753         BUG_ON(IS_ERR(pwq_release_worker));
7754 
7755         /* if the user set it to a specific value, keep it */
7756         if (wq_cpu_intensive_thresh_us != ULONG_MAX)
7757                 return;
7758 
7759         /*
7760          * The default of 10ms is derived from the fact that most modern (as of
7761          * 2023) processors can do a lot in 10ms and that it's just below what
7762          * most consider human-perceivable. However, the kernel also runs on a
7763          * lot slower CPUs including microcontrollers where the threshold is way
7764          * too low.
7765          *
7766          * Let's scale up the threshold upto 1 second if BogoMips is below 4000.
7767          * This is by no means accurate but it doesn't have to be. The mechanism
7768          * is still useful even when the threshold is fully scaled up. Also, as
7769          * the reports would usually be applicable to everyone, some machines
7770          * operating on longer thresholds won't significantly diminish their
7771          * usefulness.
7772          */
7773         thresh = 10 * USEC_PER_MSEC;
7774 
7775         /* see init/calibrate.c for lpj -> BogoMIPS calculation */
7776         bogo = max_t(unsigned long, loops_per_jiffy / 500000 * HZ, 1);
7777         if (bogo < 4000)
7778                 thresh = min_t(unsigned long, thresh * 4000 / bogo, USEC_PER_SEC);
7779 
7780         pr_debug("wq_cpu_intensive_thresh: lpj=%lu BogoMIPS=%lu thresh_us=%lu\n",
7781                  loops_per_jiffy, bogo, thresh);
7782 
7783         wq_cpu_intensive_thresh_us = thresh;
7784 }
7785 
7786 /**
7787  * workqueue_init - bring workqueue subsystem fully online
7788  *
7789  * This is the second step of three-staged workqueue subsystem initialization
7790  * and invoked as soon as kthreads can be created and scheduled. Workqueues have
7791  * been created and work items queued on them, but there are no kworkers
7792  * executing the work items yet. Populate the worker pools with the initial
7793  * workers and enable future kworker creations.
7794  */
7795 void __init workqueue_init(void)
7796 {
7797         struct workqueue_struct *wq;
7798         struct worker_pool *pool;
7799         int cpu, bkt;
7800 
7801         wq_cpu_intensive_thresh_init();
7802 
7803         mutex_lock(&wq_pool_mutex);
7804 
7805         /*
7806          * Per-cpu pools created earlier could be missing node hint. Fix them
7807          * up. Also, create a rescuer for workqueues that requested it.
7808          */
7809         for_each_possible_cpu(cpu) {
7810                 for_each_bh_worker_pool(pool, cpu)
7811                         pool->node = cpu_to_node(cpu);
7812                 for_each_cpu_worker_pool(pool, cpu)
7813                         pool->node = cpu_to_node(cpu);
7814         }
7815 
7816         list_for_each_entry(wq, &workqueues, list) {
7817                 WARN(init_rescuer(wq),
7818                      "workqueue: failed to create early rescuer for %s",
7819                      wq->name);
7820         }
7821 
7822         mutex_unlock(&wq_pool_mutex);
7823 
7824         /*
7825          * Create the initial workers. A BH pool has one pseudo worker that
7826          * represents the shared BH execution context and thus doesn't get
7827          * affected by hotplug events. Create the BH pseudo workers for all
7828          * possible CPUs here.
7829          */
7830         for_each_possible_cpu(cpu)
7831                 for_each_bh_worker_pool(pool, cpu)
7832                         BUG_ON(!create_worker(pool));
7833 
7834         for_each_online_cpu(cpu) {
7835                 for_each_cpu_worker_pool(pool, cpu) {
7836                         pool->flags &= ~POOL_DISASSOCIATED;
7837                         BUG_ON(!create_worker(pool));
7838                 }
7839         }
7840 
7841         hash_for_each(unbound_pool_hash, bkt, pool, hash_node)
7842                 BUG_ON(!create_worker(pool));
7843 
7844         wq_online = true;
7845         wq_watchdog_init();
7846 }
7847 
7848 /*
7849  * Initialize @pt by first initializing @pt->cpu_pod[] with pod IDs according to
7850  * @cpu_shares_pod(). Each subset of CPUs that share a pod is assigned a unique
7851  * and consecutive pod ID. The rest of @pt is initialized accordingly.
7852  */
7853 static void __init init_pod_type(struct wq_pod_type *pt,
7854                                  bool (*cpus_share_pod)(int, int))
7855 {
7856         int cur, pre, cpu, pod;
7857 
7858         pt->nr_pods = 0;
7859 
7860         /* init @pt->cpu_pod[] according to @cpus_share_pod() */
7861         pt->cpu_pod = kcalloc(nr_cpu_ids, sizeof(pt->cpu_pod[0]), GFP_KERNEL);
7862         BUG_ON(!pt->cpu_pod);
7863 
7864         for_each_possible_cpu(cur) {
7865                 for_each_possible_cpu(pre) {
7866                         if (pre >= cur) {
7867                                 pt->cpu_pod[cur] = pt->nr_pods++;
7868                                 break;
7869                         }
7870                         if (cpus_share_pod(cur, pre)) {
7871                                 pt->cpu_pod[cur] = pt->cpu_pod[pre];
7872                                 break;
7873                         }
7874                 }
7875         }
7876 
7877         /* init the rest to match @pt->cpu_pod[] */
7878         pt->pod_cpus = kcalloc(pt->nr_pods, sizeof(pt->pod_cpus[0]), GFP_KERNEL);
7879         pt->pod_node = kcalloc(pt->nr_pods, sizeof(pt->pod_node[0]), GFP_KERNEL);
7880         BUG_ON(!pt->pod_cpus || !pt->pod_node);
7881 
7882         for (pod = 0; pod < pt->nr_pods; pod++)
7883                 BUG_ON(!zalloc_cpumask_var(&pt->pod_cpus[pod], GFP_KERNEL));
7884 
7885         for_each_possible_cpu(cpu) {
7886                 cpumask_set_cpu(cpu, pt->pod_cpus[pt->cpu_pod[cpu]]);
7887                 pt->pod_node[pt->cpu_pod[cpu]] = cpu_to_node(cpu);
7888         }
7889 }
7890 
7891 static bool __init cpus_dont_share(int cpu0, int cpu1)
7892 {
7893         return false;
7894 }
7895 
7896 static bool __init cpus_share_smt(int cpu0, int cpu1)
7897 {
7898 #ifdef CONFIG_SCHED_SMT
7899         return cpumask_test_cpu(cpu0, cpu_smt_mask(cpu1));
7900 #else
7901         return false;
7902 #endif
7903 }
7904 
7905 static bool __init cpus_share_numa(int cpu0, int cpu1)
7906 {
7907         return cpu_to_node(cpu0) == cpu_to_node(cpu1);
7908 }
7909 
7910 /**
7911  * workqueue_init_topology - initialize CPU pods for unbound workqueues
7912  *
7913  * This is the third step of three-staged workqueue subsystem initialization and
7914  * invoked after SMP and topology information are fully initialized. It
7915  * initializes the unbound CPU pods accordingly.
7916  */
7917 void __init workqueue_init_topology(void)
7918 {
7919         struct workqueue_struct *wq;
7920         int cpu;
7921 
7922         init_pod_type(&wq_pod_types[WQ_AFFN_CPU], cpus_dont_share);
7923         init_pod_type(&wq_pod_types[WQ_AFFN_SMT], cpus_share_smt);
7924         init_pod_type(&wq_pod_types[WQ_AFFN_CACHE], cpus_share_cache);
7925         init_pod_type(&wq_pod_types[WQ_AFFN_NUMA], cpus_share_numa);
7926 
7927         wq_topo_initialized = true;
7928 
7929         mutex_lock(&wq_pool_mutex);
7930 
7931         /*
7932          * Workqueues allocated earlier would have all CPUs sharing the default
7933          * worker pool. Explicitly call unbound_wq_update_pwq() on all workqueue
7934          * and CPU combinations to apply per-pod sharing.
7935          */
7936         list_for_each_entry(wq, &workqueues, list) {
7937                 for_each_online_cpu(cpu)
7938                         unbound_wq_update_pwq(wq, cpu);
7939                 if (wq->flags & WQ_UNBOUND) {
7940                         mutex_lock(&wq->mutex);
7941                         wq_update_node_max_active(wq, -1);
7942                         mutex_unlock(&wq->mutex);
7943                 }
7944         }
7945 
7946         mutex_unlock(&wq_pool_mutex);
7947 }
7948 
7949 void __warn_flushing_systemwide_wq(void)
7950 {
7951         pr_warn("WARNING: Flushing system-wide workqueues will be prohibited in near future.\n");
7952         dump_stack();
7953 }
7954 EXPORT_SYMBOL(__warn_flushing_systemwide_wq);
7955 
7956 static int __init workqueue_unbound_cpus_setup(char *str)
7957 {
7958         if (cpulist_parse(str, &wq_cmdline_cpumask) < 0) {
7959                 cpumask_clear(&wq_cmdline_cpumask);
7960                 pr_warn("workqueue.unbound_cpus: incorrect CPU range, using default\n");
7961         }
7962 
7963         return 1;
7964 }
7965 __setup("workqueue.unbound_cpus=", workqueue_unbound_cpus_setup);
7966 

~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

kernel.org | git.kernel.org | LWN.net | Project Home | SVN repository | Mail admin

Linux® is a registered trademark of Linus Torvalds in the United States and other countries.
TOMOYO® is a registered trademark of NTT DATA CORPORATION.

sflogo.php