// SPDX-License-Identifier: GPL-2.0-only
/*
 * kernel/workqueue.c - generic async execution with shared worker pool
 *
 * Copyright (C) 2002		Ingo Molnar
 *
 *   Derived from the taskqueue/keventd code by:
 *     David Woodhouse <dwmw2@infradead.org>
 *     Andrew Morton
 *     Kai Petzke <wpp@marie.physik.tu-berlin.de>
 *     Theodore Ts'o <tytso@mit.edu>
 *
 * Made to use alloc_percpu by Christoph Lameter.
 *
 * Copyright (C) 2010		SUSE Linux Products GmbH
 * Copyright (C) 2010		Tejun Heo <tj@kernel.org>
 *
 * This is the generic async execution mechanism.  Work items are
 * executed in process context.  The worker pool is shared and
 * automatically managed.  There are two worker pools for each CPU (one for
 * normal work items and the other for high priority ones) and some extra
 * pools for workqueues which are not bound to any specific CPU - the
 * number of these backing pools is dynamic.
 *
 * Please read Documentation/core-api/workqueue.rst for details.
 */

#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/mempolicy.h>
#include <linux/freezer.h>
#include <linux/debug_locks.h>
#include <linux/lockdep.h>
#include <linux/idr.h>
#include <linux/jhash.h>
#include <linux/hashtable.h>
#include <linux/rculist.h>
#include <linux/nodemask.h>
#include <linux/moduleparam.h>
#include <linux/uaccess.h>
#include <linux/sched/isolation.h>
#include <linux/sched/debug.h>
#include <linux/nmi.h>
#include <linux/kvm_para.h>
#include <linux/delay.h>
#include <linux/irq_work.h>

#include "workqueue_internal.h"

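/*
 * Illustrative sketch (not part of this file's logic): how a typical user
 * drives the mechanism implemented below.  The names example_work and
 * example_work_fn are hypothetical.
 *
 *	static void example_work_fn(struct work_struct *work)
 *	{
 *		pr_info("executing asynchronously in process context\n");
 *	}
 *
 *	static DECLARE_WORK(example_work, example_work_fn);
 *
 *	// queue on system_wq from (almost) any context, including hardirq
 *	schedule_work(&example_work);
 *	// wait for the handler to finish; may sleep
 *	flush_work(&example_work);
 */
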
enum worker_pool_flags {
	/*
	 * worker_pool flags
	 *
	 * A bound pool is either associated or disassociated with its CPU.
	 * While associated (!DISASSOCIATED), all workers are bound to the
	 * CPU and none has %WORKER_UNBOUND set and concurrency management
	 * is in effect.
	 *
	 * While DISASSOCIATED, the cpu may be offline and all workers have
	 * %WORKER_UNBOUND set and concurrency management disabled, and may
	 * be executing on any CPU.  The pool behaves as an unbound one.
	 *
	 * Note that DISASSOCIATED should be flipped only while holding
	 * wq_pool_attach_mutex to avoid changing binding state while
	 * worker_attach_to_pool() is in progress.
	 *
	 * As there can only be one concurrent BH execution context per CPU, the
	 * BH pool is per-CPU and always DISASSOCIATED.
	 */
	POOL_BH			= 1 << 0,	/* is a BH pool */
	POOL_MANAGER_ACTIVE	= 1 << 1,	/* being managed */
	POOL_DISASSOCIATED	= 1 << 2,	/* cpu can't serve workers */
	POOL_BH_DRAINING	= 1 << 3,	/* draining after CPU offline */
};

enum worker_flags {
	/* worker flags */
	WORKER_DIE		= 1 << 1,	/* die die die */
	WORKER_IDLE		= 1 << 2,	/* is idle */
	WORKER_PREP		= 1 << 3,	/* preparing to run works */
	WORKER_CPU_INTENSIVE	= 1 << 6,	/* cpu intensive */
	WORKER_UNBOUND		= 1 << 7,	/* worker is unbound */
	WORKER_REBOUND		= 1 << 8,	/* worker was rebound */

	WORKER_NOT_RUNNING	= WORKER_PREP | WORKER_CPU_INTENSIVE |
				  WORKER_UNBOUND | WORKER_REBOUND,
};

enum work_cancel_flags {
	WORK_CANCEL_DELAYED	= 1 << 0,	/* canceling a delayed_work */
	WORK_CANCEL_DISABLE	= 1 << 1,	/* canceling to disable */
};

enum wq_internal_consts {
	NR_STD_WORKER_POOLS	= 2,		/* # standard pools per cpu */

	UNBOUND_POOL_HASH_ORDER	= 6,		/* hashed by pool->attrs */
	BUSY_WORKER_HASH_ORDER	= 6,		/* 64 pointers */

	MAX_IDLE_WORKERS_RATIO	= 4,		/* 1/4 of busy can be idle */
	IDLE_WORKER_TIMEOUT	= 300 * HZ,	/* keep idle ones for 5 mins */

	MAYDAY_INITIAL_TIMEOUT	= HZ / 100 >= 2 ? HZ / 100 : 2,
						/* call for help after 10ms
						   (min two ticks) */
	MAYDAY_INTERVAL		= HZ / 10,	/* and then every 100ms */
	CREATE_COOLDOWN		= HZ,		/* time to breathe after fail */

	/*
	 * Rescue workers are used only on emergencies and shared by
	 * all cpus.  Give MIN_NICE.
	 */
	RESCUER_NICE_LEVEL	= MIN_NICE,
	HIGHPRI_NICE_LEVEL	= MIN_NICE,

	WQ_NAME_LEN		= 32,
	WORKER_ID_LEN		= 10 + WQ_NAME_LEN, /* "kworker/R-" + WQ_NAME_LEN */
};

/*
 * We don't want to trap softirq for too long. See MAX_SOFTIRQ_TIME and
 * MAX_SOFTIRQ_RESTART in kernel/softirq.c. These are macros because
 * msecs_to_jiffies() can't be an initializer.
 */
#define BH_WORKER_JIFFIES	msecs_to_jiffies(2)
#define BH_WORKER_RESTARTS	10

/*
 * Structure fields follow one of the following exclusion rules.
 *
 * I: Modifiable by initialization/destruction paths and read-only for
 *    everyone else.
 *
 * P: Preemption protected.  Disabling preemption is enough and should
 *    only be modified and accessed from the local cpu.
 *
 * L: pool->lock protected.  Access with pool->lock held.
 *
 * LN: pool->lock and wq_node_nr_active->lock protected for writes. Either for
 *     reads.
 *
 * K: Only modified by worker while holding pool->lock. Can be safely read by
 *    self, while holding pool->lock or from IRQ context if %current is the
 *    kworker.
 *
 * S: Only modified by worker self.
 *
 * A: wq_pool_attach_mutex protected.
 *
 * PL: wq_pool_mutex protected.
 *
 * PR: wq_pool_mutex protected for writes.  RCU protected for reads.
 *
 * PW: wq_pool_mutex and wq->mutex protected for writes.  Either for reads.
 *
 * PWR: wq_pool_mutex and wq->mutex protected for writes.  Either or
 *      RCU for reads.
 *
 * WQ: wq->mutex protected.
 *
 * WR: wq->mutex protected for writes.  RCU protected for reads.
 *
 * WO: wq->mutex protected for writes. Updated with WRITE_ONCE() and can be
 *     read with READ_ONCE() without locking.
 *
 * MD: wq_mayday_lock protected.
 *
 * WD: Used internally by the watchdog.
 */

/* struct worker is defined in workqueue_internal.h */

struct worker_pool {
	raw_spinlock_t		lock;		/* the pool lock */
	int			cpu;		/* I: the associated cpu */
	int			node;		/* I: the associated node ID */
	int			id;		/* I: pool ID */
	unsigned int		flags;		/* L: flags */

	unsigned long		watchdog_ts;	/* L: watchdog timestamp */
	bool			cpu_stall;	/* WD: stalled cpu bound pool */

	/*
	 * The counter is incremented in a process context on the local CPU
	 * w/ preemption disabled, and decremented or reset in the same
	 * context but w/ pool->lock held. The readers grab pool->lock and
	 * are guaranteed to see if the counter reached zero.
	 */
	int			nr_running;

	struct list_head	worklist;	/* L: list of pending works */

	int			nr_workers;	/* L: total number of workers */
	int			nr_idle;	/* L: currently idle workers */

	struct list_head	idle_list;	/* L: list of idle workers */
	struct timer_list	idle_timer;	/* L: worker idle timeout */
	struct work_struct	idle_cull_work;	/* L: worker idle cleanup */

	struct timer_list	mayday_timer;	/* L: SOS timer for workers */

	/* a worker is either on busy_hash or idle_list, or the manager */
	DECLARE_HASHTABLE(busy_hash, BUSY_WORKER_HASH_ORDER);
						/* L: hash of busy workers */

	struct worker		*manager;	/* L: purely informational */
	struct list_head	workers;	/* A: attached workers */

	struct ida		worker_ida;	/* worker IDs for task name */

	struct workqueue_attrs	*attrs;		/* I: worker attributes */
	struct hlist_node	hash_node;	/* PL: unbound_pool_hash node */
	int			refcnt;		/* PL: refcnt for unbound pools */

	/*
	 * Destruction of pool is RCU protected to allow dereferences
	 * from get_work_pool().
	 */
	struct rcu_head		rcu;
};

/*
 * Per-pool_workqueue statistics. These can be monitored using
 * tools/workqueue/wq_monitor.py.
 */
enum pool_workqueue_stats {
	PWQ_STAT_STARTED,	/* work items started execution */
	PWQ_STAT_COMPLETED,	/* work items completed execution */
	PWQ_STAT_CPU_TIME,	/* total CPU time consumed */
	PWQ_STAT_CPU_INTENSIVE,	/* wq_cpu_intensive_thresh_us violations */
	PWQ_STAT_CM_WAKEUP,	/* concurrency-management worker wakeups */
	PWQ_STAT_REPATRIATED,	/* unbound workers brought back into CM */
	PWQ_STAT_MAYDAY,	/* maydays to rescuer */
	PWQ_STAT_RESCUED,	/* linked work items executed by rescuer */

	PWQ_NR_STATS,
};

/*
 * The per-pool workqueue.  While queued, bits below WORK_PWQ_SHIFT
 * of work_struct->data are used for flags and the remaining high bits
 * point to the pwq; thus, pwqs need to be aligned at two's power of the
 * number of flag bits.
 */
struct pool_workqueue {
	struct worker_pool	*pool;		/* I: the associated pool */
	struct workqueue_struct	*wq;		/* I: the owning workqueue */
	int			work_color;	/* L: current color */
	int			flush_color;	/* L: flushing color */
	int			refcnt;		/* L: reference count */
	int			nr_in_flight[WORK_NR_COLORS];
						/* L: nr of in_flight works */
	bool			plugged;	/* L: execution suspended */

	/*
	 * nr_active management and WORK_STRUCT_INACTIVE:
	 *
	 * When pwq->nr_active >= max_active, new work item is queued to
	 * pwq->inactive_works instead of pool->worklist and marked with
	 * WORK_STRUCT_INACTIVE.
	 *
	 * All work items marked with WORK_STRUCT_INACTIVE do not participate in
	 * nr_active and all work items in pwq->inactive_works are marked with
	 * WORK_STRUCT_INACTIVE. But not all WORK_STRUCT_INACTIVE work items are
	 * in pwq->inactive_works. Some of them are ready to run in
	 * pool->worklist or worker->scheduled. Those work items are only struct
	 * wq_barrier which is used for flush_work() and should not participate
	 * in nr_active. For non-barrier work item, it is marked with
	 * WORK_STRUCT_INACTIVE iff it is in pwq->inactive_works.
	 */
	int			nr_active;	/* L: nr of active works */
	struct list_head	inactive_works;	/* L: inactive works */
	struct list_head	pending_node;	/* LN: node on wq_node_nr_active->pending_pwqs */
	struct list_head	pwqs_node;	/* WR: node on wq->pwqs */
	struct list_head	mayday_node;	/* MD: node on wq->maydays */

	u64			stats[PWQ_NR_STATS];

	/*
	 * Release of unbound pwq is punted to a kthread_worker. See put_pwq()
	 * and pwq_release_workfn() for details. pool_workqueue itself is also
	 * RCU protected so that the first pwq can be determined without
	 * grabbing wq->mutex.
	 */
	struct kthread_work	release_work;
	struct rcu_head		rcu;
} __aligned(1 << WORK_STRUCT_PWQ_SHIFT);

/*
 * Structure used to wait for workqueue flush.
 */
struct wq_flusher {
	struct list_head	list;		/* WQ: list of flushers */
	int			flush_color;	/* WQ: flush color waiting for */
	struct completion	done;		/* flush completion */
};

struct wq_device;

/*
 * Unlike in a per-cpu workqueue where max_active limits its concurrency level
 * on each CPU, in an unbound workqueue, max_active applies to the whole system.
 * As sharing a single nr_active across multiple sockets can be very expensive,
 * the counting and enforcement is per NUMA node.
 *
 * The following struct is used to enforce per-node max_active. When a pwq wants
 * to start executing a work item, it should increment ->nr using
 * tryinc_node_nr_active(). If acquisition fails due to ->nr already being over
 * ->max, the pwq is queued on ->pending_pwqs. As in-flight work items finish
 * and decrement ->nr, node_activate_pending_pwq() activates the pending pwqs in
 * round-robin order.
 */
struct wq_node_nr_active {
	int			max;		/* per-node max_active */
	atomic_t		nr;		/* per-node nr_active */
	raw_spinlock_t		lock;		/* nests inside pool locks */
	struct list_head	pending_pwqs;	/* LN: pwqs with inactive works */
};

/*
 * The externally visible workqueue.  It relays the issued work items to
 * the appropriate worker_pool through its pool_workqueues.
 */
struct workqueue_struct {
	struct list_head	pwqs;		/* WR: all pwqs of this wq */
	struct list_head	list;		/* PR: list of all workqueues */

	struct mutex		mutex;		/* protects this wq */
	int			work_color;	/* WQ: current work color */
	int			flush_color;	/* WQ: current flush color */
	atomic_t		nr_pwqs_to_flush; /* flush in progress */
	struct wq_flusher	*first_flusher;	/* WQ: first flusher */
	struct list_head	flusher_queue;	/* WQ: flush waiters */
	struct list_head	flusher_overflow; /* WQ: flush overflow list */

	struct list_head	maydays;	/* MD: pwqs requesting rescue */
	struct worker		*rescuer;	/* MD: rescue worker */

	int			nr_drainers;	/* WQ: drain in progress */

	/* See alloc_workqueue() function comment for info on min/max_active */
	int			max_active;	/* WO: max active works */
	int			min_active;	/* WO: min active works */
	int			saved_max_active; /* WQ: saved max_active */
	int			saved_min_active; /* WQ: saved min_active */

	struct workqueue_attrs	*unbound_attrs;	/* PW: only for unbound wqs */
	struct pool_workqueue __rcu *dfl_pwq;	/* PW: only for unbound wqs */

#ifdef CONFIG_SYSFS
	struct wq_device	*wq_dev;	/* I: for sysfs interface */
#endif
#ifdef CONFIG_LOCKDEP
	char			*lock_name;
	struct lock_class_key	key;
	struct lockdep_map	lockdep_map;
#endif
	char			name[WQ_NAME_LEN]; /* I: workqueue name */

	/*
	 * Destruction of workqueue_struct is RCU protected to allow walking
	 * the workqueues list without grabbing wq_pool_mutex.
	 * This is used to dump all workqueues from sysrq.
	 */
	struct rcu_head		rcu;

	/* hot fields used during command issue, aligned to cacheline */
	unsigned int		flags ____cacheline_aligned; /* WQ: WQ_* flags */
	struct pool_workqueue __rcu * __percpu *cpu_pwq; /* I: per-cpu pwqs */
	struct wq_node_nr_active *node_nr_active[]; /* I: per-node nr_active */
};

/*
 * Each pod type describes how CPUs should be grouped for unbound workqueues.
 * See the comment above workqueue_attrs->affn_scope.
 */
struct wq_pod_type {
	int			nr_pods;	/* number of pods */
	cpumask_var_t		*pod_cpus;	/* pod -> cpus */
	int			*pod_node;	/* pod -> node */
	int			*cpu_pod;	/* cpu -> pod */
};

struct work_offq_data {
	u32			pool_id;
	u32			disable;
	u32			flags;
};

static const char *wq_affn_names[WQ_AFFN_NR_TYPES] = {
	[WQ_AFFN_DFL]		= "default",
	[WQ_AFFN_CPU]		= "cpu",
	[WQ_AFFN_SMT]		= "smt",
	[WQ_AFFN_CACHE]		= "cache",
	[WQ_AFFN_NUMA]		= "numa",
	[WQ_AFFN_SYSTEM]	= "system",
};

/*
 * Per-cpu work items which run for longer than the following threshold are
 * automatically considered CPU intensive and excluded from concurrency
 * management to prevent them from noticeably delaying other per-cpu work items.
 * ULONG_MAX indicates that the user hasn't overridden it with a boot parameter.
 * The actual value is initialized in wq_cpu_intensive_thresh_init().
 */
static unsigned long wq_cpu_intensive_thresh_us = ULONG_MAX;
module_param_named(cpu_intensive_thresh_us, wq_cpu_intensive_thresh_us, ulong, 0644);
#ifdef CONFIG_WQ_CPU_INTENSIVE_REPORT
static unsigned int wq_cpu_intensive_warning_thresh = 4;
module_param_named(cpu_intensive_warning_thresh, wq_cpu_intensive_warning_thresh, uint, 0644);
#endif

/* see the comment above the definition of WQ_POWER_EFFICIENT */
static bool wq_power_efficient = IS_ENABLED(CONFIG_WQ_POWER_EFFICIENT_DEFAULT);
module_param_named(power_efficient, wq_power_efficient, bool, 0444);

static bool wq_online;			/* can kworkers be created yet? */
static bool wq_topo_initialized __read_mostly = false;

static struct kmem_cache *pwq_cache;

static struct wq_pod_type wq_pod_types[WQ_AFFN_NR_TYPES];
static enum wq_affn_scope wq_affn_dfl = WQ_AFFN_CACHE;

/* buf for wq_update_unbound_pod_attrs(), protected by CPU hotplug exclusion */
static struct workqueue_attrs *unbound_wq_update_pwq_attrs_buf;

static DEFINE_MUTEX(wq_pool_mutex);	/* protects pools and workqueues list */
static DEFINE_MUTEX(wq_pool_attach_mutex); /* protects worker attach/detach */
static DEFINE_RAW_SPINLOCK(wq_mayday_lock);	/* protects wq->maydays list */
/* wait for manager to go away */
static struct rcuwait manager_wait = __RCUWAIT_INITIALIZER(manager_wait);

static LIST_HEAD(workqueues);		/* PR: list of all workqueues */
static bool workqueue_freezing;		/* PL: have wqs started freezing? */

/* PL: mirror the cpu_online_mask excluding the CPU in the midst of hotplugging */
static cpumask_var_t wq_online_cpumask;

/* PL&A: allowable cpus for unbound wqs and work items */
static cpumask_var_t wq_unbound_cpumask;

/* PL: user requested unbound cpumask via sysfs */
static cpumask_var_t wq_requested_unbound_cpumask;

/* PL: isolated cpumask to be excluded from unbound cpumask */
static cpumask_var_t wq_isolated_cpumask;

/* to further constrain wq_unbound_cpumask by the cmdline parameter */
static struct cpumask wq_cmdline_cpumask __initdata;

/* CPU where unbound work was last round robin scheduled from this CPU */
static DEFINE_PER_CPU(int, wq_rr_cpu_last);

/*
 * Local execution of unbound work items is no longer guaranteed.  The
 * following always forces round-robin CPU selection on unbound work items
 * to uncover usages which depend on it.
 */
#ifdef CONFIG_DEBUG_WQ_FORCE_RR_CPU
static bool wq_debug_force_rr_cpu = true;
#else
static bool wq_debug_force_rr_cpu = false;
#endif
module_param_named(debug_force_rr_cpu, wq_debug_force_rr_cpu, bool, 0644);

/* to raise softirq for the BH worker pools on other CPUs */
static DEFINE_PER_CPU_SHARED_ALIGNED(struct irq_work [NR_STD_WORKER_POOLS], bh_pool_irq_works);

/* the BH worker pools */
static DEFINE_PER_CPU_SHARED_ALIGNED(struct worker_pool [NR_STD_WORKER_POOLS], bh_worker_pools);

/* the per-cpu worker pools */
static DEFINE_PER_CPU_SHARED_ALIGNED(struct worker_pool [NR_STD_WORKER_POOLS], cpu_worker_pools);

static DEFINE_IDR(worker_pool_idr);	/* PR: idr of all pools */

/* PL: hash of all unbound pools keyed by pool->attrs */
static DEFINE_HASHTABLE(unbound_pool_hash, UNBOUND_POOL_HASH_ORDER);

/* I: attributes used when instantiating standard unbound pools on demand */
static struct workqueue_attrs *unbound_std_wq_attrs[NR_STD_WORKER_POOLS];

/* I: attributes used when instantiating ordered pools on demand */
static struct workqueue_attrs *ordered_wq_attrs[NR_STD_WORKER_POOLS];

/*
 * I: kthread_worker to release pwq's. pwq release needs to be bounced to a
 * process context while holding a pool lock. Bounce to a dedicated kthread
 * worker to avoid A-A deadlocks.
 */
static struct kthread_worker *pwq_release_worker __ro_after_init;

struct workqueue_struct *system_wq __ro_after_init;
EXPORT_SYMBOL(system_wq);
struct workqueue_struct *system_highpri_wq __ro_after_init;
EXPORT_SYMBOL_GPL(system_highpri_wq);
struct workqueue_struct *system_long_wq __ro_after_init;
EXPORT_SYMBOL_GPL(system_long_wq);
struct workqueue_struct *system_unbound_wq __ro_after_init;
EXPORT_SYMBOL_GPL(system_unbound_wq);
struct workqueue_struct *system_freezable_wq __ro_after_init;
EXPORT_SYMBOL_GPL(system_freezable_wq);
struct workqueue_struct *system_power_efficient_wq __ro_after_init;
EXPORT_SYMBOL_GPL(system_power_efficient_wq);
struct workqueue_struct *system_freezable_power_efficient_wq __ro_after_init;
EXPORT_SYMBOL_GPL(system_freezable_power_efficient_wq);
struct workqueue_struct *system_bh_wq;
EXPORT_SYMBOL_GPL(system_bh_wq);
struct workqueue_struct *system_bh_highpri_wq;
EXPORT_SYMBOL_GPL(system_bh_highpri_wq);

static int worker_thread(void *__worker);
static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
static void show_pwq(struct pool_workqueue *pwq);
static void show_one_worker_pool(struct worker_pool *pool);

#define CREATE_TRACE_POINTS
#include <trace/events/workqueue.h>

#define assert_rcu_or_pool_mutex()					\
	RCU_LOCKDEP_WARN(!rcu_read_lock_any_held() &&			\
			 !lockdep_is_held(&wq_pool_mutex),		\
			 "RCU or wq_pool_mutex should be held")

#define assert_rcu_or_wq_mutex_or_pool_mutex(wq)			\
	RCU_LOCKDEP_WARN(!rcu_read_lock_any_held() &&			\
			 !lockdep_is_held(&wq->mutex) &&		\
			 !lockdep_is_held(&wq_pool_mutex),		\
			 "RCU, wq->mutex or wq_pool_mutex should be held")

#define for_each_bh_worker_pool(pool, cpu)				\
	for ((pool) = &per_cpu(bh_worker_pools, cpu)[0];		\
	     (pool) < &per_cpu(bh_worker_pools, cpu)[NR_STD_WORKER_POOLS]; \
	     (pool)++)

#define for_each_cpu_worker_pool(pool, cpu)				\
	for ((pool) = &per_cpu(cpu_worker_pools, cpu)[0];		\
	     (pool) < &per_cpu(cpu_worker_pools, cpu)[NR_STD_WORKER_POOLS]; \
	     (pool)++)

/**
 * for_each_pool - iterate through all worker_pools in the system
 * @pool: iteration cursor
 * @pi: integer used for iteration
 *
 * This must be called either with wq_pool_mutex held or RCU read
 * locked.  If the pool needs to be used beyond the locking in effect, the
 * caller is responsible for guaranteeing that the pool stays online.
 *
 * The if/else clause exists only for the lockdep assertion and can be
 * ignored.
 */
#define for_each_pool(pool, pi)						\
	idr_for_each_entry(&worker_pool_idr, pool, pi)			\
		if (({ assert_rcu_or_pool_mutex(); false; })) { }	\
		else

/**
 * for_each_pool_worker - iterate through all workers of a worker_pool
 * @worker: iteration cursor
 * @pool: worker_pool to iterate workers of
 *
 * This must be called with wq_pool_attach_mutex.
 *
 * The if/else clause exists only for the lockdep assertion and can be
 * ignored.
 */
#define for_each_pool_worker(worker, pool)				\
	list_for_each_entry((worker), &(pool)->workers, node)		\
		if (({ lockdep_assert_held(&wq_pool_attach_mutex); false; })) { } \
		else

/**
 * for_each_pwq - iterate through all pool_workqueues of the specified workqueue
 * @pwq: iteration cursor
 * @wq: the target workqueue
 *
 * This must be called either with wq->mutex held or RCU read locked.
 * If the pwq needs to be used beyond the locking in effect, the caller is
 * responsible for guaranteeing that the pwq stays online.
 *
 * The if/else clause exists only for the lockdep assertion and can be
 * ignored.
 */
#define for_each_pwq(pwq, wq)						\
	list_for_each_entry_rcu((pwq), &(wq)->pwqs, pwqs_node,		\
				 lockdep_is_held(&(wq->mutex)))

#ifdef CONFIG_DEBUG_OBJECTS_WORK

static const struct debug_obj_descr work_debug_descr;

static void *work_debug_hint(void *addr)
{
	return ((struct work_struct *) addr)->func;
}

static bool work_is_static_object(void *addr)
{
	struct work_struct *work = addr;

	return test_bit(WORK_STRUCT_STATIC_BIT, work_data_bits(work));
}

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static bool work_fixup_init(void *addr, enum debug_obj_state state)
{
	struct work_struct *work = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		cancel_work_sync(work);
		debug_object_init(work, &work_debug_descr);
		return true;
	default:
		return false;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static bool work_fixup_free(void *addr, enum debug_obj_state state)
{
	struct work_struct *work = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		cancel_work_sync(work);
		debug_object_free(work, &work_debug_descr);
		return true;
	default:
		return false;
	}
}

static const struct debug_obj_descr work_debug_descr = {
	.name		= "work_struct",
	.debug_hint	= work_debug_hint,
	.is_static_object = work_is_static_object,
	.fixup_init	= work_fixup_init,
	.fixup_free	= work_fixup_free,
};

static inline void debug_work_activate(struct work_struct *work)
{
	debug_object_activate(work, &work_debug_descr);
}

static inline void debug_work_deactivate(struct work_struct *work)
{
	debug_object_deactivate(work, &work_debug_descr);
}

void __init_work(struct work_struct *work, int onstack)
{
	if (onstack)
		debug_object_init_on_stack(work, &work_debug_descr);
	else
		debug_object_init(work, &work_debug_descr);
}
EXPORT_SYMBOL_GPL(__init_work);

void destroy_work_on_stack(struct work_struct *work)
{
	debug_object_free(work, &work_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_work_on_stack);

void destroy_delayed_work_on_stack(struct delayed_work *work)
{
	destroy_timer_on_stack(&work->timer);
	debug_object_free(&work->work, &work_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_delayed_work_on_stack);

#else
static inline void debug_work_activate(struct work_struct *work) { }
static inline void debug_work_deactivate(struct work_struct *work) { }
#endif

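/*
 * Illustrative sketch: the debugobjects helpers above pair with the *_ONSTACK
 * initializers.  A caller that builds a work item on its own stack should
 * destroy it before returning.  example_sync_op() and example_work_fn() are
 * hypothetical names.
 *
 *	static void example_sync_op(void)
 *	{
 *		struct work_struct work;
 *
 *		INIT_WORK_ONSTACK(&work, example_work_fn);
 *		schedule_work(&work);
 *		flush_work(&work);
 *		destroy_work_on_stack(&work);
 *	}
 */
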
/**
 * worker_pool_assign_id - allocate ID and assign it to @pool
 * @pool: the pool pointer of interest
 *
 * Returns 0 if ID in [0, WORK_OFFQ_POOL_NONE) is allocated and assigned
 * successfully, -errno on failure.
 */
static int worker_pool_assign_id(struct worker_pool *pool)
{
	int ret;

	lockdep_assert_held(&wq_pool_mutex);

	ret = idr_alloc(&worker_pool_idr, pool, 0, WORK_OFFQ_POOL_NONE,
			GFP_KERNEL);
	if (ret >= 0) {
		pool->id = ret;
		return 0;
	}
	return ret;
}

static struct pool_workqueue __rcu **
unbound_pwq_slot(struct workqueue_struct *wq, int cpu)
{
	if (cpu >= 0)
		return per_cpu_ptr(wq->cpu_pwq, cpu);
	else
		return &wq->dfl_pwq;
}

/* @cpu < 0 for dfl_pwq */
static struct pool_workqueue *unbound_pwq(struct workqueue_struct *wq, int cpu)
{
	return rcu_dereference_check(*unbound_pwq_slot(wq, cpu),
				     lockdep_is_held(&wq_pool_mutex) ||
				     lockdep_is_held(&wq->mutex));
}

/**
 * unbound_effective_cpumask - effective cpumask of an unbound workqueue
 * @wq: workqueue of interest
 *
 * @wq->unbound_attrs->cpumask contains the cpumask requested by the user which
 * is masked with wq_unbound_cpumask to determine the effective cpumask. The
 * default pwq is always mapped to the pool with the current effective cpumask.
 */
static struct cpumask *unbound_effective_cpumask(struct workqueue_struct *wq)
{
	return unbound_pwq(wq, -1)->pool->attrs->__pod_cpumask;
}

static unsigned int work_color_to_flags(int color)
{
	return color << WORK_STRUCT_COLOR_SHIFT;
}

static int get_work_color(unsigned long work_data)
{
	return (work_data >> WORK_STRUCT_COLOR_SHIFT) &
		((1 << WORK_STRUCT_COLOR_BITS) - 1);
}

static int work_next_color(int color)
{
	return (color + 1) % WORK_NR_COLORS;
}

static unsigned long pool_offq_flags(struct worker_pool *pool)
{
	return (pool->flags & POOL_BH) ? WORK_OFFQ_BH : 0;
}

/*
 * While queued, %WORK_STRUCT_PWQ is set and non flag bits of a work's data
 * contain the pointer to the queued pwq.  Once execution starts, the flag
 * is cleared and the high bits contain OFFQ flags and pool ID.
 *
 * set_work_pwq(), set_work_pool_and_clear_pending() and friends can be used
 * to set the pwq, pool or clear work->data. These functions should only be
 * called while the work is owned - ie. while the PENDING bit is set.
 *
 * get_work_pool() and get_work_pwq() can be used to obtain the pool or pwq
 * corresponding to a work.  Pool is available once the work has been
 * queued anywhere after initialization until it is sync canceled. pwq is
 * available only while the work item is queued.
 */
static inline void set_work_data(struct work_struct *work, unsigned long data)
{
	WARN_ON_ONCE(!work_pending(work));
	atomic_long_set(&work->data, data | work_static(work));
}

static void set_work_pwq(struct work_struct *work, struct pool_workqueue *pwq,
			 unsigned long flags)
{
	set_work_data(work, (unsigned long)pwq | WORK_STRUCT_PENDING |
		      WORK_STRUCT_PWQ | flags);
}

static void set_work_pool_and_keep_pending(struct work_struct *work,
					   int pool_id, unsigned long flags)
{
	set_work_data(work, ((unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT) |
		      WORK_STRUCT_PENDING | flags);
}

static void set_work_pool_and_clear_pending(struct work_struct *work,
					    int pool_id, unsigned long flags)
{
	/*
	 * The following wmb is paired with the implied mb in
	 * test_and_set_bit(PENDING) and ensures all updates to @work made
	 * here are visible to and precede any updates by the next PENDING
	 * owner.
	 */
	smp_wmb();
	set_work_data(work, ((unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT) |
		      flags);
	/*
	 * The following mb guarantees that previous clear of a PENDING bit
	 * will not be reordered with any speculative LOADS or STORES from
	 * work->current_func, which is executed afterwards.  This possible
	 * reordering can lead to a missed execution on attempt to queue
	 * the same @work.  E.g. consider this case:
	 *
	 *   CPU#0                         CPU#1
	 *   ----------------------------  --------------------------------
	 *
	 * 1  STORE event_indicated
	 * 2  queue_work_on() {
	 * 3    test_and_set_bit(PENDING)
	 * 4 }                             set_..._and_clear_pending() {
	 * 5                                 set_work_data() # clear bit
	 * 6                                 smp_mb()
	 * 7                               work->current_func() {
	 * 8                                 LOAD event_indicated
	 *                                 }
	 *
	 * Without an explicit full barrier speculative LOAD on line 8 can
	 * be executed before CPU#0 does STORE on line 1.  If that happens,
	 * CPU#0 observes the PENDING bit is still set and new execution of
	 * a @work is not queued in a hope, that CPU#1 will eventually
	 * finish the queued @work.  Meanwhile CPU#1 does not see
	 * event_indicated is set, because speculative LOAD was executed
	 * before actual STORE.
	 */
	smp_mb();
}

static inline struct pool_workqueue *work_struct_pwq(unsigned long data)
{
	return (struct pool_workqueue *)(data & WORK_STRUCT_PWQ_MASK);
}

static struct pool_workqueue *get_work_pwq(struct work_struct *work)
{
	unsigned long data = atomic_long_read(&work->data);

	if (data & WORK_STRUCT_PWQ)
		return work_struct_pwq(data);
	else
		return NULL;
}

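/*
 * Illustrative sketch of the work->data decoding done by the helpers around
 * here: while %WORK_STRUCT_PWQ is set, the non-flag bits hold a pwq pointer;
 * otherwise the high bits hold the last pool ID plus OFFQ flags.
 *
 *	unsigned long data = atomic_long_read(&work->data);
 *
 *	if (data & WORK_STRUCT_PWQ)
 *		pwq = work_struct_pwq(data);			// queued
 *	else
 *		pool_id = data >> WORK_OFFQ_POOL_SHIFT;		// off queue
 */
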
/**
 * get_work_pool - return the worker_pool a given work was associated with
 * @work: the work item of interest
 *
 * Pools are created and destroyed under wq_pool_mutex, and allows read
 * access under RCU read lock.  As such, this function should be
 * called under wq_pool_mutex or inside of a rcu_read_lock() region.
 *
 * All fields of the returned pool are accessible as long as the above
 * mentioned locking is in effect.  If the returned pool needs to be used
 * beyond the critical section, the caller is responsible for ensuring the
 * returned pool is and stays online.
 *
 * Return: The worker_pool @work was last associated with.  %NULL if none.
 */
static struct worker_pool *get_work_pool(struct work_struct *work)
{
	unsigned long data = atomic_long_read(&work->data);
	int pool_id;

	assert_rcu_or_pool_mutex();

	if (data & WORK_STRUCT_PWQ)
		return work_struct_pwq(data)->pool;

	pool_id = data >> WORK_OFFQ_POOL_SHIFT;
	if (pool_id == WORK_OFFQ_POOL_NONE)
		return NULL;

	return idr_find(&worker_pool_idr, pool_id);
}

static unsigned long shift_and_mask(unsigned long v, u32 shift, u32 bits)
{
	return (v >> shift) & ((1U << bits) - 1);
}

static void work_offqd_unpack(struct work_offq_data *offqd, unsigned long data)
{
	WARN_ON_ONCE(data & WORK_STRUCT_PWQ);

	offqd->pool_id = shift_and_mask(data, WORK_OFFQ_POOL_SHIFT,
					WORK_OFFQ_POOL_BITS);
	offqd->disable = shift_and_mask(data, WORK_OFFQ_DISABLE_SHIFT,
					WORK_OFFQ_DISABLE_BITS);
	offqd->flags = data & WORK_OFFQ_FLAG_MASK;
}

static unsigned long work_offqd_pack_flags(struct work_offq_data *offqd)
{
	return ((unsigned long)offqd->disable << WORK_OFFQ_DISABLE_SHIFT) |
		((unsigned long)offqd->flags);
}

/*
 * Policy functions.  These define the policies on how the global worker
 * pools are managed.  Unless noted otherwise, these functions assume that
 * they're being called with pool->lock held.
 */

/*
 * Need to wake up a worker?  Called from anything but currently
 * running workers.
 *
 * Note that, because unbound workers never contribute to nr_running, this
 * function will always return %true for unbound pools as long as the
 * worklist isn't empty.
 */
static bool need_more_worker(struct worker_pool *pool)
{
	return !list_empty(&pool->worklist) && !pool->nr_running;
}

/* Can I start working?  Called from busy but !running workers. */
static bool may_start_working(struct worker_pool *pool)
{
	return pool->nr_idle;
}

/* Do I need to keep working?  Called from currently running workers. */
static bool keep_working(struct worker_pool *pool)
{
	return !list_empty(&pool->worklist) && (pool->nr_running <= 1);
}

/* Do we need a new worker?  Called from manager. */
static bool need_to_create_worker(struct worker_pool *pool)
{
	return need_more_worker(pool) && !may_start_working(pool);
}

/* Do we have too many workers and should some go away? */
static bool too_many_workers(struct worker_pool *pool)
{
	bool managing = pool->flags & POOL_MANAGER_ACTIVE;
	int nr_idle = pool->nr_idle + managing; /* manager is considered idle */
	int nr_busy = pool->nr_workers - nr_idle;

	return nr_idle > 2 && (nr_idle - 2) * MAX_IDLE_WORKERS_RATIO >= nr_busy;
}

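/*
 * Worked example for too_many_workers() above, assuming the default
 * MAX_IDLE_WORKERS_RATIO of 4: with nr_idle == 3 and nr_busy == 4,
 * (3 - 2) * 4 >= 4 holds, so one idle worker may be culled once the idle
 * timer fires; with nr_idle <= 2 the function is always false.
 */
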
>> 850 * >> 851 * CONTEXT: >> 852 * spin_lock_irq(rq->lock) >> 853 */ >> 854 void wq_worker_waking_up(struct task_struct *task, int cpu) >> 855 { >> 856 struct worker *worker = kthread_data(task); >> 857 >> 858 if (!(worker->flags & WORKER_NOT_RUNNING)) { >> 859 WARN_ON_ONCE(worker->pool->cpu != cpu); >> 860 atomic_inc(&worker->pool->nr_running); >> 861 } >> 862 } >> 863 >> 864 /** >> 865 * wq_worker_sleeping - a worker is going to sleep >> 866 * @task: task going to sleep >> 867 * >> 868 * This function is called during schedule() when a busy worker is >> 869 * going to sleep. Worker on the same cpu can be woken up by >> 870 * returning pointer to its task. >> 871 * >> 872 * CONTEXT: >> 873 * spin_lock_irq(rq->lock) >> 874 * >> 875 * Return: >> 876 * Worker task on @cpu to wake up, %NULL if none. >> 877 */ >> 878 struct task_struct *wq_worker_sleeping(struct task_struct *task) >> 879 { >> 880 struct worker *worker = kthread_data(task), *to_wakeup = NULL; >> 881 struct worker_pool *pool; >> 882 >> 883 /* >> 884 * Rescuers, which may not have all the fields set up like normal >> 885 * workers, also reach here, let's not access anything before >> 886 * checking NOT_RUNNING. >> 887 */ >> 888 if (worker->flags & WORKER_NOT_RUNNING) >> 889 return NULL; >> 890 >> 891 pool = worker->pool; >> 892 >> 893 /* this can only happen on the local cpu */ >> 894 if (WARN_ON_ONCE(pool->cpu != raw_smp_processor_id())) >> 895 return NULL; >> 896 >> 897 /* >> 898 * The counterpart of the following dec_and_test, implied mb, >> 899 * worklist not empty test sequence is in insert_work(). >> 900 * Please read comment there. >> 901 * >> 902 * NOT_RUNNING is clear. This means that we're bound to and >> 903 * running on the local cpu w/ rq lock held and preemption >> 904 * disabled, which in turn means that none else could be >> 905 * manipulating idle_list, so dereferencing idle_list without pool >> 906 * lock is safe. >> 907 */ >> 908 if (atomic_dec_and_test(&pool->nr_running) && >> 909 !list_empty(&pool->worklist)) >> 910 to_wakeup = first_idle_worker(pool); >> 911 return to_wakeup ? to_wakeup->task : NULL; >> 912 } >> 913 967 /** 914 /** 968 * worker_set_flags - set worker flags and adj 915 * worker_set_flags - set worker flags and adjust nr_running accordingly 969 * @worker: self 916 * @worker: self 970 * @flags: flags to set 917 * @flags: flags to set 971 * 918 * 972 * Set @flags in @worker->flags and adjust nr_ 919 * Set @flags in @worker->flags and adjust nr_running accordingly. >> 920 * >> 921 * CONTEXT: >> 922 * spin_lock_irq(pool->lock) 973 */ 923 */ 974 static inline void worker_set_flags(struct wor 924 static inline void worker_set_flags(struct worker *worker, unsigned int flags) 975 { 925 { 976 struct worker_pool *pool = worker->poo 926 struct worker_pool *pool = worker->pool; 977 927 978 lockdep_assert_held(&pool->lock); !! 928 WARN_ON_ONCE(worker->task != current); 979 929 980 /* If transitioning into NOT_RUNNING, 930 /* If transitioning into NOT_RUNNING, adjust nr_running. */ 981 if ((flags & WORKER_NOT_RUNNING) && 931 if ((flags & WORKER_NOT_RUNNING) && 982 !(worker->flags & WORKER_NOT_RUNNI 932 !(worker->flags & WORKER_NOT_RUNNING)) { 983 pool->nr_running--; !! 
933 atomic_dec(&pool->nr_running); 984 } 934 } 985 935 986 worker->flags |= flags; 936 worker->flags |= flags; 987 } 937 } 988 938 989 /** 939 /** 990 * worker_clr_flags - clear worker flags and a 940 * worker_clr_flags - clear worker flags and adjust nr_running accordingly 991 * @worker: self 941 * @worker: self 992 * @flags: flags to clear 942 * @flags: flags to clear 993 * 943 * 994 * Clear @flags in @worker->flags and adjust n 944 * Clear @flags in @worker->flags and adjust nr_running accordingly. >> 945 * >> 946 * CONTEXT: >> 947 * spin_lock_irq(pool->lock) 995 */ 948 */ 996 static inline void worker_clr_flags(struct wor 949 static inline void worker_clr_flags(struct worker *worker, unsigned int flags) 997 { 950 { 998 struct worker_pool *pool = worker->poo 951 struct worker_pool *pool = worker->pool; 999 unsigned int oflags = worker->flags; 952 unsigned int oflags = worker->flags; 1000 953 1001 lockdep_assert_held(&pool->lock); !! 954 WARN_ON_ONCE(worker->task != current); 1002 955 1003 worker->flags &= ~flags; 956 worker->flags &= ~flags; 1004 957 1005 /* 958 /* 1006 * If transitioning out of NOT_RUNNIN 959 * If transitioning out of NOT_RUNNING, increment nr_running. Note 1007 * that the nested NOT_RUNNING is not 960 * that the nested NOT_RUNNING is not a noop. NOT_RUNNING is mask 1008 * of multiple flags, not a single fl 961 * of multiple flags, not a single flag. 1009 */ 962 */ 1010 if ((flags & WORKER_NOT_RUNNING) && ( 963 if ((flags & WORKER_NOT_RUNNING) && (oflags & WORKER_NOT_RUNNING)) 1011 if (!(worker->flags & WORKER_ 964 if (!(worker->flags & WORKER_NOT_RUNNING)) 1012 pool->nr_running++; !! 965 atomic_inc(&pool->nr_running); 1013 } << 1014 << 1015 /* Return the first idle worker. Called with << 1016 static struct worker *first_idle_worker(struc << 1017 { << 1018 if (unlikely(list_empty(&pool->idle_l << 1019 return NULL; << 1020 << 1021 return list_first_entry(&pool->idle_l << 1022 } << 1023 << 1024 /** << 1025 * worker_enter_idle - enter idle state << 1026 * @worker: worker which is entering idle sta << 1027 * << 1028 * @worker is entering idle state. Update st << 1029 * necessary. << 1030 * << 1031 * LOCKING: << 1032 * raw_spin_lock_irq(pool->lock). << 1033 */ << 1034 static void worker_enter_idle(struct worker * << 1035 { << 1036 struct worker_pool *pool = worker->po << 1037 << 1038 if (WARN_ON_ONCE(worker->flags & WORK << 1039 WARN_ON_ONCE(!list_empty(&worker- << 1040 (worker->hentry.next << 1041 return; << 1042 << 1043 /* can't use worker_set_flags(), also << 1044 worker->flags |= WORKER_IDLE; << 1045 pool->nr_idle++; << 1046 worker->last_active = jiffies; << 1047 << 1048 /* idle_list is LIFO */ << 1049 list_add(&worker->entry, &pool->idle_ << 1050 << 1051 if (too_many_workers(pool) && !timer_ << 1052 mod_timer(&pool->idle_timer, << 1053 << 1054 /* Sanity check nr_running. */ << 1055 WARN_ON_ONCE(pool->nr_workers == pool << 1056 } << 1057 << 1058 /** << 1059 * worker_leave_idle - leave idle state << 1060 * @worker: worker which is leaving idle stat << 1061 * << 1062 * @worker is leaving idle state. Update sta << 1063 * << 1064 * LOCKING: << 1065 * raw_spin_lock_irq(pool->lock). 
<< 1066 */ << 1067 static void worker_leave_idle(struct worker * << 1068 { << 1069 struct worker_pool *pool = worker->po << 1070 << 1071 if (WARN_ON_ONCE(!(worker->flags & WO << 1072 return; << 1073 worker_clr_flags(worker, WORKER_IDLE) << 1074 pool->nr_idle--; << 1075 list_del_init(&worker->entry); << 1076 } 966 } 1077 967 1078 /** 968 /** 1079 * find_worker_executing_work - find worker w 969 * find_worker_executing_work - find worker which is executing a work 1080 * @pool: pool of interest 970 * @pool: pool of interest 1081 * @work: work to find worker for 971 * @work: work to find worker for 1082 * 972 * 1083 * Find a worker which is executing @work on 973 * Find a worker which is executing @work on @pool by searching 1084 * @pool->busy_hash which is keyed by the add 974 * @pool->busy_hash which is keyed by the address of @work. For a worker 1085 * to match, its current execution should mat 975 * to match, its current execution should match the address of @work and 1086 * its work function. This is to avoid unwan 976 * its work function. This is to avoid unwanted dependency between 1087 * unrelated work executions through a work i 977 * unrelated work executions through a work item being recycled while still 1088 * being executed. 978 * being executed. 1089 * 979 * 1090 * This is a bit tricky. A work item may be 980 * This is a bit tricky. A work item may be freed once its execution 1091 * starts and nothing prevents the freed area 981 * starts and nothing prevents the freed area from being recycled for 1092 * another work item. If the same work item 982 * another work item. If the same work item address ends up being reused 1093 * before the original execution finishes, wo 983 * before the original execution finishes, workqueue will identify the 1094 * recycled work item as currently executing 984 * recycled work item as currently executing and make it wait until the 1095 * current execution finishes, introducing an 985 * current execution finishes, introducing an unwanted dependency. 1096 * 986 * 1097 * This function checks the work item address 987 * This function checks the work item address and work function to avoid 1098 * false positives. Note that this isn't com 988 * false positives. Note that this isn't complete as one may construct a 1099 * work function which can introduce dependen 989 * work function which can introduce dependency onto itself through a 1100 * recycled work item. Well, if somebody wan 990 * recycled work item. Well, if somebody wants to shoot oneself in the 1101 * foot that badly, there's only so much we c 991 * foot that badly, there's only so much we can do, and if such deadlock 1102 * actually occurs, it should be easy to loca 992 * actually occurs, it should be easy to locate the culprit work function. 1103 * 993 * 1104 * CONTEXT: 994 * CONTEXT: 1105 * raw_spin_lock_irq(pool->lock). !! 995 * spin_lock_irq(pool->lock). 1106 * 996 * 1107 * Return: 997 * Return: 1108 * Pointer to worker which is executing @work 998 * Pointer to worker which is executing @work if found, %NULL 1109 * otherwise. 999 * otherwise. 
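 *
 * Illustrative scenario (assumed caller, not taken from this file): a
 * work item frees itself from its own handler and the allocation is
 * immediately recycled for a different work item, so the new item can
 * end up at the very same address:
 *
 *	new_work = kmalloc(sizeof(*new_work), GFP_KERNEL);
 *	INIT_WORK(new_work, new_func);
 *	queue_work(wq, new_work);
 *
 * Because the lookup compares the work function as well as the address,
 * the recycled item is only treated as "already executing" when new_func
 * matches the function that is still running.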
1110 */ 1000 */ 1111 static struct worker *find_worker_executing_w 1001 static struct worker *find_worker_executing_work(struct worker_pool *pool, 1112 1002 struct work_struct *work) 1113 { 1003 { 1114 struct worker *worker; 1004 struct worker *worker; 1115 1005 1116 hash_for_each_possible(pool->busy_has 1006 hash_for_each_possible(pool->busy_hash, worker, hentry, 1117 (unsigned long 1007 (unsigned long)work) 1118 if (worker->current_work == w 1008 if (worker->current_work == work && 1119 worker->current_func == w 1009 worker->current_func == work->func) 1120 return worker; 1010 return worker; 1121 1011 1122 return NULL; 1012 return NULL; 1123 } 1013 } 1124 1014 1125 /** 1015 /** 1126 * move_linked_works - move linked works to a 1016 * move_linked_works - move linked works to a list 1127 * @work: start of series of works to be sche 1017 * @work: start of series of works to be scheduled 1128 * @head: target list to append @work to 1018 * @head: target list to append @work to 1129 * @nextp: out parameter for nested worklist 1019 * @nextp: out parameter for nested worklist walking 1130 * 1020 * 1131 * Schedule linked works starting from @work !! 1021 * Schedule linked works starting from @work to @head. Work series to 1132 * scheduled starts at @work and includes any !! 1022 * be scheduled starts at @work and includes any consecutive work with 1133 * WORK_STRUCT_LINKED set in its predecessor. !! 1023 * WORK_STRUCT_LINKED set in its predecessor. 1134 * @nextp. !! 1024 * >> 1025 * If @nextp is not NULL, it's updated to point to the next work of >> 1026 * the last scheduled work. This allows move_linked_works() to be >> 1027 * nested inside outer list_for_each_entry_safe(). 1135 * 1028 * 1136 * CONTEXT: 1029 * CONTEXT: 1137 * raw_spin_lock_irq(pool->lock). !! 1030 * spin_lock_irq(pool->lock). 1138 */ 1031 */ 1139 static void move_linked_works(struct work_str 1032 static void move_linked_works(struct work_struct *work, struct list_head *head, 1140 struct work_str 1033 struct work_struct **nextp) 1141 { 1034 { 1142 struct work_struct *n; 1035 struct work_struct *n; 1143 1036 1144 /* 1037 /* 1145 * Linked worklist will always end be 1038 * Linked worklist will always end before the end of the list, 1146 * use NULL for list head. 1039 * use NULL for list head. 1147 */ 1040 */ 1148 list_for_each_entry_safe_from(work, n 1041 list_for_each_entry_safe_from(work, n, NULL, entry) { 1149 list_move_tail(&work->entry, 1042 list_move_tail(&work->entry, head); 1150 if (!(*work_data_bits(work) & 1043 if (!(*work_data_bits(work) & WORK_STRUCT_LINKED)) 1151 break; 1044 break; 1152 } 1045 } 1153 1046 1154 /* 1047 /* 1155 * If we're already inside safe list 1048 * If we're already inside safe list traversal and have moved 1156 * multiple works to the scheduled qu 1049 * multiple works to the scheduled queue, the next position 1157 * needs to be updated. 1050 * needs to be updated. 1158 */ 1051 */ 1159 if (nextp) 1052 if (nextp) 1160 *nextp = n; 1053 *nextp = n; 1161 } 1054 } 1162 1055 1163 /** 1056 /** 1164 * assign_work - assign a work item and its l << 1165 * @work: work to assign << 1166 * @worker: worker to assign to << 1167 * @nextp: out parameter for nested worklist << 1168 * << 1169 * Assign @work and its linked work items to << 1170 * executed by another worker in the same poo << 1171 * << 1172 * If @nextp is not NULL, it's updated to poi << 1173 * scheduled work. This allows assign_work() << 1174 * list_for_each_entry_safe(). 
<< 1175 * << 1176 * Returns %true if @work was successfully as << 1177 * was punted to another worker already execu << 1178 */ << 1179 static bool assign_work(struct work_struct *w << 1180 struct work_struct ** << 1181 { << 1182 struct worker_pool *pool = worker->po << 1183 struct worker *collision; << 1184 << 1185 lockdep_assert_held(&pool->lock); << 1186 << 1187 /* << 1188 * A single work shouldn't be execute << 1189 * __queue_work() ensures that @work << 1190 * while still running in the previou << 1191 * @work is not executed concurrently << 1192 * pool. Check whether anyone is alre << 1193 * defer the work to the currently ex << 1194 */ << 1195 collision = find_worker_executing_wor << 1196 if (unlikely(collision)) { << 1197 move_linked_works(work, &coll << 1198 return false; << 1199 } << 1200 << 1201 move_linked_works(work, &worker->sche << 1202 return true; << 1203 } << 1204 << 1205 static struct irq_work *bh_pool_irq_work(stru << 1206 { << 1207 int high = pool->attrs->nice == HIGHP << 1208 << 1209 return &per_cpu(bh_pool_irq_works, po << 1210 } << 1211 << 1212 static void kick_bh_pool(struct worker_pool * << 1213 { << 1214 #ifdef CONFIG_SMP << 1215 /* see drain_dead_softirq_workfn() fo << 1216 if (unlikely(pool->cpu != smp_process << 1217 !(pool->flags & POOL_BH_ << 1218 irq_work_queue_on(bh_pool_irq << 1219 return; << 1220 } << 1221 #endif << 1222 if (pool->attrs->nice == HIGHPRI_NICE << 1223 raise_softirq_irqoff(HI_SOFTI << 1224 else << 1225 raise_softirq_irqoff(TASKLET_ << 1226 } << 1227 << 1228 /** << 1229 * kick_pool - wake up an idle worker if nece << 1230 * @pool: pool to kick << 1231 * << 1232 * @pool may have pending work items. Wake up << 1233 * whether a worker was woken up. << 1234 */ << 1235 static bool kick_pool(struct worker_pool *poo << 1236 { << 1237 struct worker *worker = first_idle_wo << 1238 struct task_struct *p; << 1239 << 1240 lockdep_assert_held(&pool->lock); << 1241 << 1242 if (!need_more_worker(pool) || !worke << 1243 return false; << 1244 << 1245 if (pool->flags & POOL_BH) { << 1246 kick_bh_pool(pool); << 1247 return true; << 1248 } << 1249 << 1250 p = worker->task; << 1251 << 1252 #ifdef CONFIG_SMP << 1253 /* << 1254 * Idle @worker is about to execute @ << 1255 * opportunity to migrate @worker at << 1256 * wake_cpu field. Let's see if we wa << 1257 * execution locality. << 1258 * << 1259 * We're waking the worker that went << 1260 * chance that @worker is marked idle << 1261 * so, setting the wake_cpu won't do << 1262 * optimization and the race window i << 1263 * now. If this becomes pronounced, w << 1264 * still on cpu when picking an idle << 1265 * << 1266 * If @pool has non-strict affinity, << 1267 * its affinity scope. Repatriate. 
<< 1268 */ << 1269 if (!pool->attrs->affn_strict && << 1270 !cpumask_test_cpu(p->wake_cpu, po << 1271 struct work_struct *work = li << 1272 << 1273 int wake_cpu = cpumask_any_an << 1274 << 1275 if (wake_cpu < nr_cpu_ids) { << 1276 p->wake_cpu = wake_cp << 1277 get_work_pwq(work)->s << 1278 } << 1279 } << 1280 #endif << 1281 wake_up_process(p); << 1282 return true; << 1283 } << 1284 << 1285 #ifdef CONFIG_WQ_CPU_INTENSIVE_REPORT << 1286 << 1287 /* << 1288 * Concurrency-managed per-cpu work items tha << 1289 * wq_cpu_intensive_thresh_us trigger the aut << 1290 * which prevents them from stalling other co << 1291 * work function keeps triggering this mechan << 1292 * should be using an unbound workqueue inste << 1293 * << 1294 * wq_cpu_intensive_report() tracks work func << 1295 * and report them so that they can be examin << 1296 * workqueues as appropriate. To avoid floodi << 1297 * function is tracked and reported with expo << 1298 */ << 1299 #define WCI_MAX_ENTS 128 << 1300 << 1301 struct wci_ent { << 1302 work_func_t func; << 1303 atomic64_t cnt; << 1304 struct hlist_node hash_node; << 1305 }; << 1306 << 1307 static struct wci_ent wci_ents[WCI_MAX_ENTS]; << 1308 static int wci_nr_ents; << 1309 static DEFINE_RAW_SPINLOCK(wci_lock); << 1310 static DEFINE_HASHTABLE(wci_hash, ilog2(WCI_M << 1311 << 1312 static struct wci_ent *wci_find_ent(work_func << 1313 { << 1314 struct wci_ent *ent; << 1315 << 1316 hash_for_each_possible_rcu(wci_hash, << 1317 (unsigned << 1318 if (ent->func == func) << 1319 return ent; << 1320 } << 1321 return NULL; << 1322 } << 1323 << 1324 static void wq_cpu_intensive_report(work_func << 1325 { << 1326 struct wci_ent *ent; << 1327 << 1328 restart: << 1329 ent = wci_find_ent(func); << 1330 if (ent) { << 1331 u64 cnt; << 1332 << 1333 /* << 1334 * Start reporting from the w << 1335 * exponentially. << 1336 */ << 1337 cnt = atomic64_inc_return_rel << 1338 if (wq_cpu_intensive_warning_ << 1339 cnt >= wq_cpu_intensive_w << 1340 is_power_of_2(cnt + 1 - w << 1341 printk_deferred(KERN_ << 1342 ent-> << 1343 atomi << 1344 return; << 1345 } << 1346 << 1347 /* << 1348 * @func is a new violation. Allocate << 1349 * is exhausted, something went reall << 1350 * noise already. << 1351 */ << 1352 if (wci_nr_ents >= WCI_MAX_ENTS) << 1353 return; << 1354 << 1355 raw_spin_lock(&wci_lock); << 1356 << 1357 if (wci_nr_ents >= WCI_MAX_ENTS) { << 1358 raw_spin_unlock(&wci_lock); << 1359 return; << 1360 } << 1361 << 1362 if (wci_find_ent(func)) { << 1363 raw_spin_unlock(&wci_lock); << 1364 goto restart; << 1365 } << 1366 << 1367 ent = &wci_ents[wci_nr_ents++]; << 1368 ent->func = func; << 1369 atomic64_set(&ent->cnt, 0); << 1370 hash_add_rcu(wci_hash, &ent->hash_nod << 1371 << 1372 raw_spin_unlock(&wci_lock); << 1373 << 1374 goto restart; << 1375 } << 1376 << 1377 #else /* CONFIG_WQ_CPU_INTENSIVE_REPORT */ << 1378 static void wq_cpu_intensive_report(work_func << 1379 #endif /* CONFIG_WQ_CPU_INTENSIVE_REPORT */ << 1380 << 1381 /** << 1382 * wq_worker_running - a worker is running ag << 1383 * @task: task waking up << 1384 * << 1385 * This function is called when a worker retu << 1386 */ << 1387 void wq_worker_running(struct task_struct *ta << 1388 { << 1389 struct worker *worker = kthread_data( << 1390 << 1391 if (!READ_ONCE(worker->sleeping)) << 1392 return; << 1393 << 1394 /* << 1395 * If preempted by unbind_workers() b << 1396 * and the nr_running increment below << 1397 * and leave with an unexpected pool- << 1398 * pool. Protect against such race. 
<< 1399 */ << 1400 preempt_disable(); << 1401 if (!(worker->flags & WORKER_NOT_RUNN << 1402 worker->pool->nr_running++; << 1403 preempt_enable(); << 1404 << 1405 /* << 1406 * CPU intensive auto-detection cares << 1407 * CPU without sleeping. Reset the st << 1408 */ << 1409 worker->current_at = worker->task->se << 1410 << 1411 WRITE_ONCE(worker->sleeping, 0); << 1412 } << 1413 << 1414 /** << 1415 * wq_worker_sleeping - a worker is going to << 1416 * @task: task going to sleep << 1417 * << 1418 * This function is called from schedule() wh << 1419 * going to sleep. << 1420 */ << 1421 void wq_worker_sleeping(struct task_struct *t << 1422 { << 1423 struct worker *worker = kthread_data( << 1424 struct worker_pool *pool; << 1425 << 1426 /* << 1427 * Rescuers, which may not have all t << 1428 * workers, also reach here, let's no << 1429 * checking NOT_RUNNING. << 1430 */ << 1431 if (worker->flags & WORKER_NOT_RUNNIN << 1432 return; << 1433 << 1434 pool = worker->pool; << 1435 << 1436 /* Return if preempted before wq_work << 1437 if (READ_ONCE(worker->sleeping)) << 1438 return; << 1439 << 1440 WRITE_ONCE(worker->sleeping, 1); << 1441 raw_spin_lock_irq(&pool->lock); << 1442 << 1443 /* << 1444 * Recheck in case unbind_workers() p << 1445 * want to decrement nr_running after << 1446 * and nr_running has been reset. << 1447 */ << 1448 if (worker->flags & WORKER_NOT_RUNNIN << 1449 raw_spin_unlock_irq(&pool->lo << 1450 return; << 1451 } << 1452 << 1453 pool->nr_running--; << 1454 if (kick_pool(pool)) << 1455 worker->current_pwq->stats[PW << 1456 << 1457 raw_spin_unlock_irq(&pool->lock); << 1458 } << 1459 << 1460 /** << 1461 * wq_worker_tick - a scheduler tick occurred << 1462 * @task: task currently running << 1463 * << 1464 * Called from sched_tick(). We're in the IRQ << 1465 * worker's fields which follow the 'K' locki << 1466 */ << 1467 void wq_worker_tick(struct task_struct *task) << 1468 { << 1469 struct worker *worker = kthread_data( << 1470 struct pool_workqueue *pwq = worker-> << 1471 struct worker_pool *pool = worker->po << 1472 << 1473 if (!pwq) << 1474 return; << 1475 << 1476 pwq->stats[PWQ_STAT_CPU_TIME] += TICK << 1477 << 1478 if (!wq_cpu_intensive_thresh_us) << 1479 return; << 1480 << 1481 /* << 1482 * If the current worker is concurren << 1483 * longer than wq_cpu_intensive_thres << 1484 * CPU_INTENSIVE to avoid stalling ot << 1485 * << 1486 * Set @worker->sleeping means that @ << 1487 * switching out voluntarily and won' << 1488 * @pool->nr_running until it wakes u << 1489 * decrements ->nr_running, setting C << 1490 * double decrements. The task is rel << 1491 * We probably want to make this pret << 1492 */ << 1493 if ((worker->flags & WORKER_NOT_RUNNI << 1494 worker->task->se.sum_exec_runtime << 1495 wq_cpu_intensive_thresh_us * NSEC << 1496 return; << 1497 << 1498 raw_spin_lock(&pool->lock); << 1499 << 1500 worker_set_flags(worker, WORKER_CPU_I << 1501 wq_cpu_intensive_report(worker->curre << 1502 pwq->stats[PWQ_STAT_CPU_INTENSIVE]++; << 1503 << 1504 if (kick_pool(pool)) << 1505 pwq->stats[PWQ_STAT_CM_WAKEUP << 1506 << 1507 raw_spin_unlock(&pool->lock); << 1508 } << 1509 << 1510 /** << 1511 * wq_worker_last_func - retrieve worker's la << 1512 * @task: Task to retrieve last work function << 1513 * << 1514 * Determine the last function a worker execu << 1515 * the scheduler to get a worker's last known << 1516 * << 1517 * CONTEXT: << 1518 * raw_spin_lock_irq(rq->lock) << 1519 * << 1520 * This function is called during schedule() << 1521 * to sleep. 
It's used by psi to identify agg << 1522 * dequeuing, to allow periodic aggregation t << 1523 * worker is the last task in the system or c << 1524 * << 1525 * As this function doesn't involve any workq << 1526 * only returns stable values when called fro << 1527 * queuing and dequeuing paths, when @task, w << 1528 * is guaranteed to not be processing any wor << 1529 * << 1530 * Return: << 1531 * The last work function %current executed a << 1532 * hasn't executed any work yet. << 1533 */ << 1534 work_func_t wq_worker_last_func(struct task_s << 1535 { << 1536 struct worker *worker = kthread_data( << 1537 << 1538 return worker->last_func; << 1539 } << 1540 << 1541 /** << 1542 * wq_node_nr_active - Determine wq_node_nr_a << 1543 * @wq: workqueue of interest << 1544 * @node: NUMA node, can be %NUMA_NO_NODE << 1545 * << 1546 * Determine wq_node_nr_active to use for @wq << 1547 * << 1548 * - %NULL for per-cpu workqueues as they don << 1549 * << 1550 * - node_nr_active[nr_node_ids] if @node is << 1551 * << 1552 * - Otherwise, node_nr_active[@node]. << 1553 */ << 1554 static struct wq_node_nr_active *wq_node_nr_a << 1555 << 1556 { << 1557 if (!(wq->flags & WQ_UNBOUND)) << 1558 return NULL; << 1559 << 1560 if (node == NUMA_NO_NODE) << 1561 node = nr_node_ids; << 1562 << 1563 return wq->node_nr_active[node]; << 1564 } << 1565 << 1566 /** << 1567 * wq_update_node_max_active - Update per-nod << 1568 * @wq: workqueue to update << 1569 * @off_cpu: CPU that's going down, -1 if a C << 1570 * << 1571 * Update @wq->node_nr_active[]->max. @wq mus << 1572 * distributed among nodes according to the p << 1573 * cpus. The result is always between @wq->mi << 1574 */ << 1575 static void wq_update_node_max_active(struct << 1576 { << 1577 struct cpumask *effective = unbound_e << 1578 int min_active = READ_ONCE(wq->min_ac << 1579 int max_active = READ_ONCE(wq->max_ac << 1580 int total_cpus, node; << 1581 << 1582 lockdep_assert_held(&wq->mutex); << 1583 << 1584 if (!wq_topo_initialized) << 1585 return; << 1586 << 1587 if (off_cpu >= 0 && !cpumask_test_cpu << 1588 off_cpu = -1; << 1589 << 1590 total_cpus = cpumask_weight_and(effec << 1591 if (off_cpu >= 0) << 1592 total_cpus--; << 1593 << 1594 /* If all CPUs of the wq get offline, << 1595 if (unlikely(!total_cpus)) { << 1596 for_each_node(node) << 1597 wq_node_nr_active(wq, << 1598 << 1599 wq_node_nr_active(wq, NUMA_NO << 1600 return; << 1601 } << 1602 << 1603 for_each_node(node) { << 1604 int node_cpus; << 1605 << 1606 node_cpus = cpumask_weight_an << 1607 if (off_cpu >= 0 && cpu_to_no << 1608 node_cpus--; << 1609 << 1610 wq_node_nr_active(wq, node)-> << 1611 clamp(DIV_ROUND_UP(ma << 1612 min_active, max << 1613 } << 1614 << 1615 wq_node_nr_active(wq, NUMA_NO_NODE)-> << 1616 } << 1617 << 1618 /** << 1619 * get_pwq - get an extra reference on the sp 1057 * get_pwq - get an extra reference on the specified pool_workqueue 1620 * @pwq: pool_workqueue to get 1058 * @pwq: pool_workqueue to get 1621 * 1059 * 1622 * Obtain an extra reference on @pwq. The ca 1060 * Obtain an extra reference on @pwq. The caller should guarantee that 1623 * @pwq has positive refcnt and be holding th 1061 * @pwq has positive refcnt and be holding the matching pool->lock. 
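 *
 * Illustrative pattern (assumed caller, not taken from this file): pin a
 * pwq while pool->lock is held so that it can still be referenced after
 * the lock is dropped, then release it with put_pwq_unlocked():
 *
 *	raw_spin_lock_irq(&pwq->pool->lock);
 *	get_pwq(pwq);
 *	raw_spin_unlock_irq(&pwq->pool->lock);
 *	...
 *	put_pwq_unlocked(pwq);
 *
 * put_pwq_unlocked() takes pool->lock internally, so the final reference
 * must be dropped without the lock already held.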
1624 */ 1062 */ 1625 static void get_pwq(struct pool_workqueue *pw 1063 static void get_pwq(struct pool_workqueue *pwq) 1626 { 1064 { 1627 lockdep_assert_held(&pwq->pool->lock) 1065 lockdep_assert_held(&pwq->pool->lock); 1628 WARN_ON_ONCE(pwq->refcnt <= 0); 1066 WARN_ON_ONCE(pwq->refcnt <= 0); 1629 pwq->refcnt++; 1067 pwq->refcnt++; 1630 } 1068 } 1631 1069 1632 /** 1070 /** 1633 * put_pwq - put a pool_workqueue reference 1071 * put_pwq - put a pool_workqueue reference 1634 * @pwq: pool_workqueue to put 1072 * @pwq: pool_workqueue to put 1635 * 1073 * 1636 * Drop a reference of @pwq. If its refcnt r 1074 * Drop a reference of @pwq. If its refcnt reaches zero, schedule its 1637 * destruction. The caller should be holding 1075 * destruction. The caller should be holding the matching pool->lock. 1638 */ 1076 */ 1639 static void put_pwq(struct pool_workqueue *pw 1077 static void put_pwq(struct pool_workqueue *pwq) 1640 { 1078 { 1641 lockdep_assert_held(&pwq->pool->lock) 1079 lockdep_assert_held(&pwq->pool->lock); 1642 if (likely(--pwq->refcnt)) 1080 if (likely(--pwq->refcnt)) 1643 return; 1081 return; >> 1082 if (WARN_ON_ONCE(!(pwq->wq->flags & WQ_UNBOUND))) >> 1083 return; 1644 /* 1084 /* 1645 * @pwq can't be released under pool- !! 1085 * @pwq can't be released under pool->lock, bounce to 1646 * kthread_worker to avoid A-A deadlo !! 1086 * pwq_unbound_release_workfn(). This never recurses on the same >> 1087 * pool->lock as this path is taken only for unbound workqueues and >> 1088 * the release work item is scheduled on a per-cpu workqueue. To >> 1089 * avoid lockdep warning, unbound pool->locks are given lockdep >> 1090 * subclass of 1 in get_unbound_pool(). 1647 */ 1091 */ 1648 kthread_queue_work(pwq_release_worker !! 1092 schedule_work(&pwq->unbound_release_work); 1649 } 1093 } 1650 1094 1651 /** 1095 /** 1652 * put_pwq_unlocked - put_pwq() with surround 1096 * put_pwq_unlocked - put_pwq() with surrounding pool lock/unlock 1653 * @pwq: pool_workqueue to put (can be %NULL) 1097 * @pwq: pool_workqueue to put (can be %NULL) 1654 * 1098 * 1655 * put_pwq() with locking. This function als 1099 * put_pwq() with locking. This function also allows %NULL @pwq. 1656 */ 1100 */ 1657 static void put_pwq_unlocked(struct pool_work 1101 static void put_pwq_unlocked(struct pool_workqueue *pwq) 1658 { 1102 { 1659 if (pwq) { 1103 if (pwq) { 1660 /* 1104 /* 1661 * As both pwqs and pools are !! 1105 * As both pwqs and pools are sched-RCU protected, the 1662 * following lock operations 1106 * following lock operations are safe. 1663 */ 1107 */ 1664 raw_spin_lock_irq(&pwq->pool- !! 1108 spin_lock_irq(&pwq->pool->lock); 1665 put_pwq(pwq); 1109 put_pwq(pwq); 1666 raw_spin_unlock_irq(&pwq->poo !! 1110 spin_unlock_irq(&pwq->pool->lock); 1667 } 1111 } 1668 } 1112 } 1669 1113 1670 static bool pwq_is_empty(struct pool_workqueu !! 1114 static void pwq_activate_delayed_work(struct work_struct *work) 1671 { << 1672 return !pwq->nr_active && list_empty( << 1673 } << 1674 << 1675 static void __pwq_activate_work(struct pool_w << 1676 struct work_s << 1677 { 1115 { 1678 unsigned long *wdb = work_data_bits(w !! 
1116 struct pool_workqueue *pwq = get_work_pwq(work); 1679 1117 1680 WARN_ON_ONCE(!(*wdb & WORK_STRUCT_INA << 1681 trace_workqueue_activate_work(work); 1118 trace_workqueue_activate_work(work); 1682 if (list_empty(&pwq->pool->worklist)) 1119 if (list_empty(&pwq->pool->worklist)) 1683 pwq->pool->watchdog_ts = jiff 1120 pwq->pool->watchdog_ts = jiffies; 1684 move_linked_works(work, &pwq->pool->w 1121 move_linked_works(work, &pwq->pool->worklist, NULL); 1685 __clear_bit(WORK_STRUCT_INACTIVE_BIT, !! 1122 __clear_bit(WORK_STRUCT_DELAYED_BIT, work_data_bits(work)); >> 1123 pwq->nr_active++; 1686 } 1124 } 1687 1125 1688 static bool tryinc_node_nr_active(struct wq_n !! 1126 static void pwq_activate_first_delayed(struct pool_workqueue *pwq) 1689 { 1127 { 1690 int max = READ_ONCE(nna->max); !! 1128 struct work_struct *work = list_first_entry(&pwq->delayed_works, 1691 !! 1129 struct work_struct, entry); 1692 while (true) { << 1693 int old, tmp; << 1694 1130 1695 old = atomic_read(&nna->nr); !! 1131 pwq_activate_delayed_work(work); 1696 if (old >= max) << 1697 return false; << 1698 tmp = atomic_cmpxchg_relaxed( << 1699 if (tmp == old) << 1700 return true; << 1701 } << 1702 } << 1703 << 1704 /** << 1705 * pwq_tryinc_nr_active - Try to increment nr << 1706 * @pwq: pool_workqueue of interest << 1707 * @fill: max_active may have increased, try << 1708 * << 1709 * Try to increment nr_active for @pwq. Retur << 1710 * successfully obtained. %false otherwise. << 1711 */ << 1712 static bool pwq_tryinc_nr_active(struct pool_ << 1713 { << 1714 struct workqueue_struct *wq = pwq->wq << 1715 struct worker_pool *pool = pwq->pool; << 1716 struct wq_node_nr_active *nna = wq_no << 1717 bool obtained = false; << 1718 << 1719 lockdep_assert_held(&pool->lock); << 1720 << 1721 if (!nna) { << 1722 /* BH or per-cpu workqueue, p << 1723 obtained = pwq->nr_active < R << 1724 goto out; << 1725 } << 1726 << 1727 if (unlikely(pwq->plugged)) << 1728 return false; << 1729 << 1730 /* << 1731 * Unbound workqueue uses per-node sh << 1732 * already waiting on $nna, pwq_dec_n << 1733 * concurrency level. Don't jump the << 1734 * << 1735 * We need to ignore the pending test << 1736 * pwq_dec_nr_active() can only maint << 1737 * increase it. This is indicated by << 1738 */ << 1739 if (!list_empty(&pwq->pending_node) & << 1740 goto out; << 1741 << 1742 obtained = tryinc_node_nr_active(nna) << 1743 if (obtained) << 1744 goto out; << 1745 << 1746 /* << 1747 * Lockless acquisition failed. Lock, << 1748 * and try again. The smp_mb() is pai << 1749 * of atomic_dec_return() in pwq_dec_ << 1750 * we see the decremented $nna->nr or << 1751 * $nna->pending_pwqs. << 1752 */ << 1753 raw_spin_lock(&nna->lock); << 1754 << 1755 if (list_empty(&pwq->pending_node)) << 1756 list_add_tail(&pwq->pending_n << 1757 else if (likely(!fill)) << 1758 goto out_unlock; << 1759 << 1760 smp_mb(); << 1761 << 1762 obtained = tryinc_node_nr_active(nna) << 1763 << 1764 /* << 1765 * If @fill, @pwq might have already << 1766 * pending in cold paths doesn't affe << 1767 */ << 1768 if (obtained && likely(!fill)) << 1769 list_del_init(&pwq->pending_n << 1770 << 1771 out_unlock: << 1772 raw_spin_unlock(&nna->lock); << 1773 out: << 1774 if (obtained) << 1775 pwq->nr_active++; << 1776 return obtained; << 1777 } << 1778 << 1779 /** << 1780 * pwq_activate_first_inactive - Activate the << 1781 * @pwq: pool_workqueue of interest << 1782 * @fill: max_active may have increased, try << 1783 * << 1784 * Activate the first inactive work item of @ << 1785 * max_active limit. 
<< 1786 * << 1787 * Returns %true if an inactive work item has << 1788 * inactive work item is found or max_active << 1789 */ << 1790 static bool pwq_activate_first_inactive(struc << 1791 { << 1792 struct work_struct *work = << 1793 list_first_entry_or_null(&pwq << 1794 stru << 1795 << 1796 if (work && pwq_tryinc_nr_active(pwq, << 1797 __pwq_activate_work(pwq, work << 1798 return true; << 1799 } else { << 1800 return false; << 1801 } << 1802 } << 1803 << 1804 /** << 1805 * unplug_oldest_pwq - unplug the oldest pool << 1806 * @wq: workqueue_struct where its oldest pwq << 1807 * << 1808 * This function should only be called for or << 1809 * oldest pwq is unplugged, the others are pl << 1810 * ensure proper work item ordering:: << 1811 * << 1812 * dfl_pwq --------------+ [P] - plugg << 1813 * | << 1814 * v << 1815 * pwqs -> A -> B [P] -> C [P] (newest) << 1816 * | | | << 1817 * 1 3 5 << 1818 * | | | << 1819 * 2 4 6 << 1820 * << 1821 * When the oldest pwq is drained and removed << 1822 * to unplug the next oldest one to start its << 1823 * pwq's are linked into wq->pwqs with the ol << 1824 * the list is the oldest. << 1825 */ << 1826 static void unplug_oldest_pwq(struct workqueu << 1827 { << 1828 struct pool_workqueue *pwq; << 1829 << 1830 lockdep_assert_held(&wq->mutex); << 1831 << 1832 /* Caller should make sure that pwqs << 1833 pwq = list_first_entry_or_null(&wq->p << 1834 pwqs_n << 1835 raw_spin_lock_irq(&pwq->pool->lock); << 1836 if (pwq->plugged) { << 1837 pwq->plugged = false; << 1838 if (pwq_activate_first_inacti << 1839 kick_pool(pwq->pool); << 1840 } << 1841 raw_spin_unlock_irq(&pwq->pool->lock) << 1842 } << 1843 << 1844 /** << 1845 * node_activate_pending_pwq - Activate a pen << 1846 * @nna: wq_node_nr_active to activate a pend << 1847 * @caller_pool: worker_pool the caller is lo << 1848 * << 1849 * Activate a pwq in @nna->pending_pwqs. Call << 1850 * @caller_pool may be unlocked and relocked << 1851 */ << 1852 static void node_activate_pending_pwq(struct << 1853 struct << 1854 { << 1855 struct worker_pool *locked_pool = cal << 1856 struct pool_workqueue *pwq; << 1857 struct work_struct *work; << 1858 << 1859 lockdep_assert_held(&caller_pool->loc << 1860 << 1861 raw_spin_lock(&nna->lock); << 1862 retry: << 1863 pwq = list_first_entry_or_null(&nna-> << 1864 struct << 1865 if (!pwq) << 1866 goto out_unlock; << 1867 << 1868 /* << 1869 * If @pwq is for a different pool th << 1870 * @pwq->pool->lock. Let's trylock fi << 1871 * / lock dance. For that, we also ne << 1872 * nested inside pool locks. 
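 *
 * In other words, the ordering rule is pool->lock first, then nna->lock.
 * The trylock lets us attempt the switch to the target pool's lock while
 * still holding nna->lock; if it fails, we drop nna->lock, take the pool
 * lock outright, re-take nna->lock and retry from the top.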
<< 1873 */ << 1874 if (pwq->pool != locked_pool) { << 1875 raw_spin_unlock(&locked_pool- << 1876 locked_pool = pwq->pool; << 1877 if (!raw_spin_trylock(&locked << 1878 raw_spin_unlock(&nna- << 1879 raw_spin_lock(&locked << 1880 raw_spin_lock(&nna->l << 1881 goto retry; << 1882 } << 1883 } << 1884 << 1885 /* << 1886 * $pwq may not have any inactive wor << 1887 * Drop it from pending_pwqs and see << 1888 */ << 1889 work = list_first_entry_or_null(&pwq- << 1890 struc << 1891 if (!work) { << 1892 list_del_init(&pwq->pending_n << 1893 goto retry; << 1894 } << 1895 << 1896 /* << 1897 * Acquire an nr_active count and act << 1898 * $pwq still has inactive work items << 1899 * pending_pwqs so that we round-robi << 1900 * inactive work items are not activa << 1901 * given that there has never been an << 1902 */ << 1903 if (likely(tryinc_node_nr_active(nna) << 1904 pwq->nr_active++; << 1905 __pwq_activate_work(pwq, work << 1906 << 1907 if (list_empty(&pwq->inactive << 1908 list_del_init(&pwq->p << 1909 else << 1910 list_move_tail(&pwq-> << 1911 << 1912 /* if activating a foreign po << 1913 if (pwq->pool != caller_pool) << 1914 kick_pool(pwq->pool); << 1915 } << 1916 << 1917 out_unlock: << 1918 raw_spin_unlock(&nna->lock); << 1919 if (locked_pool != caller_pool) { << 1920 raw_spin_unlock(&locked_pool- << 1921 raw_spin_lock(&caller_pool->l << 1922 } << 1923 } << 1924 << 1925 /** << 1926 * pwq_dec_nr_active - Retire an active count << 1927 * @pwq: pool_workqueue of interest << 1928 * << 1929 * Decrement @pwq's nr_active and try to acti << 1930 * For unbound workqueues, this function may << 1931 */ << 1932 static void pwq_dec_nr_active(struct pool_wor << 1933 { << 1934 struct worker_pool *pool = pwq->pool; << 1935 struct wq_node_nr_active *nna = wq_no << 1936 << 1937 lockdep_assert_held(&pool->lock); << 1938 << 1939 /* << 1940 * @pwq->nr_active should be decremen << 1941 * workqueues. << 1942 */ << 1943 pwq->nr_active--; << 1944 << 1945 /* << 1946 * For a percpu workqueue, it's simpl << 1947 * inactive work item on @pwq itself. << 1948 */ << 1949 if (!nna) { << 1950 pwq_activate_first_inactive(p << 1951 return; << 1952 } << 1953 << 1954 /* << 1955 * If @pwq is for an unbound workqueu << 1956 * multiple pwqs and pools may be sha << 1957 * pwq needs to wait for an nr_active << 1958 * $nna->pending_pwqs. The following << 1959 * memory barrier is paired with smp_ << 1960 * guarantee that either we see non-e << 1961 * decremented $nna->nr. << 1962 * << 1963 * $nna->max may change as CPUs come << 1964 * max_active gets updated. However, << 1965 * larger than @pwq->wq->min_active w << 1966 * This maintains the forward progres << 1967 */ << 1968 if (atomic_dec_return(&nna->nr) >= RE << 1969 return; << 1970 << 1971 if (!list_empty(&nna->pending_pwqs)) << 1972 node_activate_pending_pwq(nna << 1973 } 1132 } 1974 1133 1975 /** 1134 /** 1976 * pwq_dec_nr_in_flight - decrement pwq's nr_ 1135 * pwq_dec_nr_in_flight - decrement pwq's nr_in_flight 1977 * @pwq: pwq of interest 1136 * @pwq: pwq of interest 1978 * @work_data: work_data of work which left t !! 1137 * @color: color of work which left the queue 1979 * 1138 * 1980 * A work either has completed or is removed 1139 * A work either has completed or is removed from pending queue, 1981 * decrement nr_in_flight of its pwq and hand 1140 * decrement nr_in_flight of its pwq and handle workqueue flushing. 1982 * 1141 * 1983 * NOTE: << 1984 * For unbound workqueues, this function may << 1985 * and thus should be called after all other << 1986 * work item is complete. 
<< 1987 * << 1988 * CONTEXT: 1142 * CONTEXT: 1989 * raw_spin_lock_irq(pool->lock). !! 1143 * spin_lock_irq(pool->lock). 1990 */ 1144 */ 1991 static void pwq_dec_nr_in_flight(struct pool_ !! 1145 static void pwq_dec_nr_in_flight(struct pool_workqueue *pwq, int color) 1992 { 1146 { 1993 int color = get_work_color(work_data) !! 1147 /* uncolored work items don't participate in flushing or nr_active */ 1994 !! 1148 if (color == WORK_NO_COLOR) 1995 if (!(work_data & WORK_STRUCT_INACTIV !! 1149 goto out_put; 1996 pwq_dec_nr_active(pwq); << 1997 1150 1998 pwq->nr_in_flight[color]--; 1151 pwq->nr_in_flight[color]--; 1999 1152 >> 1153 pwq->nr_active--; >> 1154 if (!list_empty(&pwq->delayed_works)) { >> 1155 /* one down, submit a delayed one */ >> 1156 if (pwq->nr_active < pwq->max_active) >> 1157 pwq_activate_first_delayed(pwq); >> 1158 } >> 1159 2000 /* is flush in progress and are we at 1160 /* is flush in progress and are we at the flushing tip? */ 2001 if (likely(pwq->flush_color != color) 1161 if (likely(pwq->flush_color != color)) 2002 goto out_put; 1162 goto out_put; 2003 1163 2004 /* are there still in-flight works? * 1164 /* are there still in-flight works? */ 2005 if (pwq->nr_in_flight[color]) 1165 if (pwq->nr_in_flight[color]) 2006 goto out_put; 1166 goto out_put; 2007 1167 2008 /* this pwq is done, clear flush_colo 1168 /* this pwq is done, clear flush_color */ 2009 pwq->flush_color = -1; 1169 pwq->flush_color = -1; 2010 1170 2011 /* 1171 /* 2012 * If this was the last pwq, wake up 1172 * If this was the last pwq, wake up the first flusher. It 2013 * will handle the rest. 1173 * will handle the rest. 2014 */ 1174 */ 2015 if (atomic_dec_and_test(&pwq->wq->nr_ 1175 if (atomic_dec_and_test(&pwq->wq->nr_pwqs_to_flush)) 2016 complete(&pwq->wq->first_flus 1176 complete(&pwq->wq->first_flusher->done); 2017 out_put: 1177 out_put: 2018 put_pwq(pwq); 1178 put_pwq(pwq); 2019 } 1179 } 2020 1180 2021 /** 1181 /** 2022 * try_to_grab_pending - steal work item from 1182 * try_to_grab_pending - steal work item from worklist and disable irq 2023 * @work: work item to steal 1183 * @work: work item to steal 2024 * @cflags: %WORK_CANCEL_ flags !! 1184 * @is_dwork: @work is a delayed_work 2025 * @irq_flags: place to store irq state !! 1185 * @flags: place to store irq state 2026 * 1186 * 2027 * Try to grab PENDING bit of @work. This fu 1187 * Try to grab PENDING bit of @work. This function can handle @work in any 2028 * stable state - idle, on timer or on workli 1188 * stable state - idle, on timer or on worklist. 2029 * 1189 * 2030 * Return: 1190 * Return: 2031 * << 2032 * ======== ============================= << 2033 * 1 if @work was pending and we s 1191 * 1 if @work was pending and we successfully stole PENDING 2034 * 0 if @work was idle and we clai 1192 * 0 if @work was idle and we claimed PENDING 2035 * -EAGAIN if PENDING couldn't be grabbe 1193 * -EAGAIN if PENDING couldn't be grabbed at the moment, safe to busy-retry 2036 * ======== ============================= !! 1194 * -ENOENT if someone else is canceling @work, this state may persist >> 1195 * for arbitrarily long 2037 * 1196 * 2038 * Note: 1197 * Note: 2039 * On >= 0 return, the caller owns @work's PE 1198 * On >= 0 return, the caller owns @work's PENDING bit. To avoid getting 2040 * interrupted while holding PENDING and @wor 1199 * interrupted while holding PENDING and @work off queue, irq must be 2041 * disabled on entry. This, combined with de 1200 * disabled on entry. 
This, combined with delayed_work->timer being 2042 * irqsafe, ensures that we return -EAGAIN fo 1201 * irqsafe, ensures that we return -EAGAIN for finite short period of time. 2043 * 1202 * 2044 * On successful return, >= 0, irq is disable 1203 * On successful return, >= 0, irq is disabled and the caller is 2045 * responsible for releasing it using local_i !! 1204 * responsible for releasing it using local_irq_restore(*@flags). 2046 * 1205 * 2047 * This function is safe to call from any con 1206 * This function is safe to call from any context including IRQ handler. 2048 */ 1207 */ 2049 static int try_to_grab_pending(struct work_st !! 1208 static int try_to_grab_pending(struct work_struct *work, bool is_dwork, 2050 unsigned long !! 1209 unsigned long *flags) 2051 { 1210 { 2052 struct worker_pool *pool; 1211 struct worker_pool *pool; 2053 struct pool_workqueue *pwq; 1212 struct pool_workqueue *pwq; 2054 1213 2055 local_irq_save(*irq_flags); !! 1214 local_irq_save(*flags); 2056 1215 2057 /* try to steal the timer if it exist 1216 /* try to steal the timer if it exists */ 2058 if (cflags & WORK_CANCEL_DELAYED) { !! 1217 if (is_dwork) { 2059 struct delayed_work *dwork = 1218 struct delayed_work *dwork = to_delayed_work(work); 2060 1219 2061 /* 1220 /* 2062 * dwork->timer is irqsafe. 1221 * dwork->timer is irqsafe. If del_timer() fails, it's 2063 * guaranteed that the timer 1222 * guaranteed that the timer is not queued anywhere and not 2064 * running on the local CPU. 1223 * running on the local CPU. 2065 */ 1224 */ 2066 if (likely(del_timer(&dwork-> 1225 if (likely(del_timer(&dwork->timer))) 2067 return 1; 1226 return 1; 2068 } 1227 } 2069 1228 2070 /* try to claim PENDING the normal wa 1229 /* try to claim PENDING the normal way */ 2071 if (!test_and_set_bit(WORK_STRUCT_PEN 1230 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) 2072 return 0; 1231 return 0; 2073 1232 2074 rcu_read_lock(); << 2075 /* 1233 /* 2076 * The queueing is in progress, or it 1234 * The queueing is in progress, or it is already queued. Try to 2077 * steal it from ->worklist without c 1235 * steal it from ->worklist without clearing WORK_STRUCT_PENDING. 2078 */ 1236 */ 2079 pool = get_work_pool(work); 1237 pool = get_work_pool(work); 2080 if (!pool) 1238 if (!pool) 2081 goto fail; 1239 goto fail; 2082 1240 2083 raw_spin_lock(&pool->lock); !! 1241 spin_lock(&pool->lock); 2084 /* 1242 /* 2085 * work->data is guaranteed to point 1243 * work->data is guaranteed to point to pwq only while the work 2086 * item is queued on pwq->wq, and bot 1244 * item is queued on pwq->wq, and both updating work->data to point 2087 * to pwq on queueing and to pool on 1245 * to pwq on queueing and to pool on dequeueing are done under 2088 * pwq->pool->lock. This in turn gua 1246 * pwq->pool->lock. This in turn guarantees that, if work->data 2089 * points to pwq which is associated 1247 * points to pwq which is associated with a locked pool, the work 2090 * item is currently queued on that p 1248 * item is currently queued on that pool. 2091 */ 1249 */ 2092 pwq = get_work_pwq(work); 1250 pwq = get_work_pwq(work); 2093 if (pwq && pwq->pool == pool) { 1251 if (pwq && pwq->pool == pool) { 2094 unsigned long work_data = *wo << 2095 << 2096 debug_work_deactivate(work); 1252 debug_work_deactivate(work); 2097 1253 2098 /* 1254 /* 2099 * A cancelable inactive work !! 1255 * A delayed work item cannot be grabbed directly because 2100 * pwq->inactive_works since !! 
1256 * it might have linked NO_COLOR work items which, if left 2101 * canceled (see the comments !! 1257 * on the delayed_list, will confuse pwq->nr_active 2102 * !! 1258 * management later on and cause stall. Make sure the work 2103 * An inactive work item cann !! 1259 * item is activated before grabbing. 2104 * it might have linked barri << 2105 * on the inactive_works list << 2106 * management later on and ca << 2107 * barrier work items to the << 2108 * item. Also keep WORK_STRUC << 2109 * it doesn't participate in << 2110 * pwq_dec_nr_in_flight(). << 2111 */ 1260 */ 2112 if (work_data & WORK_STRUCT_I !! 1261 if (*work_data_bits(work) & WORK_STRUCT_DELAYED) 2113 move_linked_works(wor !! 1262 pwq_activate_delayed_work(work); 2114 1263 2115 list_del_init(&work->entry); 1264 list_del_init(&work->entry); >> 1265 pwq_dec_nr_in_flight(pwq, get_work_color(work)); 2116 1266 2117 /* !! 1267 /* work->data points to pwq iff queued, point to pool */ 2118 * work->data points to pwq i !! 1268 set_work_pool_and_keep_pending(work, pool->id); 2119 * this destroys work->data n << 2120 */ << 2121 set_work_pool_and_keep_pendin << 2122 << 2123 << 2124 /* must be the last step, see << 2125 pwq_dec_nr_in_flight(pwq, wor << 2126 1269 2127 raw_spin_unlock(&pool->lock); !! 1270 spin_unlock(&pool->lock); 2128 rcu_read_unlock(); << 2129 return 1; 1271 return 1; 2130 } 1272 } 2131 raw_spin_unlock(&pool->lock); !! 1273 spin_unlock(&pool->lock); 2132 fail: 1274 fail: 2133 rcu_read_unlock(); !! 1275 local_irq_restore(*flags); 2134 local_irq_restore(*irq_flags); !! 1276 if (work_is_canceling(work)) >> 1277 return -ENOENT; >> 1278 cpu_relax(); 2135 return -EAGAIN; 1279 return -EAGAIN; 2136 } 1280 } 2137 1281 2138 /** 1282 /** 2139 * work_grab_pending - steal work item from w << 2140 * @work: work item to steal << 2141 * @cflags: %WORK_CANCEL_ flags << 2142 * @irq_flags: place to store IRQ state << 2143 * << 2144 * Grab PENDING bit of @work. @work can be in << 2145 * or on worklist. << 2146 * << 2147 * Can be called from any context. IRQ is dis << 2148 * stored in *@irq_flags. The caller is respo << 2149 * local_irq_restore(). << 2150 * << 2151 * Returns %true if @work was pending. %false << 2152 */ << 2153 static bool work_grab_pending(struct work_str << 2154 unsigned long * << 2155 { << 2156 int ret; << 2157 << 2158 while (true) { << 2159 ret = try_to_grab_pending(wor << 2160 if (ret >= 0) << 2161 return ret; << 2162 cpu_relax(); << 2163 } << 2164 } << 2165 << 2166 /** << 2167 * insert_work - insert a work into a pool 1283 * insert_work - insert a work into a pool 2168 * @pwq: pwq @work belongs to 1284 * @pwq: pwq @work belongs to 2169 * @work: work to insert 1285 * @work: work to insert 2170 * @head: insertion point 1286 * @head: insertion point 2171 * @extra_flags: extra WORK_STRUCT_* flags to 1287 * @extra_flags: extra WORK_STRUCT_* flags to set 2172 * 1288 * 2173 * Insert @work which belongs to @pwq after @ 1289 * Insert @work which belongs to @pwq after @head. @extra_flags is or'd to 2174 * work_struct flags. 1290 * work_struct flags. 2175 * 1291 * 2176 * CONTEXT: 1292 * CONTEXT: 2177 * raw_spin_lock_irq(pool->lock). !! 1293 * spin_lock_irq(pool->lock). 2178 */ 1294 */ 2179 static void insert_work(struct pool_workqueue 1295 static void insert_work(struct pool_workqueue *pwq, struct work_struct *work, 2180 struct list_head *hea 1296 struct list_head *head, unsigned int extra_flags) 2181 { 1297 { 2182 debug_work_activate(work); !! 
1298 struct worker_pool *pool = pwq->pool; 2183 << 2184 /* record the work call stack in orde << 2185 kasan_record_aux_stack_noalloc(work); << 2186 1299 2187 /* we own @work, set data and link */ 1300 /* we own @work, set data and link */ 2188 set_work_pwq(work, pwq, extra_flags); 1301 set_work_pwq(work, pwq, extra_flags); 2189 list_add_tail(&work->entry, head); 1302 list_add_tail(&work->entry, head); 2190 get_pwq(pwq); 1303 get_pwq(pwq); >> 1304 >> 1305 /* >> 1306 * Ensure either wq_worker_sleeping() sees the above >> 1307 * list_add_tail() or we see zero nr_running to avoid workers lying >> 1308 * around lazily while there are works to be processed. >> 1309 */ >> 1310 smp_mb(); >> 1311 >> 1312 if (__need_more_worker(pool)) >> 1313 wake_up_worker(pool); 2191 } 1314 } 2192 1315 2193 /* 1316 /* 2194 * Test whether @work is being queued from an 1317 * Test whether @work is being queued from another work executing on the 2195 * same workqueue. 1318 * same workqueue. 2196 */ 1319 */ 2197 static bool is_chained_work(struct workqueue_ 1320 static bool is_chained_work(struct workqueue_struct *wq) 2198 { 1321 { 2199 struct worker *worker; 1322 struct worker *worker; 2200 1323 2201 worker = current_wq_worker(); 1324 worker = current_wq_worker(); 2202 /* 1325 /* 2203 * Return %true iff I'm a worker exec !! 1326 * Return %true iff I'm a worker execuing a work item on @wq. If 2204 * I'm @worker, it's safe to derefere 1327 * I'm @worker, it's safe to dereference it without locking. 2205 */ 1328 */ 2206 return worker && worker->current_pwq- 1329 return worker && worker->current_pwq->wq == wq; 2207 } 1330 } 2208 1331 2209 /* 1332 /* 2210 * When queueing an unbound work item to a wq 1333 * When queueing an unbound work item to a wq, prefer local CPU if allowed 2211 * by wq_unbound_cpumask. Otherwise, round r 1334 * by wq_unbound_cpumask. Otherwise, round robin among the allowed ones to 2212 * avoid perturbing sensitive tasks. 1335 * avoid perturbing sensitive tasks. 2213 */ 1336 */ 2214 static int wq_select_unbound_cpu(int cpu) 1337 static int wq_select_unbound_cpu(int cpu) 2215 { 1338 { >> 1339 static bool printed_dbg_warning; 2216 int new_cpu; 1340 int new_cpu; 2217 1341 2218 if (likely(!wq_debug_force_rr_cpu)) { 1342 if (likely(!wq_debug_force_rr_cpu)) { 2219 if (cpumask_test_cpu(cpu, wq_ 1343 if (cpumask_test_cpu(cpu, wq_unbound_cpumask)) 2220 return cpu; 1344 return cpu; 2221 } else { !! 1345 } else if (!printed_dbg_warning) { 2222 pr_warn_once("workqueue: roun !! 
1346 pr_warn("workqueue: round-robin CPU selection forced, expect performance impact\n"); >> 1347 printed_dbg_warning = true; 2223 } 1348 } 2224 1349 >> 1350 if (cpumask_empty(wq_unbound_cpumask)) >> 1351 return cpu; >> 1352 2225 new_cpu = __this_cpu_read(wq_rr_cpu_l 1353 new_cpu = __this_cpu_read(wq_rr_cpu_last); 2226 new_cpu = cpumask_next_and(new_cpu, w 1354 new_cpu = cpumask_next_and(new_cpu, wq_unbound_cpumask, cpu_online_mask); 2227 if (unlikely(new_cpu >= nr_cpu_ids)) 1355 if (unlikely(new_cpu >= nr_cpu_ids)) { 2228 new_cpu = cpumask_first_and(w 1356 new_cpu = cpumask_first_and(wq_unbound_cpumask, cpu_online_mask); 2229 if (unlikely(new_cpu >= nr_cp 1357 if (unlikely(new_cpu >= nr_cpu_ids)) 2230 return cpu; 1358 return cpu; 2231 } 1359 } 2232 __this_cpu_write(wq_rr_cpu_last, new_ 1360 __this_cpu_write(wq_rr_cpu_last, new_cpu); 2233 1361 2234 return new_cpu; 1362 return new_cpu; 2235 } 1363 } 2236 1364 2237 static void __queue_work(int cpu, struct work 1365 static void __queue_work(int cpu, struct workqueue_struct *wq, 2238 struct work_struct * 1366 struct work_struct *work) 2239 { 1367 { 2240 struct pool_workqueue *pwq; 1368 struct pool_workqueue *pwq; 2241 struct worker_pool *last_pool, *pool; !! 1369 struct worker_pool *last_pool; >> 1370 struct list_head *worklist; 2242 unsigned int work_flags; 1371 unsigned int work_flags; 2243 unsigned int req_cpu = cpu; 1372 unsigned int req_cpu = cpu; 2244 1373 2245 /* 1374 /* 2246 * While a work item is PENDING && of 1375 * While a work item is PENDING && off queue, a task trying to 2247 * steal the PENDING will busy-loop w 1376 * steal the PENDING will busy-loop waiting for it to either get 2248 * queued or lose PENDING. Grabbing 1377 * queued or lose PENDING. Grabbing PENDING and queueing should 2249 * happen with IRQ disabled. 1378 * happen with IRQ disabled. 2250 */ 1379 */ 2251 lockdep_assert_irqs_disabled(); 1380 lockdep_assert_irqs_disabled(); 2252 1381 2253 /* !! 1382 debug_work_activate(work); 2254 * For a draining wq, only works from !! 1383 2255 * allowed. The __WQ_DESTROYING helps !! 1384 /* if draining, only works from the same workqueue are allowed */ 2256 * queues a new work item to a wq aft !! 1385 if (unlikely(wq->flags & __WQ_DRAINING) && 2257 */ !! 1386 WARN_ON_ONCE(!is_chained_work(wq))) 2258 if (unlikely(wq->flags & (__WQ_DESTRO << 2259 WARN_ON_ONCE(!is_chained << 2260 return; 1387 return; 2261 rcu_read_lock(); << 2262 retry: 1388 retry: 2263 /* pwq which will be used unless @wor !! 1389 if (req_cpu == WORK_CPU_UNBOUND) 2264 if (req_cpu == WORK_CPU_UNBOUND) { !! 1390 cpu = wq_select_unbound_cpu(raw_smp_processor_id()); 2265 if (wq->flags & WQ_UNBOUND) << 2266 cpu = wq_select_unbou << 2267 else << 2268 cpu = raw_smp_process << 2269 } << 2270 1391 2271 pwq = rcu_dereference(*per_cpu_ptr(wq !! 1392 /* pwq which will be used unless @work is executing elsewhere */ 2272 pool = pwq->pool; !! 1393 if (!(wq->flags & WQ_UNBOUND)) >> 1394 pwq = per_cpu_ptr(wq->cpu_pwqs, cpu); >> 1395 else >> 1396 pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu)); 2273 1397 2274 /* 1398 /* 2275 * If @work was previously on a diffe 1399 * If @work was previously on a different pool, it might still be 2276 * running there, in which case the w 1400 * running there, in which case the work needs to be queued on that 2277 * pool to guarantee non-reentrancy. 1401 * pool to guarantee non-reentrancy. 2278 * << 2279 * For ordered workqueue, work items << 2280 * for accurate order management. Gu << 2281 * non-reentrancy. 
See the comments << 2282 */ 1402 */ 2283 last_pool = get_work_pool(work); 1403 last_pool = get_work_pool(work); 2284 if (last_pool && last_pool != pool && !! 1404 if (last_pool && last_pool != pwq->pool) { 2285 struct worker *worker; 1405 struct worker *worker; 2286 1406 2287 raw_spin_lock(&last_pool->loc !! 1407 spin_lock(&last_pool->lock); 2288 1408 2289 worker = find_worker_executin 1409 worker = find_worker_executing_work(last_pool, work); 2290 1410 2291 if (worker && worker->current 1411 if (worker && worker->current_pwq->wq == wq) { 2292 pwq = worker->current 1412 pwq = worker->current_pwq; 2293 pool = pwq->pool; << 2294 WARN_ON_ONCE(pool != << 2295 } else { 1413 } else { 2296 /* meh... not running 1414 /* meh... not running there, queue here */ 2297 raw_spin_unlock(&last !! 1415 spin_unlock(&last_pool->lock); 2298 raw_spin_lock(&pool-> !! 1416 spin_lock(&pwq->pool->lock); 2299 } 1417 } 2300 } else { 1418 } else { 2301 raw_spin_lock(&pool->lock); !! 1419 spin_lock(&pwq->pool->lock); 2302 } 1420 } 2303 1421 2304 /* 1422 /* 2305 * pwq is determined and locked. For !! 1423 * pwq is determined and locked. For unbound pools, we could have 2306 * with pwq release and it could alre !! 1424 * raced with pwq release and it could already be dead. If its 2307 * repeat pwq selection. Note that un !! 1425 * refcnt is zero, repeat pwq selection. Note that pwqs never die 2308 * another pwq replacing it in cpu_pw !! 1426 * without another pwq replacing it in the numa_pwq_tbl or while 2309 * on it, so the retrying is guarante !! 1427 * work items are executing on it, so the retrying is guaranteed to >> 1428 * make forward-progress. 2310 */ 1429 */ 2311 if (unlikely(!pwq->refcnt)) { 1430 if (unlikely(!pwq->refcnt)) { 2312 if (wq->flags & WQ_UNBOUND) { 1431 if (wq->flags & WQ_UNBOUND) { 2313 raw_spin_unlock(&pool !! 1432 spin_unlock(&pwq->pool->lock); 2314 cpu_relax(); 1433 cpu_relax(); 2315 goto retry; 1434 goto retry; 2316 } 1435 } 2317 /* oops */ 1436 /* oops */ 2318 WARN_ONCE(true, "workqueue: p 1437 WARN_ONCE(true, "workqueue: per-cpu pwq for %s on cpu%d has 0 refcnt", 2319 wq->name, cpu); 1438 wq->name, cpu); 2320 } 1439 } 2321 1440 2322 /* pwq determined, queue */ 1441 /* pwq determined, queue */ 2323 trace_workqueue_queue_work(req_cpu, p 1442 trace_workqueue_queue_work(req_cpu, pwq, work); 2324 1443 2325 if (WARN_ON(!list_empty(&work->entry) !! 1444 if (WARN_ON(!list_empty(&work->entry))) { 2326 goto out; !! 1445 spin_unlock(&pwq->pool->lock); >> 1446 return; >> 1447 } 2327 1448 2328 pwq->nr_in_flight[pwq->work_color]++; 1449 pwq->nr_in_flight[pwq->work_color]++; 2329 work_flags = work_color_to_flags(pwq- 1450 work_flags = work_color_to_flags(pwq->work_color); 2330 1451 2331 /* !! 1452 if (likely(pwq->nr_active < pwq->max_active)) { 2332 * Limit the number of concurrently a << 2333 * @work must also queue behind exist << 2334 * ordering when max_active changes. << 2335 */ << 2336 if (list_empty(&pwq->inactive_works) << 2337 if (list_empty(&pool->worklis << 2338 pool->watchdog_ts = j << 2339 << 2340 trace_workqueue_activate_work 1453 trace_workqueue_activate_work(work); 2341 insert_work(pwq, work, &pool- !! 1454 pwq->nr_active++; 2342 kick_pool(pool); !! 1455 worklist = &pwq->pool->worklist; >> 1456 if (list_empty(worklist)) >> 1457 pwq->pool->watchdog_ts = jiffies; 2343 } else { 1458 } else { 2344 work_flags |= WORK_STRUCT_INA !! 1459 work_flags |= WORK_STRUCT_DELAYED; 2345 insert_work(pwq, work, &pwq-> !! 1460 worklist = &pwq->delayed_works; 2346 } 1461 } 2347 1462 2348 out: !! 
1463 insert_work(pwq, work, worklist, work_flags); 2349 raw_spin_unlock(&pool->lock); << 2350 rcu_read_unlock(); << 2351 } << 2352 << 2353 static bool clear_pending_if_disabled(struct << 2354 { << 2355 unsigned long data = *work_data_bits( << 2356 struct work_offq_data offqd; << 2357 << 2358 if (likely((data & WORK_STRUCT_PWQ) | << 2359 !(data & WORK_OFFQ_DISABLE << 2360 return false; << 2361 1464 2362 work_offqd_unpack(&offqd, data); !! 1465 spin_unlock(&pwq->pool->lock); 2363 set_work_pool_and_clear_pending(work, << 2364 work_ << 2365 return true; << 2366 } 1466 } 2367 1467 2368 /** 1468 /** 2369 * queue_work_on - queue work on specific cpu 1469 * queue_work_on - queue work on specific cpu 2370 * @cpu: CPU number to execute work on 1470 * @cpu: CPU number to execute work on 2371 * @wq: workqueue to use 1471 * @wq: workqueue to use 2372 * @work: work to queue 1472 * @work: work to queue 2373 * 1473 * 2374 * We queue the work to a specific CPU, the c 1474 * We queue the work to a specific CPU, the caller must ensure it 2375 * can't go away. Callers that fail to ensur !! 1475 * can't go away. 2376 * CPU cannot go away will execute on a rando << 2377 * But note well that callers specifying a CP << 2378 * online will get a splat. << 2379 * 1476 * 2380 * Return: %false if @work was already on a q 1477 * Return: %false if @work was already on a queue, %true otherwise. 2381 */ 1478 */ 2382 bool queue_work_on(int cpu, struct workqueue_ 1479 bool queue_work_on(int cpu, struct workqueue_struct *wq, 2383 struct work_struct *work) 1480 struct work_struct *work) 2384 { 1481 { 2385 bool ret = false; 1482 bool ret = false; 2386 unsigned long irq_flags; !! 1483 unsigned long flags; 2387 1484 2388 local_irq_save(irq_flags); !! 1485 local_irq_save(flags); 2389 1486 2390 if (!test_and_set_bit(WORK_STRUCT_PEN !! 1487 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) { 2391 !clear_pending_if_disabled(work)) << 2392 __queue_work(cpu, wq, work); 1488 __queue_work(cpu, wq, work); 2393 ret = true; 1489 ret = true; 2394 } 1490 } 2395 1491 2396 local_irq_restore(irq_flags); !! 1492 local_irq_restore(flags); 2397 return ret; 1493 return ret; 2398 } 1494 } 2399 EXPORT_SYMBOL(queue_work_on); 1495 EXPORT_SYMBOL(queue_work_on); 2400 1496 2401 /** << 2402 * select_numa_node_cpu - Select a CPU based << 2403 * @node: NUMA node ID that we want to select << 2404 * << 2405 * This function will attempt to find a "rand << 2406 * node. If there are no CPUs available on th << 2407 * WORK_CPU_UNBOUND indicating that we should << 2408 * available CPU if we need to schedule this << 2409 */ << 2410 static int select_numa_node_cpu(int node) << 2411 { << 2412 int cpu; << 2413 << 2414 /* Delay binding to CPU if node is no << 2415 if (node < 0 || node >= MAX_NUMNODES << 2416 return WORK_CPU_UNBOUND; << 2417 << 2418 /* Use local node/cpu if we are alrea << 2419 cpu = raw_smp_processor_id(); << 2420 if (node == cpu_to_node(cpu)) << 2421 return cpu; << 2422 << 2423 /* Use "random" otherwise know as "fi << 2424 cpu = cpumask_any_and(cpumask_of_node << 2425 << 2426 /* If CPU is valid return that, other << 2427 return cpu < nr_cpu_ids ? cpu : WORK_ << 2428 } << 2429 << 2430 /** << 2431 * queue_work_node - queue work on a "random" << 2432 * @node: NUMA node that we are targeting the << 2433 * @wq: workqueue to use << 2434 * @work: work to queue << 2435 * << 2436 * We queue the work to a "random" CPU within << 2437 * idea here is to provide a way to somehow a << 2438 * NUMA node. 
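 *
 * Illustrative usage (hypothetical caller and work item, not from this
 * file), queueing near a device's memory when possible:
 *
 *	queue_work_node(dev_to_node(dev), system_unbound_wq, &my_work);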
<< 2439 * << 2440 * This function will only make a best effort << 2441 * the right NUMA node. If no node is request << 2442 * offline then we just fall back to standard << 2443 * << 2444 * Currently the "random" CPU ends up being t << 2445 * intersection of cpu_online_mask and the cp << 2446 * are running on the node. In that case we j << 2447 * << 2448 * Return: %false if @work was already on a q << 2449 */ << 2450 bool queue_work_node(int node, struct workque << 2451 struct work_struct *work << 2452 { << 2453 unsigned long irq_flags; << 2454 bool ret = false; << 2455 << 2456 /* << 2457 * This current implementation is spe << 2458 * Specifically we only return the fi << 2459 * node instead of cycling through in << 2460 * << 2461 * If this is used with a per-cpu wor << 2462 * workqueue_select_cpu_near would ne << 2463 * some round robin type logic. << 2464 */ << 2465 WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND << 2466 << 2467 local_irq_save(irq_flags); << 2468 << 2469 if (!test_and_set_bit(WORK_STRUCT_PEN << 2470 !clear_pending_if_disabled(work)) << 2471 int cpu = select_numa_node_cp << 2472 << 2473 __queue_work(cpu, wq, work); << 2474 ret = true; << 2475 } << 2476 << 2477 local_irq_restore(irq_flags); << 2478 return ret; << 2479 } << 2480 EXPORT_SYMBOL_GPL(queue_work_node); << 2481 << 2482 void delayed_work_timer_fn(struct timer_list 1497 void delayed_work_timer_fn(struct timer_list *t) 2483 { 1498 { 2484 struct delayed_work *dwork = from_tim 1499 struct delayed_work *dwork = from_timer(dwork, t, timer); 2485 1500 2486 /* should have been called from irqsa 1501 /* should have been called from irqsafe timer with irq already off */ 2487 __queue_work(dwork->cpu, dwork->wq, & 1502 __queue_work(dwork->cpu, dwork->wq, &dwork->work); 2488 } 1503 } 2489 EXPORT_SYMBOL(delayed_work_timer_fn); 1504 EXPORT_SYMBOL(delayed_work_timer_fn); 2490 1505 2491 static void __queue_delayed_work(int cpu, str 1506 static void __queue_delayed_work(int cpu, struct workqueue_struct *wq, 2492 struct delaye 1507 struct delayed_work *dwork, unsigned long delay) 2493 { 1508 { 2494 struct timer_list *timer = &dwork->ti 1509 struct timer_list *timer = &dwork->timer; 2495 struct work_struct *work = &dwork->wo 1510 struct work_struct *work = &dwork->work; 2496 1511 2497 WARN_ON_ONCE(!wq); 1512 WARN_ON_ONCE(!wq); 2498 WARN_ON_ONCE(timer->function != delay 1513 WARN_ON_ONCE(timer->function != delayed_work_timer_fn); 2499 WARN_ON_ONCE(timer_pending(timer)); 1514 WARN_ON_ONCE(timer_pending(timer)); 2500 WARN_ON_ONCE(!list_empty(&work->entry 1515 WARN_ON_ONCE(!list_empty(&work->entry)); 2501 1516 2502 /* 1517 /* 2503 * If @delay is 0, queue @dwork->work 1518 * If @delay is 0, queue @dwork->work immediately. This is for 2504 * both optimization and correctness. 1519 * both optimization and correctness. The earliest @timer can 2505 * expire is on the closest next tick 1520 * expire is on the closest next tick and delayed_work users depend 2506 * on that there's no such delay when 1521 * on that there's no such delay when @delay is 0. 2507 */ 1522 */ 2508 if (!delay) { 1523 if (!delay) { 2509 __queue_work(cpu, wq, &dwork- 1524 __queue_work(cpu, wq, &dwork->work); 2510 return; 1525 return; 2511 } 1526 } 2512 1527 2513 dwork->wq = wq; 1528 dwork->wq = wq; 2514 dwork->cpu = cpu; 1529 dwork->cpu = cpu; 2515 timer->expires = jiffies + delay; 1530 timer->expires = jiffies + delay; 2516 1531 2517 if (housekeeping_enabled(HK_TYPE_TIME !! 
1532 if (unlikely(cpu != WORK_CPU_UNBOUND)) 2518 /* If the current cpu is a ho << 2519 cpu = smp_processor_id(); << 2520 if (!housekeeping_test_cpu(cp << 2521 cpu = housekeeping_an << 2522 add_timer_on(timer, cpu); 1533 add_timer_on(timer, cpu); 2523 } else { !! 1534 else 2524 if (likely(cpu == WORK_CPU_UN !! 1535 add_timer(timer); 2525 add_timer_global(time << 2526 else << 2527 add_timer_on(timer, c << 2528 } << 2529 } 1536 } 2530 1537 2531 /** 1538 /** 2532 * queue_delayed_work_on - queue work on spec 1539 * queue_delayed_work_on - queue work on specific CPU after delay 2533 * @cpu: CPU number to execute work on 1540 * @cpu: CPU number to execute work on 2534 * @wq: workqueue to use 1541 * @wq: workqueue to use 2535 * @dwork: work to queue 1542 * @dwork: work to queue 2536 * @delay: number of jiffies to wait before q 1543 * @delay: number of jiffies to wait before queueing 2537 * 1544 * 2538 * Return: %false if @work was already on a q 1545 * Return: %false if @work was already on a queue, %true otherwise. If 2539 * @delay is zero and @dwork is idle, it will 1546 * @delay is zero and @dwork is idle, it will be scheduled for immediate 2540 * execution. 1547 * execution. 2541 */ 1548 */ 2542 bool queue_delayed_work_on(int cpu, struct wo 1549 bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq, 2543 struct delayed_wor 1550 struct delayed_work *dwork, unsigned long delay) 2544 { 1551 { 2545 struct work_struct *work = &dwork->wo 1552 struct work_struct *work = &dwork->work; 2546 bool ret = false; 1553 bool ret = false; 2547 unsigned long irq_flags; !! 1554 unsigned long flags; 2548 1555 2549 /* read the comment in __queue_work() 1556 /* read the comment in __queue_work() */ 2550 local_irq_save(irq_flags); !! 1557 local_irq_save(flags); 2551 1558 2552 if (!test_and_set_bit(WORK_STRUCT_PEN !! 1559 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) { 2553 !clear_pending_if_disabled(work)) << 2554 __queue_delayed_work(cpu, wq, 1560 __queue_delayed_work(cpu, wq, dwork, delay); 2555 ret = true; 1561 ret = true; 2556 } 1562 } 2557 1563 2558 local_irq_restore(irq_flags); !! 1564 local_irq_restore(flags); 2559 return ret; 1565 return ret; 2560 } 1566 } 2561 EXPORT_SYMBOL(queue_delayed_work_on); 1567 EXPORT_SYMBOL(queue_delayed_work_on); 2562 1568 2563 /** 1569 /** 2564 * mod_delayed_work_on - modify delay of or q 1570 * mod_delayed_work_on - modify delay of or queue a delayed work on specific CPU 2565 * @cpu: CPU number to execute work on 1571 * @cpu: CPU number to execute work on 2566 * @wq: workqueue to use 1572 * @wq: workqueue to use 2567 * @dwork: work to queue 1573 * @dwork: work to queue 2568 * @delay: number of jiffies to wait before q 1574 * @delay: number of jiffies to wait before queueing 2569 * 1575 * 2570 * If @dwork is idle, equivalent to queue_del 1576 * If @dwork is idle, equivalent to queue_delayed_work_on(); otherwise, 2571 * modify @dwork's timer so that it expires a 1577 * modify @dwork's timer so that it expires after @delay. If @delay is 2572 * zero, @work is guaranteed to be scheduled 1578 * zero, @work is guaranteed to be scheduled immediately regardless of its 2573 * current state. 1579 * current state. 2574 * 1580 * 2575 * Return: %false if @dwork was idle and queu 1581 * Return: %false if @dwork was idle and queued, %true if @dwork was 2576 * pending and its timer was modified. 1582 * pending and its timer was modified. 
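[Editor's sketch of the delayed-work interface documented above, and of mod_delayed_work_on() whose description continues just below: the delay is in jiffies, a zero delay queues immediately, and a pending item can have its timer pushed out rather than being requeued. Names are hypothetical; this uses the WORK_CPU_UNBOUND convenience wrappers and assumes dev->dwork was initialized once with INIT_DELAYED_WORK(&dev->dwork, my_timeout_fn).]

	struct my_dev {
		struct delayed_work dwork;
	};

	static void my_timeout_fn(struct work_struct *work)
	{
		struct my_dev *dev = container_of(work, struct my_dev, dwork.work);

		/* runs in process context roughly 100ms after the last (re)arm */
		pr_debug("timeout for dev %p\n", dev);
	}

	static void my_arm(struct my_dev *dev)
	{
		/* no-op (returns %false) if the work is already pending */
		queue_delayed_work(system_wq, &dev->dwork, msecs_to_jiffies(100));
	}

	static void my_rearm(struct my_dev *dev)
	{
		/* queues if idle, otherwise only moves the existing timer out */
		mod_delayed_work(system_wq, &dev->dwork, msecs_to_jiffies(100));
	}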
2577 * 1583 * 2578 * This function is safe to call from any con 1584 * This function is safe to call from any context including IRQ handler. 2579 * See try_to_grab_pending() for details. 1585 * See try_to_grab_pending() for details. 2580 */ 1586 */ 2581 bool mod_delayed_work_on(int cpu, struct work 1587 bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq, 2582 struct delayed_work 1588 struct delayed_work *dwork, unsigned long delay) 2583 { 1589 { 2584 unsigned long irq_flags; !! 1590 unsigned long flags; 2585 bool ret; !! 1591 int ret; 2586 1592 2587 ret = work_grab_pending(&dwork->work, !! 1593 do { >> 1594 ret = try_to_grab_pending(&dwork->work, true, &flags); >> 1595 } while (unlikely(ret == -EAGAIN)); 2588 1596 2589 if (!clear_pending_if_disabled(&dwork !! 1597 if (likely(ret >= 0)) { 2590 __queue_delayed_work(cpu, wq, 1598 __queue_delayed_work(cpu, wq, dwork, delay); >> 1599 local_irq_restore(flags); >> 1600 } 2591 1601 2592 local_irq_restore(irq_flags); !! 1602 /* -ENOENT from try_to_grab_pending() becomes %true */ 2593 return ret; 1603 return ret; 2594 } 1604 } 2595 EXPORT_SYMBOL_GPL(mod_delayed_work_on); 1605 EXPORT_SYMBOL_GPL(mod_delayed_work_on); 2596 1606 2597 static void rcu_work_rcufn(struct rcu_head *r !! 1607 /** >> 1608 * worker_enter_idle - enter idle state >> 1609 * @worker: worker which is entering idle state >> 1610 * >> 1611 * @worker is entering idle state. Update stats and idle timer if >> 1612 * necessary. >> 1613 * >> 1614 * LOCKING: >> 1615 * spin_lock_irq(pool->lock). >> 1616 */ >> 1617 static void worker_enter_idle(struct worker *worker) 2598 { 1618 { 2599 struct rcu_work *rwork = container_of !! 1619 struct worker_pool *pool = worker->pool; 2600 1620 2601 /* read the comment in __queue_work() !! 1621 if (WARN_ON_ONCE(worker->flags & WORKER_IDLE) || 2602 local_irq_disable(); !! 1622 WARN_ON_ONCE(!list_empty(&worker->entry) && 2603 __queue_work(WORK_CPU_UNBOUND, rwork- !! 1623 (worker->hentry.next || worker->hentry.pprev))) 2604 local_irq_enable(); !! 1624 return; >> 1625 >> 1626 /* can't use worker_set_flags(), also called from create_worker() */ >> 1627 worker->flags |= WORKER_IDLE; >> 1628 pool->nr_idle++; >> 1629 worker->last_active = jiffies; >> 1630 >> 1631 /* idle_list is LIFO */ >> 1632 list_add(&worker->entry, &pool->idle_list); >> 1633 >> 1634 if (too_many_workers(pool) && !timer_pending(&pool->idle_timer)) >> 1635 mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT); >> 1636 >> 1637 /* >> 1638 * Sanity check nr_running. Because unbind_workers() releases >> 1639 * pool->lock between setting %WORKER_UNBOUND and zapping >> 1640 * nr_running, the warning may trigger spuriously. Check iff >> 1641 * unbind is not in progress. >> 1642 */ >> 1643 WARN_ON_ONCE(!(pool->flags & POOL_DISASSOCIATED) && >> 1644 pool->nr_workers == pool->nr_idle && >> 1645 atomic_read(&pool->nr_running)); 2605 } 1646 } 2606 1647 2607 /** 1648 /** 2608 * queue_rcu_work - queue work after a RCU gr !! 1649 * worker_leave_idle - leave idle state 2609 * @wq: workqueue to use !! 1650 * @worker: worker which is leaving idle state 2610 * @rwork: work to queue !! 1651 * >> 1652 * @worker is leaving idle state. Update stats. 2611 * 1653 * 2612 * Return: %false if @rwork was already pendi !! 1654 * LOCKING: 2613 * that a full RCU grace period is guaranteed !! 1655 * spin_lock_irq(pool->lock). 2614 * While @rwork is guaranteed to be executed << 2615 * execution may happen before a full RCU gra << 2616 */ 1656 */ 2617 bool queue_rcu_work(struct workqueue_struct * !! 
1657 static void worker_leave_idle(struct worker *worker) 2618 { 1658 { 2619 struct work_struct *work = &rwork->wo !! 1659 struct worker_pool *pool = worker->pool; 2620 << 2621 /* << 2622 * rcu_work can't be canceled or disa << 2623 * inside @rwork and disabled the inn << 2624 */ << 2625 if (!test_and_set_bit(WORK_STRUCT_PEN << 2626 !WARN_ON_ONCE(clear_pending_if_di << 2627 rwork->wq = wq; << 2628 call_rcu_hurry(&rwork->rcu, r << 2629 return true; << 2630 } << 2631 1660 2632 return false; !! 1661 if (WARN_ON_ONCE(!(worker->flags & WORKER_IDLE))) >> 1662 return; >> 1663 worker_clr_flags(worker, WORKER_IDLE); >> 1664 pool->nr_idle--; >> 1665 list_del_init(&worker->entry); 2633 } 1666 } 2634 EXPORT_SYMBOL(queue_rcu_work); << 2635 1667 2636 static struct worker *alloc_worker(int node) 1668 static struct worker *alloc_worker(int node) 2637 { 1669 { 2638 struct worker *worker; 1670 struct worker *worker; 2639 1671 2640 worker = kzalloc_node(sizeof(*worker) 1672 worker = kzalloc_node(sizeof(*worker), GFP_KERNEL, node); 2641 if (worker) { 1673 if (worker) { 2642 INIT_LIST_HEAD(&worker->entry 1674 INIT_LIST_HEAD(&worker->entry); 2643 INIT_LIST_HEAD(&worker->sched 1675 INIT_LIST_HEAD(&worker->scheduled); 2644 INIT_LIST_HEAD(&worker->node) 1676 INIT_LIST_HEAD(&worker->node); 2645 /* on creation a worker is in 1677 /* on creation a worker is in !idle && prep state */ 2646 worker->flags = WORKER_PREP; 1678 worker->flags = WORKER_PREP; 2647 } 1679 } 2648 return worker; 1680 return worker; 2649 } 1681 } 2650 1682 2651 static cpumask_t *pool_allowed_cpus(struct wo << 2652 { << 2653 if (pool->cpu < 0 && pool->attrs->aff << 2654 return pool->attrs->__pod_cpu << 2655 else << 2656 return pool->attrs->cpumask; << 2657 } << 2658 << 2659 /** 1683 /** 2660 * worker_attach_to_pool() - attach a worker 1684 * worker_attach_to_pool() - attach a worker to a pool 2661 * @worker: worker to be attached 1685 * @worker: worker to be attached 2662 * @pool: the target pool 1686 * @pool: the target pool 2663 * 1687 * 2664 * Attach @worker to @pool. Once attached, t 1688 * Attach @worker to @pool. Once attached, the %WORKER_UNBOUND flag and 2665 * cpu-binding of @worker are kept coordinate 1689 * cpu-binding of @worker are kept coordinated with the pool across 2666 * cpu-[un]hotplugs. 1690 * cpu-[un]hotplugs. 2667 */ 1691 */ 2668 static void worker_attach_to_pool(struct work 1692 static void worker_attach_to_pool(struct worker *worker, 2669 struct work !! 1693 struct worker_pool *pool) 2670 { 1694 { 2671 mutex_lock(&wq_pool_attach_mutex); !! 1695 mutex_lock(&pool->attach_mutex); 2672 1696 2673 /* 1697 /* 2674 * The wq_pool_attach_mutex ensures % !! 1698 * set_cpus_allowed_ptr() will fail if the cpumask doesn't have any 2675 * across this function. See the comm !! 1699 * online CPUs. It'll be re-applied when any of the CPUs come up. 2676 * details. BH workers are, while per << 2677 */ 1700 */ 2678 if (pool->flags & POOL_DISASSOCIATED) !! 1701 set_cpus_allowed_ptr(worker->task, pool->attrs->cpumask); 2679 worker->flags |= WORKER_UNBOU << 2680 } else { << 2681 WARN_ON_ONCE(pool->flags & PO << 2682 kthread_set_per_cpu(worker->t << 2683 } << 2684 1702 2685 if (worker->rescue_wq) !! 1703 /* 2686 set_cpus_allowed_ptr(worker-> !! 1704 * The pool->attach_mutex ensures %POOL_DISASSOCIATED remains >> 1705 * stable across this function. See the comments above the >> 1706 * flag definition for details. 
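[Editor's sketch for queue_rcu_work(), shown just above: it combines an RCU grace period with deferral to a workqueue, so a typical use is freeing an RCU-protected object from process context once readers are done. The names my_obj, my_free_fn and my_obj_unlink are hypothetical; INIT_RCU_WORK() and to_rcu_work() come from linux/workqueue.h.]

	struct my_obj {
		struct list_head node;		/* on an RCU-protected list */
		struct rcu_work rwork;
	};

	static void my_free_fn(struct work_struct *work)
	{
		struct my_obj *obj = container_of(to_rcu_work(work), struct my_obj, rwork);

		kfree(obj);	/* a full grace period has elapsed since queueing */
	}

	static void my_obj_unlink(struct my_obj *obj)
	{
		list_del_rcu(&obj->node);
		INIT_RCU_WORK(&obj->rwork, my_free_fn);
		queue_rcu_work(system_wq, &obj->rwork);
	}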
>> 1707 */ >> 1708 if (pool->flags & POOL_DISASSOCIATED) >> 1709 worker->flags |= WORKER_UNBOUND; 2687 1710 2688 list_add_tail(&worker->node, &pool->w 1711 list_add_tail(&worker->node, &pool->workers); 2689 worker->pool = pool; << 2690 << 2691 mutex_unlock(&wq_pool_attach_mutex); << 2692 } << 2693 << 2694 static void unbind_worker(struct worker *work << 2695 { << 2696 lockdep_assert_held(&wq_pool_attach_m << 2697 << 2698 kthread_set_per_cpu(worker->task, -1) << 2699 if (cpumask_intersects(wq_unbound_cpu << 2700 WARN_ON_ONCE(set_cpus_allowed << 2701 else << 2702 WARN_ON_ONCE(set_cpus_allowed << 2703 } << 2704 << 2705 1712 2706 static void detach_worker(struct worker *work !! 1713 mutex_unlock(&pool->attach_mutex); 2707 { << 2708 lockdep_assert_held(&wq_pool_attach_m << 2709 << 2710 unbind_worker(worker); << 2711 list_del(&worker->node); << 2712 } 1714 } 2713 1715 2714 /** 1716 /** 2715 * worker_detach_from_pool() - detach a worke 1717 * worker_detach_from_pool() - detach a worker from its pool 2716 * @worker: worker which is attached to its p 1718 * @worker: worker which is attached to its pool >> 1719 * @pool: the pool @worker is attached to 2717 * 1720 * 2718 * Undo the attaching which had been done in 1721 * Undo the attaching which had been done in worker_attach_to_pool(). The 2719 * caller worker shouldn't access to the pool 1722 * caller worker shouldn't access to the pool after detached except it has 2720 * other reference to the pool. 1723 * other reference to the pool. 2721 */ 1724 */ 2722 static void worker_detach_from_pool(struct wo !! 1725 static void worker_detach_from_pool(struct worker *worker, >> 1726 struct worker_pool *pool) 2723 { 1727 { 2724 struct worker_pool *pool = worker->po !! 1728 struct completion *detach_completion = NULL; 2725 1729 2726 /* there is one permanent BH worker p !! 1730 mutex_lock(&pool->attach_mutex); 2727 WARN_ON_ONCE(pool->flags & POOL_BH); !! 1731 list_del(&worker->node); 2728 !! 1732 if (list_empty(&pool->workers)) 2729 mutex_lock(&wq_pool_attach_mutex); !! 1733 detach_completion = pool->detach_completion; 2730 detach_worker(worker); !! 1734 mutex_unlock(&pool->attach_mutex); 2731 worker->pool = NULL; << 2732 mutex_unlock(&wq_pool_attach_mutex); << 2733 1735 2734 /* clear leftover flags without pool- 1736 /* clear leftover flags without pool->lock after it is detached */ 2735 worker->flags &= ~(WORKER_UNBOUND | W 1737 worker->flags &= ~(WORKER_UNBOUND | WORKER_REBOUND); 2736 } << 2737 1738 2738 static int format_worker_id(char *buf, size_t !! 1739 if (detach_completion) 2739 struct worker_poo !! 1740 complete(detach_completion); 2740 { << 2741 if (worker->rescue_wq) << 2742 return scnprintf(buf, size, " << 2743 worker->resc << 2744 << 2745 if (pool) { << 2746 if (pool->cpu >= 0) << 2747 return scnprintf(buf, << 2748 pool << 2749 pool << 2750 else << 2751 return scnprintf(buf, << 2752 pool << 2753 } else { << 2754 return scnprintf(buf, size, " << 2755 } << 2756 } 1741 } 2757 1742 2758 /** 1743 /** 2759 * create_worker - create a new workqueue wor 1744 * create_worker - create a new workqueue worker 2760 * @pool: pool the new worker will belong to 1745 * @pool: pool the new worker will belong to 2761 * 1746 * 2762 * Create and start a new worker which is att 1747 * Create and start a new worker which is attached to @pool. 2763 * 1748 * 2764 * CONTEXT: 1749 * CONTEXT: 2765 * Might sleep. Does GFP_KERNEL allocations. 1750 * Might sleep. Does GFP_KERNEL allocations. 
2766 * 1751 * 2767 * Return: 1752 * Return: 2768 * Pointer to the newly created worker. 1753 * Pointer to the newly created worker. 2769 */ 1754 */ 2770 static struct worker *create_worker(struct wo 1755 static struct worker *create_worker(struct worker_pool *pool) 2771 { 1756 { 2772 struct worker *worker; !! 1757 struct worker *worker = NULL; 2773 int id; !! 1758 int id = -1; >> 1759 char id_buf[16]; 2774 1760 2775 /* ID is needed to determine kthread 1761 /* ID is needed to determine kthread name */ 2776 id = ida_alloc(&pool->worker_ida, GFP !! 1762 id = ida_simple_get(&pool->worker_ida, 0, 0, GFP_KERNEL); 2777 if (id < 0) { !! 1763 if (id < 0) 2778 pr_err_once("workqueue: Faile !! 1764 goto fail; 2779 ERR_PTR(id)); << 2780 return NULL; << 2781 } << 2782 1765 2783 worker = alloc_worker(pool->node); 1766 worker = alloc_worker(pool->node); 2784 if (!worker) { !! 1767 if (!worker) 2785 pr_err_once("workqueue: Faile << 2786 goto fail; 1768 goto fail; 2787 } << 2788 1769 >> 1770 worker->pool = pool; 2789 worker->id = id; 1771 worker->id = id; 2790 1772 2791 if (!(pool->flags & POOL_BH)) { !! 1773 if (pool->cpu >= 0) 2792 char id_buf[WORKER_ID_LEN]; !! 1774 snprintf(id_buf, sizeof(id_buf), "%d:%d%s", pool->cpu, id, >> 1775 pool->attrs->nice < 0 ? "H" : ""); >> 1776 else >> 1777 snprintf(id_buf, sizeof(id_buf), "u%d:%d", pool->id, id); 2793 1778 2794 format_worker_id(id_buf, size !! 1779 worker->task = kthread_create_on_node(worker_thread, worker, pool->node, 2795 worker->task = kthread_create !! 1780 "kworker/%s", id_buf); 2796 !! 1781 if (IS_ERR(worker->task)) 2797 if (IS_ERR(worker->task)) { !! 1782 goto fail; 2798 if (PTR_ERR(worker->t << 2799 pr_err("workq << 2800 id_buf << 2801 } else { << 2802 pr_err_once(" << 2803 w << 2804 } << 2805 goto fail; << 2806 } << 2807 1783 2808 set_user_nice(worker->task, p !! 1784 set_user_nice(worker->task, pool->attrs->nice); 2809 kthread_bind_mask(worker->tas !! 1785 kthread_bind_mask(worker->task, pool->attrs->cpumask); 2810 } << 2811 1786 2812 /* successful, attach the worker to t 1787 /* successful, attach the worker to the pool */ 2813 worker_attach_to_pool(worker, pool); 1788 worker_attach_to_pool(worker, pool); 2814 1789 2815 /* start the newly created worker */ 1790 /* start the newly created worker */ 2816 raw_spin_lock_irq(&pool->lock); !! 1791 spin_lock_irq(&pool->lock); 2817 << 2818 worker->pool->nr_workers++; 1792 worker->pool->nr_workers++; 2819 worker_enter_idle(worker); 1793 worker_enter_idle(worker); 2820 !! 1794 wake_up_process(worker->task); 2821 /* !! 1795 spin_unlock_irq(&pool->lock); 2822 * @worker is waiting on a completion << 2823 * check if not woken up soon. As kic << 2824 * wake it up explicitly. << 2825 */ << 2826 if (worker->task) << 2827 wake_up_process(worker->task) << 2828 << 2829 raw_spin_unlock_irq(&pool->lock); << 2830 1796 2831 return worker; 1797 return worker; 2832 1798 2833 fail: 1799 fail: 2834 ida_free(&pool->worker_ida, id); !! 
1800 if (id >= 0) >> 1801 ida_simple_remove(&pool->worker_ida, id); 2835 kfree(worker); 1802 kfree(worker); 2836 return NULL; 1803 return NULL; 2837 } 1804 } 2838 1805 2839 static void detach_dying_workers(struct list_ << 2840 { << 2841 struct worker *worker; << 2842 << 2843 list_for_each_entry(worker, cull_list << 2844 detach_worker(worker); << 2845 } << 2846 << 2847 static void reap_dying_workers(struct list_he << 2848 { << 2849 struct worker *worker, *tmp; << 2850 << 2851 list_for_each_entry_safe(worker, tmp, << 2852 list_del_init(&worker->entry) << 2853 kthread_stop_put(worker->task << 2854 kfree(worker); << 2855 } << 2856 } << 2857 << 2858 /** 1806 /** 2859 * set_worker_dying - Tag a worker for destru !! 1807 * destroy_worker - destroy a workqueue worker 2860 * @worker: worker to be destroyed 1808 * @worker: worker to be destroyed 2861 * @list: transfer worker away from its pool- << 2862 * 1809 * 2863 * Tag @worker for destruction and adjust @po !! 1810 * Destroy @worker and adjust @pool stats accordingly. The worker should 2864 * should be idle. !! 1811 * be idle. 2865 * 1812 * 2866 * CONTEXT: 1813 * CONTEXT: 2867 * raw_spin_lock_irq(pool->lock). !! 1814 * spin_lock_irq(pool->lock). 2868 */ 1815 */ 2869 static void set_worker_dying(struct worker *w !! 1816 static void destroy_worker(struct worker *worker) 2870 { 1817 { 2871 struct worker_pool *pool = worker->po 1818 struct worker_pool *pool = worker->pool; 2872 1819 2873 lockdep_assert_held(&pool->lock); 1820 lockdep_assert_held(&pool->lock); 2874 lockdep_assert_held(&wq_pool_attach_m << 2875 1821 2876 /* sanity check frenzy */ 1822 /* sanity check frenzy */ 2877 if (WARN_ON(worker->current_work) || 1823 if (WARN_ON(worker->current_work) || 2878 WARN_ON(!list_empty(&worker->sche 1824 WARN_ON(!list_empty(&worker->scheduled)) || 2879 WARN_ON(!(worker->flags & WORKER_ 1825 WARN_ON(!(worker->flags & WORKER_IDLE))) 2880 return; 1826 return; 2881 1827 2882 pool->nr_workers--; 1828 pool->nr_workers--; 2883 pool->nr_idle--; 1829 pool->nr_idle--; 2884 1830 >> 1831 list_del_init(&worker->entry); 2885 worker->flags |= WORKER_DIE; 1832 worker->flags |= WORKER_DIE; 2886 !! 1833 wake_up_process(worker->task); 2887 list_move(&worker->entry, list); << 2888 << 2889 /* get an extra task struct reference << 2890 get_task_struct(worker->task); << 2891 } 1834 } 2892 1835 2893 /** << 2894 * idle_worker_timeout - check if some idle w << 2895 * @t: The pool's idle_timer that just expire << 2896 * << 2897 * The timer is armed in worker_enter_idle(). << 2898 * worker_leave_idle(), as a worker flicking << 2899 * pool is at the too_many_workers() tipping << 2900 * housekeeping overhead. Since IDLE_WORKER_T << 2901 * it expire and re-evaluate things from ther << 2902 */ << 2903 static void idle_worker_timeout(struct timer_ 1836 static void idle_worker_timeout(struct timer_list *t) 2904 { 1837 { 2905 struct worker_pool *pool = from_timer 1838 struct worker_pool *pool = from_timer(pool, t, idle_timer); 2906 bool do_cull = false; << 2907 << 2908 if (work_pending(&pool->idle_cull_wor << 2909 return; << 2910 << 2911 raw_spin_lock_irq(&pool->lock); << 2912 << 2913 if (too_many_workers(pool)) { << 2914 struct worker *worker; << 2915 unsigned long expires; << 2916 << 2917 /* idle_list is kept in LIFO << 2918 worker = list_last_entry(&poo << 2919 expires = worker->last_active << 2920 do_cull = !time_before(jiffie << 2921 << 2922 if (!do_cull) << 2923 mod_timer(&pool->idle << 2924 } << 2925 raw_spin_unlock_irq(&pool->lock); << 2926 1839 2927 if (do_cull) !! 
1840 spin_lock_irq(&pool->lock); 2928 queue_work(system_unbound_wq, << 2929 } << 2930 << 2931 /** << 2932 * idle_cull_fn - cull workers that have been << 2933 * @work: the pool's work for handling these << 2934 * << 2935 * This goes through a pool's idle workers an << 2936 * idle for at least IDLE_WORKER_TIMEOUT seco << 2937 * << 2938 * We don't want to disturb isolated CPUs bec << 2939 * culled, so this also resets worker affinit << 2940 * context, hence the split between timer cal << 2941 */ << 2942 static void idle_cull_fn(struct work_struct * << 2943 { << 2944 struct worker_pool *pool = container_ << 2945 LIST_HEAD(cull_list); << 2946 << 2947 /* << 2948 * Grabbing wq_pool_attach_mutex here << 2949 * cannot proceed beyong set_pf_worke << 2950 * This is required as a previously-p << 2951 * set_worker_dying() has happened bu << 2952 */ << 2953 mutex_lock(&wq_pool_attach_mutex); << 2954 raw_spin_lock_irq(&pool->lock); << 2955 1841 2956 while (too_many_workers(pool)) { 1842 while (too_many_workers(pool)) { 2957 struct worker *worker; 1843 struct worker *worker; 2958 unsigned long expires; 1844 unsigned long expires; 2959 1845 2960 worker = list_last_entry(&poo !! 1846 /* idle_list is kept in LIFO order, check the last one */ >> 1847 worker = list_entry(pool->idle_list.prev, struct worker, entry); 2961 expires = worker->last_active 1848 expires = worker->last_active + IDLE_WORKER_TIMEOUT; 2962 1849 2963 if (time_before(jiffies, expi 1850 if (time_before(jiffies, expires)) { 2964 mod_timer(&pool->idle 1851 mod_timer(&pool->idle_timer, expires); 2965 break; 1852 break; 2966 } 1853 } 2967 1854 2968 set_worker_dying(worker, &cul !! 1855 destroy_worker(worker); 2969 } 1856 } 2970 1857 2971 raw_spin_unlock_irq(&pool->lock); !! 1858 spin_unlock_irq(&pool->lock); 2972 detach_dying_workers(&cull_list); << 2973 mutex_unlock(&wq_pool_attach_mutex); << 2974 << 2975 reap_dying_workers(&cull_list); << 2976 } 1859 } 2977 1860 2978 static void send_mayday(struct work_struct *w 1861 static void send_mayday(struct work_struct *work) 2979 { 1862 { 2980 struct pool_workqueue *pwq = get_work 1863 struct pool_workqueue *pwq = get_work_pwq(work); 2981 struct workqueue_struct *wq = pwq->wq 1864 struct workqueue_struct *wq = pwq->wq; 2982 1865 2983 lockdep_assert_held(&wq_mayday_lock); 1866 lockdep_assert_held(&wq_mayday_lock); 2984 1867 2985 if (!wq->rescuer) 1868 if (!wq->rescuer) 2986 return; 1869 return; 2987 1870 2988 /* mayday mayday mayday */ 1871 /* mayday mayday mayday */ 2989 if (list_empty(&pwq->mayday_node)) { 1872 if (list_empty(&pwq->mayday_node)) { 2990 /* 1873 /* 2991 * If @pwq is for an unbound 1874 * If @pwq is for an unbound wq, its base ref may be put at 2992 * any time due to an attribu 1875 * any time due to an attribute change. Pin @pwq until the 2993 * rescuer is done with it. 1876 * rescuer is done with it. 2994 */ 1877 */ 2995 get_pwq(pwq); 1878 get_pwq(pwq); 2996 list_add_tail(&pwq->mayday_no 1879 list_add_tail(&pwq->mayday_node, &wq->maydays); 2997 wake_up_process(wq->rescuer-> 1880 wake_up_process(wq->rescuer->task); 2998 pwq->stats[PWQ_STAT_MAYDAY]++ << 2999 } 1881 } 3000 } 1882 } 3001 1883 3002 static void pool_mayday_timeout(struct timer_ 1884 static void pool_mayday_timeout(struct timer_list *t) 3003 { 1885 { 3004 struct worker_pool *pool = from_timer 1886 struct worker_pool *pool = from_timer(pool, t, mayday_timer); 3005 struct work_struct *work; 1887 struct work_struct *work; 3006 1888 3007 raw_spin_lock_irq(&pool->lock); !! 
1889 spin_lock_irq(&pool->lock); 3008 raw_spin_lock(&wq_mayday_lock); !! 1890 spin_lock(&wq_mayday_lock); /* for wq->maydays */ 3009 1891 3010 if (need_to_create_worker(pool)) { 1892 if (need_to_create_worker(pool)) { 3011 /* 1893 /* 3012 * We've been trying to creat 1894 * We've been trying to create a new worker but 3013 * haven't been successful. 1895 * haven't been successful. We might be hitting an 3014 * allocation deadlock. Send 1896 * allocation deadlock. Send distress signals to 3015 * rescuers. 1897 * rescuers. 3016 */ 1898 */ 3017 list_for_each_entry(work, &po 1899 list_for_each_entry(work, &pool->worklist, entry) 3018 send_mayday(work); 1900 send_mayday(work); 3019 } 1901 } 3020 1902 3021 raw_spin_unlock(&wq_mayday_lock); !! 1903 spin_unlock(&wq_mayday_lock); 3022 raw_spin_unlock_irq(&pool->lock); !! 1904 spin_unlock_irq(&pool->lock); 3023 1905 3024 mod_timer(&pool->mayday_timer, jiffie 1906 mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INTERVAL); 3025 } 1907 } 3026 1908 3027 /** 1909 /** 3028 * maybe_create_worker - create a new worker 1910 * maybe_create_worker - create a new worker if necessary 3029 * @pool: pool to create a new worker for 1911 * @pool: pool to create a new worker for 3030 * 1912 * 3031 * Create a new worker for @pool if necessary 1913 * Create a new worker for @pool if necessary. @pool is guaranteed to 3032 * have at least one idle worker on return fr 1914 * have at least one idle worker on return from this function. If 3033 * creating a new worker takes longer than MA 1915 * creating a new worker takes longer than MAYDAY_INTERVAL, mayday is 3034 * sent to all rescuers with works scheduled 1916 * sent to all rescuers with works scheduled on @pool to resolve 3035 * possible allocation deadlock. 1917 * possible allocation deadlock. 3036 * 1918 * 3037 * On return, need_to_create_worker() is guar 1919 * On return, need_to_create_worker() is guaranteed to be %false and 3038 * may_start_working() %true. 1920 * may_start_working() %true. 3039 * 1921 * 3040 * LOCKING: 1922 * LOCKING: 3041 * raw_spin_lock_irq(pool->lock) which may be !! 1923 * spin_lock_irq(pool->lock) which may be released and regrabbed 3042 * multiple times. Does GFP_KERNEL allocatio 1924 * multiple times. Does GFP_KERNEL allocations. Called only from 3043 * manager. 1925 * manager. 3044 */ 1926 */ 3045 static void maybe_create_worker(struct worker 1927 static void maybe_create_worker(struct worker_pool *pool) 3046 __releases(&pool->lock) 1928 __releases(&pool->lock) 3047 __acquires(&pool->lock) 1929 __acquires(&pool->lock) 3048 { 1930 { 3049 restart: 1931 restart: 3050 raw_spin_unlock_irq(&pool->lock); !! 1932 spin_unlock_irq(&pool->lock); 3051 1933 3052 /* if we don't make progress in MAYDA 1934 /* if we don't make progress in MAYDAY_INITIAL_TIMEOUT, call for help */ 3053 mod_timer(&pool->mayday_timer, jiffie 1935 mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT); 3054 1936 3055 while (true) { 1937 while (true) { 3056 if (create_worker(pool) || !n 1938 if (create_worker(pool) || !need_to_create_worker(pool)) 3057 break; 1939 break; 3058 1940 3059 schedule_timeout_interruptibl 1941 schedule_timeout_interruptible(CREATE_COOLDOWN); 3060 1942 3061 if (!need_to_create_worker(po 1943 if (!need_to_create_worker(pool)) 3062 break; 1944 break; 3063 } 1945 } 3064 1946 3065 del_timer_sync(&pool->mayday_timer); 1947 del_timer_sync(&pool->mayday_timer); 3066 raw_spin_lock_irq(&pool->lock); !! 
1948 spin_lock_irq(&pool->lock); 3067 /* 1949 /* 3068 * This is necessary even after a new 1950 * This is necessary even after a new worker was just successfully 3069 * created as @pool->lock was dropped 1951 * created as @pool->lock was dropped and the new worker might have 3070 * already become busy. 1952 * already become busy. 3071 */ 1953 */ 3072 if (need_to_create_worker(pool)) 1954 if (need_to_create_worker(pool)) 3073 goto restart; 1955 goto restart; 3074 } 1956 } 3075 1957 3076 /** 1958 /** 3077 * manage_workers - manage worker pool 1959 * manage_workers - manage worker pool 3078 * @worker: self 1960 * @worker: self 3079 * 1961 * 3080 * Assume the manager role and manage the wor 1962 * Assume the manager role and manage the worker pool @worker belongs 3081 * to. At any given time, there can be only 1963 * to. At any given time, there can be only zero or one manager per 3082 * pool. The exclusion is handled automatica 1964 * pool. The exclusion is handled automatically by this function. 3083 * 1965 * 3084 * The caller can safely start processing wor 1966 * The caller can safely start processing works on false return. On 3085 * true return, it's guaranteed that need_to_ 1967 * true return, it's guaranteed that need_to_create_worker() is false 3086 * and may_start_working() is true. 1968 * and may_start_working() is true. 3087 * 1969 * 3088 * CONTEXT: 1970 * CONTEXT: 3089 * raw_spin_lock_irq(pool->lock) which may be !! 1971 * spin_lock_irq(pool->lock) which may be released and regrabbed 3090 * multiple times. Does GFP_KERNEL allocatio 1972 * multiple times. Does GFP_KERNEL allocations. 3091 * 1973 * 3092 * Return: 1974 * Return: 3093 * %false if the pool doesn't need management 1975 * %false if the pool doesn't need management and the caller can safely 3094 * start processing works, %true if managemen 1976 * start processing works, %true if management function was performed and 3095 * the conditions that the caller verified be 1977 * the conditions that the caller verified before calling the function may 3096 * no longer be true. 1978 * no longer be true. 3097 */ 1979 */ 3098 static bool manage_workers(struct worker *wor 1980 static bool manage_workers(struct worker *worker) 3099 { 1981 { 3100 struct worker_pool *pool = worker->po 1982 struct worker_pool *pool = worker->pool; 3101 1983 3102 if (pool->flags & POOL_MANAGER_ACTIVE 1984 if (pool->flags & POOL_MANAGER_ACTIVE) 3103 return false; 1985 return false; 3104 1986 3105 pool->flags |= POOL_MANAGER_ACTIVE; 1987 pool->flags |= POOL_MANAGER_ACTIVE; 3106 pool->manager = worker; 1988 pool->manager = worker; 3107 1989 3108 maybe_create_worker(pool); 1990 maybe_create_worker(pool); 3109 1991 3110 pool->manager = NULL; 1992 pool->manager = NULL; 3111 pool->flags &= ~POOL_MANAGER_ACTIVE; 1993 pool->flags &= ~POOL_MANAGER_ACTIVE; 3112 rcuwait_wake_up(&manager_wait); !! 1994 wake_up(&wq_manager_wait); 3113 return true; 1995 return true; 3114 } 1996 } 3115 1997 3116 /** 1998 /** 3117 * process_one_work - process single work 1999 * process_one_work - process single work 3118 * @worker: self 2000 * @worker: self 3119 * @work: work to process 2001 * @work: work to process 3120 * 2002 * 3121 * Process @work. This function contains all 2003 * Process @work. 
This function contains all the logics necessary to 3122 * process a single work including synchroniz 2004 * process a single work including synchronization against and 3123 * interaction with other workers on the same 2005 * interaction with other workers on the same cpu, queueing and 3124 * flushing. As long as context requirement 2006 * flushing. As long as context requirement is met, any worker can 3125 * call this function to process a work. 2007 * call this function to process a work. 3126 * 2008 * 3127 * CONTEXT: 2009 * CONTEXT: 3128 * raw_spin_lock_irq(pool->lock) which is rel !! 2010 * spin_lock_irq(pool->lock) which is released and regrabbed. 3129 */ 2011 */ 3130 static void process_one_work(struct worker *w 2012 static void process_one_work(struct worker *worker, struct work_struct *work) 3131 __releases(&pool->lock) 2013 __releases(&pool->lock) 3132 __acquires(&pool->lock) 2014 __acquires(&pool->lock) 3133 { 2015 { 3134 struct pool_workqueue *pwq = get_work 2016 struct pool_workqueue *pwq = get_work_pwq(work); 3135 struct worker_pool *pool = worker->po 2017 struct worker_pool *pool = worker->pool; 3136 unsigned long work_data; !! 2018 bool cpu_intensive = pwq->wq->flags & WQ_CPU_INTENSIVE; 3137 int lockdep_start_depth, rcu_start_de !! 2019 int work_color; 3138 bool bh_draining = pool->flags & POOL !! 2020 struct worker *collision; 3139 #ifdef CONFIG_LOCKDEP 2021 #ifdef CONFIG_LOCKDEP 3140 /* 2022 /* 3141 * It is permissible to free the stru 2023 * It is permissible to free the struct work_struct from 3142 * inside the function that is called 2024 * inside the function that is called from it, this we need to 3143 * take into account for lockdep too. 2025 * take into account for lockdep too. To avoid bogus "held 3144 * lock freed" warnings as well as pr 2026 * lock freed" warnings as well as problems when looking into 3145 * work->lockdep_map, make a copy and 2027 * work->lockdep_map, make a copy and use that here. 3146 */ 2028 */ 3147 struct lockdep_map lockdep_map; 2029 struct lockdep_map lockdep_map; 3148 2030 3149 lockdep_copy_map(&lockdep_map, &work- 2031 lockdep_copy_map(&lockdep_map, &work->lockdep_map); 3150 #endif 2032 #endif 3151 /* ensure we're on the correct CPU */ 2033 /* ensure we're on the correct CPU */ 3152 WARN_ON_ONCE(!(pool->flags & POOL_DIS 2034 WARN_ON_ONCE(!(pool->flags & POOL_DISASSOCIATED) && 3153 raw_smp_processor_id() ! 2035 raw_smp_processor_id() != pool->cpu); 3154 2036 >> 2037 /* >> 2038 * A single work shouldn't be executed concurrently by >> 2039 * multiple workers on a single cpu. Check whether anyone is >> 2040 * already processing the work. If so, defer the work to the >> 2041 * currently executing one. >> 2042 */ >> 2043 collision = find_worker_executing_work(pool, work); >> 2044 if (unlikely(collision)) { >> 2045 move_linked_works(work, &collision->scheduled, NULL); >> 2046 return; >> 2047 } >> 2048 3155 /* claim and dequeue */ 2049 /* claim and dequeue */ 3156 debug_work_deactivate(work); 2050 debug_work_deactivate(work); 3157 hash_add(pool->busy_hash, &worker->he 2051 hash_add(pool->busy_hash, &worker->hentry, (unsigned long)work); 3158 worker->current_work = work; 2052 worker->current_work = work; 3159 worker->current_func = work->func; 2053 worker->current_func = work->func; 3160 worker->current_pwq = pwq; 2054 worker->current_pwq = pwq; 3161 if (worker->task) !! 
2055 work_color = get_work_color(work); 3162 worker->current_at = worker-> << 3163 work_data = *work_data_bits(work); << 3164 worker->current_color = get_work_colo << 3165 << 3166 /* << 3167 * Record wq name for cmdline and deb << 3168 * overridden through set_worker_desc << 3169 */ << 3170 strscpy(worker->desc, pwq->wq->name, << 3171 2056 3172 list_del_init(&work->entry); 2057 list_del_init(&work->entry); 3173 2058 3174 /* 2059 /* 3175 * CPU intensive works don't particip 2060 * CPU intensive works don't participate in concurrency management. 3176 * They're the scheduler's responsibi 2061 * They're the scheduler's responsibility. This takes @worker out 3177 * of concurrency management and the 2062 * of concurrency management and the next code block will chain 3178 * execution of the pending work item 2063 * execution of the pending work items. 3179 */ 2064 */ 3180 if (unlikely(pwq->wq->flags & WQ_CPU_ !! 2065 if (unlikely(cpu_intensive)) 3181 worker_set_flags(worker, WORK 2066 worker_set_flags(worker, WORKER_CPU_INTENSIVE); 3182 2067 3183 /* 2068 /* 3184 * Kick @pool if necessary. It's alwa !! 2069 * Wake up another worker if necessary. The condition is always 3185 * since nr_running would always be > !! 2070 * false for normal per-cpu workers since nr_running would always 3186 * chain execution of the pending wor !! 2071 * be >= 1 at this point. This is used to chain execution of the 3187 * workers such as the UNBOUND and CP !! 2072 * pending work items for WORKER_NOT_RUNNING workers such as the >> 2073 * UNBOUND and CPU_INTENSIVE ones. 3188 */ 2074 */ 3189 kick_pool(pool); !! 2075 if (need_more_worker(pool)) >> 2076 wake_up_worker(pool); 3190 2077 3191 /* 2078 /* 3192 * Record the last pool and clear PEN 2079 * Record the last pool and clear PENDING which should be the last 3193 * update to @work. Also, do this in 2080 * update to @work. Also, do this inside @pool->lock so that 3194 * PENDING and queued state changes h 2081 * PENDING and queued state changes happen together while IRQ is 3195 * disabled. 2082 * disabled. 3196 */ 2083 */ 3197 set_work_pool_and_clear_pending(work, !! 2084 set_work_pool_and_clear_pending(work, pool->id); 3198 2085 3199 pwq->stats[PWQ_STAT_STARTED]++; !! 2086 spin_unlock_irq(&pool->lock); 3200 raw_spin_unlock_irq(&pool->lock); << 3201 2087 3202 rcu_start_depth = rcu_preempt_depth() !! 2088 lock_map_acquire(&pwq->wq->lockdep_map); 3203 lockdep_start_depth = lockdep_depth(c << 3204 /* see drain_dead_softirq_workfn() */ << 3205 if (!bh_draining) << 3206 lock_map_acquire(&pwq->wq->lo << 3207 lock_map_acquire(&lockdep_map); 2089 lock_map_acquire(&lockdep_map); 3208 /* 2090 /* 3209 * Strictly speaking we should mark t 2091 * Strictly speaking we should mark the invariant state without holding 3210 * any locks, that is, before these t 2092 * any locks, that is, before these two lock_map_acquire()'s. 3211 * 2093 * 3212 * However, that would result in: 2094 * However, that would result in: 3213 * 2095 * 3214 * A(W1) 2096 * A(W1) 3215 * WFC(C) 2097 * WFC(C) 3216 * A(W1) 2098 * A(W1) 3217 * C(C) 2099 * C(C) 3218 * 2100 * 3219 * Which would create W1->C->W1 depen 2101 * Which would create W1->C->W1 dependencies, even though there is no 3220 * actual deadlock possible. There ar 2102 * actual deadlock possible. 
There are two solutions, using a 3221 * read-recursive acquire on the work 2103 * read-recursive acquire on the work(queue) 'locks', but this will then 3222 * hit the lockdep limitation on recu 2104 * hit the lockdep limitation on recursive locks, or simply discard 3223 * these locks. 2105 * these locks. 3224 * 2106 * 3225 * AFAICT there is no possible deadlo 2107 * AFAICT there is no possible deadlock scenario between the 3226 * flush_work() and complete() primit 2108 * flush_work() and complete() primitives (except for single-threaded 3227 * workqueues), so hiding them isn't 2109 * workqueues), so hiding them isn't a problem. 3228 */ 2110 */ 3229 lockdep_invariant_state(true); 2111 lockdep_invariant_state(true); 3230 trace_workqueue_execute_start(work); 2112 trace_workqueue_execute_start(work); 3231 worker->current_func(work); 2113 worker->current_func(work); 3232 /* 2114 /* 3233 * While we must be careful to not us 2115 * While we must be careful to not use "work" after this, the trace 3234 * point will only record its address 2116 * point will only record its address. 3235 */ 2117 */ 3236 trace_workqueue_execute_end(work, wor !! 2118 trace_workqueue_execute_end(work); 3237 pwq->stats[PWQ_STAT_COMPLETED]++; << 3238 lock_map_release(&lockdep_map); 2119 lock_map_release(&lockdep_map); 3239 if (!bh_draining) !! 2120 lock_map_release(&pwq->wq->lockdep_map); 3240 lock_map_release(&pwq->wq->lo << 3241 2121 3242 if (unlikely((worker->task && in_atom !! 2122 if (unlikely(in_atomic() || lockdep_depth(current) > 0)) { 3243 lockdep_depth(current) ! !! 2123 pr_err("BUG: workqueue leaked lock or atomic: %s/0x%08x/%d\n" 3244 rcu_preempt_depth() != r !! 2124 " last function: %pf\n", 3245 pr_err("BUG: workqueue leaked !! 2125 current->comm, preempt_count(), task_pid_nr(current), 3246 " preempt=0x%08x l << 3247 current->comm, task_pi << 3248 lockdep_start_depth, l << 3249 rcu_start_depth, rcu_p << 3250 worker->current_func); 2126 worker->current_func); 3251 debug_show_held_locks(current 2127 debug_show_held_locks(current); 3252 dump_stack(); 2128 dump_stack(); 3253 } 2129 } 3254 2130 3255 /* 2131 /* 3256 * The following prevents a kworker f !! 2132 * The following prevents a kworker from hogging CPU on !PREEMPT 3257 * kernels, where a requeueing work i 2133 * kernels, where a requeueing work item waiting for something to 3258 * happen could deadlock with stop_ma 2134 * happen could deadlock with stop_machine as such work item could 3259 * indefinitely requeue itself while 2135 * indefinitely requeue itself while all other CPUs are trapped in 3260 * stop_machine. At the same time, re 2136 * stop_machine. At the same time, report a quiescent RCU state so 3261 * the same condition doesn't freeze 2137 * the same condition doesn't freeze RCU. 3262 */ 2138 */ 3263 if (worker->task) !! 2139 cond_resched(); 3264 cond_resched(); << 3265 2140 3266 raw_spin_lock_irq(&pool->lock); !! 2141 spin_lock_irq(&pool->lock); 3267 2142 3268 /* !! 2143 /* clear cpu intensive status */ 3269 * In addition to %WQ_CPU_INTENSIVE, !! 2144 if (unlikely(cpu_intensive)) 3270 * CPU intensive by wq_worker_tick() !! 2145 worker_clr_flags(worker, WORKER_CPU_INTENSIVE); 3271 * wq_cpu_intensive_thresh_us. 
Clear << 3272 */ << 3273 worker_clr_flags(worker, WORKER_CPU_I << 3274 << 3275 /* tag the worker for identification << 3276 worker->last_func = worker->current_f << 3277 2146 3278 /* we're done with it, release */ 2147 /* we're done with it, release */ 3279 hash_del(&worker->hentry); 2148 hash_del(&worker->hentry); 3280 worker->current_work = NULL; 2149 worker->current_work = NULL; 3281 worker->current_func = NULL; 2150 worker->current_func = NULL; 3282 worker->current_pwq = NULL; 2151 worker->current_pwq = NULL; 3283 worker->current_color = INT_MAX; !! 2152 worker->desc_valid = false; 3284 !! 2153 pwq_dec_nr_in_flight(pwq, work_color); 3285 /* must be the last step, see the fun << 3286 pwq_dec_nr_in_flight(pwq, work_data); << 3287 } 2154 } 3288 2155 3289 /** 2156 /** 3290 * process_scheduled_works - process schedule 2157 * process_scheduled_works - process scheduled works 3291 * @worker: self 2158 * @worker: self 3292 * 2159 * 3293 * Process all scheduled works. Please note 2160 * Process all scheduled works. Please note that the scheduled list 3294 * may change while processing a work, so thi 2161 * may change while processing a work, so this function repeatedly 3295 * fetches a work from the top and executes i 2162 * fetches a work from the top and executes it. 3296 * 2163 * 3297 * CONTEXT: 2164 * CONTEXT: 3298 * raw_spin_lock_irq(pool->lock) which may be !! 2165 * spin_lock_irq(pool->lock) which may be released and regrabbed 3299 * multiple times. 2166 * multiple times. 3300 */ 2167 */ 3301 static void process_scheduled_works(struct wo 2168 static void process_scheduled_works(struct worker *worker) 3302 { 2169 { 3303 struct work_struct *work; !! 2170 while (!list_empty(&worker->scheduled)) { 3304 bool first = true; !! 2171 struct work_struct *work = list_first_entry(&worker->scheduled, 3305 !! 2172 struct work_struct, entry); 3306 while ((work = list_first_entry_or_nu << 3307 << 3308 if (first) { << 3309 worker->pool->watchdo << 3310 first = false; << 3311 } << 3312 process_one_work(worker, work 2173 process_one_work(worker, work); 3313 } 2174 } 3314 } 2175 } 3315 2176 3316 static void set_pf_worker(bool val) << 3317 { << 3318 mutex_lock(&wq_pool_attach_mutex); << 3319 if (val) << 3320 current->flags |= PF_WQ_WORKE << 3321 else << 3322 current->flags &= ~PF_WQ_WORK << 3323 mutex_unlock(&wq_pool_attach_mutex); << 3324 } << 3325 << 3326 /** 2177 /** 3327 * worker_thread - the worker thread function 2178 * worker_thread - the worker thread function 3328 * @__worker: self 2179 * @__worker: self 3329 * 2180 * 3330 * The worker thread function. All workers b 2181 * The worker thread function. All workers belong to a worker_pool - 3331 * either a per-cpu one or dynamic unbound on 2182 * either a per-cpu one or dynamic unbound one. These workers process all 3332 * work items regardless of their specific ta 2183 * work items regardless of their specific target workqueue. The only 3333 * exception is work items which belong to wo 2184 * exception is work items which belong to workqueues with a rescuer which 3334 * will be explained in rescuer_thread(). 2185 * will be explained in rescuer_thread(). 
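[Editor's note: the CPU-intensive handling above is triggered either by the explicit %WQ_CPU_INTENSIVE flag or, on newer kernels as the hunk describes, automatically by wq_worker_tick() once an item hogs the CPU past wq_cpu_intensive_thresh_us. A caller that knows its work items burn CPU for long stretches can opt out of concurrency management up front; the queue name and init function below are illustrative only.]

	static struct workqueue_struct *my_crunch_wq;

	static int __init my_module_init(void)
	{
		/*
		 * Items on this queue don't stall other per-cpu work items:
		 * their runtime is left to the scheduler instead of the
		 * pool's concurrency management.
		 */
		my_crunch_wq = alloc_workqueue("my_crunch", WQ_CPU_INTENSIVE, 0);
		if (!my_crunch_wq)
			return -ENOMEM;
		return 0;
	}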
3335 * 2186 * 3336 * Return: 0 2187 * Return: 0 3337 */ 2188 */ 3338 static int worker_thread(void *__worker) 2189 static int worker_thread(void *__worker) 3339 { 2190 { 3340 struct worker *worker = __worker; 2191 struct worker *worker = __worker; 3341 struct worker_pool *pool = worker->po 2192 struct worker_pool *pool = worker->pool; 3342 2193 3343 /* tell the scheduler that this is a 2194 /* tell the scheduler that this is a workqueue worker */ 3344 set_pf_worker(true); !! 2195 worker->task->flags |= PF_WQ_WORKER; 3345 woke_up: 2196 woke_up: 3346 raw_spin_lock_irq(&pool->lock); !! 2197 spin_lock_irq(&pool->lock); 3347 2198 3348 /* am I supposed to die? */ 2199 /* am I supposed to die? */ 3349 if (unlikely(worker->flags & WORKER_D 2200 if (unlikely(worker->flags & WORKER_DIE)) { 3350 raw_spin_unlock_irq(&pool->lo !! 2201 spin_unlock_irq(&pool->lock); 3351 set_pf_worker(false); !! 2202 WARN_ON_ONCE(!list_empty(&worker->entry)); 3352 /* !! 2203 worker->task->flags &= ~PF_WQ_WORKER; 3353 * The worker is dead and PF_ !! 2204 3354 * shouldn't be accessed, res !! 2205 set_task_comm(worker->task, "kworker/dying"); 3355 */ !! 2206 ida_simple_remove(&pool->worker_ida, worker->id); 3356 worker->pool = NULL; !! 2207 worker_detach_from_pool(worker, pool); 3357 ida_free(&pool->worker_ida, w !! 2208 kfree(worker); 3358 return 0; 2209 return 0; 3359 } 2210 } 3360 2211 3361 worker_leave_idle(worker); 2212 worker_leave_idle(worker); 3362 recheck: 2213 recheck: 3363 /* no more worker necessary? */ 2214 /* no more worker necessary? */ 3364 if (!need_more_worker(pool)) 2215 if (!need_more_worker(pool)) 3365 goto sleep; 2216 goto sleep; 3366 2217 3367 /* do we need to manage? */ 2218 /* do we need to manage? */ 3368 if (unlikely(!may_start_working(pool) 2219 if (unlikely(!may_start_working(pool)) && manage_workers(worker)) 3369 goto recheck; 2220 goto recheck; 3370 2221 3371 /* 2222 /* 3372 * ->scheduled list can only be fille 2223 * ->scheduled list can only be filled while a worker is 3373 * preparing to process a work or act 2224 * preparing to process a work or actually processing it. 3374 * Make sure nobody diddled with it w 2225 * Make sure nobody diddled with it while I was sleeping. 3375 */ 2226 */ 3376 WARN_ON_ONCE(!list_empty(&worker->sch 2227 WARN_ON_ONCE(!list_empty(&worker->scheduled)); 3377 2228 3378 /* 2229 /* 3379 * Finish PREP stage. We're guarante 2230 * Finish PREP stage. We're guaranteed to have at least one idle 3380 * worker or that someone else has al 2231 * worker or that someone else has already assumed the manager 3381 * role. This is where @worker start 2232 * role. This is where @worker starts participating in concurrency 3382 * management if applicable and concu 2233 * management if applicable and concurrency management is restored 3383 * after being rebound. See rebind_w 2234 * after being rebound. See rebind_workers() for details. 3384 */ 2235 */ 3385 worker_clr_flags(worker, WORKER_PREP 2236 worker_clr_flags(worker, WORKER_PREP | WORKER_REBOUND); 3386 2237 3387 do { 2238 do { 3388 struct work_struct *work = 2239 struct work_struct *work = 3389 list_first_entry(&poo 2240 list_first_entry(&pool->worklist, 3390 stru 2241 struct work_struct, entry); 3391 2242 3392 if (assign_work(work, worker, !! 
2243 pool->watchdog_ts = jiffies; >> 2244 >> 2245 if (likely(!(*work_data_bits(work) & WORK_STRUCT_LINKED))) { >> 2246 /* optimization path, not strictly necessary */ >> 2247 process_one_work(worker, work); >> 2248 if (unlikely(!list_empty(&worker->scheduled))) >> 2249 process_scheduled_works(worker); >> 2250 } else { >> 2251 move_linked_works(work, &worker->scheduled, NULL); 3393 process_scheduled_wor 2252 process_scheduled_works(worker); >> 2253 } 3394 } while (keep_working(pool)); 2254 } while (keep_working(pool)); 3395 2255 3396 worker_set_flags(worker, WORKER_PREP) 2256 worker_set_flags(worker, WORKER_PREP); 3397 sleep: 2257 sleep: 3398 /* 2258 /* 3399 * pool->lock is held and there's no 2259 * pool->lock is held and there's no work to process and no need to 3400 * manage, sleep. Workers are woken 2260 * manage, sleep. Workers are woken up only while holding 3401 * pool->lock or from local cpu, so s 2261 * pool->lock or from local cpu, so setting the current state 3402 * before releasing pool->lock is eno 2262 * before releasing pool->lock is enough to prevent losing any 3403 * event. 2263 * event. 3404 */ 2264 */ 3405 worker_enter_idle(worker); 2265 worker_enter_idle(worker); 3406 __set_current_state(TASK_IDLE); 2266 __set_current_state(TASK_IDLE); 3407 raw_spin_unlock_irq(&pool->lock); !! 2267 spin_unlock_irq(&pool->lock); 3408 schedule(); 2268 schedule(); 3409 goto woke_up; 2269 goto woke_up; 3410 } 2270 } 3411 2271 3412 /** 2272 /** 3413 * rescuer_thread - the rescuer thread functi 2273 * rescuer_thread - the rescuer thread function 3414 * @__rescuer: self 2274 * @__rescuer: self 3415 * 2275 * 3416 * Workqueue rescuer thread function. There' 2276 * Workqueue rescuer thread function. There's one rescuer for each 3417 * workqueue which has WQ_MEM_RECLAIM set. 2277 * workqueue which has WQ_MEM_RECLAIM set. 3418 * 2278 * 3419 * Regular work processing on a pool may bloc 2279 * Regular work processing on a pool may block trying to create a new 3420 * worker which uses GFP_KERNEL allocation wh 2280 * worker which uses GFP_KERNEL allocation which has slight chance of 3421 * developing into deadlock if some works cur 2281 * developing into deadlock if some works currently on the same queue 3422 * need to be processed to satisfy the GFP_KE 2282 * need to be processed to satisfy the GFP_KERNEL allocation. This is 3423 * the problem rescuer solves. 2283 * the problem rescuer solves. 3424 * 2284 * 3425 * When such condition is possible, the pool 2285 * When such condition is possible, the pool summons rescuers of all 3426 * workqueues which have works queued on the 2286 * workqueues which have works queued on the pool and let them process 3427 * those works so that forward progress can b 2287 * those works so that forward progress can be guaranteed. 3428 * 2288 * 3429 * This should happen rarely. 2289 * This should happen rarely. 3430 * 2290 * 3431 * Return: 0 2291 * Return: 0 3432 */ 2292 */ 3433 static int rescuer_thread(void *__rescuer) 2293 static int rescuer_thread(void *__rescuer) 3434 { 2294 { 3435 struct worker *rescuer = __rescuer; 2295 struct worker *rescuer = __rescuer; 3436 struct workqueue_struct *wq = rescuer 2296 struct workqueue_struct *wq = rescuer->rescue_wq; >> 2297 struct list_head *scheduled = &rescuer->scheduled; 3437 bool should_stop; 2298 bool should_stop; 3438 2299 3439 set_user_nice(current, RESCUER_NICE_L 2300 set_user_nice(current, RESCUER_NICE_LEVEL); 3440 2301 3441 /* 2302 /* 3442 * Mark rescuer as worker too. As WO 2303 * Mark rescuer as worker too. 
As WORKER_PREP is never cleared, it 3443 * doesn't participate in concurrency 2304 * doesn't participate in concurrency management. 3444 */ 2305 */ 3445 set_pf_worker(true); !! 2306 rescuer->task->flags |= PF_WQ_WORKER; 3446 repeat: 2307 repeat: 3447 set_current_state(TASK_IDLE); 2308 set_current_state(TASK_IDLE); 3448 2309 3449 /* 2310 /* 3450 * By the time the rescuer is request 2311 * By the time the rescuer is requested to stop, the workqueue 3451 * shouldn't have any work pending, b 2312 * shouldn't have any work pending, but @wq->maydays may still have 3452 * pwq(s) queued. This can happen by 2313 * pwq(s) queued. This can happen by non-rescuer workers consuming 3453 * all the work items before the resc 2314 * all the work items before the rescuer got to them. Go through 3454 * @wq->maydays processing before act 2315 * @wq->maydays processing before acting on should_stop so that the 3455 * list is always empty on exit. 2316 * list is always empty on exit. 3456 */ 2317 */ 3457 should_stop = kthread_should_stop(); 2318 should_stop = kthread_should_stop(); 3458 2319 3459 /* see whether any pwq is asking for 2320 /* see whether any pwq is asking for help */ 3460 raw_spin_lock_irq(&wq_mayday_lock); !! 2321 spin_lock_irq(&wq_mayday_lock); 3461 2322 3462 while (!list_empty(&wq->maydays)) { 2323 while (!list_empty(&wq->maydays)) { 3463 struct pool_workqueue *pwq = 2324 struct pool_workqueue *pwq = list_first_entry(&wq->maydays, 3464 struc 2325 struct pool_workqueue, mayday_node); 3465 struct worker_pool *pool = pw 2326 struct worker_pool *pool = pwq->pool; 3466 struct work_struct *work, *n; 2327 struct work_struct *work, *n; >> 2328 bool first = true; 3467 2329 3468 __set_current_state(TASK_RUNN 2330 __set_current_state(TASK_RUNNING); 3469 list_del_init(&pwq->mayday_no 2331 list_del_init(&pwq->mayday_node); 3470 2332 3471 raw_spin_unlock_irq(&wq_mayda !! 2333 spin_unlock_irq(&wq_mayday_lock); 3472 2334 3473 worker_attach_to_pool(rescuer 2335 worker_attach_to_pool(rescuer, pool); 3474 2336 3475 raw_spin_lock_irq(&pool->lock !! 2337 spin_lock_irq(&pool->lock); >> 2338 rescuer->pool = pool; 3476 2339 3477 /* 2340 /* 3478 * Slurp in all works issued 2341 * Slurp in all works issued via this workqueue and 3479 * process'em. 2342 * process'em. 3480 */ 2343 */ 3481 WARN_ON_ONCE(!list_empty(&res !! 2344 WARN_ON_ONCE(!list_empty(scheduled)); 3482 list_for_each_entry_safe(work 2345 list_for_each_entry_safe(work, n, &pool->worklist, entry) { 3483 if (get_work_pwq(work !! 2346 if (get_work_pwq(work) == pwq) { 3484 assign_work(work, !! 2347 if (first) 3485 pwq->stats[PW !! 2348 pool->watchdog_ts = jiffies; >> 2349 move_linked_works(work, scheduled, &n); >> 2350 } >> 2351 first = false; 3486 } 2352 } 3487 2353 3488 if (!list_empty(&rescuer->sch !! 2354 if (!list_empty(scheduled)) { 3489 process_scheduled_wor 2355 process_scheduled_works(rescuer); 3490 2356 3491 /* 2357 /* 3492 * The above executio 2358 * The above execution of rescued work items could 3493 * have created more 2359 * have created more to rescue through 3494 * pwq_activate_first !! 2360 * pwq_activate_first_delayed() or chained 3495 * queueing. Let's p 2361 * queueing. Let's put @pwq back on mayday list so 3496 * that such back-to- 2362 * that such back-to-back work items, which may be 3497 * being used to reli 2363 * being used to relieve memory pressure, don't 3498 * incur MAYDAY_INTER 2364 * incur MAYDAY_INTERVAL delay inbetween. 3499 */ 2365 */ 3500 if (pwq->nr_active && !! 2366 if (need_to_create_worker(pool)) { 3501 raw_spin_lock !! 
2367 spin_lock(&wq_mayday_lock); 3502 /* !! 2368 get_pwq(pwq); 3503 * Queue iff !! 2369 list_move_tail(&pwq->mayday_node, &wq->maydays); 3504 * and somebo !! 2370 spin_unlock(&wq_mayday_lock); 3505 */ << 3506 if (wq->rescu << 3507 get_p << 3508 list_ << 3509 } << 3510 raw_spin_unlo << 3511 } 2371 } 3512 } 2372 } 3513 2373 3514 /* 2374 /* 3515 * Put the reference grabbed 2375 * Put the reference grabbed by send_mayday(). @pool won't 3516 * go away while we're still 2376 * go away while we're still attached to it. 3517 */ 2377 */ 3518 put_pwq(pwq); 2378 put_pwq(pwq); 3519 2379 3520 /* 2380 /* 3521 * Leave this pool. Notify re !! 2381 * Leave this pool. If need_more_worker() is %true, notify a 3522 * with 0 concurrency and sta !! 2382 * regular worker; otherwise, we end up with 0 concurrency >> 2383 * and stalling the execution. 3523 */ 2384 */ 3524 kick_pool(pool); !! 2385 if (need_more_worker(pool)) >> 2386 wake_up_worker(pool); 3525 2387 3526 raw_spin_unlock_irq(&pool->lo !! 2388 rescuer->pool = NULL; >> 2389 spin_unlock_irq(&pool->lock); 3527 2390 3528 worker_detach_from_pool(rescu !! 2391 worker_detach_from_pool(rescuer, pool); 3529 2392 3530 raw_spin_lock_irq(&wq_mayday_ !! 2393 spin_lock_irq(&wq_mayday_lock); 3531 } 2394 } 3532 2395 3533 raw_spin_unlock_irq(&wq_mayday_lock); !! 2396 spin_unlock_irq(&wq_mayday_lock); 3534 2397 3535 if (should_stop) { 2398 if (should_stop) { 3536 __set_current_state(TASK_RUNN 2399 __set_current_state(TASK_RUNNING); 3537 set_pf_worker(false); !! 2400 rescuer->task->flags &= ~PF_WQ_WORKER; 3538 return 0; 2401 return 0; 3539 } 2402 } 3540 2403 3541 /* rescuers should never participate 2404 /* rescuers should never participate in concurrency management */ 3542 WARN_ON_ONCE(!(rescuer->flags & WORKE 2405 WARN_ON_ONCE(!(rescuer->flags & WORKER_NOT_RUNNING)); 3543 schedule(); 2406 schedule(); 3544 goto repeat; 2407 goto repeat; 3545 } 2408 } 3546 2409 3547 static void bh_worker(struct worker *worker) << 3548 { << 3549 struct worker_pool *pool = worker->po << 3550 int nr_restarts = BH_WORKER_RESTARTS; << 3551 unsigned long end = jiffies + BH_WORK << 3552 << 3553 raw_spin_lock_irq(&pool->lock); << 3554 worker_leave_idle(worker); << 3555 << 3556 /* << 3557 * This function follows the structur << 3558 * explanations on each step. << 3559 */ << 3560 if (!need_more_worker(pool)) << 3561 goto done; << 3562 << 3563 WARN_ON_ONCE(!list_empty(&worker->sch << 3564 worker_clr_flags(worker, WORKER_PREP << 3565 << 3566 do { << 3567 struct work_struct *work = << 3568 list_first_entry(&poo << 3569 stru << 3570 << 3571 if (assign_work(work, worker, << 3572 process_scheduled_wor << 3573 } while (keep_working(pool) && << 3574 --nr_restarts && time_before << 3575 << 3576 worker_set_flags(worker, WORKER_PREP) << 3577 done: << 3578 worker_enter_idle(worker); << 3579 kick_pool(pool); << 3580 raw_spin_unlock_irq(&pool->lock); << 3581 } << 3582 << 3583 /* << 3584 * TODO: Convert all tasklet users to workque << 3585 * << 3586 * This is currently called from tasklet[_hi] << 3587 * whenever there are tasklets to run. Let's << 3588 * queued. Once conversion from tasklet is co << 3589 * can be dropped. 
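 *
 * Editor's illustrative sketch of such a conversion (not part of this
 * file; the foo_* names are hypothetical, while INIT_WORK(), queue_work()
 * and the system_bh_wq queue referenced below are the real interfaces):
 *
 *	// before:	tasklet_setup(&foo->tasklet, foo_tasklet_fn);
 *	//		tasklet_schedule(&foo->tasklet);
 *	// after:	INIT_WORK(&foo->bh_work, foo_bh_workfn);
 *	//		queue_work(system_bh_wq, &foo->bh_work);
 *
 * The converted work item then runs from BH context via bh_worker()
 * above rather than from the tasklet softirq.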
<< 3590 * << 3591 * After full conversion, we'll add worker->s << 3592 * softirq action and obtain the worker point << 3593 */ << 3594 void workqueue_softirq_action(bool highpri) << 3595 { << 3596 struct worker_pool *pool = << 3597 &per_cpu(bh_worker_pools, smp << 3598 if (need_more_worker(pool)) << 3599 bh_worker(list_first_entry(&p << 3600 } << 3601 << 3602 struct wq_drain_dead_softirq_work { << 3603 struct work_struct work; << 3604 struct worker_pool *pool; << 3605 struct completion done; << 3606 }; << 3607 << 3608 static void drain_dead_softirq_workfn(struct << 3609 { << 3610 struct wq_drain_dead_softirq_work *de << 3611 container_of(work, struct wq_ << 3612 struct worker_pool *pool = dead_work- << 3613 bool repeat; << 3614 << 3615 /* << 3616 * @pool's CPU is dead and we want to << 3617 * items from this BH work item which << 3618 * its CPU is dead, @pool can't be ki << 3619 * will be nested, a lockdep annotati << 3620 * @pool with %POOL_BH_DRAINING for t << 3621 */ << 3622 raw_spin_lock_irq(&pool->lock); << 3623 pool->flags |= POOL_BH_DRAINING; << 3624 raw_spin_unlock_irq(&pool->lock); << 3625 << 3626 bh_worker(list_first_entry(&pool->wor << 3627 << 3628 raw_spin_lock_irq(&pool->lock); << 3629 pool->flags &= ~POOL_BH_DRAINING; << 3630 repeat = need_more_worker(pool); << 3631 raw_spin_unlock_irq(&pool->lock); << 3632 << 3633 /* << 3634 * bh_worker() might hit consecutive << 3635 * still are pending work items, resc << 3636 * don't hog this CPU's BH. << 3637 */ << 3638 if (repeat) { << 3639 if (pool->attrs->nice == HIGH << 3640 queue_work(system_bh_ << 3641 else << 3642 queue_work(system_bh_ << 3643 } else { << 3644 complete(&dead_work->done); << 3645 } << 3646 } << 3647 << 3648 /* << 3649 * @cpu is dead. Drain the remaining BH work << 3650 * possible to allocate dead_work per CPU and << 3651 * have to worry about draining overlapping w << 3652 * nesting (one CPU's dead_work queued on ano << 3653 * on). Let's keep it simple and drain them s << 3654 * items which shouldn't be requeued on the s << 3655 */ << 3656 void workqueue_softirq_dead(unsigned int cpu) << 3657 { << 3658 int i; << 3659 << 3660 for (i = 0; i < NR_STD_WORKER_POOLS; << 3661 struct worker_pool *pool = &p << 3662 struct wq_drain_dead_softirq_ << 3663 << 3664 if (!need_more_worker(pool)) << 3665 continue; << 3666 << 3667 INIT_WORK_ONSTACK(&dead_work. << 3668 dead_work.pool = pool; << 3669 init_completion(&dead_work.do << 3670 << 3671 if (pool->attrs->nice == HIGH << 3672 queue_work(system_bh_ << 3673 else << 3674 queue_work(system_bh_ << 3675 << 3676 wait_for_completion(&dead_wor << 3677 destroy_work_on_stack(&dead_w << 3678 } << 3679 } << 3680 << 3681 /** 2410 /** 3682 * check_flush_dependency - check for flush d 2411 * check_flush_dependency - check for flush dependency sanity 3683 * @target_wq: workqueue being flushed 2412 * @target_wq: workqueue being flushed 3684 * @target_work: work item being flushed (NUL 2413 * @target_work: work item being flushed (NULL for workqueue flushes) 3685 * 2414 * 3686 * %current is trying to flush the whole @tar 2415 * %current is trying to flush the whole @target_wq or @target_work on it. 3687 * If @target_wq doesn't have %WQ_MEM_RECLAIM 2416 * If @target_wq doesn't have %WQ_MEM_RECLAIM, verify that %current is not 3688 * reclaiming memory or running on a workqueu 2417 * reclaiming memory or running on a workqueue which doesn't have 3689 * %WQ_MEM_RECLAIM as that can break forward- 2418 * %WQ_MEM_RECLAIM as that can break forward-progress guarantee leading to 3690 * a deadlock. 
2419 * a deadlock. 3691 */ 2420 */ 3692 static void check_flush_dependency(struct wor 2421 static void check_flush_dependency(struct workqueue_struct *target_wq, 3693 struct wor 2422 struct work_struct *target_work) 3694 { 2423 { 3695 work_func_t target_func = target_work 2424 work_func_t target_func = target_work ? target_work->func : NULL; 3696 struct worker *worker; 2425 struct worker *worker; 3697 2426 3698 if (target_wq->flags & WQ_MEM_RECLAIM 2427 if (target_wq->flags & WQ_MEM_RECLAIM) 3699 return; 2428 return; 3700 2429 3701 worker = current_wq_worker(); 2430 worker = current_wq_worker(); 3702 2431 3703 WARN_ONCE(current->flags & PF_MEMALLO 2432 WARN_ONCE(current->flags & PF_MEMALLOC, 3704 "workqueue: PF_MEMALLOC tas !! 2433 "workqueue: PF_MEMALLOC task %d(%s) is flushing !WQ_MEM_RECLAIM %s:%pf", 3705 current->pid, current->comm 2434 current->pid, current->comm, target_wq->name, target_func); 3706 WARN_ONCE(worker && ((worker->current 2435 WARN_ONCE(worker && ((worker->current_pwq->wq->flags & 3707 (WQ_MEM_RECLAIM 2436 (WQ_MEM_RECLAIM | __WQ_LEGACY)) == WQ_MEM_RECLAIM), 3708 "workqueue: WQ_MEM_RECLAIM !! 2437 "workqueue: WQ_MEM_RECLAIM %s:%pf is flushing !WQ_MEM_RECLAIM %s:%pf", 3709 worker->current_pwq->wq->na 2438 worker->current_pwq->wq->name, worker->current_func, 3710 target_wq->name, target_fun 2439 target_wq->name, target_func); 3711 } 2440 } 3712 2441 3713 struct wq_barrier { 2442 struct wq_barrier { 3714 struct work_struct work; 2443 struct work_struct work; 3715 struct completion done; 2444 struct completion done; 3716 struct task_struct *task; /* pu 2445 struct task_struct *task; /* purely informational */ 3717 }; 2446 }; 3718 2447 3719 static void wq_barrier_func(struct work_struc 2448 static void wq_barrier_func(struct work_struct *work) 3720 { 2449 { 3721 struct wq_barrier *barr = container_o 2450 struct wq_barrier *barr = container_of(work, struct wq_barrier, work); 3722 complete(&barr->done); 2451 complete(&barr->done); 3723 } 2452 } 3724 2453 3725 /** 2454 /** 3726 * insert_wq_barrier - insert a barrier work 2455 * insert_wq_barrier - insert a barrier work 3727 * @pwq: pwq to insert barrier into 2456 * @pwq: pwq to insert barrier into 3728 * @barr: wq_barrier to insert 2457 * @barr: wq_barrier to insert 3729 * @target: target work to attach @barr to 2458 * @target: target work to attach @barr to 3730 * @worker: worker currently executing @targe 2459 * @worker: worker currently executing @target, NULL if @target is not executing 3731 * 2460 * 3732 * @barr is linked to @target such that @barr 2461 * @barr is linked to @target such that @barr is completed only after 3733 * @target finishes execution. Please note t 2462 * @target finishes execution. Please note that the ordering 3734 * guarantee is observed only with respect to 2463 * guarantee is observed only with respect to @target and on the local 3735 * cpu. 2464 * cpu. 3736 * 2465 * 3737 * Currently, a queued barrier can't be cance 2466 * Currently, a queued barrier can't be canceled. This is because 3738 * try_to_grab_pending() can't determine whet 2467 * try_to_grab_pending() can't determine whether the work to be 3739 * grabbed is at the head of the queue and th 2468 * grabbed is at the head of the queue and thus can't clear LINKED 3740 * flag of the previous work while there must 2469 * flag of the previous work while there must be a valid next work 3741 * after a work with LINKED flag set. 2470 * after a work with LINKED flag set. 
3742 * 2471 * 3743 * Note that when @worker is non-NULL, @targe 2472 * Note that when @worker is non-NULL, @target may be modified 3744 * underneath us, so we can't reliably determ 2473 * underneath us, so we can't reliably determine pwq from @target. 3745 * 2474 * 3746 * CONTEXT: 2475 * CONTEXT: 3747 * raw_spin_lock_irq(pool->lock). !! 2476 * spin_lock_irq(pool->lock). 3748 */ 2477 */ 3749 static void insert_wq_barrier(struct pool_wor 2478 static void insert_wq_barrier(struct pool_workqueue *pwq, 3750 struct wq_barri 2479 struct wq_barrier *barr, 3751 struct work_str 2480 struct work_struct *target, struct worker *worker) 3752 { 2481 { 3753 static __maybe_unused struct lock_cla << 3754 unsigned int work_flags = 0; << 3755 unsigned int work_color; << 3756 struct list_head *head; 2482 struct list_head *head; >> 2483 unsigned int linked = 0; 3757 2484 3758 /* 2485 /* 3759 * debugobject calls are safe here ev 2486 * debugobject calls are safe here even with pool->lock locked 3760 * as we know for sure that this will 2487 * as we know for sure that this will not trigger any of the 3761 * checks and call back into the fixu 2488 * checks and call back into the fixup functions where we 3762 * might deadlock. 2489 * might deadlock. 3763 * << 3764 * BH and threaded workqueues need se << 3765 * spuriously triggering "inconsisten << 3766 * usage". << 3767 */ 2490 */ 3768 INIT_WORK_ONSTACK_KEY(&barr->work, wq !! 2491 INIT_WORK_ONSTACK(&barr->work, wq_barrier_func); 3769 (pwq->wq->flags << 3770 __set_bit(WORK_STRUCT_PENDING_BIT, wo 2492 __set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work)); 3771 2493 3772 init_completion_map(&barr->done, &tar 2494 init_completion_map(&barr->done, &target->lockdep_map); 3773 2495 3774 barr->task = current; 2496 barr->task = current; 3775 2497 3776 /* The barrier work item does not par << 3777 work_flags |= WORK_STRUCT_INACTIVE; << 3778 << 3779 /* 2498 /* 3780 * If @target is currently being exec 2499 * If @target is currently being executed, schedule the 3781 * barrier to the worker; otherwise, 2500 * barrier to the worker; otherwise, put it after @target. 3782 */ 2501 */ 3783 if (worker) { !! 2502 if (worker) 3784 head = worker->scheduled.next 2503 head = worker->scheduled.next; 3785 work_color = worker->current_ !! 2504 else { 3786 } else { << 3787 unsigned long *bits = work_da 2505 unsigned long *bits = work_data_bits(target); 3788 2506 3789 head = target->entry.next; 2507 head = target->entry.next; 3790 /* there can already be other 2508 /* there can already be other linked works, inherit and set */ 3791 work_flags |= *bits & WORK_ST !! 2509 linked = *bits & WORK_STRUCT_LINKED; 3792 work_color = get_work_color(* << 3793 __set_bit(WORK_STRUCT_LINKED_ 2510 __set_bit(WORK_STRUCT_LINKED_BIT, bits); 3794 } 2511 } 3795 2512 3796 pwq->nr_in_flight[work_color]++; !! 2513 debug_work_activate(&barr->work); 3797 work_flags |= work_color_to_flags(wor !! 2514 insert_work(pwq, &barr->work, head, 3798 !! 
2515 work_color_to_flags(WORK_NO_COLOR) | linked); 3799 insert_work(pwq, &barr->work, head, w << 3800 } 2516 } 3801 2517 3802 /** 2518 /** 3803 * flush_workqueue_prep_pwqs - prepare pwqs f 2519 * flush_workqueue_prep_pwqs - prepare pwqs for workqueue flushing 3804 * @wq: workqueue being flushed 2520 * @wq: workqueue being flushed 3805 * @flush_color: new flush color, < 0 for no- 2521 * @flush_color: new flush color, < 0 for no-op 3806 * @work_color: new work color, < 0 for no-op 2522 * @work_color: new work color, < 0 for no-op 3807 * 2523 * 3808 * Prepare pwqs for workqueue flushing. 2524 * Prepare pwqs for workqueue flushing. 3809 * 2525 * 3810 * If @flush_color is non-negative, flush_col 2526 * If @flush_color is non-negative, flush_color on all pwqs should be 3811 * -1. If no pwq has in-flight commands at t 2527 * -1. If no pwq has in-flight commands at the specified color, all 3812 * pwq->flush_color's stay at -1 and %false i 2528 * pwq->flush_color's stay at -1 and %false is returned. If any pwq 3813 * has in flight commands, its pwq->flush_col 2529 * has in flight commands, its pwq->flush_color is set to 3814 * @flush_color, @wq->nr_pwqs_to_flush is upd 2530 * @flush_color, @wq->nr_pwqs_to_flush is updated accordingly, pwq 3815 * wakeup logic is armed and %true is returne 2531 * wakeup logic is armed and %true is returned. 3816 * 2532 * 3817 * The caller should have initialized @wq->fi 2533 * The caller should have initialized @wq->first_flusher prior to 3818 * calling this function with non-negative @f 2534 * calling this function with non-negative @flush_color. If 3819 * @flush_color is negative, no flush color u 2535 * @flush_color is negative, no flush color update is done and %false 3820 * is returned. 2536 * is returned. 3821 * 2537 * 3822 * If @work_color is non-negative, all pwqs s 2538 * If @work_color is non-negative, all pwqs should have the same 3823 * work_color which is previous to @work_colo 2539 * work_color which is previous to @work_color and all will be 3824 * advanced to @work_color. 2540 * advanced to @work_color. 3825 * 2541 * 3826 * CONTEXT: 2542 * CONTEXT: 3827 * mutex_lock(wq->mutex). 2543 * mutex_lock(wq->mutex). 3828 * 2544 * 3829 * Return: 2545 * Return: 3830 * %true if @flush_color >= 0 and there's som 2546 * %true if @flush_color >= 0 and there's something to flush. %false 3831 * otherwise. 2547 * otherwise. 3832 */ 2548 */ 3833 static bool flush_workqueue_prep_pwqs(struct 2549 static bool flush_workqueue_prep_pwqs(struct workqueue_struct *wq, 3834 int flu 2550 int flush_color, int work_color) 3835 { 2551 { 3836 bool wait = false; 2552 bool wait = false; 3837 struct pool_workqueue *pwq; 2553 struct pool_workqueue *pwq; 3838 2554 3839 if (flush_color >= 0) { 2555 if (flush_color >= 0) { 3840 WARN_ON_ONCE(atomic_read(&wq- 2556 WARN_ON_ONCE(atomic_read(&wq->nr_pwqs_to_flush)); 3841 atomic_set(&wq->nr_pwqs_to_fl 2557 atomic_set(&wq->nr_pwqs_to_flush, 1); 3842 } 2558 } 3843 2559 3844 for_each_pwq(pwq, wq) { 2560 for_each_pwq(pwq, wq) { 3845 struct worker_pool *pool = pw 2561 struct worker_pool *pool = pwq->pool; 3846 2562 3847 raw_spin_lock_irq(&pool->lock !! 
2563 spin_lock_irq(&pool->lock); 3848 2564 3849 if (flush_color >= 0) { 2565 if (flush_color >= 0) { 3850 WARN_ON_ONCE(pwq->flu 2566 WARN_ON_ONCE(pwq->flush_color != -1); 3851 2567 3852 if (pwq->nr_in_flight 2568 if (pwq->nr_in_flight[flush_color]) { 3853 pwq->flush_co 2569 pwq->flush_color = flush_color; 3854 atomic_inc(&w 2570 atomic_inc(&wq->nr_pwqs_to_flush); 3855 wait = true; 2571 wait = true; 3856 } 2572 } 3857 } 2573 } 3858 2574 3859 if (work_color >= 0) { 2575 if (work_color >= 0) { 3860 WARN_ON_ONCE(work_col 2576 WARN_ON_ONCE(work_color != work_next_color(pwq->work_color)); 3861 pwq->work_color = wor 2577 pwq->work_color = work_color; 3862 } 2578 } 3863 2579 3864 raw_spin_unlock_irq(&pool->lo !! 2580 spin_unlock_irq(&pool->lock); 3865 } 2581 } 3866 2582 3867 if (flush_color >= 0 && atomic_dec_an 2583 if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_pwqs_to_flush)) 3868 complete(&wq->first_flusher-> 2584 complete(&wq->first_flusher->done); 3869 2585 3870 return wait; 2586 return wait; 3871 } 2587 } 3872 2588 3873 static void touch_wq_lockdep_map(struct workq << 3874 { << 3875 #ifdef CONFIG_LOCKDEP << 3876 if (wq->flags & WQ_BH) << 3877 local_bh_disable(); << 3878 << 3879 lock_map_acquire(&wq->lockdep_map); << 3880 lock_map_release(&wq->lockdep_map); << 3881 << 3882 if (wq->flags & WQ_BH) << 3883 local_bh_enable(); << 3884 #endif << 3885 } << 3886 << 3887 static void touch_work_lockdep_map(struct wor << 3888 struct wor << 3889 { << 3890 #ifdef CONFIG_LOCKDEP << 3891 if (wq->flags & WQ_BH) << 3892 local_bh_disable(); << 3893 << 3894 lock_map_acquire(&work->lockdep_map); << 3895 lock_map_release(&work->lockdep_map); << 3896 << 3897 if (wq->flags & WQ_BH) << 3898 local_bh_enable(); << 3899 #endif << 3900 } << 3901 << 3902 /** 2589 /** 3903 * __flush_workqueue - ensure that any schedu !! 2590 * flush_workqueue - ensure that any scheduled work has run to completion. 3904 * @wq: workqueue to flush 2591 * @wq: workqueue to flush 3905 * 2592 * 3906 * This function sleeps until all work items 2593 * This function sleeps until all work items which were queued on entry 3907 * have finished execution, but it is not liv 2594 * have finished execution, but it is not livelocked by new incoming ones. 3908 */ 2595 */ 3909 void __flush_workqueue(struct workqueue_struc !! 2596 void flush_workqueue(struct workqueue_struct *wq) 3910 { 2597 { 3911 struct wq_flusher this_flusher = { 2598 struct wq_flusher this_flusher = { 3912 .list = LIST_HEAD_INIT(this_f 2599 .list = LIST_HEAD_INIT(this_flusher.list), 3913 .flush_color = -1, 2600 .flush_color = -1, 3914 .done = COMPLETION_INITIALIZE 2601 .done = COMPLETION_INITIALIZER_ONSTACK_MAP(this_flusher.done, wq->lockdep_map), 3915 }; 2602 }; 3916 int next_color; 2603 int next_color; 3917 2604 3918 if (WARN_ON(!wq_online)) 2605 if (WARN_ON(!wq_online)) 3919 return; 2606 return; 3920 2607 3921 touch_wq_lockdep_map(wq); << 3922 << 3923 mutex_lock(&wq->mutex); 2608 mutex_lock(&wq->mutex); 3924 2609 3925 /* 2610 /* 3926 * Start-to-wait phase 2611 * Start-to-wait phase 3927 */ 2612 */ 3928 next_color = work_next_color(wq->work 2613 next_color = work_next_color(wq->work_color); 3929 2614 3930 if (next_color != wq->flush_color) { 2615 if (next_color != wq->flush_color) { 3931 /* 2616 /* 3932 * Color space is not full. 2617 * Color space is not full. The current work_color 3933 * becomes our flush_color an 2618 * becomes our flush_color and work_color is advanced 3934 * by one. 2619 * by one. 
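 * For example, if wq->work_color is currently 2, this flusher waits
 * for color 2 to drain while work items queued from this point on are
 * accounted under color 3 and do not delay the flush.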
3935 */ 2620 */ 3936 WARN_ON_ONCE(!list_empty(&wq- 2621 WARN_ON_ONCE(!list_empty(&wq->flusher_overflow)); 3937 this_flusher.flush_color = wq 2622 this_flusher.flush_color = wq->work_color; 3938 wq->work_color = next_color; 2623 wq->work_color = next_color; 3939 2624 3940 if (!wq->first_flusher) { 2625 if (!wq->first_flusher) { 3941 /* no flush in progre 2626 /* no flush in progress, become the first flusher */ 3942 WARN_ON_ONCE(wq->flus 2627 WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color); 3943 2628 3944 wq->first_flusher = & 2629 wq->first_flusher = &this_flusher; 3945 2630 3946 if (!flush_workqueue_ 2631 if (!flush_workqueue_prep_pwqs(wq, wq->flush_color, 3947 2632 wq->work_color)) { 3948 /* nothing to 2633 /* nothing to flush, done */ 3949 wq->flush_col 2634 wq->flush_color = next_color; 3950 wq->first_flu 2635 wq->first_flusher = NULL; 3951 goto out_unlo 2636 goto out_unlock; 3952 } 2637 } 3953 } else { 2638 } else { 3954 /* wait in queue */ 2639 /* wait in queue */ 3955 WARN_ON_ONCE(wq->flus 2640 WARN_ON_ONCE(wq->flush_color == this_flusher.flush_color); 3956 list_add_tail(&this_f 2641 list_add_tail(&this_flusher.list, &wq->flusher_queue); 3957 flush_workqueue_prep_ 2642 flush_workqueue_prep_pwqs(wq, -1, wq->work_color); 3958 } 2643 } 3959 } else { 2644 } else { 3960 /* 2645 /* 3961 * Oops, color space is full, 2646 * Oops, color space is full, wait on overflow queue. 3962 * The next flush completion 2647 * The next flush completion will assign us 3963 * flush_color and transfer t 2648 * flush_color and transfer to flusher_queue. 3964 */ 2649 */ 3965 list_add_tail(&this_flusher.l 2650 list_add_tail(&this_flusher.list, &wq->flusher_overflow); 3966 } 2651 } 3967 2652 3968 check_flush_dependency(wq, NULL); 2653 check_flush_dependency(wq, NULL); 3969 2654 3970 mutex_unlock(&wq->mutex); 2655 mutex_unlock(&wq->mutex); 3971 2656 3972 wait_for_completion(&this_flusher.don 2657 wait_for_completion(&this_flusher.done); 3973 2658 3974 /* 2659 /* 3975 * Wake-up-and-cascade phase 2660 * Wake-up-and-cascade phase 3976 * 2661 * 3977 * First flushers are responsible for 2662 * First flushers are responsible for cascading flushes and 3978 * handling overflow. Non-first flus 2663 * handling overflow. Non-first flushers can simply return. 3979 */ 2664 */ 3980 if (READ_ONCE(wq->first_flusher) != & !! 2665 if (wq->first_flusher != &this_flusher) 3981 return; 2666 return; 3982 2667 3983 mutex_lock(&wq->mutex); 2668 mutex_lock(&wq->mutex); 3984 2669 3985 /* we might have raced, check again w 2670 /* we might have raced, check again with mutex held */ 3986 if (wq->first_flusher != &this_flushe 2671 if (wq->first_flusher != &this_flusher) 3987 goto out_unlock; 2672 goto out_unlock; 3988 2673 3989 WRITE_ONCE(wq->first_flusher, NULL); !! 
2674 wq->first_flusher = NULL; 3990 2675 3991 WARN_ON_ONCE(!list_empty(&this_flushe 2676 WARN_ON_ONCE(!list_empty(&this_flusher.list)); 3992 WARN_ON_ONCE(wq->flush_color != this_ 2677 WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color); 3993 2678 3994 while (true) { 2679 while (true) { 3995 struct wq_flusher *next, *tmp 2680 struct wq_flusher *next, *tmp; 3996 2681 3997 /* complete all the flushers 2682 /* complete all the flushers sharing the current flush color */ 3998 list_for_each_entry_safe(next 2683 list_for_each_entry_safe(next, tmp, &wq->flusher_queue, list) { 3999 if (next->flush_color 2684 if (next->flush_color != wq->flush_color) 4000 break; 2685 break; 4001 list_del_init(&next-> 2686 list_del_init(&next->list); 4002 complete(&next->done) 2687 complete(&next->done); 4003 } 2688 } 4004 2689 4005 WARN_ON_ONCE(!list_empty(&wq- 2690 WARN_ON_ONCE(!list_empty(&wq->flusher_overflow) && 4006 wq->flush_color 2691 wq->flush_color != work_next_color(wq->work_color)); 4007 2692 4008 /* this flush_color is finish 2693 /* this flush_color is finished, advance by one */ 4009 wq->flush_color = work_next_c 2694 wq->flush_color = work_next_color(wq->flush_color); 4010 2695 4011 /* one color has been freed, 2696 /* one color has been freed, handle overflow queue */ 4012 if (!list_empty(&wq->flusher_ 2697 if (!list_empty(&wq->flusher_overflow)) { 4013 /* 2698 /* 4014 * Assign the same co 2699 * Assign the same color to all overflowed 4015 * flushers, advance 2700 * flushers, advance work_color and append to 4016 * flusher_queue. Th 2701 * flusher_queue. This is the start-to-wait 4017 * phase for these ov 2702 * phase for these overflowed flushers. 4018 */ 2703 */ 4019 list_for_each_entry(t 2704 list_for_each_entry(tmp, &wq->flusher_overflow, list) 4020 tmp->flush_co 2705 tmp->flush_color = wq->work_color; 4021 2706 4022 wq->work_color = work 2707 wq->work_color = work_next_color(wq->work_color); 4023 2708 4024 list_splice_tail_init 2709 list_splice_tail_init(&wq->flusher_overflow, 4025 2710 &wq->flusher_queue); 4026 flush_workqueue_prep_ 2711 flush_workqueue_prep_pwqs(wq, -1, wq->work_color); 4027 } 2712 } 4028 2713 4029 if (list_empty(&wq->flusher_q 2714 if (list_empty(&wq->flusher_queue)) { 4030 WARN_ON_ONCE(wq->flus 2715 WARN_ON_ONCE(wq->flush_color != wq->work_color); 4031 break; 2716 break; 4032 } 2717 } 4033 2718 4034 /* 2719 /* 4035 * Need to flush more colors. 2720 * Need to flush more colors. Make the next flusher 4036 * the new first flusher and 2721 * the new first flusher and arm pwqs. 4037 */ 2722 */ 4038 WARN_ON_ONCE(wq->flush_color 2723 WARN_ON_ONCE(wq->flush_color == wq->work_color); 4039 WARN_ON_ONCE(wq->flush_color 2724 WARN_ON_ONCE(wq->flush_color != next->flush_color); 4040 2725 4041 list_del_init(&next->list); 2726 list_del_init(&next->list); 4042 wq->first_flusher = next; 2727 wq->first_flusher = next; 4043 2728 4044 if (flush_workqueue_prep_pwqs 2729 if (flush_workqueue_prep_pwqs(wq, wq->flush_color, -1)) 4045 break; 2730 break; 4046 2731 4047 /* 2732 /* 4048 * Meh... this color is alrea 2733 * Meh... this color is already done, clear first 4049 * flusher and repeat cascadi 2734 * flusher and repeat cascading. 4050 */ 2735 */ 4051 wq->first_flusher = NULL; 2736 wq->first_flusher = NULL; 4052 } 2737 } 4053 2738 4054 out_unlock: 2739 out_unlock: 4055 mutex_unlock(&wq->mutex); 2740 mutex_unlock(&wq->mutex); 4056 } 2741 } 4057 EXPORT_SYMBOL(__flush_workqueue); !! 
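/*
 * Illustrative caller-side usage (not part of this file; foo_wq and
 * foo_shutdown() are hypothetical, and flush_workqueue() is assumed to
 * remain the caller-facing name wrapping __flush_workqueue() in newer
 * trees): wait for everything already queued before tearing down.
 *
 *	static struct workqueue_struct *foo_wq;
 *
 *	void foo_shutdown(void)
 *	{
 *		flush_workqueue(foo_wq);
 *		destroy_workqueue(foo_wq);
 *	}
 */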
2742 EXPORT_SYMBOL(flush_workqueue); 4058 2743 4059 /** 2744 /** 4060 * drain_workqueue - drain a workqueue 2745 * drain_workqueue - drain a workqueue 4061 * @wq: workqueue to drain 2746 * @wq: workqueue to drain 4062 * 2747 * 4063 * Wait until the workqueue becomes empty. W 2748 * Wait until the workqueue becomes empty. While draining is in progress, 4064 * only chain queueing is allowed. IOW, only 2749 * only chain queueing is allowed. IOW, only currently pending or running 4065 * work items on @wq can queue further work i 2750 * work items on @wq can queue further work items on it. @wq is flushed 4066 * repeatedly until it becomes empty. The nu 2751 * repeatedly until it becomes empty. The number of flushing is determined 4067 * by the depth of chaining and should be rel 2752 * by the depth of chaining and should be relatively short. Whine if it 4068 * takes too long. 2753 * takes too long. 4069 */ 2754 */ 4070 void drain_workqueue(struct workqueue_struct 2755 void drain_workqueue(struct workqueue_struct *wq) 4071 { 2756 { 4072 unsigned int flush_cnt = 0; 2757 unsigned int flush_cnt = 0; 4073 struct pool_workqueue *pwq; 2758 struct pool_workqueue *pwq; 4074 2759 4075 /* 2760 /* 4076 * __queue_work() needs to test wheth 2761 * __queue_work() needs to test whether there are drainers, is much 4077 * hotter than drain_workqueue() and 2762 * hotter than drain_workqueue() and already looks at @wq->flags. 4078 * Use __WQ_DRAINING so that queue do 2763 * Use __WQ_DRAINING so that queue doesn't have to check nr_drainers. 4079 */ 2764 */ 4080 mutex_lock(&wq->mutex); 2765 mutex_lock(&wq->mutex); 4081 if (!wq->nr_drainers++) 2766 if (!wq->nr_drainers++) 4082 wq->flags |= __WQ_DRAINING; 2767 wq->flags |= __WQ_DRAINING; 4083 mutex_unlock(&wq->mutex); 2768 mutex_unlock(&wq->mutex); 4084 reflush: 2769 reflush: 4085 __flush_workqueue(wq); !! 2770 flush_workqueue(wq); 4086 2771 4087 mutex_lock(&wq->mutex); 2772 mutex_lock(&wq->mutex); 4088 2773 4089 for_each_pwq(pwq, wq) { 2774 for_each_pwq(pwq, wq) { 4090 bool drained; 2775 bool drained; 4091 2776 4092 raw_spin_lock_irq(&pwq->pool- !! 2777 spin_lock_irq(&pwq->pool->lock); 4093 drained = pwq_is_empty(pwq); !! 2778 drained = !pwq->nr_active && list_empty(&pwq->delayed_works); 4094 raw_spin_unlock_irq(&pwq->poo !! 2779 spin_unlock_irq(&pwq->pool->lock); 4095 2780 4096 if (drained) 2781 if (drained) 4097 continue; 2782 continue; 4098 2783 4099 if (++flush_cnt == 10 || 2784 if (++flush_cnt == 10 || 4100 (flush_cnt % 100 == 0 && 2785 (flush_cnt % 100 == 0 && flush_cnt <= 1000)) 4101 pr_warn("workqueue %s !! 2786 pr_warn("workqueue %s: drain_workqueue() isn't complete after %u tries\n", 4102 wq->name, __f !! 2787 wq->name, flush_cnt); 4103 2788 4104 mutex_unlock(&wq->mutex); 2789 mutex_unlock(&wq->mutex); 4105 goto reflush; 2790 goto reflush; 4106 } 2791 } 4107 2792 4108 if (!--wq->nr_drainers) 2793 if (!--wq->nr_drainers) 4109 wq->flags &= ~__WQ_DRAINING; 2794 wq->flags &= ~__WQ_DRAINING; 4110 mutex_unlock(&wq->mutex); 2795 mutex_unlock(&wq->mutex); 4111 } 2796 } 4112 EXPORT_SYMBOL_GPL(drain_workqueue); 2797 EXPORT_SYMBOL_GPL(drain_workqueue); 4113 2798 4114 static bool start_flush_work(struct work_stru !! 
2799 static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr) 4115 bool from_cancel << 4116 { 2800 { 4117 struct worker *worker = NULL; 2801 struct worker *worker = NULL; 4118 struct worker_pool *pool; 2802 struct worker_pool *pool; 4119 struct pool_workqueue *pwq; 2803 struct pool_workqueue *pwq; 4120 struct workqueue_struct *wq; << 4121 2804 4122 rcu_read_lock(); !! 2805 might_sleep(); >> 2806 >> 2807 local_irq_disable(); 4123 pool = get_work_pool(work); 2808 pool = get_work_pool(work); 4124 if (!pool) { 2809 if (!pool) { 4125 rcu_read_unlock(); !! 2810 local_irq_enable(); 4126 return false; 2811 return false; 4127 } 2812 } 4128 2813 4129 raw_spin_lock_irq(&pool->lock); !! 2814 spin_lock(&pool->lock); 4130 /* see the comment in try_to_grab_pen 2815 /* see the comment in try_to_grab_pending() with the same code */ 4131 pwq = get_work_pwq(work); 2816 pwq = get_work_pwq(work); 4132 if (pwq) { 2817 if (pwq) { 4133 if (unlikely(pwq->pool != poo 2818 if (unlikely(pwq->pool != pool)) 4134 goto already_gone; 2819 goto already_gone; 4135 } else { 2820 } else { 4136 worker = find_worker_executin 2821 worker = find_worker_executing_work(pool, work); 4137 if (!worker) 2822 if (!worker) 4138 goto already_gone; 2823 goto already_gone; 4139 pwq = worker->current_pwq; 2824 pwq = worker->current_pwq; 4140 } 2825 } 4141 2826 4142 wq = pwq->wq; !! 2827 check_flush_dependency(pwq->wq, work); 4143 check_flush_dependency(wq, work); << 4144 2828 4145 insert_wq_barrier(pwq, barr, work, wo 2829 insert_wq_barrier(pwq, barr, work, worker); 4146 raw_spin_unlock_irq(&pool->lock); !! 2830 spin_unlock_irq(&pool->lock); 4147 << 4148 touch_work_lockdep_map(work, wq); << 4149 2831 4150 /* 2832 /* 4151 * Force a lock recursion deadlock wh 2833 * Force a lock recursion deadlock when using flush_work() inside a 4152 * single-threaded or rescuer equippe 2834 * single-threaded or rescuer equipped workqueue. 4153 * 2835 * 4154 * For single threaded workqueues the 2836 * For single threaded workqueues the deadlock happens when the work 4155 * is after the work issuing the flus 2837 * is after the work issuing the flush_work(). For rescuer equipped 4156 * workqueues the deadlock happens wh 2838 * workqueues the deadlock happens when the rescuer stalls, blocking 4157 * forward progress. 2839 * forward progress. 4158 */ 2840 */ 4159 if (!from_cancel && (wq->saved_max_ac !! 2841 if (pwq->wq->saved_max_active == 1 || pwq->wq->rescuer) { 4160 touch_wq_lockdep_map(wq); !! 2842 lock_map_acquire(&pwq->wq->lockdep_map); >> 2843 lock_map_release(&pwq->wq->lockdep_map); >> 2844 } 4161 2845 4162 rcu_read_unlock(); << 4163 return true; 2846 return true; 4164 already_gone: 2847 already_gone: 4165 raw_spin_unlock_irq(&pool->lock); !! 2848 spin_unlock_irq(&pool->lock); 4166 rcu_read_unlock(); << 4167 return false; 2849 return false; 4168 } 2850 } 4169 2851 4170 static bool __flush_work(struct work_struct * !! 2852 /** >> 2853 * flush_work - wait for a work to finish executing the last queueing instance >> 2854 * @work: the work to flush >> 2855 * >> 2856 * Wait until @work has finished execution. @work is guaranteed to be idle >> 2857 * on return if it hasn't been requeued since flush started. >> 2858 * >> 2859 * Return: >> 2860 * %true if flush_work() waited for the work to finish execution, >> 2861 * %false if it was already idle. 
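 *
 * Illustrative usage (not part of this kernel-doc; foo is hypothetical):
 *
 *	queue_work(system_wq, &foo->work);
 *	...
 *	flush_work(&foo->work);		// foo->work is idle from here on,
 *	kfree(foo);			// provided nothing requeued it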
>> 2862 */ >> 2863 bool flush_work(struct work_struct *work) 4171 { 2864 { 4172 struct wq_barrier barr; 2865 struct wq_barrier barr; 4173 2866 4174 if (WARN_ON(!wq_online)) 2867 if (WARN_ON(!wq_online)) 4175 return false; 2868 return false; 4176 2869 4177 if (WARN_ON(!work->func)) !! 2870 if (start_flush_work(work, &barr)) { >> 2871 wait_for_completion(&barr.done); >> 2872 destroy_work_on_stack(&barr.work); >> 2873 return true; >> 2874 } else { 4178 return false; 2875 return false; >> 2876 } >> 2877 } >> 2878 EXPORT_SYMBOL_GPL(flush_work); 4179 2879 4180 if (!start_flush_work(work, &barr, fr !! 2880 struct cwt_wait { 4181 return false; !! 2881 wait_queue_entry_t wait; >> 2882 struct work_struct *work; >> 2883 }; >> 2884 >> 2885 static int cwt_wakefn(wait_queue_entry_t *wait, unsigned mode, int sync, void *key) >> 2886 { >> 2887 struct cwt_wait *cwait = container_of(wait, struct cwt_wait, wait); >> 2888 >> 2889 if (cwait->work != key) >> 2890 return 0; >> 2891 return autoremove_wake_function(wait, mode, sync, key); >> 2892 } >> 2893 >> 2894 static bool __cancel_work_timer(struct work_struct *work, bool is_dwork) >> 2895 { >> 2896 static DECLARE_WAIT_QUEUE_HEAD(cancel_waitq); >> 2897 unsigned long flags; >> 2898 int ret; >> 2899 >> 2900 do { >> 2901 ret = try_to_grab_pending(work, is_dwork, &flags); >> 2902 /* >> 2903 * If someone else is already canceling, wait for it to >> 2904 * finish. flush_work() doesn't work for PREEMPT_NONE >> 2905 * because we may get scheduled between @work's completion >> 2906 * and the other canceling task resuming and clearing >> 2907 * CANCELING - flush_work() will return false immediately >> 2908 * as @work is no longer busy, try_to_grab_pending() will >> 2909 * return -ENOENT as @work is still being canceled and the >> 2910 * other canceling task won't be able to clear CANCELING as >> 2911 * we're hogging the CPU. >> 2912 * >> 2913 * Let's wait for completion using a waitqueue. As this >> 2914 * may lead to the thundering herd problem, use a custom >> 2915 * wake function which matches @work along with exclusive >> 2916 * wait and wakeup. >> 2917 */ >> 2918 if (unlikely(ret == -ENOENT)) { >> 2919 struct cwt_wait cwait; >> 2920 >> 2921 init_wait(&cwait.wait); >> 2922 cwait.wait.func = cwt_wakefn; >> 2923 cwait.work = work; >> 2924 >> 2925 prepare_to_wait_exclusive(&cancel_waitq, &cwait.wait, >> 2926 TASK_UNINTERRUPTIBLE); >> 2927 if (work_is_canceling(work)) >> 2928 schedule(); >> 2929 finish_wait(&cancel_waitq, &cwait.wait); >> 2930 } >> 2931 } while (unlikely(ret < 0)); >> 2932 >> 2933 /* tell other tasks trying to grab @work to back off */ >> 2934 mark_work_canceling(work); >> 2935 local_irq_restore(flags); 4182 2936 4183 /* 2937 /* 4184 * start_flush_work() returned %true. !! 2938 * This allows canceling during early boot. We know that @work 4185 * that @work must have been executin !! 2939 * isn't executing. 4186 * can't currently be queued. Its dat << 4187 * was queued on a BH workqueue, we a << 4188 * BH context and thus can be busy-wa << 4189 */ 2940 */ 4190 if (from_cancel) { !! 2941 if (wq_online) 4191 unsigned long data = *work_da !! 2942 flush_work(work); 4192 2943 4193 if (!WARN_ON_ONCE(data & WORK !! 
2944 clear_work_data(work); 4194 (data & WORK_OFFQ_BH)) { << 4195 /* << 4196 * On RT, prevent a l << 4197 * soft interrupt pro << 4198 * running by keeping << 4199 * runs on a differen << 4200 * than doing the BH << 4201 * This is copied fro << 4202 * kernel/softirq.c:: << 4203 */ << 4204 while (!try_wait_for_ << 4205 if (IS_ENABLE << 4206 local << 4207 local << 4208 } else { << 4209 cpu_r << 4210 } << 4211 } << 4212 goto out_destroy; << 4213 } << 4214 } << 4215 2945 4216 wait_for_completion(&barr.done); !! 2946 /* >> 2947 * Paired with prepare_to_wait() above so that either >> 2948 * waitqueue_active() is visible here or !work_is_canceling() is >> 2949 * visible there. >> 2950 */ >> 2951 smp_mb(); >> 2952 if (waitqueue_active(&cancel_waitq)) >> 2953 __wake_up(&cancel_waitq, TASK_NORMAL, 1, work); 4217 2954 4218 out_destroy: !! 2955 return ret; 4219 destroy_work_on_stack(&barr.work); << 4220 return true; << 4221 } 2956 } 4222 2957 4223 /** 2958 /** 4224 * flush_work - wait for a work to finish exe !! 2959 * cancel_work_sync - cancel a work and wait for it to finish 4225 * @work: the work to flush !! 2960 * @work: the work to cancel 4226 * 2961 * 4227 * Wait until @work has finished execution. !! 2962 * Cancel @work and wait for its execution to finish. This function 4228 * on return if it hasn't been requeued since !! 2963 * can be used even if the work re-queues itself or migrates to >> 2964 * another workqueue. On return from this function, @work is >> 2965 * guaranteed to be not pending or executing on any CPU. >> 2966 * >> 2967 * cancel_work_sync(&delayed_work->work) must not be used for >> 2968 * delayed_work's. Use cancel_delayed_work_sync() instead. >> 2969 * >> 2970 * The caller must ensure that the workqueue on which @work was last >> 2971 * queued can't be destroyed before this function returns. 4229 * 2972 * 4230 * Return: 2973 * Return: 4231 * %true if flush_work() waited for the work !! 2974 * %true if @work was pending, %false otherwise. 4232 * %false if it was already idle. << 4233 */ 2975 */ 4234 bool flush_work(struct work_struct *work) !! 2976 bool cancel_work_sync(struct work_struct *work) 4235 { 2977 { 4236 might_sleep(); !! 2978 return __cancel_work_timer(work, false); 4237 return __flush_work(work, false); << 4238 } 2979 } 4239 EXPORT_SYMBOL_GPL(flush_work); !! 2980 EXPORT_SYMBOL_GPL(cancel_work_sync); 4240 2981 4241 /** 2982 /** 4242 * flush_delayed_work - wait for a dwork to f 2983 * flush_delayed_work - wait for a dwork to finish executing the last queueing 4243 * @dwork: the delayed work to flush 2984 * @dwork: the delayed work to flush 4244 * 2985 * 4245 * Delayed timer is cancelled and the pending 2986 * Delayed timer is cancelled and the pending work is queued for 4246 * immediate execution. Like flush_work(), t 2987 * immediate execution. Like flush_work(), this function only 4247 * considers the last queueing instance of @d 2988 * considers the last queueing instance of @dwork. 4248 * 2989 * 4249 * Return: 2990 * Return: 4250 * %true if flush_work() waited for the work 2991 * %true if flush_work() waited for the work to finish execution, 4251 * %false if it was already idle. 2992 * %false if it was already idle. 
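 *
 * Illustrative usage (not part of this kernel-doc; foo is hypothetical):
 *
 *	queue_delayed_work(system_wq, &foo->dwork, HZ);
 *	...
 *	flush_delayed_work(&foo->dwork);	// don't wait out the timer:
 *						// run the pending work now
 *						// and wait for it to finish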
4252 */ 2993 */ 4253 bool flush_delayed_work(struct delayed_work * 2994 bool flush_delayed_work(struct delayed_work *dwork) 4254 { 2995 { 4255 local_irq_disable(); 2996 local_irq_disable(); 4256 if (del_timer_sync(&dwork->timer)) 2997 if (del_timer_sync(&dwork->timer)) 4257 __queue_work(dwork->cpu, dwor 2998 __queue_work(dwork->cpu, dwork->wq, &dwork->work); 4258 local_irq_enable(); 2999 local_irq_enable(); 4259 return flush_work(&dwork->work); 3000 return flush_work(&dwork->work); 4260 } 3001 } 4261 EXPORT_SYMBOL(flush_delayed_work); 3002 EXPORT_SYMBOL(flush_delayed_work); 4262 3003 4263 /** !! 3004 static bool __cancel_work(struct work_struct *work, bool is_dwork) 4264 * flush_rcu_work - wait for a rwork to finis << 4265 * @rwork: the rcu work to flush << 4266 * << 4267 * Return: << 4268 * %true if flush_rcu_work() waited for the w << 4269 * %false if it was already idle. << 4270 */ << 4271 bool flush_rcu_work(struct rcu_work *rwork) << 4272 { << 4273 if (test_bit(WORK_STRUCT_PENDING_BIT, << 4274 rcu_barrier(); << 4275 flush_work(&rwork->work); << 4276 return true; << 4277 } else { << 4278 return flush_work(&rwork->wor << 4279 } << 4280 } << 4281 EXPORT_SYMBOL(flush_rcu_work); << 4282 << 4283 static void work_offqd_disable(struct work_of << 4284 { 3005 { 4285 const unsigned long max = (1lu << WOR !! 3006 unsigned long flags; 4286 << 4287 if (likely(offqd->disable < max)) << 4288 offqd->disable++; << 4289 else << 4290 WARN_ONCE(true, "workqueue: w << 4291 } << 4292 << 4293 static void work_offqd_enable(struct work_off << 4294 { << 4295 if (likely(offqd->disable > 0)) << 4296 offqd->disable--; << 4297 else << 4298 WARN_ONCE(true, "workqueue: w << 4299 } << 4300 << 4301 static bool __cancel_work(struct work_struct << 4302 { << 4303 struct work_offq_data offqd; << 4304 unsigned long irq_flags; << 4305 int ret; 3007 int ret; 4306 3008 4307 ret = work_grab_pending(work, cflags, !! 3009 do { 4308 !! 3010 ret = try_to_grab_pending(work, is_dwork, &flags); 4309 work_offqd_unpack(&offqd, *work_data_ !! 3011 } while (unlikely(ret == -EAGAIN)); 4310 << 4311 if (cflags & WORK_CANCEL_DISABLE) << 4312 work_offqd_disable(&offqd); << 4313 << 4314 set_work_pool_and_clear_pending(work, << 4315 work_ << 4316 local_irq_restore(irq_flags); << 4317 return ret; << 4318 } << 4319 << 4320 static bool __cancel_work_sync(struct work_st << 4321 { << 4322 bool ret; << 4323 << 4324 ret = __cancel_work(work, cflags | WO << 4325 << 4326 if (*work_data_bits(work) & WORK_OFFQ << 4327 WARN_ON_ONCE(in_hardirq()); << 4328 else << 4329 might_sleep(); << 4330 << 4331 /* << 4332 * Skip __flush_work() during early b << 4333 * executing. This allows canceling d << 4334 */ << 4335 if (wq_online) << 4336 __flush_work(work, true); << 4337 3012 4338 if (!(cflags & WORK_CANCEL_DISABLE)) !! 3013 if (unlikely(ret < 0)) 4339 enable_work(work); !! 
3014 return false; 4340 3015 >> 3016 set_work_pool_and_clear_pending(work, get_work_pool_id(work)); >> 3017 local_irq_restore(flags); 4341 return ret; 3018 return ret; 4342 } 3019 } 4343 3020 4344 /* << 4345 * See cancel_delayed_work() << 4346 */ << 4347 bool cancel_work(struct work_struct *work) << 4348 { << 4349 return __cancel_work(work, 0); << 4350 } << 4351 EXPORT_SYMBOL(cancel_work); << 4352 << 4353 /** << 4354 * cancel_work_sync - cancel a work and wait << 4355 * @work: the work to cancel << 4356 * << 4357 * Cancel @work and wait for its execution to << 4358 * even if the work re-queues itself or migra << 4359 * from this function, @work is guaranteed to << 4360 * CPU as long as there aren't racing enqueue << 4361 * << 4362 * cancel_work_sync(&delayed_work->work) must << 4363 * Use cancel_delayed_work_sync() instead. << 4364 * << 4365 * Must be called from a sleepable context if << 4366 * workqueue. Can also be called from non-har << 4367 * if @work was last queued on a BH workqueue << 4368 * << 4369 * Returns %true if @work was pending, %false << 4370 */ << 4371 bool cancel_work_sync(struct work_struct *wor << 4372 { << 4373 return __cancel_work_sync(work, 0); << 4374 } << 4375 EXPORT_SYMBOL_GPL(cancel_work_sync); << 4376 << 4377 /** 3021 /** 4378 * cancel_delayed_work - cancel a delayed wor 3022 * cancel_delayed_work - cancel a delayed work 4379 * @dwork: delayed_work to cancel 3023 * @dwork: delayed_work to cancel 4380 * 3024 * 4381 * Kill off a pending delayed_work. 3025 * Kill off a pending delayed_work. 4382 * 3026 * 4383 * Return: %true if @dwork was pending and ca 3027 * Return: %true if @dwork was pending and canceled; %false if it wasn't 4384 * pending. 3028 * pending. 4385 * 3029 * 4386 * Note: 3030 * Note: 4387 * The work callback function may still be ru 3031 * The work callback function may still be running on return, unless 4388 * it returns %true and the work doesn't re-a 3032 * it returns %true and the work doesn't re-arm itself. Explicitly flush or 4389 * use cancel_delayed_work_sync() to wait on 3033 * use cancel_delayed_work_sync() to wait on it. 4390 * 3034 * 4391 * This function is safe to call from any con 3035 * This function is safe to call from any context including IRQ handler. 4392 */ 3036 */ 4393 bool cancel_delayed_work(struct delayed_work 3037 bool cancel_delayed_work(struct delayed_work *dwork) 4394 { 3038 { 4395 return __cancel_work(&dwork->work, WO !! 3039 return __cancel_work(&dwork->work, true); 4396 } 3040 } 4397 EXPORT_SYMBOL(cancel_delayed_work); 3041 EXPORT_SYMBOL(cancel_delayed_work); 4398 3042 4399 /** 3043 /** 4400 * cancel_delayed_work_sync - cancel a delaye 3044 * cancel_delayed_work_sync - cancel a delayed work and wait for it to finish 4401 * @dwork: the delayed work cancel 3045 * @dwork: the delayed work cancel 4402 * 3046 * 4403 * This is cancel_work_sync() for delayed wor 3047 * This is cancel_work_sync() for delayed works. 4404 * 3048 * 4405 * Return: 3049 * Return: 4406 * %true if @dwork was pending, %false otherw 3050 * %true if @dwork was pending, %false otherwise. 4407 */ 3051 */ 4408 bool cancel_delayed_work_sync(struct delayed_ 3052 bool cancel_delayed_work_sync(struct delayed_work *dwork) 4409 { 3053 { 4410 return __cancel_work_sync(&dwork->wor !! 
3054 return __cancel_work_timer(&dwork->work, true); 4411 } 3055 } 4412 EXPORT_SYMBOL(cancel_delayed_work_sync); 3056 EXPORT_SYMBOL(cancel_delayed_work_sync); 4413 3057 4414 /** 3058 /** 4415 * disable_work - Disable and cancel a work i << 4416 * @work: work item to disable << 4417 * << 4418 * Disable @work by incrementing its disable << 4419 * pending. As long as the disable count is n << 4420 * will fail and return %false. The maximum s << 4421 * power of %WORK_OFFQ_DISABLE_BITS, currentl << 4422 * << 4423 * Can be called from any context. Returns %t << 4424 * otherwise. << 4425 */ << 4426 bool disable_work(struct work_struct *work) << 4427 { << 4428 return __cancel_work(work, WORK_CANCE << 4429 } << 4430 EXPORT_SYMBOL_GPL(disable_work); << 4431 << 4432 /** << 4433 * disable_work_sync - Disable, cancel and dr << 4434 * @work: work item to disable << 4435 * << 4436 * Similar to disable_work() but also wait fo << 4437 * executing. << 4438 * << 4439 * Must be called from a sleepable context if << 4440 * workqueue. Can also be called from non-har << 4441 * if @work was last queued on a BH workqueue << 4442 * << 4443 * Returns %true if @work was pending, %false << 4444 */ << 4445 bool disable_work_sync(struct work_struct *wo << 4446 { << 4447 return __cancel_work_sync(work, WORK_ << 4448 } << 4449 EXPORT_SYMBOL_GPL(disable_work_sync); << 4450 << 4451 /** << 4452 * enable_work - Enable a work item << 4453 * @work: work item to enable << 4454 * << 4455 * Undo disable_work[_sync]() by decrementing << 4456 * only be queued if its disable count is 0. << 4457 * << 4458 * Can be called from any context. Returns %t << 4459 * Otherwise, %false. << 4460 */ << 4461 bool enable_work(struct work_struct *work) << 4462 { << 4463 struct work_offq_data offqd; << 4464 unsigned long irq_flags; << 4465 << 4466 work_grab_pending(work, 0, &irq_flags << 4467 << 4468 work_offqd_unpack(&offqd, *work_data_ << 4469 work_offqd_enable(&offqd); << 4470 set_work_pool_and_clear_pending(work, << 4471 work_ << 4472 local_irq_restore(irq_flags); << 4473 << 4474 return !offqd.disable; << 4475 } << 4476 EXPORT_SYMBOL_GPL(enable_work); << 4477 << 4478 /** << 4479 * disable_delayed_work - Disable and cancel << 4480 * @dwork: delayed work item to disable << 4481 * << 4482 * disable_work() for delayed work items. << 4483 */ << 4484 bool disable_delayed_work(struct delayed_work << 4485 { << 4486 return __cancel_work(&dwork->work, << 4487 WORK_CANCEL_DELA << 4488 } << 4489 EXPORT_SYMBOL_GPL(disable_delayed_work); << 4490 << 4491 /** << 4492 * disable_delayed_work_sync - Disable, cance << 4493 * @dwork: delayed work item to disable << 4494 * << 4495 * disable_work_sync() for delayed work items << 4496 */ << 4497 bool disable_delayed_work_sync(struct delayed << 4498 { << 4499 return __cancel_work_sync(&dwork->wor << 4500 WORK_CANCEL << 4501 } << 4502 EXPORT_SYMBOL_GPL(disable_delayed_work_sync); << 4503 << 4504 /** << 4505 * enable_delayed_work - Enable a delayed wor << 4506 * @dwork: delayed work item to enable << 4507 * << 4508 * enable_work() for delayed work items. 
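 *
 * Illustrative pairing (not part of this kernel-doc; foo is
 * hypothetical), e.g. across a suspend/resume cycle:
 *
 *	// suspend: cancel and block further queueing
 *	disable_delayed_work_sync(&foo->dwork);
 *	...
 *	// resume: allow queueing again and re-arm
 *	enable_delayed_work(&foo->dwork);
 *	queue_delayed_work(system_wq, &foo->dwork, HZ);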
<< 4509 */ << 4510 bool enable_delayed_work(struct delayed_work << 4511 { << 4512 return enable_work(&dwork->work); << 4513 } << 4514 EXPORT_SYMBOL_GPL(enable_delayed_work); << 4515 << 4516 /** << 4517 * schedule_on_each_cpu - execute a function 3059 * schedule_on_each_cpu - execute a function synchronously on each online CPU 4518 * @func: the function to call 3060 * @func: the function to call 4519 * 3061 * 4520 * schedule_on_each_cpu() executes @func on e 3062 * schedule_on_each_cpu() executes @func on each online CPU using the 4521 * system workqueue and blocks until all CPUs 3063 * system workqueue and blocks until all CPUs have completed. 4522 * schedule_on_each_cpu() is very slow. 3064 * schedule_on_each_cpu() is very slow. 4523 * 3065 * 4524 * Return: 3066 * Return: 4525 * 0 on success, -errno on failure. 3067 * 0 on success, -errno on failure. 4526 */ 3068 */ 4527 int schedule_on_each_cpu(work_func_t func) 3069 int schedule_on_each_cpu(work_func_t func) 4528 { 3070 { 4529 int cpu; 3071 int cpu; 4530 struct work_struct __percpu *works; 3072 struct work_struct __percpu *works; 4531 3073 4532 works = alloc_percpu(struct work_stru 3074 works = alloc_percpu(struct work_struct); 4533 if (!works) 3075 if (!works) 4534 return -ENOMEM; 3076 return -ENOMEM; 4535 3077 4536 cpus_read_lock(); !! 3078 get_online_cpus(); 4537 3079 4538 for_each_online_cpu(cpu) { 3080 for_each_online_cpu(cpu) { 4539 struct work_struct *work = pe 3081 struct work_struct *work = per_cpu_ptr(works, cpu); 4540 3082 4541 INIT_WORK(work, func); 3083 INIT_WORK(work, func); 4542 schedule_work_on(cpu, work); 3084 schedule_work_on(cpu, work); 4543 } 3085 } 4544 3086 4545 for_each_online_cpu(cpu) 3087 for_each_online_cpu(cpu) 4546 flush_work(per_cpu_ptr(works, 3088 flush_work(per_cpu_ptr(works, cpu)); 4547 3089 4548 cpus_read_unlock(); !! 3090 put_online_cpus(); 4549 free_percpu(works); 3091 free_percpu(works); 4550 return 0; 3092 return 0; 4551 } 3093 } 4552 3094 4553 /** 3095 /** 4554 * execute_in_process_context - reliably exec 3096 * execute_in_process_context - reliably execute the routine with user context 4555 * @fn: the function to execute 3097 * @fn: the function to execute 4556 * @ew: guaranteed storage for the ex 3098 * @ew: guaranteed storage for the execute work structure (must 4557 * be available when the work ex 3099 * be available when the work executes) 4558 * 3100 * 4559 * Executes the function immediately if proce 3101 * Executes the function immediately if process context is available, 4560 * otherwise schedules the function for delay 3102 * otherwise schedules the function for delayed execution. 
4561 * 3103 * 4562 * Return: 0 - function was executed 3104 * Return: 0 - function was executed 4563 * 1 - function was scheduled fo 3105 * 1 - function was scheduled for execution 4564 */ 3106 */ 4565 int execute_in_process_context(work_func_t fn 3107 int execute_in_process_context(work_func_t fn, struct execute_work *ew) 4566 { 3108 { 4567 if (!in_interrupt()) { 3109 if (!in_interrupt()) { 4568 fn(&ew->work); 3110 fn(&ew->work); 4569 return 0; 3111 return 0; 4570 } 3112 } 4571 3113 4572 INIT_WORK(&ew->work, fn); 3114 INIT_WORK(&ew->work, fn); 4573 schedule_work(&ew->work); 3115 schedule_work(&ew->work); 4574 3116 4575 return 1; 3117 return 1; 4576 } 3118 } 4577 EXPORT_SYMBOL_GPL(execute_in_process_context) 3119 EXPORT_SYMBOL_GPL(execute_in_process_context); 4578 3120 4579 /** 3121 /** 4580 * free_workqueue_attrs - free a workqueue_at 3122 * free_workqueue_attrs - free a workqueue_attrs 4581 * @attrs: workqueue_attrs to free 3123 * @attrs: workqueue_attrs to free 4582 * 3124 * 4583 * Undo alloc_workqueue_attrs(). 3125 * Undo alloc_workqueue_attrs(). 4584 */ 3126 */ 4585 void free_workqueue_attrs(struct workqueue_at 3127 void free_workqueue_attrs(struct workqueue_attrs *attrs) 4586 { 3128 { 4587 if (attrs) { 3129 if (attrs) { 4588 free_cpumask_var(attrs->cpuma 3130 free_cpumask_var(attrs->cpumask); 4589 free_cpumask_var(attrs->__pod << 4590 kfree(attrs); 3131 kfree(attrs); 4591 } 3132 } 4592 } 3133 } 4593 3134 4594 /** 3135 /** 4595 * alloc_workqueue_attrs - allocate a workque 3136 * alloc_workqueue_attrs - allocate a workqueue_attrs >> 3137 * @gfp_mask: allocation mask to use 4596 * 3138 * 4597 * Allocate a new workqueue_attrs, initialize 3139 * Allocate a new workqueue_attrs, initialize with default settings and 4598 * return it. 3140 * return it. 4599 * 3141 * 4600 * Return: The allocated new workqueue_attr o 3142 * Return: The allocated new workqueue_attr on success. %NULL on failure. 4601 */ 3143 */ 4602 struct workqueue_attrs *alloc_workqueue_attrs !! 3144 struct workqueue_attrs *alloc_workqueue_attrs(gfp_t gfp_mask) 4603 { 3145 { 4604 struct workqueue_attrs *attrs; 3146 struct workqueue_attrs *attrs; 4605 3147 4606 attrs = kzalloc(sizeof(*attrs), GFP_K !! 3148 attrs = kzalloc(sizeof(*attrs), gfp_mask); 4607 if (!attrs) 3149 if (!attrs) 4608 goto fail; 3150 goto fail; 4609 if (!alloc_cpumask_var(&attrs->cpumas !! 3151 if (!alloc_cpumask_var(&attrs->cpumask, gfp_mask)) 4610 goto fail; << 4611 if (!alloc_cpumask_var(&attrs->__pod_ << 4612 goto fail; 3152 goto fail; 4613 3153 4614 cpumask_copy(attrs->cpumask, cpu_poss 3154 cpumask_copy(attrs->cpumask, cpu_possible_mask); 4615 attrs->affn_scope = WQ_AFFN_DFL; << 4616 return attrs; 3155 return attrs; 4617 fail: 3156 fail: 4618 free_workqueue_attrs(attrs); 3157 free_workqueue_attrs(attrs); 4619 return NULL; 3158 return NULL; 4620 } 3159 } 4621 3160 4622 static void copy_workqueue_attrs(struct workq 3161 static void copy_workqueue_attrs(struct workqueue_attrs *to, 4623 const struct 3162 const struct workqueue_attrs *from) 4624 { 3163 { 4625 to->nice = from->nice; 3164 to->nice = from->nice; 4626 cpumask_copy(to->cpumask, from->cpuma 3165 cpumask_copy(to->cpumask, from->cpumask); 4627 cpumask_copy(to->__pod_cpumask, from- << 4628 to->affn_strict = from->affn_strict; << 4629 << 4630 /* 3166 /* 4631 * Unlike hash and equality test, cop !! 3167 * Unlike hash and equality test, this function doesn't ignore 4632 * fields as copying is used for both !! 3168 * ->no_numa as it is used for both pool and wq attrs. 
Instead, 4633 * get_unbound_pool() explicitly clea !! 3169 * get_unbound_pool() explicitly clears ->no_numa after copying. 4634 */ 3170 */ 4635 to->affn_scope = from->affn_scope; !! 3171 to->no_numa = from->no_numa; 4636 to->ordered = from->ordered; << 4637 } << 4638 << 4639 /* << 4640 * Some attrs fields are workqueue-only. Clea << 4641 * comments in 'struct workqueue_attrs' defin << 4642 */ << 4643 static void wqattrs_clear_for_pool(struct wor << 4644 { << 4645 attrs->affn_scope = WQ_AFFN_NR_TYPES; << 4646 attrs->ordered = false; << 4647 if (attrs->affn_strict) << 4648 cpumask_copy(attrs->cpumask, << 4649 } 3172 } 4650 3173 4651 /* hash value of the content of @attr */ 3174 /* hash value of the content of @attr */ 4652 static u32 wqattrs_hash(const struct workqueu 3175 static u32 wqattrs_hash(const struct workqueue_attrs *attrs) 4653 { 3176 { 4654 u32 hash = 0; 3177 u32 hash = 0; 4655 3178 4656 hash = jhash_1word(attrs->nice, hash) 3179 hash = jhash_1word(attrs->nice, hash); 4657 hash = jhash_1word(attrs->affn_strict !! 3180 hash = jhash(cpumask_bits(attrs->cpumask), 4658 hash = jhash(cpumask_bits(attrs->__po << 4659 BITS_TO_LONGS(nr_cpumask 3181 BITS_TO_LONGS(nr_cpumask_bits) * sizeof(long), hash); 4660 if (!attrs->affn_strict) << 4661 hash = jhash(cpumask_bits(att << 4662 BITS_TO_LONGS(nr << 4663 return hash; 3182 return hash; 4664 } 3183 } 4665 3184 4666 /* content equality test */ 3185 /* content equality test */ 4667 static bool wqattrs_equal(const struct workqu 3186 static bool wqattrs_equal(const struct workqueue_attrs *a, 4668 const struct workqu 3187 const struct workqueue_attrs *b) 4669 { 3188 { 4670 if (a->nice != b->nice) 3189 if (a->nice != b->nice) 4671 return false; 3190 return false; 4672 if (a->affn_strict != b->affn_strict) !! 3191 if (!cpumask_equal(a->cpumask, b->cpumask)) 4673 return false; << 4674 if (!cpumask_equal(a->__pod_cpumask, << 4675 return false; << 4676 if (!a->affn_strict && !cpumask_equal << 4677 return false; 3192 return false; 4678 return true; 3193 return true; 4679 } 3194 } 4680 3195 4681 /* Update @attrs with actually available CPUs << 4682 static void wqattrs_actualize_cpumask(struct << 4683 const c << 4684 { << 4685 /* << 4686 * Calculate the effective CPU mask o << 4687 * @attrs->cpumask doesn't overlap wi << 4688 * @unbound_cpumask. 
<< 4689 */ << 4690 cpumask_and(attrs->cpumask, attrs->cp << 4691 if (unlikely(cpumask_empty(attrs->cpu << 4692 cpumask_copy(attrs->cpumask, << 4693 } << 4694 << 4695 /* find wq_pod_type to use for @attrs */ << 4696 static const struct wq_pod_type * << 4697 wqattrs_pod_type(const struct workqueue_attrs << 4698 { << 4699 enum wq_affn_scope scope; << 4700 struct wq_pod_type *pt; << 4701 << 4702 /* to synchronize access to wq_affn_d << 4703 lockdep_assert_held(&wq_pool_mutex); << 4704 << 4705 if (attrs->affn_scope == WQ_AFFN_DFL) << 4706 scope = wq_affn_dfl; << 4707 else << 4708 scope = attrs->affn_scope; << 4709 << 4710 pt = &wq_pod_types[scope]; << 4711 << 4712 if (!WARN_ON_ONCE(attrs->affn_scope = << 4713 likely(pt->nr_pods)) << 4714 return pt; << 4715 << 4716 /* << 4717 * Before workqueue_init_topology(), << 4718 * initialized in workqueue_init_earl << 4719 */ << 4720 pt = &wq_pod_types[WQ_AFFN_SYSTEM]; << 4721 BUG_ON(!pt->nr_pods); << 4722 return pt; << 4723 } << 4724 << 4725 /** 3196 /** 4726 * init_worker_pool - initialize a newly zall 3197 * init_worker_pool - initialize a newly zalloc'd worker_pool 4727 * @pool: worker_pool to initialize 3198 * @pool: worker_pool to initialize 4728 * 3199 * 4729 * Initialize a newly zalloc'd @pool. It als 3200 * Initialize a newly zalloc'd @pool. It also allocates @pool->attrs. 4730 * 3201 * 4731 * Return: 0 on success, -errno on failure. 3202 * Return: 0 on success, -errno on failure. Even on failure, all fields 4732 * inside @pool proper are initialized and pu 3203 * inside @pool proper are initialized and put_unbound_pool() can be called 4733 * on @pool safely to release it. 3204 * on @pool safely to release it. 4734 */ 3205 */ 4735 static int init_worker_pool(struct worker_poo 3206 static int init_worker_pool(struct worker_pool *pool) 4736 { 3207 { 4737 raw_spin_lock_init(&pool->lock); !! 3208 spin_lock_init(&pool->lock); 4738 pool->id = -1; 3209 pool->id = -1; 4739 pool->cpu = -1; 3210 pool->cpu = -1; 4740 pool->node = NUMA_NO_NODE; 3211 pool->node = NUMA_NO_NODE; 4741 pool->flags |= POOL_DISASSOCIATED; 3212 pool->flags |= POOL_DISASSOCIATED; 4742 pool->watchdog_ts = jiffies; 3213 pool->watchdog_ts = jiffies; 4743 INIT_LIST_HEAD(&pool->worklist); 3214 INIT_LIST_HEAD(&pool->worklist); 4744 INIT_LIST_HEAD(&pool->idle_list); 3215 INIT_LIST_HEAD(&pool->idle_list); 4745 hash_init(pool->busy_hash); 3216 hash_init(pool->busy_hash); 4746 3217 4747 timer_setup(&pool->idle_timer, idle_w 3218 timer_setup(&pool->idle_timer, idle_worker_timeout, TIMER_DEFERRABLE); 4748 INIT_WORK(&pool->idle_cull_work, idle << 4749 3219 4750 timer_setup(&pool->mayday_timer, pool 3220 timer_setup(&pool->mayday_timer, pool_mayday_timeout, 0); 4751 3221 >> 3222 mutex_init(&pool->attach_mutex); 4752 INIT_LIST_HEAD(&pool->workers); 3223 INIT_LIST_HEAD(&pool->workers); 4753 3224 4754 ida_init(&pool->worker_ida); 3225 ida_init(&pool->worker_ida); 4755 INIT_HLIST_NODE(&pool->hash_node); 3226 INIT_HLIST_NODE(&pool->hash_node); 4756 pool->refcnt = 1; 3227 pool->refcnt = 1; 4757 3228 4758 /* shouldn't fail above this point */ 3229 /* shouldn't fail above this point */ 4759 pool->attrs = alloc_workqueue_attrs() !! 
3230 pool->attrs = alloc_workqueue_attrs(GFP_KERNEL); 4760 if (!pool->attrs) 3231 if (!pool->attrs) 4761 return -ENOMEM; 3232 return -ENOMEM; 4762 << 4763 wqattrs_clear_for_pool(pool->attrs); << 4764 << 4765 return 0; << 4766 } << 4767 << 4768 #ifdef CONFIG_LOCKDEP << 4769 static void wq_init_lockdep(struct workqueue_ << 4770 { << 4771 char *lock_name; << 4772 << 4773 lockdep_register_key(&wq->key); << 4774 lock_name = kasprintf(GFP_KERNEL, "%s << 4775 if (!lock_name) << 4776 lock_name = wq->name; << 4777 << 4778 wq->lock_name = lock_name; << 4779 lockdep_init_map(&wq->lockdep_map, lo << 4780 } << 4781 << 4782 static void wq_unregister_lockdep(struct work << 4783 { << 4784 lockdep_unregister_key(&wq->key); << 4785 } << 4786 << 4787 static void wq_free_lockdep(struct workqueue_ << 4788 { << 4789 if (wq->lock_name != wq->name) << 4790 kfree(wq->lock_name); << 4791 } << 4792 #else << 4793 static void wq_init_lockdep(struct workqueue_ << 4794 { << 4795 } << 4796 << 4797 static void wq_unregister_lockdep(struct work << 4798 { << 4799 } << 4800 << 4801 static void wq_free_lockdep(struct workqueue_ << 4802 { << 4803 } << 4804 #endif << 4805 << 4806 static void free_node_nr_active(struct wq_nod << 4807 { << 4808 int node; << 4809 << 4810 for_each_node(node) { << 4811 kfree(nna_ar[node]); << 4812 nna_ar[node] = NULL; << 4813 } << 4814 << 4815 kfree(nna_ar[nr_node_ids]); << 4816 nna_ar[nr_node_ids] = NULL; << 4817 } << 4818 << 4819 static void init_node_nr_active(struct wq_nod << 4820 { << 4821 nna->max = WQ_DFL_MIN_ACTIVE; << 4822 atomic_set(&nna->nr, 0); << 4823 raw_spin_lock_init(&nna->lock); << 4824 INIT_LIST_HEAD(&nna->pending_pwqs); << 4825 } << 4826 << 4827 /* << 4828 * Each node's nr_active counter will be acce << 4829 * should be allocated in the node. << 4830 */ << 4831 static int alloc_node_nr_active(struct wq_nod << 4832 { << 4833 struct wq_node_nr_active *nna; << 4834 int node; << 4835 << 4836 for_each_node(node) { << 4837 nna = kzalloc_node(sizeof(*nn << 4838 if (!nna) << 4839 goto err_free; << 4840 init_node_nr_active(nna); << 4841 nna_ar[node] = nna; << 4842 } << 4843 << 4844 /* [nr_node_ids] is used as the fallb << 4845 nna = kzalloc_node(sizeof(*nna), GFP_ << 4846 if (!nna) << 4847 goto err_free; << 4848 init_node_nr_active(nna); << 4849 nna_ar[nr_node_ids] = nna; << 4850 << 4851 return 0; 3233 return 0; 4852 << 4853 err_free: << 4854 free_node_nr_active(nna_ar); << 4855 return -ENOMEM; << 4856 } 3234 } 4857 3235 4858 static void rcu_free_wq(struct rcu_head *rcu) 3236 static void rcu_free_wq(struct rcu_head *rcu) 4859 { 3237 { 4860 struct workqueue_struct *wq = 3238 struct workqueue_struct *wq = 4861 container_of(rcu, struct work 3239 container_of(rcu, struct workqueue_struct, rcu); 4862 3240 4863 if (wq->flags & WQ_UNBOUND) !! 3241 if (!(wq->flags & WQ_UNBOUND)) 4864 free_node_nr_active(wq->node_ !! 3242 free_percpu(wq->cpu_pwqs); >> 3243 else >> 3244 free_workqueue_attrs(wq->unbound_attrs); 4865 3245 4866 wq_free_lockdep(wq); !! 
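/*
 * A minimal illustrative sketch, not part of workqueue.c:
 * alloc_node_nr_active() above fills an array with one counter per NUMA
 * node, allocated on that node, plus one extra slot at index nr_node_ids
 * used as the fallback.  The same node-local-plus-fallback idiom in
 * isolation, for a hypothetical "struct stat_slot":
 */
#include <linux/slab.h>
#include <linux/nodemask.h>

struct stat_slot { int nr; };

/* @tbl has nr_node_ids + 1 entries and must start out zeroed */
static int alloc_per_node_with_fallback(struct stat_slot **tbl)
{
        int node;

        for_each_node(node) {
                tbl[node] = kzalloc_node(sizeof(**tbl), GFP_KERNEL, node);
                if (!tbl[node])
                        goto err_free;
        }

        /* slot [nr_node_ids] is the node-agnostic fallback */
        tbl[nr_node_ids] = kzalloc(sizeof(**tbl), GFP_KERNEL);
        if (!tbl[nr_node_ids])
                goto err_free;
        return 0;

err_free:
        for_each_node(node)
                kfree(tbl[node]);       /* kfree(NULL) is a no-op */
        return -ENOMEM;
}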
3246 kfree(wq->rescuer); 4867 free_percpu(wq->cpu_pwq); << 4868 free_workqueue_attrs(wq->unbound_attr << 4869 kfree(wq); 3247 kfree(wq); 4870 } 3248 } 4871 3249 4872 static void rcu_free_pool(struct rcu_head *rc 3250 static void rcu_free_pool(struct rcu_head *rcu) 4873 { 3251 { 4874 struct worker_pool *pool = container_ 3252 struct worker_pool *pool = container_of(rcu, struct worker_pool, rcu); 4875 3253 4876 ida_destroy(&pool->worker_ida); 3254 ida_destroy(&pool->worker_ida); 4877 free_workqueue_attrs(pool->attrs); 3255 free_workqueue_attrs(pool->attrs); 4878 kfree(pool); 3256 kfree(pool); 4879 } 3257 } 4880 3258 4881 /** 3259 /** 4882 * put_unbound_pool - put a worker_pool 3260 * put_unbound_pool - put a worker_pool 4883 * @pool: worker_pool to put 3261 * @pool: worker_pool to put 4884 * 3262 * 4885 * Put @pool. If its refcnt reaches zero, it !! 3263 * Put @pool. If its refcnt reaches zero, it gets destroyed in sched-RCU 4886 * safe manner. get_unbound_pool() calls thi 3264 * safe manner. get_unbound_pool() calls this function on its failure path 4887 * and this function should be able to releas 3265 * and this function should be able to release pools which went through, 4888 * successfully or not, init_worker_pool(). 3266 * successfully or not, init_worker_pool(). 4889 * 3267 * 4890 * Should be called with wq_pool_mutex held. 3268 * Should be called with wq_pool_mutex held. 4891 */ 3269 */ 4892 static void put_unbound_pool(struct worker_po 3270 static void put_unbound_pool(struct worker_pool *pool) 4893 { 3271 { >> 3272 DECLARE_COMPLETION_ONSTACK(detach_completion); 4894 struct worker *worker; 3273 struct worker *worker; 4895 LIST_HEAD(cull_list); << 4896 3274 4897 lockdep_assert_held(&wq_pool_mutex); 3275 lockdep_assert_held(&wq_pool_mutex); 4898 3276 4899 if (--pool->refcnt) 3277 if (--pool->refcnt) 4900 return; 3278 return; 4901 3279 4902 /* sanity checks */ 3280 /* sanity checks */ 4903 if (WARN_ON(!(pool->cpu < 0)) || 3281 if (WARN_ON(!(pool->cpu < 0)) || 4904 WARN_ON(!list_empty(&pool->workli 3282 WARN_ON(!list_empty(&pool->worklist))) 4905 return; 3283 return; 4906 3284 4907 /* release id and unhash */ 3285 /* release id and unhash */ 4908 if (pool->id >= 0) 3286 if (pool->id >= 0) 4909 idr_remove(&worker_pool_idr, 3287 idr_remove(&worker_pool_idr, pool->id); 4910 hash_del(&pool->hash_node); 3288 hash_del(&pool->hash_node); 4911 3289 4912 /* 3290 /* 4913 * Become the manager and destroy all 3291 * Become the manager and destroy all workers. This prevents 4914 * @pool's workers from blocking on a 3292 * @pool's workers from blocking on attach_mutex. We're the last 4915 * manager and @pool gets freed with 3293 * manager and @pool gets freed with the flag set. 4916 * << 4917 * Having a concurrent manager is qui << 4918 * only get here with << 4919 * pwq->refcnt == pool->refcnt == 0 << 4920 * which implies no work queued to th << 4921 * become the manager. However a work << 4922 * manager before the refcnts dropped << 4923 * drops pool->lock << 4924 */ 3294 */ 4925 while (true) { !! 3295 spin_lock_irq(&pool->lock); 4926 rcuwait_wait_event(&manager_w !! 3296 wait_event_lock_irq(wq_manager_wait, 4927 !(pool->fl !! 3297 !(pool->flags & POOL_MANAGER_ACTIVE), pool->lock); 4928 TASK_UNINT !! 
3298 pool->flags |= POOL_MANAGER_ACTIVE; 4929 << 4930 mutex_lock(&wq_pool_attach_mu << 4931 raw_spin_lock_irq(&pool->lock << 4932 if (!(pool->flags & POOL_MANA << 4933 pool->flags |= POOL_M << 4934 break; << 4935 } << 4936 raw_spin_unlock_irq(&pool->lo << 4937 mutex_unlock(&wq_pool_attach_ << 4938 } << 4939 3299 4940 while ((worker = first_idle_worker(po 3300 while ((worker = first_idle_worker(pool))) 4941 set_worker_dying(worker, &cul !! 3301 destroy_worker(worker); 4942 WARN_ON(pool->nr_workers || pool->nr_ 3302 WARN_ON(pool->nr_workers || pool->nr_idle); 4943 raw_spin_unlock_irq(&pool->lock); !! 3303 spin_unlock_irq(&pool->lock); 4944 << 4945 detach_dying_workers(&cull_list); << 4946 3304 4947 mutex_unlock(&wq_pool_attach_mutex); !! 3305 mutex_lock(&pool->attach_mutex); >> 3306 if (!list_empty(&pool->workers)) >> 3307 pool->detach_completion = &detach_completion; >> 3308 mutex_unlock(&pool->attach_mutex); 4948 3309 4949 reap_dying_workers(&cull_list); !! 3310 if (pool->detach_completion) >> 3311 wait_for_completion(pool->detach_completion); 4950 3312 4951 /* shut down the timers */ 3313 /* shut down the timers */ 4952 del_timer_sync(&pool->idle_timer); 3314 del_timer_sync(&pool->idle_timer); 4953 cancel_work_sync(&pool->idle_cull_wor << 4954 del_timer_sync(&pool->mayday_timer); 3315 del_timer_sync(&pool->mayday_timer); 4955 3316 4956 /* RCU protected to allow dereference !! 3317 /* sched-RCU protected to allow dereferences from get_work_pool() */ 4957 call_rcu(&pool->rcu, rcu_free_pool); !! 3318 call_rcu_sched(&pool->rcu, rcu_free_pool); 4958 } 3319 } 4959 3320 4960 /** 3321 /** 4961 * get_unbound_pool - get a worker_pool with 3322 * get_unbound_pool - get a worker_pool with the specified attributes 4962 * @attrs: the attributes of the worker_pool 3323 * @attrs: the attributes of the worker_pool to get 4963 * 3324 * 4964 * Obtain a worker_pool which has the same at 3325 * Obtain a worker_pool which has the same attributes as @attrs, bump the 4965 * reference count and return it. If there a 3326 * reference count and return it. If there already is a matching 4966 * worker_pool, it will be used; otherwise, t 3327 * worker_pool, it will be used; otherwise, this function attempts to 4967 * create a new one. 3328 * create a new one. 4968 * 3329 * 4969 * Should be called with wq_pool_mutex held. 3330 * Should be called with wq_pool_mutex held. 4970 * 3331 * 4971 * Return: On success, a worker_pool with the 3332 * Return: On success, a worker_pool with the same attributes as @attrs. 4972 * On failure, %NULL. 3333 * On failure, %NULL. 4973 */ 3334 */ 4974 static struct worker_pool *get_unbound_pool(c 3335 static struct worker_pool *get_unbound_pool(const struct workqueue_attrs *attrs) 4975 { 3336 { 4976 struct wq_pod_type *pt = &wq_pod_type << 4977 u32 hash = wqattrs_hash(attrs); 3337 u32 hash = wqattrs_hash(attrs); 4978 struct worker_pool *pool; 3338 struct worker_pool *pool; 4979 int pod, node = NUMA_NO_NODE; !! 3339 int node; >> 3340 int target_node = NUMA_NO_NODE; 4980 3341 4981 lockdep_assert_held(&wq_pool_mutex); 3342 lockdep_assert_held(&wq_pool_mutex); 4982 3343 4983 /* do we already have a matching pool 3344 /* do we already have a matching pool? 
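/*
 * A minimal illustrative sketch, not part of workqueue.c:
 * put_unbound_pool() above defers the final kfree() through
 * rcu_free_pool() (call_rcu() in the newer column, call_rcu_sched() in
 * the older one) so that lockless readers such as get_work_pool() can
 * keep dereferencing the pool until a grace period has elapsed.  The
 * general embed-an-rcu_head-and-defer-the-free idiom, for a hypothetical
 * "struct session":
 */
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct session {
        int id;
        struct rcu_head rcu;
};

static void session_rcu_free(struct rcu_head *rcu)
{
        kfree(container_of(rcu, struct session, rcu));
}

static void session_release(struct session *s)
{
        /* readers that found @s under RCU may still be using it */
        call_rcu(&s->rcu, session_rcu_free);
}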
*/ 4984 hash_for_each_possible(unbound_pool_h 3345 hash_for_each_possible(unbound_pool_hash, pool, hash_node, hash) { 4985 if (wqattrs_equal(pool->attrs 3346 if (wqattrs_equal(pool->attrs, attrs)) { 4986 pool->refcnt++; 3347 pool->refcnt++; 4987 return pool; 3348 return pool; 4988 } 3349 } 4989 } 3350 } 4990 3351 4991 /* If __pod_cpumask is contained insi !! 3352 /* if cpumask is contained inside a NUMA node, we belong to that node */ 4992 for (pod = 0; pod < pt->nr_pods; pod+ !! 3353 if (wq_numa_enabled) { 4993 if (cpumask_subset(attrs->__p !! 3354 for_each_node(node) { 4994 node = pt->pod_node[p !! 3355 if (cpumask_subset(attrs->cpumask, 4995 break; !! 3356 wq_numa_possible_cpumask[node])) { >> 3357 target_node = node; >> 3358 break; >> 3359 } 4996 } 3360 } 4997 } 3361 } 4998 3362 4999 /* nope, create a new one */ 3363 /* nope, create a new one */ 5000 pool = kzalloc_node(sizeof(*pool), GF !! 3364 pool = kzalloc_node(sizeof(*pool), GFP_KERNEL, target_node); 5001 if (!pool || init_worker_pool(pool) < 3365 if (!pool || init_worker_pool(pool) < 0) 5002 goto fail; 3366 goto fail; 5003 3367 5004 pool->node = node; !! 3368 lockdep_set_subclass(&pool->lock, 1); /* see put_pwq() */ 5005 copy_workqueue_attrs(pool->attrs, att 3369 copy_workqueue_attrs(pool->attrs, attrs); 5006 wqattrs_clear_for_pool(pool->attrs); !! 3370 pool->node = target_node; >> 3371 >> 3372 /* >> 3373 * no_numa isn't a worker_pool attribute, always clear it. See >> 3374 * 'struct workqueue_attrs' comments for detail. >> 3375 */ >> 3376 pool->attrs->no_numa = false; 5007 3377 5008 if (worker_pool_assign_id(pool) < 0) 3378 if (worker_pool_assign_id(pool) < 0) 5009 goto fail; 3379 goto fail; 5010 3380 5011 /* create and start the initial worke 3381 /* create and start the initial worker */ 5012 if (wq_online && !create_worker(pool) 3382 if (wq_online && !create_worker(pool)) 5013 goto fail; 3383 goto fail; 5014 3384 5015 /* install */ 3385 /* install */ 5016 hash_add(unbound_pool_hash, &pool->ha 3386 hash_add(unbound_pool_hash, &pool->hash_node, hash); 5017 3387 5018 return pool; 3388 return pool; 5019 fail: 3389 fail: 5020 if (pool) 3390 if (pool) 5021 put_unbound_pool(pool); 3391 put_unbound_pool(pool); 5022 return NULL; 3392 return NULL; 5023 } 3393 } 5024 3394 >> 3395 static void rcu_free_pwq(struct rcu_head *rcu) >> 3396 { >> 3397 kmem_cache_free(pwq_cache, >> 3398 container_of(rcu, struct pool_workqueue, rcu)); >> 3399 } >> 3400 5025 /* 3401 /* 5026 * Scheduled on pwq_release_worker by put_pwq !! 3402 * Scheduled on system_wq by put_pwq() when an unbound pwq hits zero refcnt 5027 * refcnt and needs to be destroyed. !! 3403 * and needs to be destroyed. 5028 */ 3404 */ 5029 static void pwq_release_workfn(struct kthread !! 3405 static void pwq_unbound_release_workfn(struct work_struct *work) 5030 { 3406 { 5031 struct pool_workqueue *pwq = containe 3407 struct pool_workqueue *pwq = container_of(work, struct pool_workqueue, 5032 !! 3408 unbound_release_work); 5033 struct workqueue_struct *wq = pwq->wq 3409 struct workqueue_struct *wq = pwq->wq; 5034 struct worker_pool *pool = pwq->pool; 3410 struct worker_pool *pool = pwq->pool; 5035 bool is_last = false; !! 3411 bool is_last; 5036 3412 5037 /* !! 3413 if (WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND))) 5038 * When @pwq is not linked, it doesn' !! 3414 return; 5039 * @wq, and @wq is invalid to access. << 5040 */ << 5041 if (!list_empty(&pwq->pwqs_node)) { << 5042 mutex_lock(&wq->mutex); << 5043 list_del_rcu(&pwq->pwqs_node) << 5044 is_last = list_empty(&wq->pwq << 5045 3415 5046 /* !! 
3416 mutex_lock(&wq->mutex); 5047 * For ordered workqueue with !! 3417 list_del_rcu(&pwq->pwqs_node); 5048 */ !! 3418 is_last = list_empty(&wq->pwqs); 5049 if (!is_last && (wq->flags & !! 3419 mutex_unlock(&wq->mutex); 5050 unplug_oldest_pwq(wq) << 5051 3420 5052 mutex_unlock(&wq->mutex); !! 3421 mutex_lock(&wq_pool_mutex); 5053 } !! 3422 put_unbound_pool(pool); >> 3423 mutex_unlock(&wq_pool_mutex); 5054 3424 5055 if (wq->flags & WQ_UNBOUND) { !! 3425 call_rcu_sched(&pwq->rcu, rcu_free_pwq); 5056 mutex_lock(&wq_pool_mutex); !! 3426 5057 put_unbound_pool(pool); !! 3427 /* 5058 mutex_unlock(&wq_pool_mutex); !! 3428 * If we're the last pwq going away, @wq is already dead and no one 5059 } !! 3429 * is gonna access it anymore. Schedule RCU free. >> 3430 */ >> 3431 if (is_last) >> 3432 call_rcu_sched(&wq->rcu, rcu_free_wq); >> 3433 } 5060 3434 5061 if (!list_empty(&pwq->pending_node)) !! 3435 /** 5062 struct wq_node_nr_active *nna !! 3436 * pwq_adjust_max_active - update a pwq's max_active to the current setting 5063 wq_node_nr_active(pwq !! 3437 * @pwq: target pool_workqueue >> 3438 * >> 3439 * If @pwq isn't freezing, set @pwq->max_active to the associated >> 3440 * workqueue's saved_max_active and activate delayed work items >> 3441 * accordingly. If @pwq is freezing, clear @pwq->max_active to zero. >> 3442 */ >> 3443 static void pwq_adjust_max_active(struct pool_workqueue *pwq) >> 3444 { >> 3445 struct workqueue_struct *wq = pwq->wq; >> 3446 bool freezable = wq->flags & WQ_FREEZABLE; >> 3447 unsigned long flags; 5064 3448 5065 raw_spin_lock_irq(&nna->lock) !! 3449 /* for @wq->saved_max_active */ 5066 list_del_init(&pwq->pending_n !! 3450 lockdep_assert_held(&wq->mutex); 5067 raw_spin_unlock_irq(&nna->loc !! 3451 5068 } !! 3452 /* fast exit for non-freezable wqs */ >> 3453 if (!freezable && pwq->max_active == wq->saved_max_active) >> 3454 return; 5069 3455 5070 kfree_rcu(pwq, rcu); !! 3456 /* this function can be called during early boot w/ irq disabled */ >> 3457 spin_lock_irqsave(&pwq->pool->lock, flags); 5071 3458 5072 /* 3459 /* 5073 * If we're the last pwq going away, !! 3460 * During [un]freezing, the caller is responsible for ensuring that 5074 * is gonna access it anymore. Sched !! 3461 * this function is called at least once after @workqueue_freezing >> 3462 * is updated and visible. 5075 */ 3463 */ 5076 if (is_last) { !! 3464 if (!freezable || !workqueue_freezing) { 5077 wq_unregister_lockdep(wq); !! 3465 pwq->max_active = wq->saved_max_active; 5078 call_rcu(&wq->rcu, rcu_free_w !! 3466 >> 3467 while (!list_empty(&pwq->delayed_works) && >> 3468 pwq->nr_active < pwq->max_active) >> 3469 pwq_activate_first_delayed(pwq); >> 3470 >> 3471 /* >> 3472 * Need to kick a worker after thawed or an unbound wq's >> 3473 * max_active is bumped. It's a slow path. Do it always. >> 3474 */ >> 3475 wake_up_worker(pwq->pool); >> 3476 } else { >> 3477 pwq->max_active = 0; 5079 } 3478 } >> 3479 >> 3480 spin_unlock_irqrestore(&pwq->pool->lock, flags); 5080 } 3481 } 5081 3482 5082 /* initialize newly allocated @pwq which is a !! 3483 /* initialize newly alloced @pwq which is associated with @wq and @pool */ 5083 static void init_pwq(struct pool_workqueue *p 3484 static void init_pwq(struct pool_workqueue *pwq, struct workqueue_struct *wq, 5084 struct worker_pool *pool 3485 struct worker_pool *pool) 5085 { 3486 { 5086 BUG_ON((unsigned long)pwq & ~WORK_STR !! 
3487 BUG_ON((unsigned long)pwq & WORK_STRUCT_FLAG_MASK); 5087 3488 5088 memset(pwq, 0, sizeof(*pwq)); 3489 memset(pwq, 0, sizeof(*pwq)); 5089 3490 5090 pwq->pool = pool; 3491 pwq->pool = pool; 5091 pwq->wq = wq; 3492 pwq->wq = wq; 5092 pwq->flush_color = -1; 3493 pwq->flush_color = -1; 5093 pwq->refcnt = 1; 3494 pwq->refcnt = 1; 5094 INIT_LIST_HEAD(&pwq->inactive_works); !! 3495 INIT_LIST_HEAD(&pwq->delayed_works); 5095 INIT_LIST_HEAD(&pwq->pending_node); << 5096 INIT_LIST_HEAD(&pwq->pwqs_node); 3496 INIT_LIST_HEAD(&pwq->pwqs_node); 5097 INIT_LIST_HEAD(&pwq->mayday_node); 3497 INIT_LIST_HEAD(&pwq->mayday_node); 5098 kthread_init_work(&pwq->release_work, !! 3498 INIT_WORK(&pwq->unbound_release_work, pwq_unbound_release_workfn); 5099 } 3499 } 5100 3500 5101 /* sync @pwq with the current state of its as 3501 /* sync @pwq with the current state of its associated wq and link it */ 5102 static void link_pwq(struct pool_workqueue *p 3502 static void link_pwq(struct pool_workqueue *pwq) 5103 { 3503 { 5104 struct workqueue_struct *wq = pwq->wq 3504 struct workqueue_struct *wq = pwq->wq; 5105 3505 5106 lockdep_assert_held(&wq->mutex); 3506 lockdep_assert_held(&wq->mutex); 5107 3507 5108 /* may be called multiple times, igno 3508 /* may be called multiple times, ignore if already linked */ 5109 if (!list_empty(&pwq->pwqs_node)) 3509 if (!list_empty(&pwq->pwqs_node)) 5110 return; 3510 return; 5111 3511 5112 /* set the matching work_color */ 3512 /* set the matching work_color */ 5113 pwq->work_color = wq->work_color; 3513 pwq->work_color = wq->work_color; 5114 3514 >> 3515 /* sync max_active to the current setting */ >> 3516 pwq_adjust_max_active(pwq); >> 3517 5115 /* link in @pwq */ 3518 /* link in @pwq */ 5116 list_add_tail_rcu(&pwq->pwqs_node, &w !! 3519 list_add_rcu(&pwq->pwqs_node, &wq->pwqs); 5117 } 3520 } 5118 3521 5119 /* obtain a pool matching @attr and create a 3522 /* obtain a pool matching @attr and create a pwq associating the pool and @wq */ 5120 static struct pool_workqueue *alloc_unbound_p 3523 static struct pool_workqueue *alloc_unbound_pwq(struct workqueue_struct *wq, 5121 const 3524 const struct workqueue_attrs *attrs) 5122 { 3525 { 5123 struct worker_pool *pool; 3526 struct worker_pool *pool; 5124 struct pool_workqueue *pwq; 3527 struct pool_workqueue *pwq; 5125 3528 5126 lockdep_assert_held(&wq_pool_mutex); 3529 lockdep_assert_held(&wq_pool_mutex); 5127 3530 5128 pool = get_unbound_pool(attrs); 3531 pool = get_unbound_pool(attrs); 5129 if (!pool) 3532 if (!pool) 5130 return NULL; 3533 return NULL; 5131 3534 5132 pwq = kmem_cache_alloc_node(pwq_cache 3535 pwq = kmem_cache_alloc_node(pwq_cache, GFP_KERNEL, pool->node); 5133 if (!pwq) { 3536 if (!pwq) { 5134 put_unbound_pool(pool); 3537 put_unbound_pool(pool); 5135 return NULL; 3538 return NULL; 5136 } 3539 } 5137 3540 5138 init_pwq(pwq, wq, pool); 3541 init_pwq(pwq, wq, pool); 5139 return pwq; 3542 return pwq; 5140 } 3543 } 5141 3544 5142 static void apply_wqattrs_lock(void) << 5143 { << 5144 mutex_lock(&wq_pool_mutex); << 5145 } << 5146 << 5147 static void apply_wqattrs_unlock(void) << 5148 { << 5149 mutex_unlock(&wq_pool_mutex); << 5150 } << 5151 << 5152 /** 3545 /** 5153 * wq_calc_pod_cpumask - calculate a wq_attrs !! 3546 * wq_calc_node_cpumask - calculate a wq_attrs' cpumask for the specified node 5154 * @attrs: the wq_attrs of the default pwq of 3547 * @attrs: the wq_attrs of the default pwq of the target workqueue 5155 * @cpu: the target CPU !! 
3548 * @node: the target NUMA node >> 3549 * @cpu_going_down: if >= 0, the CPU to consider as offline >> 3550 * @cpumask: outarg, the resulting cpumask 5156 * 3551 * 5157 * Calculate the cpumask a workqueue with @at !! 3552 * Calculate the cpumask a workqueue with @attrs should use on @node. If 5158 * The result is stored in @attrs->__pod_cpum !! 3553 * @cpu_going_down is >= 0, that cpu is considered offline during >> 3554 * calculation. The result is stored in @cpumask. 5159 * 3555 * 5160 * If pod affinity is not enabled, @attrs->cp !! 3556 * If NUMA affinity is not enabled, @attrs->cpumask is always used. If 5161 * and @pod has online CPUs requested by @att !! 3557 * enabled and @node has online CPUs requested by @attrs, the returned 5162 * intersection of the possible CPUs of @pod !! 3558 * cpumask is the intersection of the possible CPUs of @node and >> 3559 * @attrs->cpumask. 5163 * 3560 * 5164 * The caller is responsible for ensuring tha !! 3561 * The caller is responsible for ensuring that the cpumask of @node stays >> 3562 * stable. >> 3563 * >> 3564 * Return: %true if the resulting @cpumask is different from @attrs->cpumask, >> 3565 * %false if equal. 5165 */ 3566 */ 5166 static void wq_calc_pod_cpumask(struct workqu !! 3567 static bool wq_calc_node_cpumask(const struct workqueue_attrs *attrs, int node, >> 3568 int cpu_going_down, cpumask_t *cpumask) 5167 { 3569 { 5168 const struct wq_pod_type *pt = wqattr !! 3570 if (!wq_numa_enabled || attrs->no_numa) 5169 int pod = pt->cpu_pod[cpu]; !! 3571 goto use_dfl; 5170 3572 5171 /* calculate possible CPUs in @pod th !! 3573 /* does @node have any online CPUs @attrs wants? */ 5172 cpumask_and(attrs->__pod_cpumask, pt- !! 3574 cpumask_and(cpumask, cpumask_of_node(node), attrs->cpumask); 5173 /* does @pod have any online CPUs @at !! 3575 if (cpu_going_down >= 0) 5174 if (!cpumask_intersects(attrs->__pod_ !! 3576 cpumask_clear_cpu(cpu_going_down, cpumask); 5175 cpumask_copy(attrs->__pod_cpu !! 3577 5176 return; !! 3578 if (cpumask_empty(cpumask)) >> 3579 goto use_dfl; >> 3580 >> 3581 /* yeap, return possible CPUs in @node that @attrs wants */ >> 3582 cpumask_and(cpumask, attrs->cpumask, wq_numa_possible_cpumask[node]); >> 3583 >> 3584 if (cpumask_empty(cpumask)) { >> 3585 pr_warn_once("WARNING: workqueue cpumask: online intersect > " >> 3586 "possible intersect\n"); >> 3587 return false; 5177 } 3588 } >> 3589 >> 3590 return !cpumask_equal(cpumask, attrs->cpumask); >> 3591 >> 3592 use_dfl: >> 3593 cpumask_copy(cpumask, attrs->cpumask); >> 3594 return false; 5178 } 3595 } 5179 3596 5180 /* install @pwq into @wq and return the old p !! 3597 /* install @pwq into @wq's numa_pwq_tbl[] for @node and return the old pwq */ 5181 static struct pool_workqueue *install_unbound !! 3598 static struct pool_workqueue *numa_pwq_tbl_install(struct workqueue_struct *wq, 5182 int c !! 3599 int node, >> 3600 struct pool_workqueue *pwq) 5183 { 3601 { 5184 struct pool_workqueue __rcu **slot = << 5185 struct pool_workqueue *old_pwq; 3602 struct pool_workqueue *old_pwq; 5186 3603 5187 lockdep_assert_held(&wq_pool_mutex); 3604 lockdep_assert_held(&wq_pool_mutex); 5188 lockdep_assert_held(&wq->mutex); 3605 lockdep_assert_held(&wq->mutex); 5189 3606 5190 /* link_pwq() can handle duplicate ca 3607 /* link_pwq() can handle duplicate calls */ 5191 link_pwq(pwq); 3608 link_pwq(pwq); 5192 3609 5193 old_pwq = rcu_access_pointer(*slot); !! 3610 old_pwq = rcu_access_pointer(wq->numa_pwq_tbl[node]); 5194 rcu_assign_pointer(*slot, pwq); !! 
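/*
 * A minimal illustrative sketch, not part of workqueue.c: stripped of the
 * version differences, both wq_calc_pod_cpumask() and
 * wq_calc_node_cpumask() above intersect the requested cpumask with the
 * pod/node mask and fall back to the full requested mask when the
 * intersection is unusable (the newer column additionally checks for
 * online CPUs).  The core of that calculation in isolation:
 */
#include <linux/cpumask.h>

static void calc_effective_mask(struct cpumask *out,
                                const struct cpumask *requested,
                                const struct cpumask *pod_mask)
{
        cpumask_and(out, pod_mask, requested);
        if (cpumask_empty(out))                 /* this pod/node contributes nothing */
                cpumask_copy(out, requested);   /* fall back to the whole request */
}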
3611 rcu_assign_pointer(wq->numa_pwq_tbl[node], pwq); 5195 return old_pwq; 3612 return old_pwq; 5196 } 3613 } 5197 3614 5198 /* context to store the prepared attrs & pwqs 3615 /* context to store the prepared attrs & pwqs before applying */ 5199 struct apply_wqattrs_ctx { 3616 struct apply_wqattrs_ctx { 5200 struct workqueue_struct *wq; 3617 struct workqueue_struct *wq; /* target workqueue */ 5201 struct workqueue_attrs *attrs; 3618 struct workqueue_attrs *attrs; /* attrs to apply */ 5202 struct list_head list; 3619 struct list_head list; /* queued for batching commit */ 5203 struct pool_workqueue *dfl_pwq; 3620 struct pool_workqueue *dfl_pwq; 5204 struct pool_workqueue *pwq_tbl[]; 3621 struct pool_workqueue *pwq_tbl[]; 5205 }; 3622 }; 5206 3623 5207 /* free the resources after success or abort 3624 /* free the resources after success or abort */ 5208 static void apply_wqattrs_cleanup(struct appl 3625 static void apply_wqattrs_cleanup(struct apply_wqattrs_ctx *ctx) 5209 { 3626 { 5210 if (ctx) { 3627 if (ctx) { 5211 int cpu; !! 3628 int node; 5212 3629 5213 for_each_possible_cpu(cpu) !! 3630 for_each_node(node) 5214 put_pwq_unlocked(ctx- !! 3631 put_pwq_unlocked(ctx->pwq_tbl[node]); 5215 put_pwq_unlocked(ctx->dfl_pwq 3632 put_pwq_unlocked(ctx->dfl_pwq); 5216 3633 5217 free_workqueue_attrs(ctx->att 3634 free_workqueue_attrs(ctx->attrs); 5218 3635 5219 kfree(ctx); 3636 kfree(ctx); 5220 } 3637 } 5221 } 3638 } 5222 3639 5223 /* allocate the attrs and pwqs for later inst 3640 /* allocate the attrs and pwqs for later installation */ 5224 static struct apply_wqattrs_ctx * 3641 static struct apply_wqattrs_ctx * 5225 apply_wqattrs_prepare(struct workqueue_struct 3642 apply_wqattrs_prepare(struct workqueue_struct *wq, 5226 const struct workqueue_ !! 3643 const struct workqueue_attrs *attrs) 5227 const cpumask_var_t unb << 5228 { 3644 { 5229 struct apply_wqattrs_ctx *ctx; 3645 struct apply_wqattrs_ctx *ctx; 5230 struct workqueue_attrs *new_attrs; !! 3646 struct workqueue_attrs *new_attrs, *tmp_attrs; 5231 int cpu; !! 3647 int node; 5232 3648 5233 lockdep_assert_held(&wq_pool_mutex); 3649 lockdep_assert_held(&wq_pool_mutex); 5234 3650 5235 if (WARN_ON(attrs->affn_scope < 0 || !! 3651 ctx = kzalloc(sizeof(*ctx) + nr_node_ids * sizeof(ctx->pwq_tbl[0]), 5236 attrs->affn_scope >= WQ_A !! 3652 GFP_KERNEL); 5237 return ERR_PTR(-EINVAL); << 5238 3653 5239 ctx = kzalloc(struct_size(ctx, pwq_tb !! 3654 new_attrs = alloc_workqueue_attrs(GFP_KERNEL); 5240 !! 3655 tmp_attrs = alloc_workqueue_attrs(GFP_KERNEL); 5241 new_attrs = alloc_workqueue_attrs(); !! 3656 if (!ctx || !new_attrs || !tmp_attrs) 5242 if (!ctx || !new_attrs) << 5243 goto out_free; 3657 goto out_free; 5244 3658 5245 /* 3659 /* >> 3660 * Calculate the attrs of the default pwq. >> 3661 * If the user configured cpumask doesn't overlap with the >> 3662 * wq_unbound_cpumask, we fallback to the wq_unbound_cpumask. >> 3663 */ >> 3664 copy_workqueue_attrs(new_attrs, attrs); >> 3665 cpumask_and(new_attrs->cpumask, new_attrs->cpumask, wq_unbound_cpumask); >> 3666 if (unlikely(cpumask_empty(new_attrs->cpumask))) >> 3667 cpumask_copy(new_attrs->cpumask, wq_unbound_cpumask); >> 3668 >> 3669 /* >> 3670 * We may create multiple pwqs with differing cpumasks. Make a >> 3671 * copy of @new_attrs which will be modified and used to obtain >> 3672 * pools. 
>> 3673 */ >> 3674 copy_workqueue_attrs(tmp_attrs, new_attrs); >> 3675 >> 3676 /* 5246 * If something goes wrong during CPU 3677 * If something goes wrong during CPU up/down, we'll fall back to 5247 * the default pwq covering whole @at 3678 * the default pwq covering whole @attrs->cpumask. Always create 5248 * it even if we don't use it immedia 3679 * it even if we don't use it immediately. 5249 */ 3680 */ 5250 copy_workqueue_attrs(new_attrs, attrs << 5251 wqattrs_actualize_cpumask(new_attrs, << 5252 cpumask_copy(new_attrs->__pod_cpumask << 5253 ctx->dfl_pwq = alloc_unbound_pwq(wq, 3681 ctx->dfl_pwq = alloc_unbound_pwq(wq, new_attrs); 5254 if (!ctx->dfl_pwq) 3682 if (!ctx->dfl_pwq) 5255 goto out_free; 3683 goto out_free; 5256 3684 5257 for_each_possible_cpu(cpu) { !! 3685 for_each_node(node) { 5258 if (new_attrs->ordered) { !! 3686 if (wq_calc_node_cpumask(new_attrs, node, -1, tmp_attrs->cpumask)) { 5259 ctx->dfl_pwq->refcnt+ !! 3687 ctx->pwq_tbl[node] = alloc_unbound_pwq(wq, tmp_attrs); 5260 ctx->pwq_tbl[cpu] = c !! 3688 if (!ctx->pwq_tbl[node]) 5261 } else { << 5262 wq_calc_pod_cpumask(n << 5263 ctx->pwq_tbl[cpu] = a << 5264 if (!ctx->pwq_tbl[cpu << 5265 goto out_free 3689 goto out_free; >> 3690 } else { >> 3691 ctx->dfl_pwq->refcnt++; >> 3692 ctx->pwq_tbl[node] = ctx->dfl_pwq; 5266 } 3693 } 5267 } 3694 } 5268 3695 5269 /* save the user configured attrs and 3696 /* save the user configured attrs and sanitize it. */ 5270 copy_workqueue_attrs(new_attrs, attrs 3697 copy_workqueue_attrs(new_attrs, attrs); 5271 cpumask_and(new_attrs->cpumask, new_a 3698 cpumask_and(new_attrs->cpumask, new_attrs->cpumask, cpu_possible_mask); 5272 cpumask_copy(new_attrs->__pod_cpumask << 5273 ctx->attrs = new_attrs; 3699 ctx->attrs = new_attrs; 5274 3700 5275 /* << 5276 * For initialized ordered workqueues << 5277 * (dfl_pwq). Set the plugged flag of << 5278 * of newly queued work items until e << 5279 * the old pwq's have completed. << 5280 */ << 5281 if ((wq->flags & __WQ_ORDERED) && !li << 5282 ctx->dfl_pwq->plugged = true; << 5283 << 5284 ctx->wq = wq; 3701 ctx->wq = wq; >> 3702 free_workqueue_attrs(tmp_attrs); 5285 return ctx; 3703 return ctx; 5286 3704 5287 out_free: 3705 out_free: >> 3706 free_workqueue_attrs(tmp_attrs); 5288 free_workqueue_attrs(new_attrs); 3707 free_workqueue_attrs(new_attrs); 5289 apply_wqattrs_cleanup(ctx); 3708 apply_wqattrs_cleanup(ctx); 5290 return ERR_PTR(-ENOMEM); !! 3709 return NULL; 5291 } 3710 } 5292 3711 5293 /* set attrs and install prepared pwqs, @ctx 3712 /* set attrs and install prepared pwqs, @ctx points to old pwqs on return */ 5294 static void apply_wqattrs_commit(struct apply 3713 static void apply_wqattrs_commit(struct apply_wqattrs_ctx *ctx) 5295 { 3714 { 5296 int cpu; !! 3715 int node; 5297 3716 5298 /* all pwqs have been created success 3717 /* all pwqs have been created successfully, let's install'em */ 5299 mutex_lock(&ctx->wq->mutex); 3718 mutex_lock(&ctx->wq->mutex); 5300 3719 5301 copy_workqueue_attrs(ctx->wq->unbound 3720 copy_workqueue_attrs(ctx->wq->unbound_attrs, ctx->attrs); 5302 3721 5303 /* save the previous pwqs and install !! 3722 /* save the previous pwq and install the new one */ 5304 for_each_possible_cpu(cpu) !! 3723 for_each_node(node) 5305 ctx->pwq_tbl[cpu] = install_u !! 3724 ctx->pwq_tbl[node] = numa_pwq_tbl_install(ctx->wq, node, 5306 !! 3725 ctx->pwq_tbl[node]); 5307 ctx->dfl_pwq = install_unbound_pwq(ct !! 3726 5308 !! 3727 /* @dfl_pwq might not have been used, ensure it's linked */ 5309 /* update node_nr_active->max */ !! 
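/*
 * A minimal illustrative sketch, not part of workqueue.c: the
 * apply_wqattrs_prepare()/_commit()/_cleanup() trio is a small
 * prepare-commit transaction.  Everything that can fail happens in
 * prepare(), commit() only installs the prepared objects under wq->mutex
 * and cannot fail, and cleanup() releases whatever the context still
 * points at - half-built objects on the error path, or the replaced old
 * pwqs after a successful commit.  The same shape for a hypothetical
 * config object:
 */
#include <linux/slab.h>
#include <linux/string.h>

struct cfg { int a, b; };

static int update_cfg(struct cfg **live, const struct cfg *tmpl)
{
        struct cfg *new_cfg, *old_cfg;

        /* prepare: all failable allocation happens up front */
        new_cfg = kmemdup(tmpl, sizeof(*tmpl), GFP_KERNEL);
        if (!new_cfg)
                return -ENOMEM;

        /* commit: just a pointer swap, cannot fail */
        old_cfg = *live;
        *live = new_cfg;

        /* cleanup: release what was replaced */
        kfree(old_cfg);
        return 0;
}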
3728 link_pwq(ctx->dfl_pwq); 5310 wq_update_node_max_active(ctx->wq, -1 !! 3729 swap(ctx->wq->dfl_pwq, ctx->dfl_pwq); 5311 << 5312 /* rescuer needs to respect wq cpumas << 5313 if (ctx->wq->rescuer) << 5314 set_cpus_allowed_ptr(ctx->wq- << 5315 unbound_ << 5316 3730 5317 mutex_unlock(&ctx->wq->mutex); 3731 mutex_unlock(&ctx->wq->mutex); 5318 } 3732 } 5319 3733 >> 3734 static void apply_wqattrs_lock(void) >> 3735 { >> 3736 /* CPUs should stay stable across pwq creations and installations */ >> 3737 get_online_cpus(); >> 3738 mutex_lock(&wq_pool_mutex); >> 3739 } >> 3740 >> 3741 static void apply_wqattrs_unlock(void) >> 3742 { >> 3743 mutex_unlock(&wq_pool_mutex); >> 3744 put_online_cpus(); >> 3745 } >> 3746 5320 static int apply_workqueue_attrs_locked(struc 3747 static int apply_workqueue_attrs_locked(struct workqueue_struct *wq, 5321 const 3748 const struct workqueue_attrs *attrs) 5322 { 3749 { 5323 struct apply_wqattrs_ctx *ctx; 3750 struct apply_wqattrs_ctx *ctx; 5324 3751 5325 /* only unbound workqueues can change 3752 /* only unbound workqueues can change attributes */ 5326 if (WARN_ON(!(wq->flags & WQ_UNBOUND) 3753 if (WARN_ON(!(wq->flags & WQ_UNBOUND))) 5327 return -EINVAL; 3754 return -EINVAL; 5328 3755 5329 ctx = apply_wqattrs_prepare(wq, attrs !! 3756 /* creating multiple pwqs breaks ordering guarantee */ 5330 if (IS_ERR(ctx)) !! 3757 if (!list_empty(&wq->pwqs)) { 5331 return PTR_ERR(ctx); !! 3758 if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT)) >> 3759 return -EINVAL; >> 3760 >> 3761 wq->flags &= ~__WQ_ORDERED; >> 3762 } >> 3763 >> 3764 ctx = apply_wqattrs_prepare(wq, attrs); >> 3765 if (!ctx) >> 3766 return -ENOMEM; 5332 3767 5333 /* the ctx has been prepared successf 3768 /* the ctx has been prepared successfully, let's commit it */ 5334 apply_wqattrs_commit(ctx); 3769 apply_wqattrs_commit(ctx); 5335 apply_wqattrs_cleanup(ctx); 3770 apply_wqattrs_cleanup(ctx); 5336 3771 5337 return 0; 3772 return 0; 5338 } 3773 } 5339 3774 5340 /** 3775 /** 5341 * apply_workqueue_attrs - apply new workqueu 3776 * apply_workqueue_attrs - apply new workqueue_attrs to an unbound workqueue 5342 * @wq: the target workqueue 3777 * @wq: the target workqueue 5343 * @attrs: the workqueue_attrs to apply, allo 3778 * @attrs: the workqueue_attrs to apply, allocated with alloc_workqueue_attrs() 5344 * 3779 * 5345 * Apply @attrs to an unbound workqueue @wq. !! 3780 * Apply @attrs to an unbound workqueue @wq. Unless disabled, on NUMA 5346 * a separate pwq to each CPU pod with possib !! 3781 * machines, this function maps a separate pwq to each NUMA node with 5347 * work items are affine to the pod it was is !! 3782 * possibles CPUs in @attrs->cpumask so that work items are affine to the 5348 * in-flight work items finish. Note that a w !! 3783 * NUMA node it was issued on. Older pwqs are released as in-flight work 5349 * itself back-to-back will stay on its curre !! 3784 * items finish. Note that a work item which repeatedly requeues itself >> 3785 * back-to-back will stay on its current pwq. 5350 * 3786 * 5351 * Performs GFP_KERNEL allocations. 3787 * Performs GFP_KERNEL allocations. 5352 * 3788 * 5353 * Return: 0 on success and -errno on failure 3789 * Return: 0 on success and -errno on failure. 5354 */ 3790 */ 5355 int apply_workqueue_attrs(struct workqueue_st 3791 int apply_workqueue_attrs(struct workqueue_struct *wq, 5356 const struct workqu 3792 const struct workqueue_attrs *attrs) 5357 { 3793 { 5358 int ret; 3794 int ret; 5359 3795 5360 mutex_lock(&wq_pool_mutex); !! 
3796 apply_wqattrs_lock(); 5361 ret = apply_workqueue_attrs_locked(wq 3797 ret = apply_workqueue_attrs_locked(wq, attrs); 5362 mutex_unlock(&wq_pool_mutex); !! 3798 apply_wqattrs_unlock(); 5363 3799 5364 return ret; 3800 return ret; 5365 } 3801 } >> 3802 EXPORT_SYMBOL_GPL(apply_workqueue_attrs); 5366 3803 5367 /** 3804 /** 5368 * unbound_wq_update_pwq - update a pwq slot !! 3805 * wq_update_unbound_numa - update NUMA affinity of a wq for CPU hot[un]plug 5369 * @wq: the target workqueue 3806 * @wq: the target workqueue 5370 * @cpu: the CPU to update the pwq slot for !! 3807 * @cpu: the CPU coming up or going down >> 3808 * @online: whether @cpu is coming up or going down 5371 * 3809 * 5372 * This function is to be called from %CPU_DO 3810 * This function is to be called from %CPU_DOWN_PREPARE, %CPU_ONLINE and 5373 * %CPU_DOWN_FAILED. @cpu is in the same pod !! 3811 * %CPU_DOWN_FAILED. @cpu is being hot[un]plugged, update NUMA affinity of 5374 * !! 3812 * @wq accordingly. 5375 * 3813 * 5376 * If pod affinity can't be adjusted due to m !! 3814 * If NUMA affinity can't be adjusted due to memory allocation failure, it 5377 * back to @wq->dfl_pwq which may not be opti !! 3815 * falls back to @wq->dfl_pwq which may not be optimal but is always 5378 * !! 3816 * correct. 5379 * Note that when the last allowed CPU of a p !! 3817 * 5380 * with a cpumask spanning multiple pods, the !! 3818 * Note that when the last allowed CPU of a NUMA node goes offline for a 5381 * executing the work items for the workqueue !! 3819 * workqueue with a cpumask spanning multiple nodes, the workers which were 5382 * may execute on any CPU. This is similar to !! 3820 * already executing the work items for the workqueue will lose their CPU 5383 * CPU_DOWN. If a workqueue user wants strict !! 3821 * affinity and may execute on any CPU. This is similar to how per-cpu 5384 * responsibility to flush the work item from !! 3822 * workqueues behave on CPU_DOWN. If a workqueue user wants strict >> 3823 * affinity, it's the user's responsibility to flush the work item from >> 3824 * CPU_DOWN_PREPARE. 5385 */ 3825 */ 5386 static void unbound_wq_update_pwq(struct work !! 3826 static void wq_update_unbound_numa(struct workqueue_struct *wq, int cpu, >> 3827 bool online) 5387 { 3828 { >> 3829 int node = cpu_to_node(cpu); >> 3830 int cpu_off = online ? -1 : cpu; 5388 struct pool_workqueue *old_pwq = NULL 3831 struct pool_workqueue *old_pwq = NULL, *pwq; 5389 struct workqueue_attrs *target_attrs; 3832 struct workqueue_attrs *target_attrs; >> 3833 cpumask_t *cpumask; 5390 3834 5391 lockdep_assert_held(&wq_pool_mutex); 3835 lockdep_assert_held(&wq_pool_mutex); 5392 3836 5393 if (!(wq->flags & WQ_UNBOUND) || wq-> !! 3837 if (!wq_numa_enabled || !(wq->flags & WQ_UNBOUND) || >> 3838 wq->unbound_attrs->no_numa) 5394 return; 3839 return; 5395 3840 5396 /* 3841 /* 5397 * We don't wanna alloc/free wq_attrs 3842 * We don't wanna alloc/free wq_attrs for each wq for each CPU. 5398 * Let's use a preallocated one. The 3843 * Let's use a preallocated one. The following buf is protected by 5399 * CPU hotplug exclusion. 3844 * CPU hotplug exclusion. 5400 */ 3845 */ 5401 target_attrs = unbound_wq_update_pwq_ !! 3846 target_attrs = wq_update_unbound_numa_attrs_buf; >> 3847 cpumask = target_attrs->cpumask; 5402 3848 5403 copy_workqueue_attrs(target_attrs, wq 3849 copy_workqueue_attrs(target_attrs, wq->unbound_attrs); 5404 wqattrs_actualize_cpumask(target_attr !! 
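/*
 * A minimal illustrative sketch, not part of workqueue.c: typical use of
 * apply_workqueue_attrs() - build an attrs object, adjust it, apply it to
 * a WQ_UNBOUND workqueue and free it again.  Note that the two columns
 * differ here: the older one exports the symbol and
 * alloc_workqueue_attrs() takes a gfp_t, the newer one drops both, so
 * treat availability outside core code as an assumption.
 */
#include <linux/workqueue.h>
#include <linux/cpumask.h>

static int raise_wq_priority(struct workqueue_struct *unbound_wq)
{
        struct workqueue_attrs *attrs;
        int ret;

        attrs = alloc_workqueue_attrs();  /* alloc_workqueue_attrs(GFP_KERNEL) in the older column */
        if (!attrs)
                return -ENOMEM;

        attrs->nice = -10;                /* run this wq's workers at a higher priority */
        cpumask_copy(attrs->cpumask, cpu_possible_mask);

        ret = apply_workqueue_attrs(unbound_wq, attrs);
        free_workqueue_attrs(attrs);
        return ret;
}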
3850 pwq = unbound_pwq_by_node(wq, node); 5405 3851 5406 /* nothing to do if the target cpumas !! 3852 /* 5407 wq_calc_pod_cpumask(target_attrs, cpu !! 3853 * Let's determine what needs to be done. If the target cpumask is 5408 if (wqattrs_equal(target_attrs, unbou !! 3854 * different from the default pwq's, we need to compare it to @pwq's 5409 return; !! 3855 * and create a new one if they don't match. If the target cpumask >> 3856 * equals the default pwq's, the default pwq should be used. >> 3857 */ >> 3858 if (wq_calc_node_cpumask(wq->dfl_pwq->pool->attrs, node, cpu_off, cpumask)) { >> 3859 if (cpumask_equal(cpumask, pwq->pool->attrs->cpumask)) >> 3860 return; >> 3861 } else { >> 3862 goto use_dfl_pwq; >> 3863 } 5410 3864 5411 /* create a new pwq */ 3865 /* create a new pwq */ 5412 pwq = alloc_unbound_pwq(wq, target_at 3866 pwq = alloc_unbound_pwq(wq, target_attrs); 5413 if (!pwq) { 3867 if (!pwq) { 5414 pr_warn("workqueue: allocatio !! 3868 pr_warn("workqueue: allocation failed while updating NUMA affinity of \"%s\"\n", 5415 wq->name); 3869 wq->name); 5416 goto use_dfl_pwq; 3870 goto use_dfl_pwq; 5417 } 3871 } 5418 3872 5419 /* Install the new pwq. */ 3873 /* Install the new pwq. */ 5420 mutex_lock(&wq->mutex); 3874 mutex_lock(&wq->mutex); 5421 old_pwq = install_unbound_pwq(wq, cpu !! 3875 old_pwq = numa_pwq_tbl_install(wq, node, pwq); 5422 goto out_unlock; 3876 goto out_unlock; 5423 3877 5424 use_dfl_pwq: 3878 use_dfl_pwq: 5425 mutex_lock(&wq->mutex); 3879 mutex_lock(&wq->mutex); 5426 pwq = unbound_pwq(wq, -1); !! 3880 spin_lock_irq(&wq->dfl_pwq->pool->lock); 5427 raw_spin_lock_irq(&pwq->pool->lock); !! 3881 get_pwq(wq->dfl_pwq); 5428 get_pwq(pwq); !! 3882 spin_unlock_irq(&wq->dfl_pwq->pool->lock); 5429 raw_spin_unlock_irq(&pwq->pool->lock) !! 3883 old_pwq = numa_pwq_tbl_install(wq, node, wq->dfl_pwq); 5430 old_pwq = install_unbound_pwq(wq, cpu << 5431 out_unlock: 3884 out_unlock: 5432 mutex_unlock(&wq->mutex); 3885 mutex_unlock(&wq->mutex); 5433 put_pwq_unlocked(old_pwq); 3886 put_pwq_unlocked(old_pwq); 5434 } 3887 } 5435 3888 5436 static int alloc_and_link_pwqs(struct workque 3889 static int alloc_and_link_pwqs(struct workqueue_struct *wq) 5437 { 3890 { 5438 bool highpri = wq->flags & WQ_HIGHPRI 3891 bool highpri = wq->flags & WQ_HIGHPRI; 5439 int cpu, ret; 3892 int cpu, ret; 5440 3893 5441 lockdep_assert_held(&wq_pool_mutex); << 5442 << 5443 wq->cpu_pwq = alloc_percpu(struct poo << 5444 if (!wq->cpu_pwq) << 5445 goto enomem; << 5446 << 5447 if (!(wq->flags & WQ_UNBOUND)) { 3894 if (!(wq->flags & WQ_UNBOUND)) { 5448 struct worker_pool __percpu * !! 3895 wq->cpu_pwqs = alloc_percpu(struct pool_workqueue); 5449 !! 3896 if (!wq->cpu_pwqs) 5450 if (wq->flags & WQ_BH) !! 3897 return -ENOMEM; 5451 pools = bh_worker_poo << 5452 else << 5453 pools = cpu_worker_po << 5454 3898 5455 for_each_possible_cpu(cpu) { 3899 for_each_possible_cpu(cpu) { 5456 struct pool_workqueue !! 3900 struct pool_workqueue *pwq = 5457 struct worker_pool *p !! 3901 per_cpu_ptr(wq->cpu_pwqs, cpu); 5458 !! 3902 struct worker_pool *cpu_pools = 5459 pool = &(per_cpu_ptr( !! 3903 per_cpu(cpu_worker_pools, cpu); 5460 pwq_p = per_cpu_ptr(w << 5461 3904 5462 *pwq_p = kmem_cache_a !! 3905 init_pwq(pwq, wq, &cpu_pools[highpri]); 5463 << 5464 if (!*pwq_p) << 5465 goto enomem; << 5466 << 5467 init_pwq(*pwq_p, wq, << 5468 3906 5469 mutex_lock(&wq->mutex 3907 mutex_lock(&wq->mutex); 5470 link_pwq(*pwq_p); !! 
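/*
 * A minimal illustrative sketch, not part of workqueue.c: in the newer
 * column alloc_and_link_pwqs() allocates wq->cpu_pwq as a per-CPU
 * *pointer* and then allocates each pool_workqueue separately so it can
 * sit on the CPU's own node, whereas the older column embeds the
 * pool_workqueue directly in the per-CPU area.  The per-CPU-pointer idiom
 * in isolation, for a hypothetical "struct item":
 */
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/topology.h>

struct item { int val; };

static struct item __percpu **item_tbl;

static int alloc_per_cpu_items(void)
{
        int cpu;

        item_tbl = alloc_percpu(struct item *);
        if (!item_tbl)
                return -ENOMEM;

        for_each_possible_cpu(cpu) {
                struct item **slot = per_cpu_ptr(item_tbl, cpu);

                *slot = kzalloc_node(sizeof(**slot), GFP_KERNEL,
                                     cpu_to_node(cpu));
                if (!*slot)
                        return -ENOMEM; /* caller unwinds, as alloc_and_link_pwqs() does */
        }
        return 0;
}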
3908 link_pwq(pwq); 5471 mutex_unlock(&wq->mut 3909 mutex_unlock(&wq->mutex); 5472 } 3910 } 5473 return 0; 3911 return 0; 5474 } !! 3912 } else if (wq->flags & __WQ_ORDERED) { 5475 !! 3913 ret = apply_workqueue_attrs(wq, ordered_wq_attrs[highpri]); 5476 if (wq->flags & __WQ_ORDERED) { << 5477 struct pool_workqueue *dfl_pw << 5478 << 5479 ret = apply_workqueue_attrs_l << 5480 /* there should only be singl 3914 /* there should only be single pwq for ordering guarantee */ 5481 dfl_pwq = rcu_access_pointer( !! 3915 WARN(!ret && (wq->pwqs.next != &wq->dfl_pwq->pwqs_node || 5482 WARN(!ret && (wq->pwqs.next ! !! 3916 wq->pwqs.prev != &wq->dfl_pwq->pwqs_node), 5483 wq->pwqs.prev ! << 5484 "ordering guarantee brok 3917 "ordering guarantee broken for workqueue %s\n", wq->name); >> 3918 return ret; 5485 } else { 3919 } else { 5486 ret = apply_workqueue_attrs_l !! 3920 return apply_workqueue_attrs(wq, unbound_std_wq_attrs[highpri]); 5487 } 3921 } 5488 << 5489 return ret; << 5490 << 5491 enomem: << 5492 if (wq->cpu_pwq) { << 5493 for_each_possible_cpu(cpu) { << 5494 struct pool_workqueue << 5495 << 5496 if (pwq) << 5497 kmem_cache_fr << 5498 } << 5499 free_percpu(wq->cpu_pwq); << 5500 wq->cpu_pwq = NULL; << 5501 } << 5502 return -ENOMEM; << 5503 } 3922 } 5504 3923 5505 static int wq_clamp_max_active(int max_active 3924 static int wq_clamp_max_active(int max_active, unsigned int flags, 5506 const char *na 3925 const char *name) 5507 { 3926 { 5508 if (max_active < 1 || max_active > WQ !! 3927 int lim = flags & WQ_UNBOUND ? WQ_UNBOUND_MAX_ACTIVE : WQ_MAX_ACTIVE; >> 3928 >> 3929 if (max_active < 1 || max_active > lim) 5509 pr_warn("workqueue: max_activ 3930 pr_warn("workqueue: max_active %d requested for %s is out of range, clamping between %d and %d\n", 5510 max_active, name, 1, !! 3931 max_active, name, 1, lim); 5511 3932 5512 return clamp_val(max_active, 1, WQ_MA !! 3933 return clamp_val(max_active, 1, lim); 5513 } 3934 } 5514 3935 5515 /* 3936 /* 5516 * Workqueues which may be used during memory 3937 * Workqueues which may be used during memory reclaim should have a rescuer 5517 * to guarantee forward progress. 3938 * to guarantee forward progress. 5518 */ 3939 */ 5519 static int init_rescuer(struct workqueue_stru 3940 static int init_rescuer(struct workqueue_struct *wq) 5520 { 3941 { 5521 struct worker *rescuer; 3942 struct worker *rescuer; 5522 char id_buf[WORKER_ID_LEN]; << 5523 int ret; 3943 int ret; 5524 3944 5525 lockdep_assert_held(&wq_pool_mutex); << 5526 << 5527 if (!(wq->flags & WQ_MEM_RECLAIM)) 3945 if (!(wq->flags & WQ_MEM_RECLAIM)) 5528 return 0; 3946 return 0; 5529 3947 5530 rescuer = alloc_worker(NUMA_NO_NODE); 3948 rescuer = alloc_worker(NUMA_NO_NODE); 5531 if (!rescuer) { !! 3949 if (!rescuer) 5532 pr_err("workqueue: Failed to << 5533 wq->name); << 5534 return -ENOMEM; 3950 return -ENOMEM; 5535 } << 5536 3951 5537 rescuer->rescue_wq = wq; 3952 rescuer->rescue_wq = wq; 5538 format_worker_id(id_buf, sizeof(id_bu !! 3953 rescuer->task = kthread_create(rescuer_thread, rescuer, "%s", wq->name); 5539 !! 3954 ret = PTR_ERR_OR_ZERO(rescuer->task); 5540 rescuer->task = kthread_create(rescue !! 3955 if (ret) { 5541 if (IS_ERR(rescuer->task)) { << 5542 ret = PTR_ERR(rescuer->task); << 5543 pr_err("workqueue: Failed to << 5544 wq->name, ERR_PTR(ret) << 5545 kfree(rescuer); 3956 kfree(rescuer); 5546 return ret; 3957 return ret; 5547 } 3958 } 5548 3959 5549 wq->rescuer = rescuer; 3960 wq->rescuer = rescuer; 5550 if (wq->flags & WQ_UNBOUND) !! 
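/*
 * A minimal illustrative sketch, not part of workqueue.c: a driver whose
 * work items sit in the memory-reclaim path passes WQ_MEM_RECLAIM so that
 * the rescuer created by init_rescuer() can keep the queue making forward
 * progress even when no new worker thread can be created.  Names below
 * are hypothetical.
 */
#include <linux/workqueue.h>

static struct workqueue_struct *mydev_io_wq;

static int mydev_create_io_wq(void)
{
        /* max_active == 0 selects the default */
        mydev_io_wq = alloc_workqueue("mydev_io", WQ_MEM_RECLAIM, 0);
        return mydev_io_wq ? 0 : -ENOMEM;
}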
3961 kthread_bind_mask(rescuer->task, cpu_possible_mask); 5551 kthread_bind_mask(rescuer->ta << 5552 else << 5553 kthread_bind_mask(rescuer->ta << 5554 wake_up_process(rescuer->task); 3962 wake_up_process(rescuer->task); 5555 3963 5556 return 0; 3964 return 0; 5557 } 3965 } 5558 3966 5559 /** !! 3967 struct workqueue_struct *__alloc_workqueue_key(const char *fmt, 5560 * wq_adjust_max_active - update a wq's max_a !! 3968 unsigned int flags, 5561 * @wq: target workqueue !! 3969 int max_active, 5562 * !! 3970 struct lock_class_key *key, 5563 * If @wq isn't freezing, set @wq->max_active !! 3971 const char *lock_name, ...) 5564 * activate inactive work items accordingly. << 5565 * @wq->max_active to zero. << 5566 */ << 5567 static void wq_adjust_max_active(struct workq << 5568 { << 5569 bool activated; << 5570 int new_max, new_min; << 5571 << 5572 lockdep_assert_held(&wq->mutex); << 5573 << 5574 if ((wq->flags & WQ_FREEZABLE) && wor << 5575 new_max = 0; << 5576 new_min = 0; << 5577 } else { << 5578 new_max = wq->saved_max_activ << 5579 new_min = wq->saved_min_activ << 5580 } << 5581 << 5582 if (wq->max_active == new_max && wq-> << 5583 return; << 5584 << 5585 /* << 5586 * Update @wq->max/min_active and the << 5587 * active work items are allowed. Thi << 5588 * because new work items are always << 5589 * work items if there are any. << 5590 */ << 5591 WRITE_ONCE(wq->max_active, new_max); << 5592 WRITE_ONCE(wq->min_active, new_min); << 5593 << 5594 if (wq->flags & WQ_UNBOUND) << 5595 wq_update_node_max_active(wq, << 5596 << 5597 if (new_max == 0) << 5598 return; << 5599 << 5600 /* << 5601 * Round-robin through pwq's activati << 5602 * until max_active is filled. << 5603 */ << 5604 do { << 5605 struct pool_workqueue *pwq; << 5606 << 5607 activated = false; << 5608 for_each_pwq(pwq, wq) { << 5609 unsigned long irq_fla << 5610 << 5611 /* can be called duri << 5612 raw_spin_lock_irqsave << 5613 if (pwq_activate_firs << 5614 activated = t << 5615 kick_pool(pwq << 5616 } << 5617 raw_spin_unlock_irqre << 5618 } << 5619 } while (activated); << 5620 } << 5621 << 5622 __printf(1, 4) << 5623 struct workqueue_struct *alloc_workqueue(cons << 5624 unsi << 5625 int << 5626 { 3972 { >> 3973 size_t tbl_size = 0; 5627 va_list args; 3974 va_list args; 5628 struct workqueue_struct *wq; 3975 struct workqueue_struct *wq; 5629 size_t wq_size; !! 3976 struct pool_workqueue *pwq; 5630 int name_len; << 5631 3977 5632 if (flags & WQ_BH) { !! 3978 /* 5633 if (WARN_ON_ONCE(flags & ~__W !! 3979 * Unbound && max_active == 1 used to imply ordered, which is no 5634 return NULL; !! 3980 * longer the case on NUMA machines due to per-node pools. While 5635 if (WARN_ON_ONCE(max_active)) !! 3981 * alloc_ordered_workqueue() is the right way to create an ordered 5636 return NULL; !! 3982 * workqueue, keep the previous behavior to avoid subtle breakages 5637 } !! 3983 * on NUMA. >> 3984 */ >> 3985 if ((flags & WQ_UNBOUND) && max_active == 1) >> 3986 flags |= __WQ_ORDERED; 5638 3987 5639 /* see the comment above the definiti 3988 /* see the comment above the definition of WQ_POWER_EFFICIENT */ 5640 if ((flags & WQ_POWER_EFFICIENT) && w 3989 if ((flags & WQ_POWER_EFFICIENT) && wq_power_efficient) 5641 flags |= WQ_UNBOUND; 3990 flags |= WQ_UNBOUND; 5642 3991 5643 /* allocate wq and format name */ 3992 /* allocate wq and format name */ 5644 if (flags & WQ_UNBOUND) 3993 if (flags & WQ_UNBOUND) 5645 wq_size = struct_size(wq, nod !! 
3994 tbl_size = nr_node_ids * sizeof(wq->numa_pwq_tbl[0]); 5646 else << 5647 wq_size = sizeof(*wq); << 5648 3995 5649 wq = kzalloc(wq_size, GFP_KERNEL); !! 3996 wq = kzalloc(sizeof(*wq) + tbl_size, GFP_KERNEL); 5650 if (!wq) 3997 if (!wq) 5651 return NULL; 3998 return NULL; 5652 3999 5653 if (flags & WQ_UNBOUND) { 4000 if (flags & WQ_UNBOUND) { 5654 wq->unbound_attrs = alloc_wor !! 4001 wq->unbound_attrs = alloc_workqueue_attrs(GFP_KERNEL); 5655 if (!wq->unbound_attrs) 4002 if (!wq->unbound_attrs) 5656 goto err_free_wq; 4003 goto err_free_wq; 5657 } 4004 } 5658 4005 5659 va_start(args, max_active); !! 4006 va_start(args, lock_name); 5660 name_len = vsnprintf(wq->name, sizeof !! 4007 vsnprintf(wq->name, sizeof(wq->name), fmt, args); 5661 va_end(args); 4008 va_end(args); 5662 4009 5663 if (name_len >= WQ_NAME_LEN) !! 4010 max_active = max_active ?: WQ_DFL_ACTIVE; 5664 pr_warn_once("workqueue: name !! 4011 max_active = wq_clamp_max_active(max_active, flags, wq->name); 5665 wq->name); << 5666 << 5667 if (flags & WQ_BH) { << 5668 /* << 5669 * BH workqueues always share << 5670 * and don't impose any max_a << 5671 */ << 5672 max_active = INT_MAX; << 5673 } else { << 5674 max_active = max_active ?: WQ << 5675 max_active = wq_clamp_max_act << 5676 } << 5677 4012 5678 /* init wq */ 4013 /* init wq */ 5679 wq->flags = flags; 4014 wq->flags = flags; 5680 wq->max_active = max_active; !! 4015 wq->saved_max_active = max_active; 5681 wq->min_active = min(max_active, WQ_D << 5682 wq->saved_max_active = wq->max_active << 5683 wq->saved_min_active = wq->min_active << 5684 mutex_init(&wq->mutex); 4016 mutex_init(&wq->mutex); 5685 atomic_set(&wq->nr_pwqs_to_flush, 0); 4017 atomic_set(&wq->nr_pwqs_to_flush, 0); 5686 INIT_LIST_HEAD(&wq->pwqs); 4018 INIT_LIST_HEAD(&wq->pwqs); 5687 INIT_LIST_HEAD(&wq->flusher_queue); 4019 INIT_LIST_HEAD(&wq->flusher_queue); 5688 INIT_LIST_HEAD(&wq->flusher_overflow) 4020 INIT_LIST_HEAD(&wq->flusher_overflow); 5689 INIT_LIST_HEAD(&wq->maydays); 4021 INIT_LIST_HEAD(&wq->maydays); 5690 4022 5691 wq_init_lockdep(wq); !! 4023 lockdep_init_map(&wq->lockdep_map, lock_name, key, 0); 5692 INIT_LIST_HEAD(&wq->list); 4024 INIT_LIST_HEAD(&wq->list); 5693 4025 5694 if (flags & WQ_UNBOUND) { !! 4026 if (alloc_and_link_pwqs(wq) < 0) 5695 if (alloc_node_nr_active(wq-> !! 4027 goto err_free_wq; 5696 goto err_unreg_lockde !! 4028 5697 } !! 4029 if (wq_online && init_rescuer(wq) < 0) >> 4030 goto err_destroy; >> 4031 >> 4032 if ((wq->flags & WQ_SYSFS) && workqueue_sysfs_register(wq)) >> 4033 goto err_destroy; 5698 4034 5699 /* 4035 /* 5700 * wq_pool_mutex protects the workque !! 4036 * wq_pool_mutex protects global freeze state and workqueues list. 5701 * and the global freeze state. !! 4037 * Grab it, adjust max_active and add the new @wq to workqueues >> 4038 * list. 5702 */ 4039 */ 5703 apply_wqattrs_lock(); !! 4040 mutex_lock(&wq_pool_mutex); 5704 << 5705 if (alloc_and_link_pwqs(wq) < 0) << 5706 goto err_unlock_free_node_nr_ << 5707 4041 5708 mutex_lock(&wq->mutex); 4042 mutex_lock(&wq->mutex); 5709 wq_adjust_max_active(wq); !! 4043 for_each_pwq(pwq, wq) >> 4044 pwq_adjust_max_active(pwq); 5710 mutex_unlock(&wq->mutex); 4045 mutex_unlock(&wq->mutex); 5711 4046 5712 list_add_tail_rcu(&wq->list, &workque 4047 list_add_tail_rcu(&wq->list, &workqueues); 5713 4048 5714 if (wq_online && init_rescuer(wq) < 0 !! 
4049 mutex_unlock(&wq_pool_mutex); 5715 goto err_unlock_destroy; << 5716 << 5717 apply_wqattrs_unlock(); << 5718 << 5719 if ((wq->flags & WQ_SYSFS) && workque << 5720 goto err_destroy; << 5721 4050 5722 return wq; 4051 return wq; 5723 4052 5724 err_unlock_free_node_nr_active: << 5725 apply_wqattrs_unlock(); << 5726 /* << 5727 * Failed alloc_and_link_pwqs() may l << 5728 * flushing the pwq_release_worker en << 5729 * completes before calling kfree(wq) << 5730 */ << 5731 if (wq->flags & WQ_UNBOUND) { << 5732 kthread_flush_worker(pwq_rele << 5733 free_node_nr_active(wq->node_ << 5734 } << 5735 err_unreg_lockdep: << 5736 wq_unregister_lockdep(wq); << 5737 wq_free_lockdep(wq); << 5738 err_free_wq: 4053 err_free_wq: 5739 free_workqueue_attrs(wq->unbound_attr 4054 free_workqueue_attrs(wq->unbound_attrs); 5740 kfree(wq); 4055 kfree(wq); 5741 return NULL; 4056 return NULL; 5742 err_unlock_destroy: << 5743 apply_wqattrs_unlock(); << 5744 err_destroy: 4057 err_destroy: 5745 destroy_workqueue(wq); 4058 destroy_workqueue(wq); 5746 return NULL; 4059 return NULL; 5747 } 4060 } 5748 EXPORT_SYMBOL_GPL(alloc_workqueue); !! 4061 EXPORT_SYMBOL_GPL(__alloc_workqueue_key); 5749 << 5750 static bool pwq_busy(struct pool_workqueue *p << 5751 { << 5752 int i; << 5753 << 5754 for (i = 0; i < WORK_NR_COLORS; i++) << 5755 if (pwq->nr_in_flight[i]) << 5756 return true; << 5757 << 5758 if ((pwq != rcu_access_pointer(pwq->w << 5759 return true; << 5760 if (!pwq_is_empty(pwq)) << 5761 return true; << 5762 << 5763 return false; << 5764 } << 5765 4062 5766 /** 4063 /** 5767 * destroy_workqueue - safely terminate a wor 4064 * destroy_workqueue - safely terminate a workqueue 5768 * @wq: target workqueue 4065 * @wq: target workqueue 5769 * 4066 * 5770 * Safely destroy a workqueue. All work curre 4067 * Safely destroy a workqueue. All work currently pending will be done first. 5771 */ 4068 */ 5772 void destroy_workqueue(struct workqueue_struc 4069 void destroy_workqueue(struct workqueue_struct *wq) 5773 { 4070 { 5774 struct pool_workqueue *pwq; 4071 struct pool_workqueue *pwq; 5775 int cpu; !! 4072 int node; 5776 << 5777 /* << 5778 * Remove it from sysfs first so that << 5779 * lead to sysfs name conflicts. << 5780 */ << 5781 workqueue_sysfs_unregister(wq); << 5782 << 5783 /* mark the workqueue destruction is << 5784 mutex_lock(&wq->mutex); << 5785 wq->flags |= __WQ_DESTROYING; << 5786 mutex_unlock(&wq->mutex); << 5787 4073 5788 /* drain it before proceeding with de 4074 /* drain it before proceeding with destruction */ 5789 drain_workqueue(wq); 4075 drain_workqueue(wq); 5790 4076 5791 /* kill rescuer, if sanity checks fai !! 4077 /* sanity checks */ 5792 if (wq->rescuer) { << 5793 struct worker *rescuer = wq-> << 5794 << 5795 /* this prevents new queueing << 5796 raw_spin_lock_irq(&wq_mayday_ << 5797 wq->rescuer = NULL; << 5798 raw_spin_unlock_irq(&wq_mayda << 5799 << 5800 /* rescuer will empty maydays << 5801 kthread_stop(rescuer->task); << 5802 kfree(rescuer); << 5803 } << 5804 << 5805 /* << 5806 * Sanity checks - grab all the locks << 5807 * in-flight operations which may do << 5808 */ << 5809 mutex_lock(&wq_pool_mutex); << 5810 mutex_lock(&wq->mutex); 4078 mutex_lock(&wq->mutex); 5811 for_each_pwq(pwq, wq) { 4079 for_each_pwq(pwq, wq) { 5812 raw_spin_lock_irq(&pwq->pool- !! 4080 int i; 5813 if (WARN_ON(pwq_busy(pwq))) { !! 4081 5814 pr_warn("%s: %s has t !! 4082 for (i = 0; i < WORK_NR_COLORS; i++) { 5815 __func__, wq- !! 4083 if (WARN_ON(pwq->nr_in_flight[i])) { 5816 show_pwq(pwq); !! 
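/*
 * A minimal illustrative sketch, not part of workqueue.c: the usual
 * consumer-side pairing for alloc_workqueue() shown above - the name is a
 * printf-style format (truncated with a warning if it exceeds WQ_NAME_LEN
 * in the newer column) and work items are queued with queue_work().
 * Names below are hypothetical.
 */
#include <linux/workqueue.h>

struct mydrv {
        struct workqueue_struct *wq;
        struct work_struct refresh_work;
};

static void mydrv_refresh(struct work_struct *work)
{
        struct mydrv *md = container_of(work, struct mydrv, refresh_work);

        /* runs in process context on one of the shared pool workers */
        (void)md;
}

static int mydrv_setup(struct mydrv *md, int id)
{
        md->wq = alloc_workqueue("mydrv/%d", WQ_UNBOUND, 0, id);
        if (!md->wq)
                return -ENOMEM;

        INIT_WORK(&md->refresh_work, mydrv_refresh);
        queue_work(md->wq, &md->refresh_work);
        return 0;
}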
4084 mutex_unlock(&wq->mutex); 5817 raw_spin_unlock_irq(& !! 4085 show_workqueue_state(); >> 4086 return; >> 4087 } >> 4088 } >> 4089 >> 4090 if (WARN_ON((pwq != wq->dfl_pwq) && (pwq->refcnt > 1)) || >> 4091 WARN_ON(pwq->nr_active) || >> 4092 WARN_ON(!list_empty(&pwq->delayed_works))) { 5818 mutex_unlock(&wq->mut 4093 mutex_unlock(&wq->mutex); 5819 mutex_unlock(&wq_pool !! 4094 show_workqueue_state(); 5820 show_one_workqueue(wq << 5821 return; 4095 return; 5822 } 4096 } 5823 raw_spin_unlock_irq(&pwq->poo << 5824 } 4097 } 5825 mutex_unlock(&wq->mutex); 4098 mutex_unlock(&wq->mutex); 5826 4099 5827 /* 4100 /* 5828 * wq list is used to freeze wq, remo 4101 * wq list is used to freeze wq, remove from list after 5829 * flushing is complete in case freez 4102 * flushing is complete in case freeze races us. 5830 */ 4103 */ >> 4104 mutex_lock(&wq_pool_mutex); 5831 list_del_rcu(&wq->list); 4105 list_del_rcu(&wq->list); 5832 mutex_unlock(&wq_pool_mutex); 4106 mutex_unlock(&wq_pool_mutex); 5833 4107 5834 /* !! 4108 workqueue_sysfs_unregister(wq); 5835 * We're the sole accessor of @wq. Di << 5836 * to put the base refs. @wq will be << 5837 * pwq_put. RCU read lock prevents @w << 5838 */ << 5839 rcu_read_lock(); << 5840 4109 5841 for_each_possible_cpu(cpu) { !! 4110 if (wq->rescuer) 5842 put_pwq_unlocked(unbound_pwq( !! 4111 kthread_stop(wq->rescuer->task); 5843 RCU_INIT_POINTER(*unbound_pwq << 5844 } << 5845 4112 5846 put_pwq_unlocked(unbound_pwq(wq, -1)) !! 4113 if (!(wq->flags & WQ_UNBOUND)) { 5847 RCU_INIT_POINTER(*unbound_pwq_slot(wq !! 4114 /* >> 4115 * The base ref is never dropped on per-cpu pwqs. Directly >> 4116 * schedule RCU free. >> 4117 */ >> 4118 call_rcu_sched(&wq->rcu, rcu_free_wq); >> 4119 } else { >> 4120 /* >> 4121 * We're the sole accessor of @wq at this point. Directly >> 4122 * access numa_pwq_tbl[] and dfl_pwq to put the base refs. >> 4123 * @wq will be freed when the last pwq is released. >> 4124 */ >> 4125 for_each_node(node) { >> 4126 pwq = rcu_access_pointer(wq->numa_pwq_tbl[node]); >> 4127 RCU_INIT_POINTER(wq->numa_pwq_tbl[node], NULL); >> 4128 put_pwq_unlocked(pwq); >> 4129 } 5848 4130 5849 rcu_read_unlock(); !! 4131 /* >> 4132 * Put dfl_pwq. @wq may be freed any time after dfl_pwq is >> 4133 * put. Don't access it afterwards. >> 4134 */ >> 4135 pwq = wq->dfl_pwq; >> 4136 wq->dfl_pwq = NULL; >> 4137 put_pwq_unlocked(pwq); >> 4138 } 5850 } 4139 } 5851 EXPORT_SYMBOL_GPL(destroy_workqueue); 4140 EXPORT_SYMBOL_GPL(destroy_workqueue); 5852 4141 5853 /** 4142 /** 5854 * workqueue_set_max_active - adjust max_acti 4143 * workqueue_set_max_active - adjust max_active of a workqueue 5855 * @wq: target workqueue 4144 * @wq: target workqueue 5856 * @max_active: new max_active value. 4145 * @max_active: new max_active value. 5857 * 4146 * 5858 * Set max_active of @wq to @max_active. See !! 4147 * Set max_active of @wq to @max_active. 5859 * comment. << 5860 * 4148 * 5861 * CONTEXT: 4149 * CONTEXT: 5862 * Don't call from IRQ context. 4150 * Don't call from IRQ context. 5863 */ 4151 */ 5864 void workqueue_set_max_active(struct workqueu 4152 void workqueue_set_max_active(struct workqueue_struct *wq, int max_active) 5865 { 4153 { 5866 /* max_active doesn't mean anything f !! 4154 struct pool_workqueue *pwq; 5867 if (WARN_ON(wq->flags & WQ_BH)) !! 4155 5868 return; << 5869 /* disallow meddling with max_active 4156 /* disallow meddling with max_active for ordered workqueues */ 5870 if (WARN_ON(wq->flags & __WQ_ORDERED) !! 
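/*
 * A minimal illustrative sketch, not part of workqueue.c:
 * destroy_workqueue() above drains the queue itself, so a typical exit
 * path only has to stop anything that keeps requeueing work before it
 * calls destroy.  Names below are hypothetical.
 */
#include <linux/workqueue.h>

static struct workqueue_struct *mydrv_wq;       /* created with alloc_workqueue() at init time */
static struct delayed_work mydrv_poll_work;     /* rearms itself from its own handler */

static void mydrv_teardown(void)
{
        /* stop the self-rearming item first so the internal drain can finish */
        cancel_delayed_work_sync(&mydrv_poll_work);
        destroy_workqueue(mydrv_wq);
}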
4157 if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT)) 5871 return; 4158 return; 5872 4159 5873 max_active = wq_clamp_max_active(max_ 4160 max_active = wq_clamp_max_active(max_active, wq->flags, wq->name); 5874 4161 5875 mutex_lock(&wq->mutex); 4162 mutex_lock(&wq->mutex); 5876 4163 >> 4164 wq->flags &= ~__WQ_ORDERED; 5877 wq->saved_max_active = max_active; 4165 wq->saved_max_active = max_active; 5878 if (wq->flags & WQ_UNBOUND) << 5879 wq->saved_min_active = min(wq << 5880 4166 5881 wq_adjust_max_active(wq); !! 4167 for_each_pwq(pwq, wq) >> 4168 pwq_adjust_max_active(pwq); 5882 4169 5883 mutex_unlock(&wq->mutex); 4170 mutex_unlock(&wq->mutex); 5884 } 4171 } 5885 EXPORT_SYMBOL_GPL(workqueue_set_max_active); 4172 EXPORT_SYMBOL_GPL(workqueue_set_max_active); 5886 4173 5887 /** 4174 /** 5888 * workqueue_set_min_active - adjust min_acti << 5889 * @wq: target unbound workqueue << 5890 * @min_active: new min_active value << 5891 * << 5892 * Set min_active of an unbound workqueue. Un << 5893 * unbound workqueue is not guaranteed to be << 5894 * interdependent work items. Instead, an unb << 5895 * able to process min_active number of inter << 5896 * %WQ_DFL_MIN_ACTIVE by default. << 5897 * << 5898 * Use this function to adjust the min_active << 5899 * max_active. << 5900 */ << 5901 void workqueue_set_min_active(struct workqueu << 5902 { << 5903 /* min_active is only meaningful for << 5904 if (WARN_ON((wq->flags & (WQ_BH | WQ_ << 5905 WQ_UNBOUND)) << 5906 return; << 5907 << 5908 mutex_lock(&wq->mutex); << 5909 wq->saved_min_active = clamp(min_acti << 5910 wq_adjust_max_active(wq); << 5911 mutex_unlock(&wq->mutex); << 5912 } << 5913 << 5914 /** << 5915 * current_work - retrieve %current task's wo 4175 * current_work - retrieve %current task's work struct 5916 * 4176 * 5917 * Determine if %current task is a workqueue 4177 * Determine if %current task is a workqueue worker and what it's working on. 5918 * Useful to find out the context that the %c 4178 * Useful to find out the context that the %current task is running in. 5919 * 4179 * 5920 * Return: work struct if %current task is a 4180 * Return: work struct if %current task is a workqueue worker, %NULL otherwise. 5921 */ 4181 */ 5922 struct work_struct *current_work(void) 4182 struct work_struct *current_work(void) 5923 { 4183 { 5924 struct worker *worker = current_wq_wo 4184 struct worker *worker = current_wq_worker(); 5925 4185 5926 return worker ? worker->current_work 4186 return worker ? worker->current_work : NULL; 5927 } 4187 } 5928 EXPORT_SYMBOL(current_work); 4188 EXPORT_SYMBOL(current_work); 5929 4189 5930 /** 4190 /** 5931 * current_is_workqueue_rescuer - is %current 4191 * current_is_workqueue_rescuer - is %current workqueue rescuer? 5932 * 4192 * 5933 * Determine whether %current is a workqueue 4193 * Determine whether %current is a workqueue rescuer. Can be used from 5934 * work functions to determine whether it's b 4194 * work functions to determine whether it's being run off the rescuer task. 5935 * 4195 * 5936 * Return: %true if %current is a workqueue r 4196 * Return: %true if %current is a workqueue rescuer. %false otherwise. 
5937 */ 4197 */ 5938 bool current_is_workqueue_rescuer(void) 4198 bool current_is_workqueue_rescuer(void) 5939 { 4199 { 5940 struct worker *worker = current_wq_wo 4200 struct worker *worker = current_wq_worker(); 5941 4201 5942 return worker && worker->rescue_wq; 4202 return worker && worker->rescue_wq; 5943 } 4203 } 5944 4204 5945 /** 4205 /** 5946 * workqueue_congested - test whether a workq 4206 * workqueue_congested - test whether a workqueue is congested 5947 * @cpu: CPU in question 4207 * @cpu: CPU in question 5948 * @wq: target workqueue 4208 * @wq: target workqueue 5949 * 4209 * 5950 * Test whether @wq's cpu workqueue for @cpu 4210 * Test whether @wq's cpu workqueue for @cpu is congested. There is 5951 * no synchronization around this function an 4211 * no synchronization around this function and the test result is 5952 * unreliable and only useful as advisory hin 4212 * unreliable and only useful as advisory hints or for debugging. 5953 * 4213 * 5954 * If @cpu is WORK_CPU_UNBOUND, the test is p 4214 * If @cpu is WORK_CPU_UNBOUND, the test is performed on the local CPU. 5955 * !! 4215 * Note that both per-cpu and unbound workqueues may be associated with 5956 * With the exception of ordered workqueues, !! 4216 * multiple pool_workqueues which have separate congested states. A 5957 * pool_workqueues, each with its own congest !! 4217 * workqueue being congested on one CPU doesn't mean the workqueue is also 5958 * congested on one CPU doesn't mean that the !! 4218 * contested on other CPUs / NUMA nodes. 5959 * other CPUs. << 5960 * 4219 * 5961 * Return: 4220 * Return: 5962 * %true if congested, %false otherwise. 4221 * %true if congested, %false otherwise. 5963 */ 4222 */ 5964 bool workqueue_congested(int cpu, struct work 4223 bool workqueue_congested(int cpu, struct workqueue_struct *wq) 5965 { 4224 { 5966 struct pool_workqueue *pwq; 4225 struct pool_workqueue *pwq; 5967 bool ret; 4226 bool ret; 5968 4227 5969 rcu_read_lock(); !! 4228 rcu_read_lock_sched(); 5970 preempt_disable(); << 5971 4229 5972 if (cpu == WORK_CPU_UNBOUND) 4230 if (cpu == WORK_CPU_UNBOUND) 5973 cpu = smp_processor_id(); 4231 cpu = smp_processor_id(); 5974 4232 5975 pwq = *per_cpu_ptr(wq->cpu_pwq, cpu); !! 4233 if (!(wq->flags & WQ_UNBOUND)) 5976 ret = !list_empty(&pwq->inactive_work !! 4234 pwq = per_cpu_ptr(wq->cpu_pwqs, cpu); >> 4235 else >> 4236 pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu)); 5977 4237 5978 preempt_enable(); !! 4238 ret = !list_empty(&pwq->delayed_works); 5979 rcu_read_unlock(); !! 4239 rcu_read_unlock_sched(); 5980 4240 5981 return ret; 4241 return ret; 5982 } 4242 } 5983 EXPORT_SYMBOL_GPL(workqueue_congested); 4243 EXPORT_SYMBOL_GPL(workqueue_congested); 5984 4244 5985 /** 4245 /** 5986 * work_busy - test whether a work is current 4246 * work_busy - test whether a work is currently pending or running 5987 * @work: the work to be tested 4247 * @work: the work to be tested 5988 * 4248 * 5989 * Test whether @work is currently pending or 4249 * Test whether @work is currently pending or running. There is no 5990 * synchronization around this function and t 4250 * synchronization around this function and the test result is 5991 * unreliable and only useful as advisory hin 4251 * unreliable and only useful as advisory hints or for debugging. 5992 * 4252 * 5993 * Return: 4253 * Return: 5994 * OR'd bitmask of WORK_BUSY_* bits. 4254 * OR'd bitmask of WORK_BUSY_* bits. 
5995 */ 4255 */ 5996 unsigned int work_busy(struct work_struct *wo 4256 unsigned int work_busy(struct work_struct *work) 5997 { 4257 { 5998 struct worker_pool *pool; 4258 struct worker_pool *pool; 5999 unsigned long irq_flags; !! 4259 unsigned long flags; 6000 unsigned int ret = 0; 4260 unsigned int ret = 0; 6001 4261 6002 if (work_pending(work)) 4262 if (work_pending(work)) 6003 ret |= WORK_BUSY_PENDING; 4263 ret |= WORK_BUSY_PENDING; 6004 4264 6005 rcu_read_lock(); !! 4265 local_irq_save(flags); 6006 pool = get_work_pool(work); 4266 pool = get_work_pool(work); 6007 if (pool) { 4267 if (pool) { 6008 raw_spin_lock_irqsave(&pool-> !! 4268 spin_lock(&pool->lock); 6009 if (find_worker_executing_wor 4269 if (find_worker_executing_work(pool, work)) 6010 ret |= WORK_BUSY_RUNN 4270 ret |= WORK_BUSY_RUNNING; 6011 raw_spin_unlock_irqrestore(&p !! 4271 spin_unlock(&pool->lock); 6012 } 4272 } 6013 rcu_read_unlock(); !! 4273 local_irq_restore(flags); 6014 4274 6015 return ret; 4275 return ret; 6016 } 4276 } 6017 EXPORT_SYMBOL_GPL(work_busy); 4277 EXPORT_SYMBOL_GPL(work_busy); 6018 4278 6019 /** 4279 /** 6020 * set_worker_desc - set description for the 4280 * set_worker_desc - set description for the current work item 6021 * @fmt: printf-style format string 4281 * @fmt: printf-style format string 6022 * @...: arguments for the format string 4282 * @...: arguments for the format string 6023 * 4283 * 6024 * This function can be called by a running w 4284 * This function can be called by a running work function to describe what 6025 * the work item is about. If the worker tas 4285 * the work item is about. If the worker task gets dumped, this 6026 * information will be printed out together t 4286 * information will be printed out together to help debugging. The 6027 * description can be at most WORKER_DESC_LEN 4287 * description can be at most WORKER_DESC_LEN including the trailing '\0'. 6028 */ 4288 */ 6029 void set_worker_desc(const char *fmt, ...) 4289 void set_worker_desc(const char *fmt, ...) 6030 { 4290 { 6031 struct worker *worker = current_wq_wo 4291 struct worker *worker = current_wq_worker(); 6032 va_list args; 4292 va_list args; 6033 4293 6034 if (worker) { 4294 if (worker) { 6035 va_start(args, fmt); 4295 va_start(args, fmt); 6036 vsnprintf(worker->desc, sizeo 4296 vsnprintf(worker->desc, sizeof(worker->desc), fmt, args); 6037 va_end(args); 4297 va_end(args); >> 4298 worker->desc_valid = true; 6038 } 4299 } 6039 } 4300 } 6040 EXPORT_SYMBOL_GPL(set_worker_desc); << 6041 4301 6042 /** 4302 /** 6043 * print_worker_info - print out worker infor 4303 * print_worker_info - print out worker information and description 6044 * @log_lvl: the log level to use when printi 4304 * @log_lvl: the log level to use when printing 6045 * @task: target task 4305 * @task: target task 6046 * 4306 * 6047 * If @task is a worker and currently executi 4307 * If @task is a worker and currently executing a work item, print out the 6048 * name of the workqueue being serviced and w 4308 * name of the workqueue being serviced and worker description set with 6049 * set_worker_desc() by the currently executi 4309 * set_worker_desc() by the currently executing work item. 6050 * 4310 * 6051 * This function can be safely called on any 4311 * This function can be safely called on any task as long as the 6052 * task_struct itself is accessible. While s 4312 * task_struct itself is accessible. 
While safe, this function isn't 6053 * synchronized and may print out mixups or g 4313 * synchronized and may print out mixups or garbages of limited length. 6054 */ 4314 */ 6055 void print_worker_info(const char *log_lvl, s 4315 void print_worker_info(const char *log_lvl, struct task_struct *task) 6056 { 4316 { 6057 work_func_t *fn = NULL; 4317 work_func_t *fn = NULL; 6058 char name[WQ_NAME_LEN] = { }; 4318 char name[WQ_NAME_LEN] = { }; 6059 char desc[WORKER_DESC_LEN] = { }; 4319 char desc[WORKER_DESC_LEN] = { }; 6060 struct pool_workqueue *pwq = NULL; 4320 struct pool_workqueue *pwq = NULL; 6061 struct workqueue_struct *wq = NULL; 4321 struct workqueue_struct *wq = NULL; >> 4322 bool desc_valid = false; 6062 struct worker *worker; 4323 struct worker *worker; 6063 4324 6064 if (!(task->flags & PF_WQ_WORKER)) 4325 if (!(task->flags & PF_WQ_WORKER)) 6065 return; 4326 return; 6066 4327 6067 /* 4328 /* 6068 * This function is called without an 4329 * This function is called without any synchronization and @task 6069 * could be in any state. Be careful 4330 * could be in any state. Be careful with dereferences. 6070 */ 4331 */ 6071 worker = kthread_probe_data(task); 4332 worker = kthread_probe_data(task); 6072 4333 6073 /* 4334 /* 6074 * Carefully copy the associated work !! 4335 * Carefully copy the associated workqueue's workfn and name. Keep 6075 * Keep the original last '\0' in cas !! 4336 * the original last '\0' in case the original contains garbage. 6076 */ 4337 */ 6077 copy_from_kernel_nofault(&fn, &worker !! 4338 probe_kernel_read(&fn, &worker->current_func, sizeof(fn)); 6078 copy_from_kernel_nofault(&pwq, &worke !! 4339 probe_kernel_read(&pwq, &worker->current_pwq, sizeof(pwq)); 6079 copy_from_kernel_nofault(&wq, &pwq->w !! 4340 probe_kernel_read(&wq, &pwq->wq, sizeof(wq)); 6080 copy_from_kernel_nofault(name, wq->na !! 4341 probe_kernel_read(name, wq->name, sizeof(name) - 1); 6081 copy_from_kernel_nofault(desc, worker !! 4342 >> 4343 /* copy worker description */ >> 4344 probe_kernel_read(&desc_valid, &worker->desc_valid, sizeof(desc_valid)); >> 4345 if (desc_valid) >> 4346 probe_kernel_read(desc, worker->desc, sizeof(desc) - 1); 6082 4347 6083 if (fn || name[0] || desc[0]) { 4348 if (fn || name[0] || desc[0]) { 6084 printk("%sWorkqueue: %s %ps", !! 4349 printk("%sWorkqueue: %s %pf", log_lvl, name, fn); 6085 if (strcmp(name, desc)) !! 4350 if (desc[0]) 6086 pr_cont(" (%s)", desc 4351 pr_cont(" (%s)", desc); 6087 pr_cont("\n"); 4352 pr_cont("\n"); 6088 } 4353 } 6089 } 4354 } 6090 4355 6091 static void pr_cont_pool_info(struct worker_p 4356 static void pr_cont_pool_info(struct worker_pool *pool) 6092 { 4357 { 6093 pr_cont(" cpus=%*pbl", nr_cpumask_bit 4358 pr_cont(" cpus=%*pbl", nr_cpumask_bits, pool->attrs->cpumask); 6094 if (pool->node != NUMA_NO_NODE) 4359 if (pool->node != NUMA_NO_NODE) 6095 pr_cont(" node=%d", pool->nod 4360 pr_cont(" node=%d", pool->node); 6096 pr_cont(" flags=0x%x", pool->flags); !! 4361 pr_cont(" flags=0x%x nice=%d", pool->flags, pool->attrs->nice); 6097 if (pool->flags & POOL_BH) << 6098 pr_cont(" bh%s", << 6099 pool->attrs->nice == << 6100 else << 6101 pr_cont(" nice=%d", pool->att << 6102 } 4362 } 6103 4363 6104 static void pr_cont_worker_id(struct worker * !! 4364 static void pr_cont_work(bool comma, struct work_struct *work) 6105 { << 6106 struct worker_pool *pool = worker->po << 6107 << 6108 if (pool->flags & WQ_BH) << 6109 pr_cont("bh%s", << 6110 pool->attrs->nice == << 6111 else << 6112 pr_cont("%d%s", task_pid_nr(w << 6113 worker->rescue_wq ? 
" << 6114 } << 6115 << 6116 struct pr_cont_work_struct { << 6117 bool comma; << 6118 work_func_t func; << 6119 long ctr; << 6120 }; << 6121 << 6122 static void pr_cont_work_flush(bool comma, wo << 6123 { << 6124 if (!pcwsp->ctr) << 6125 goto out_record; << 6126 if (func == pcwsp->func) { << 6127 pcwsp->ctr++; << 6128 return; << 6129 } << 6130 if (pcwsp->ctr == 1) << 6131 pr_cont("%s %ps", pcwsp->comm << 6132 else << 6133 pr_cont("%s %ld*%ps", pcwsp-> << 6134 pcwsp->ctr = 0; << 6135 out_record: << 6136 if ((long)func == -1L) << 6137 return; << 6138 pcwsp->comma = comma; << 6139 pcwsp->func = func; << 6140 pcwsp->ctr = 1; << 6141 } << 6142 << 6143 static void pr_cont_work(bool comma, struct w << 6144 { 4365 { 6145 if (work->func == wq_barrier_func) { 4366 if (work->func == wq_barrier_func) { 6146 struct wq_barrier *barr; 4367 struct wq_barrier *barr; 6147 4368 6148 barr = container_of(work, str 4369 barr = container_of(work, struct wq_barrier, work); 6149 4370 6150 pr_cont_work_flush(comma, (wo << 6151 pr_cont("%s BAR(%d)", comma ? 4371 pr_cont("%s BAR(%d)", comma ? "," : "", 6152 task_pid_nr(barr->tas 4372 task_pid_nr(barr->task)); 6153 } else { 4373 } else { 6154 if (!comma) !! 4374 pr_cont("%s %pf", comma ? "," : "", work->func); 6155 pr_cont_work_flush(co << 6156 pr_cont_work_flush(comma, wor << 6157 } 4375 } 6158 } 4376 } 6159 4377 6160 static void show_pwq(struct pool_workqueue *p 4378 static void show_pwq(struct pool_workqueue *pwq) 6161 { 4379 { 6162 struct pr_cont_work_struct pcws = { . << 6163 struct worker_pool *pool = pwq->pool; 4380 struct worker_pool *pool = pwq->pool; 6164 struct work_struct *work; 4381 struct work_struct *work; 6165 struct worker *worker; 4382 struct worker *worker; 6166 bool has_in_flight = false, has_pendi 4383 bool has_in_flight = false, has_pending = false; 6167 int bkt; 4384 int bkt; 6168 4385 6169 pr_info(" pwq %d:", pool->id); 4386 pr_info(" pwq %d:", pool->id); 6170 pr_cont_pool_info(pool); 4387 pr_cont_pool_info(pool); 6171 4388 6172 pr_cont(" active=%d refcnt=%d%s\n", !! 4389 pr_cont(" active=%d/%d%s\n", pwq->nr_active, pwq->max_active, 6173 pwq->nr_active, pwq->refcnt, << 6174 !list_empty(&pwq->mayday_node 4390 !list_empty(&pwq->mayday_node) ? " MAYDAY" : ""); 6175 4391 6176 hash_for_each(pool->busy_hash, bkt, w 4392 hash_for_each(pool->busy_hash, bkt, worker, hentry) { 6177 if (worker->current_pwq == pw 4393 if (worker->current_pwq == pwq) { 6178 has_in_flight = true; 4394 has_in_flight = true; 6179 break; 4395 break; 6180 } 4396 } 6181 } 4397 } 6182 if (has_in_flight) { 4398 if (has_in_flight) { 6183 bool comma = false; 4399 bool comma = false; 6184 4400 6185 pr_info(" in-flight:"); 4401 pr_info(" in-flight:"); 6186 hash_for_each(pool->busy_hash 4402 hash_for_each(pool->busy_hash, bkt, worker, hentry) { 6187 if (worker->current_p 4403 if (worker->current_pwq != pwq) 6188 continue; 4404 continue; 6189 4405 6190 pr_cont(" %s", comma !! 4406 pr_cont("%s %d%s:%pf", comma ? "," : "", 6191 pr_cont_worker_id(wor !! 4407 task_pid_nr(worker->task), 6192 pr_cont(":%ps", worke !! 4408 worker == pwq->wq->rescuer ? "(RESCUER)" : "", >> 4409 worker->current_func); 6193 list_for_each_entry(w 4410 list_for_each_entry(work, &worker->scheduled, entry) 6194 pr_cont_work( !! 
4411 pr_cont_work(false, work); 6195 pr_cont_work_flush(co << 6196 comma = true; 4412 comma = true; 6197 } 4413 } 6198 pr_cont("\n"); 4414 pr_cont("\n"); 6199 } 4415 } 6200 4416 6201 list_for_each_entry(work, &pool->work 4417 list_for_each_entry(work, &pool->worklist, entry) { 6202 if (get_work_pwq(work) == pwq 4418 if (get_work_pwq(work) == pwq) { 6203 has_pending = true; 4419 has_pending = true; 6204 break; 4420 break; 6205 } 4421 } 6206 } 4422 } 6207 if (has_pending) { 4423 if (has_pending) { 6208 bool comma = false; 4424 bool comma = false; 6209 4425 6210 pr_info(" pending:"); 4426 pr_info(" pending:"); 6211 list_for_each_entry(work, &po 4427 list_for_each_entry(work, &pool->worklist, entry) { 6212 if (get_work_pwq(work 4428 if (get_work_pwq(work) != pwq) 6213 continue; 4429 continue; 6214 4430 6215 pr_cont_work(comma, w !! 4431 pr_cont_work(comma, work); 6216 comma = !(*work_data_ 4432 comma = !(*work_data_bits(work) & WORK_STRUCT_LINKED); 6217 } 4433 } 6218 pr_cont_work_flush(comma, (wo << 6219 pr_cont("\n"); 4434 pr_cont("\n"); 6220 } 4435 } 6221 4436 6222 if (!list_empty(&pwq->inactive_works) !! 4437 if (!list_empty(&pwq->delayed_works)) { 6223 bool comma = false; 4438 bool comma = false; 6224 4439 6225 pr_info(" inactive:"); !! 4440 pr_info(" delayed:"); 6226 list_for_each_entry(work, &pw !! 4441 list_for_each_entry(work, &pwq->delayed_works, entry) { 6227 pr_cont_work(comma, w !! 4442 pr_cont_work(comma, work); 6228 comma = !(*work_data_ 4443 comma = !(*work_data_bits(work) & WORK_STRUCT_LINKED); 6229 } 4444 } 6230 pr_cont_work_flush(comma, (wo << 6231 pr_cont("\n"); 4445 pr_cont("\n"); 6232 } 4446 } 6233 } 4447 } 6234 4448 6235 /** 4449 /** 6236 * show_one_workqueue - dump state of specifi !! 4450 * show_workqueue_state - dump workqueue state 6237 * @wq: workqueue whose state will be printed << 6238 */ << 6239 void show_one_workqueue(struct workqueue_stru << 6240 { << 6241 struct pool_workqueue *pwq; << 6242 bool idle = true; << 6243 unsigned long irq_flags; << 6244 << 6245 for_each_pwq(pwq, wq) { << 6246 if (!pwq_is_empty(pwq)) { << 6247 idle = false; << 6248 break; << 6249 } << 6250 } << 6251 if (idle) /* Nothing to print for idl << 6252 return; << 6253 << 6254 pr_info("workqueue %s: flags=0x%x\n", << 6255 << 6256 for_each_pwq(pwq, wq) { << 6257 raw_spin_lock_irqsave(&pwq->p << 6258 if (!pwq_is_empty(pwq)) { << 6259 /* << 6260 * Defer printing to << 6261 * drivers that queue << 6262 * also taken in thei << 6263 */ << 6264 printk_deferred_enter << 6265 show_pwq(pwq); << 6266 printk_deferred_exit( << 6267 } << 6268 raw_spin_unlock_irqrestore(&p << 6269 /* << 6270 * We could be printing a lot << 6271 * sysrq-t -> show_all_workqu << 6272 * hard lockup. << 6273 */ << 6274 touch_nmi_watchdog(); << 6275 } << 6276 << 6277 } << 6278 << 6279 /** << 6280 * show_one_worker_pool - dump state of speci << 6281 * @pool: worker pool whose state will be pri << 6282 */ << 6283 static void show_one_worker_pool(struct worke << 6284 { << 6285 struct worker *worker; << 6286 bool first = true; << 6287 unsigned long irq_flags; << 6288 unsigned long hung = 0; << 6289 << 6290 raw_spin_lock_irqsave(&pool->lock, ir << 6291 if (pool->nr_workers == pool->nr_idle << 6292 goto next_pool; << 6293 << 6294 /* How long the first pending work is << 6295 if (!list_empty(&pool->worklist)) << 6296 hung = jiffies_to_msecs(jiffi << 6297 << 6298 /* << 6299 * Defer printing to avoid deadlocks << 6300 * queue work while holding locks als << 6301 * paths. 
<< 6302 */ << 6303 printk_deferred_enter(); << 6304 pr_info("pool %d:", pool->id); << 6305 pr_cont_pool_info(pool); << 6306 pr_cont(" hung=%lus workers=%d", hung << 6307 if (pool->manager) << 6308 pr_cont(" manager: %d", << 6309 task_pid_nr(pool->man << 6310 list_for_each_entry(worker, &pool->id << 6311 pr_cont(" %s", first ? "idle: << 6312 pr_cont_worker_id(worker); << 6313 first = false; << 6314 } << 6315 pr_cont("\n"); << 6316 printk_deferred_exit(); << 6317 next_pool: << 6318 raw_spin_unlock_irqrestore(&pool->loc << 6319 /* << 6320 * We could be printing a lot from at << 6321 * sysrq-t -> show_all_workqueues(). << 6322 * hard lockup. << 6323 */ << 6324 touch_nmi_watchdog(); << 6325 << 6326 } << 6327 << 6328 /** << 6329 * show_all_workqueues - dump workqueue state << 6330 * 4451 * 6331 * Called from a sysrq handler and prints out !! 4452 * Called from a sysrq handler or try_to_freeze_tasks() and prints out >> 4453 * all busy workqueues and pools. 6332 */ 4454 */ 6333 void show_all_workqueues(void) !! 4455 void show_workqueue_state(void) 6334 { 4456 { 6335 struct workqueue_struct *wq; 4457 struct workqueue_struct *wq; 6336 struct worker_pool *pool; 4458 struct worker_pool *pool; >> 4459 unsigned long flags; 6337 int pi; 4460 int pi; 6338 4461 6339 rcu_read_lock(); !! 4462 rcu_read_lock_sched(); 6340 4463 6341 pr_info("Showing busy workqueues and 4464 pr_info("Showing busy workqueues and worker pools:\n"); 6342 4465 6343 list_for_each_entry_rcu(wq, &workqueu << 6344 show_one_workqueue(wq); << 6345 << 6346 for_each_pool(pool, pi) << 6347 show_one_worker_pool(pool); << 6348 << 6349 rcu_read_unlock(); << 6350 } << 6351 << 6352 /** << 6353 * show_freezable_workqueues - dump freezable << 6354 * << 6355 * Called from try_to_freeze_tasks() and prin << 6356 * still busy. << 6357 */ << 6358 void show_freezable_workqueues(void) << 6359 { << 6360 struct workqueue_struct *wq; << 6361 << 6362 rcu_read_lock(); << 6363 << 6364 pr_info("Showing freezable workqueues << 6365 << 6366 list_for_each_entry_rcu(wq, &workqueu 4466 list_for_each_entry_rcu(wq, &workqueues, list) { 6367 if (!(wq->flags & WQ_FREEZABL !! 4467 struct pool_workqueue *pwq; 6368 continue; !! 4468 bool idle = true; 6369 show_one_workqueue(wq); << 6370 } << 6371 << 6372 rcu_read_unlock(); << 6373 } << 6374 << 6375 /* used to show worker information through /p << 6376 void wq_worker_comm(char *buf, size_t size, s << 6377 { << 6378 /* stabilize PF_WQ_WORKER and worker << 6379 mutex_lock(&wq_pool_attach_mutex); << 6380 4469 6381 if (task->flags & PF_WQ_WORKER) { !! 4470 for_each_pwq(pwq, wq) { 6382 struct worker *worker = kthre !! 4471 if (pwq->nr_active || !list_empty(&pwq->delayed_works)) { 6383 struct worker_pool *pool = wo !! 4472 idle = false; 6384 int off; !! 4473 break; >> 4474 } >> 4475 } >> 4476 if (idle) >> 4477 continue; 6385 4478 6386 off = format_worker_id(buf, s !! 4479 pr_info("workqueue %s: flags=0x%x\n", wq->name, wq->flags); 6387 4480 6388 if (pool) { !! 4481 for_each_pwq(pwq, wq) { 6389 raw_spin_lock_irq(&po !! 4482 spin_lock_irqsave(&pwq->pool->lock, flags); >> 4483 if (pwq->nr_active || !list_empty(&pwq->delayed_works)) >> 4484 show_pwq(pwq); >> 4485 spin_unlock_irqrestore(&pwq->pool->lock, flags); 6390 /* 4486 /* 6391 * ->desc tracks info !! 4487 * We could be printing a lot from atomic context, e.g. 6392 * set_worker_desc()) !! 4488 * sysrq-t -> show_workqueue_state(). Avoid triggering 6393 * current, prepend ' !! 4489 * hard lockup. 6394 */ 4490 */ 6395 if (worker->desc[0] ! !! 
4491 touch_nmi_watchdog(); 6396 if (worker->c << 6397 scnpr << 6398 << 6399 else << 6400 scnpr << 6401 << 6402 } << 6403 raw_spin_unlock_irq(& << 6404 } 4492 } 6405 } else { << 6406 strscpy(buf, task->comm, size << 6407 } 4493 } 6408 4494 6409 mutex_unlock(&wq_pool_attach_mutex); !! 4495 for_each_pool(pool, pi) { 6410 } !! 4496 struct worker *worker; >> 4497 bool first = true; 6411 4498 6412 #ifdef CONFIG_SMP !! 4499 spin_lock_irqsave(&pool->lock, flags); >> 4500 if (pool->nr_workers == pool->nr_idle) >> 4501 goto next_pool; >> 4502 >> 4503 pr_info("pool %d:", pool->id); >> 4504 pr_cont_pool_info(pool); >> 4505 pr_cont(" hung=%us workers=%d", >> 4506 jiffies_to_msecs(jiffies - pool->watchdog_ts) / 1000, >> 4507 pool->nr_workers); >> 4508 if (pool->manager) >> 4509 pr_cont(" manager: %d", >> 4510 task_pid_nr(pool->manager->task)); >> 4511 list_for_each_entry(worker, &pool->idle_list, entry) { >> 4512 pr_cont(" %s%d", first ? "idle: " : "", >> 4513 task_pid_nr(worker->task)); >> 4514 first = false; >> 4515 } >> 4516 pr_cont("\n"); >> 4517 next_pool: >> 4518 spin_unlock_irqrestore(&pool->lock, flags); >> 4519 /* >> 4520 * We could be printing a lot from atomic context, e.g. >> 4521 * sysrq-t -> show_workqueue_state(). Avoid triggering >> 4522 * hard lockup. >> 4523 */ >> 4524 touch_nmi_watchdog(); >> 4525 } >> 4526 >> 4527 rcu_read_unlock_sched(); >> 4528 } 6413 4529 6414 /* 4530 /* 6415 * CPU hotplug. 4531 * CPU hotplug. 6416 * 4532 * 6417 * There are two challenges in supporting CPU 4533 * There are two challenges in supporting CPU hotplug. Firstly, there 6418 * are a lot of assumptions on strong associa 4534 * are a lot of assumptions on strong associations among work, pwq and 6419 * pool which make migrating pending and sche 4535 * pool which make migrating pending and scheduled works very 6420 * difficult to implement without impacting h 4536 * difficult to implement without impacting hot paths. Secondly, 6421 * worker pools serve mix of short, long and 4537 * worker pools serve mix of short, long and very long running works making 6422 * blocked draining impractical. 4538 * blocked draining impractical. 6423 * 4539 * 6424 * This is solved by allowing the pools to be 4540 * This is solved by allowing the pools to be disassociated from the CPU 6425 * running as an unbound one and allowing it 4541 * running as an unbound one and allowing it to be reattached later if the 6426 * cpu comes back online. 4542 * cpu comes back online. 6427 */ 4543 */ 6428 4544 6429 static void unbind_workers(int cpu) 4545 static void unbind_workers(int cpu) 6430 { 4546 { 6431 struct worker_pool *pool; 4547 struct worker_pool *pool; 6432 struct worker *worker; 4548 struct worker *worker; 6433 4549 6434 for_each_cpu_worker_pool(pool, cpu) { 4550 for_each_cpu_worker_pool(pool, cpu) { 6435 mutex_lock(&wq_pool_attach_mu !! 4551 mutex_lock(&pool->attach_mutex); 6436 raw_spin_lock_irq(&pool->lock !! 4552 spin_lock_irq(&pool->lock); 6437 4553 6438 /* 4554 /* 6439 * We've blocked all attach/d 4555 * We've blocked all attach/detach operations. Make all workers 6440 * unbound and set DISASSOCIA 4556 * unbound and set DISASSOCIATED. Before this, all workers 6441 * must be on the cpu. After !! 4557 * except for the ones which are still executing works from 6442 * And the preemption disable !! 4558 * before the last CPU down must be on the cpu. After 6443 * are guaranteed to see WORK !! 4559 * this, they may become diasporas. 6444 * is on the same cpu. 
<< 6445 */ 4560 */ 6446 for_each_pool_worker(worker, 4561 for_each_pool_worker(worker, pool) 6447 worker->flags |= WORK 4562 worker->flags |= WORKER_UNBOUND; 6448 4563 6449 pool->flags |= POOL_DISASSOCI 4564 pool->flags |= POOL_DISASSOCIATED; 6450 4565 >> 4566 spin_unlock_irq(&pool->lock); >> 4567 mutex_unlock(&pool->attach_mutex); >> 4568 6451 /* 4569 /* 6452 * The handling of nr_running !! 4570 * Call schedule() so that we cross rq->lock and thus can 6453 * now. Zap nr_running. Aft !! 4571 * guarantee sched callbacks see the %WORKER_UNBOUND flag. 6454 * need_more_worker() and kee !! 4572 * This is necessary as scheduler callbacks may be invoked 6455 * long as the worklist is no !! 4573 * from other cpus. 6456 * an unbound (in terms of co !! 4574 */ >> 4575 schedule(); >> 4576 >> 4577 /* >> 4578 * Sched callbacks are disabled now. Zap nr_running. >> 4579 * After this, nr_running stays zero and need_more_worker() >> 4580 * and keep_working() are always true as long as the >> 4581 * worklist is not empty. This pool now behaves as an >> 4582 * unbound (in terms of concurrency management) pool which 6457 * are served by workers tied 4583 * are served by workers tied to the pool. 6458 */ 4584 */ 6459 pool->nr_running = 0; !! 4585 atomic_set(&pool->nr_running, 0); 6460 4586 6461 /* 4587 /* 6462 * With concurrency managemen 4588 * With concurrency management just turned off, a busy 6463 * worker blocking could lead 4589 * worker blocking could lead to lengthy stalls. Kick off 6464 * unbound chain execution of 4590 * unbound chain execution of currently pending work items. 6465 */ 4591 */ 6466 kick_pool(pool); !! 4592 spin_lock_irq(&pool->lock); 6467 !! 4593 wake_up_worker(pool); 6468 raw_spin_unlock_irq(&pool->lo !! 4594 spin_unlock_irq(&pool->lock); 6469 << 6470 for_each_pool_worker(worker, << 6471 unbind_worker(worker) << 6472 << 6473 mutex_unlock(&wq_pool_attach_ << 6474 } 4595 } 6475 } 4596 } 6476 4597 6477 /** 4598 /** 6478 * rebind_workers - rebind all workers of a p 4599 * rebind_workers - rebind all workers of a pool to the associated CPU 6479 * @pool: pool of interest 4600 * @pool: pool of interest 6480 * 4601 * 6481 * @pool->cpu is coming online. Rebind all w 4602 * @pool->cpu is coming online. Rebind all workers to the CPU. 6482 */ 4603 */ 6483 static void rebind_workers(struct worker_pool 4604 static void rebind_workers(struct worker_pool *pool) 6484 { 4605 { 6485 struct worker *worker; 4606 struct worker *worker; 6486 4607 6487 lockdep_assert_held(&wq_pool_attach_m !! 4608 lockdep_assert_held(&pool->attach_mutex); 6488 4609 6489 /* 4610 /* 6490 * Restore CPU affinity of all worker 4611 * Restore CPU affinity of all workers. As all idle workers should 6491 * be on the run-queue of the associa 4612 * be on the run-queue of the associated CPU before any local 6492 * wake-ups for concurrency managemen 4613 * wake-ups for concurrency management happen, restore CPU affinity 6493 * of all workers first and then clea 4614 * of all workers first and then clear UNBOUND. As we're called 6494 * from CPU_ONLINE, the following sho 4615 * from CPU_ONLINE, the following shouldn't fail. 6495 */ 4616 */ 6496 for_each_pool_worker(worker, pool) { !! 4617 for_each_pool_worker(worker, pool) 6497 kthread_set_per_cpu(worker->t << 6498 WARN_ON_ONCE(set_cpus_allowed 4618 WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, 6499 !! 4619 pool->attrs->cpumask) < 0); 6500 } << 6501 4620 6502 raw_spin_lock_irq(&pool->lock); !! 
4621 spin_lock_irq(&pool->lock); 6503 4622 6504 pool->flags &= ~POOL_DISASSOCIATED; 4623 pool->flags &= ~POOL_DISASSOCIATED; 6505 4624 6506 for_each_pool_worker(worker, pool) { 4625 for_each_pool_worker(worker, pool) { 6507 unsigned int worker_flags = w 4626 unsigned int worker_flags = worker->flags; 6508 4627 6509 /* 4628 /* >> 4629 * A bound idle worker should actually be on the runqueue >> 4630 * of the associated CPU for local wake-ups targeting it to >> 4631 * work. Kick all idle workers so that they migrate to the >> 4632 * associated CPU. Doing this in the same loop as >> 4633 * replacing UNBOUND with REBOUND is safe as no worker will >> 4634 * be bound before @pool->lock is released. >> 4635 */ >> 4636 if (worker_flags & WORKER_IDLE) >> 4637 wake_up_process(worker->task); >> 4638 >> 4639 /* 6510 * We want to clear UNBOUND b 4640 * We want to clear UNBOUND but can't directly call 6511 * worker_clr_flags() or adju 4641 * worker_clr_flags() or adjust nr_running. Atomically 6512 * replace UNBOUND with anoth 4642 * replace UNBOUND with another NOT_RUNNING flag REBOUND. 6513 * @worker will clear REBOUND 4643 * @worker will clear REBOUND using worker_clr_flags() when 6514 * it initiates the next exec 4644 * it initiates the next execution cycle thus restoring 6515 * concurrency management. N 4645 * concurrency management. Note that when or whether 6516 * @worker clears REBOUND doe 4646 * @worker clears REBOUND doesn't affect correctness. 6517 * 4647 * 6518 * WRITE_ONCE() is necessary 4648 * WRITE_ONCE() is necessary because @worker->flags may be 6519 * tested without holding any 4649 * tested without holding any lock in 6520 * wq_worker_running(). With !! 4650 * wq_worker_waking_up(). Without it, NOT_RUNNING test may 6521 * fail incorrectly leading t 4651 * fail incorrectly leading to premature concurrency 6522 * management operations. 4652 * management operations. 6523 */ 4653 */ 6524 WARN_ON_ONCE(!(worker_flags & 4654 WARN_ON_ONCE(!(worker_flags & WORKER_UNBOUND)); 6525 worker_flags |= WORKER_REBOUN 4655 worker_flags |= WORKER_REBOUND; 6526 worker_flags &= ~WORKER_UNBOU 4656 worker_flags &= ~WORKER_UNBOUND; 6527 WRITE_ONCE(worker->flags, wor 4657 WRITE_ONCE(worker->flags, worker_flags); 6528 } 4658 } 6529 4659 6530 raw_spin_unlock_irq(&pool->lock); !! 4660 spin_unlock_irq(&pool->lock); 6531 } 4661 } 6532 4662 6533 /** 4663 /** 6534 * restore_unbound_workers_cpumask - restore 4664 * restore_unbound_workers_cpumask - restore cpumask of unbound workers 6535 * @pool: unbound pool of interest 4665 * @pool: unbound pool of interest 6536 * @cpu: the CPU which is coming up 4666 * @cpu: the CPU which is coming up 6537 * 4667 * 6538 * An unbound pool may end up with a cpumask 4668 * An unbound pool may end up with a cpumask which doesn't have any online 6539 * CPUs. When a worker of such pool get sche 4669 * CPUs. When a worker of such pool get scheduled, the scheduler resets 6540 * its cpus_allowed. If @cpu is in @pool's c 4670 * its cpus_allowed. If @cpu is in @pool's cpumask which didn't have any 6541 * online CPU before, cpus_allowed of all its 4671 * online CPU before, cpus_allowed of all its workers should be restored. 6542 */ 4672 */ 6543 static void restore_unbound_workers_cpumask(s 4673 static void restore_unbound_workers_cpumask(struct worker_pool *pool, int cpu) 6544 { 4674 { 6545 static cpumask_t cpumask; 4675 static cpumask_t cpumask; 6546 struct worker *worker; 4676 struct worker *worker; 6547 4677 6548 lockdep_assert_held(&wq_pool_attach_m !! 
4678 lockdep_assert_held(&pool->attach_mutex); 6549 4679 6550 /* is @cpu allowed for @pool? */ 4680 /* is @cpu allowed for @pool? */ 6551 if (!cpumask_test_cpu(cpu, pool->attr 4681 if (!cpumask_test_cpu(cpu, pool->attrs->cpumask)) 6552 return; 4682 return; 6553 4683 6554 cpumask_and(&cpumask, pool->attrs->cp 4684 cpumask_and(&cpumask, pool->attrs->cpumask, cpu_online_mask); 6555 4685 6556 /* as we're called from CPU_ONLINE, t 4686 /* as we're called from CPU_ONLINE, the following shouldn't fail */ 6557 for_each_pool_worker(worker, pool) 4687 for_each_pool_worker(worker, pool) 6558 WARN_ON_ONCE(set_cpus_allowed 4688 WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, &cpumask) < 0); 6559 } 4689 } 6560 4690 6561 int workqueue_prepare_cpu(unsigned int cpu) 4691 int workqueue_prepare_cpu(unsigned int cpu) 6562 { 4692 { 6563 struct worker_pool *pool; 4693 struct worker_pool *pool; 6564 4694 6565 for_each_cpu_worker_pool(pool, cpu) { 4695 for_each_cpu_worker_pool(pool, cpu) { 6566 if (pool->nr_workers) 4696 if (pool->nr_workers) 6567 continue; 4697 continue; 6568 if (!create_worker(pool)) 4698 if (!create_worker(pool)) 6569 return -ENOMEM; 4699 return -ENOMEM; 6570 } 4700 } 6571 return 0; 4701 return 0; 6572 } 4702 } 6573 4703 6574 int workqueue_online_cpu(unsigned int cpu) 4704 int workqueue_online_cpu(unsigned int cpu) 6575 { 4705 { 6576 struct worker_pool *pool; 4706 struct worker_pool *pool; 6577 struct workqueue_struct *wq; 4707 struct workqueue_struct *wq; 6578 int pi; 4708 int pi; 6579 4709 6580 mutex_lock(&wq_pool_mutex); 4710 mutex_lock(&wq_pool_mutex); 6581 4711 6582 cpumask_set_cpu(cpu, wq_online_cpumas << 6583 << 6584 for_each_pool(pool, pi) { 4712 for_each_pool(pool, pi) { 6585 /* BH pools aren't affected b !! 4713 mutex_lock(&pool->attach_mutex); 6586 if (pool->flags & POOL_BH) << 6587 continue; << 6588 4714 6589 mutex_lock(&wq_pool_attach_mu << 6590 if (pool->cpu == cpu) 4715 if (pool->cpu == cpu) 6591 rebind_workers(pool); 4716 rebind_workers(pool); 6592 else if (pool->cpu < 0) 4717 else if (pool->cpu < 0) 6593 restore_unbound_worke 4718 restore_unbound_workers_cpumask(pool, cpu); 6594 mutex_unlock(&wq_pool_attach_ << 6595 } << 6596 << 6597 /* update pod affinity of unbound wor << 6598 list_for_each_entry(wq, &workqueues, << 6599 struct workqueue_attrs *attrs << 6600 << 6601 if (attrs) { << 6602 const struct wq_pod_t << 6603 int tcpu; << 6604 4719 6605 for_each_cpu(tcpu, pt !! 4720 mutex_unlock(&pool->attach_mutex); 6606 unbound_wq_up << 6607 << 6608 mutex_lock(&wq->mutex << 6609 wq_update_node_max_ac << 6610 mutex_unlock(&wq->mut << 6611 } << 6612 } 4721 } 6613 4722 >> 4723 /* update NUMA affinity of unbound workqueues */ >> 4724 list_for_each_entry(wq, &workqueues, list) >> 4725 wq_update_unbound_numa(wq, cpu, true); >> 4726 6614 mutex_unlock(&wq_pool_mutex); 4727 mutex_unlock(&wq_pool_mutex); 6615 return 0; 4728 return 0; 6616 } 4729 } 6617 4730 6618 int workqueue_offline_cpu(unsigned int cpu) 4731 int workqueue_offline_cpu(unsigned int cpu) 6619 { 4732 { 6620 struct workqueue_struct *wq; 4733 struct workqueue_struct *wq; 6621 4734 6622 /* unbinding per-cpu workers should h 4735 /* unbinding per-cpu workers should happen on the local CPU */ 6623 if (WARN_ON(cpu != smp_processor_id() 4736 if (WARN_ON(cpu != smp_processor_id())) 6624 return -1; 4737 return -1; 6625 4738 6626 unbind_workers(cpu); 4739 unbind_workers(cpu); 6627 4740 6628 /* update pod affinity of unbound wor !! 
4741 /* update NUMA affinity of unbound workqueues */ 6629 mutex_lock(&wq_pool_mutex); 4742 mutex_lock(&wq_pool_mutex); 6630 !! 4743 list_for_each_entry(wq, &workqueues, list) 6631 cpumask_clear_cpu(cpu, wq_online_cpum !! 4744 wq_update_unbound_numa(wq, cpu, false); 6632 << 6633 list_for_each_entry(wq, &workqueues, << 6634 struct workqueue_attrs *attrs << 6635 << 6636 if (attrs) { << 6637 const struct wq_pod_t << 6638 int tcpu; << 6639 << 6640 for_each_cpu(tcpu, pt << 6641 unbound_wq_up << 6642 << 6643 mutex_lock(&wq->mutex << 6644 wq_update_node_max_ac << 6645 mutex_unlock(&wq->mut << 6646 } << 6647 } << 6648 mutex_unlock(&wq_pool_mutex); 4745 mutex_unlock(&wq_pool_mutex); 6649 4746 6650 return 0; 4747 return 0; 6651 } 4748 } 6652 4749 >> 4750 #ifdef CONFIG_SMP >> 4751 6653 struct work_for_cpu { 4752 struct work_for_cpu { 6654 struct work_struct work; 4753 struct work_struct work; 6655 long (*fn)(void *); 4754 long (*fn)(void *); 6656 void *arg; 4755 void *arg; 6657 long ret; 4756 long ret; 6658 }; 4757 }; 6659 4758 6660 static void work_for_cpu_fn(struct work_struc 4759 static void work_for_cpu_fn(struct work_struct *work) 6661 { 4760 { 6662 struct work_for_cpu *wfc = container_ 4761 struct work_for_cpu *wfc = container_of(work, struct work_for_cpu, work); 6663 4762 6664 wfc->ret = wfc->fn(wfc->arg); 4763 wfc->ret = wfc->fn(wfc->arg); 6665 } 4764 } 6666 4765 6667 /** 4766 /** 6668 * work_on_cpu_key - run a function in thread !! 4767 * work_on_cpu - run a function in thread context on a particular cpu 6669 * @cpu: the cpu to run on 4768 * @cpu: the cpu to run on 6670 * @fn: the function to run 4769 * @fn: the function to run 6671 * @arg: the function arg 4770 * @arg: the function arg 6672 * @key: The lock class key for lock debuggin << 6673 * 4771 * 6674 * It is up to the caller to ensure that the 4772 * It is up to the caller to ensure that the cpu doesn't go offline. 6675 * The caller must not hold any locks which w 4773 * The caller must not hold any locks which would prevent @fn from completing. 6676 * 4774 * 6677 * Return: The value @fn returns. 4775 * Return: The value @fn returns. 6678 */ 4776 */ 6679 long work_on_cpu_key(int cpu, long (*fn)(void !! 4777 long work_on_cpu(int cpu, long (*fn)(void *), void *arg) 6680 void *arg, struct lock_c << 6681 { 4778 { 6682 struct work_for_cpu wfc = { .fn = fn, 4779 struct work_for_cpu wfc = { .fn = fn, .arg = arg }; 6683 4780 6684 INIT_WORK_ONSTACK_KEY(&wfc.work, work !! 4781 INIT_WORK_ONSTACK(&wfc.work, work_for_cpu_fn); 6685 schedule_work_on(cpu, &wfc.work); 4782 schedule_work_on(cpu, &wfc.work); 6686 flush_work(&wfc.work); 4783 flush_work(&wfc.work); 6687 destroy_work_on_stack(&wfc.work); 4784 destroy_work_on_stack(&wfc.work); 6688 return wfc.ret; 4785 return wfc.ret; 6689 } 4786 } 6690 EXPORT_SYMBOL_GPL(work_on_cpu_key); !! 4787 EXPORT_SYMBOL_GPL(work_on_cpu); 6691 4788 6692 /** 4789 /** 6693 * work_on_cpu_safe_key - run a function in t !! 4790 * work_on_cpu_safe - run a function in thread context on a particular cpu 6694 * @cpu: the cpu to run on 4791 * @cpu: the cpu to run on 6695 * @fn: the function to run 4792 * @fn: the function to run 6696 * @arg: the function argument 4793 * @arg: the function argument 6697 * @key: The lock class key for lock debuggin << 6698 * 4794 * 6699 * Disables CPU hotplug and calls work_on_cpu 4795 * Disables CPU hotplug and calls work_on_cpu(). The caller must not hold 6700 * any locks which would prevent @fn from com 4796 * any locks which would prevent @fn from completing. 
6701 * 4797 * 6702 * Return: The value @fn returns. 4798 * Return: The value @fn returns. 6703 */ 4799 */ 6704 long work_on_cpu_safe_key(int cpu, long (*fn) !! 4800 long work_on_cpu_safe(int cpu, long (*fn)(void *), void *arg) 6705 void *arg, struct l << 6706 { 4801 { 6707 long ret = -ENODEV; 4802 long ret = -ENODEV; 6708 4803 6709 cpus_read_lock(); !! 4804 get_online_cpus(); 6710 if (cpu_online(cpu)) 4805 if (cpu_online(cpu)) 6711 ret = work_on_cpu_key(cpu, fn !! 4806 ret = work_on_cpu(cpu, fn, arg); 6712 cpus_read_unlock(); !! 4807 put_online_cpus(); 6713 return ret; 4808 return ret; 6714 } 4809 } 6715 EXPORT_SYMBOL_GPL(work_on_cpu_safe_key); !! 4810 EXPORT_SYMBOL_GPL(work_on_cpu_safe); 6716 #endif /* CONFIG_SMP */ 4811 #endif /* CONFIG_SMP */ 6717 4812 6718 #ifdef CONFIG_FREEZER 4813 #ifdef CONFIG_FREEZER 6719 4814 6720 /** 4815 /** 6721 * freeze_workqueues_begin - begin freezing w 4816 * freeze_workqueues_begin - begin freezing workqueues 6722 * 4817 * 6723 * Start freezing workqueues. After this fun 4818 * Start freezing workqueues. After this function returns, all freezable 6724 * workqueues will queue new works to their i !! 4819 * workqueues will queue new works to their delayed_works list instead of 6725 * pool->worklist. 4820 * pool->worklist. 6726 * 4821 * 6727 * CONTEXT: 4822 * CONTEXT: 6728 * Grabs and releases wq_pool_mutex, wq->mute 4823 * Grabs and releases wq_pool_mutex, wq->mutex and pool->lock's. 6729 */ 4824 */ 6730 void freeze_workqueues_begin(void) 4825 void freeze_workqueues_begin(void) 6731 { 4826 { 6732 struct workqueue_struct *wq; 4827 struct workqueue_struct *wq; >> 4828 struct pool_workqueue *pwq; 6733 4829 6734 mutex_lock(&wq_pool_mutex); 4830 mutex_lock(&wq_pool_mutex); 6735 4831 6736 WARN_ON_ONCE(workqueue_freezing); 4832 WARN_ON_ONCE(workqueue_freezing); 6737 workqueue_freezing = true; 4833 workqueue_freezing = true; 6738 4834 6739 list_for_each_entry(wq, &workqueues, 4835 list_for_each_entry(wq, &workqueues, list) { 6740 mutex_lock(&wq->mutex); 4836 mutex_lock(&wq->mutex); 6741 wq_adjust_max_active(wq); !! 4837 for_each_pwq(pwq, wq) >> 4838 pwq_adjust_max_active(pwq); 6742 mutex_unlock(&wq->mutex); 4839 mutex_unlock(&wq->mutex); 6743 } 4840 } 6744 4841 6745 mutex_unlock(&wq_pool_mutex); 4842 mutex_unlock(&wq_pool_mutex); 6746 } 4843 } 6747 4844 6748 /** 4845 /** 6749 * freeze_workqueues_busy - are freezable wor 4846 * freeze_workqueues_busy - are freezable workqueues still busy? 6750 * 4847 * 6751 * Check whether freezing is complete. This 4848 * Check whether freezing is complete. This function must be called 6752 * between freeze_workqueues_begin() and thaw 4849 * between freeze_workqueues_begin() and thaw_workqueues(). 6753 * 4850 * 6754 * CONTEXT: 4851 * CONTEXT: 6755 * Grabs and releases wq_pool_mutex. 4852 * Grabs and releases wq_pool_mutex. 6756 * 4853 * 6757 * Return: 4854 * Return: 6758 * %true if some freezable workqueues are sti 4855 * %true if some freezable workqueues are still busy. %false if freezing 6759 * is complete. 4856 * is complete. 
6760 */ 4857 */ 6761 bool freeze_workqueues_busy(void) 4858 bool freeze_workqueues_busy(void) 6762 { 4859 { 6763 bool busy = false; 4860 bool busy = false; 6764 struct workqueue_struct *wq; 4861 struct workqueue_struct *wq; 6765 struct pool_workqueue *pwq; 4862 struct pool_workqueue *pwq; 6766 4863 6767 mutex_lock(&wq_pool_mutex); 4864 mutex_lock(&wq_pool_mutex); 6768 4865 6769 WARN_ON_ONCE(!workqueue_freezing); 4866 WARN_ON_ONCE(!workqueue_freezing); 6770 4867 6771 list_for_each_entry(wq, &workqueues, 4868 list_for_each_entry(wq, &workqueues, list) { 6772 if (!(wq->flags & WQ_FREEZABL 4869 if (!(wq->flags & WQ_FREEZABLE)) 6773 continue; 4870 continue; 6774 /* 4871 /* 6775 * nr_active is monotonically 4872 * nr_active is monotonically decreasing. It's safe 6776 * to peek without lock. 4873 * to peek without lock. 6777 */ 4874 */ 6778 rcu_read_lock(); !! 4875 rcu_read_lock_sched(); 6779 for_each_pwq(pwq, wq) { 4876 for_each_pwq(pwq, wq) { 6780 WARN_ON_ONCE(pwq->nr_ 4877 WARN_ON_ONCE(pwq->nr_active < 0); 6781 if (pwq->nr_active) { 4878 if (pwq->nr_active) { 6782 busy = true; 4879 busy = true; 6783 rcu_read_unlo !! 4880 rcu_read_unlock_sched(); 6784 goto out_unlo 4881 goto out_unlock; 6785 } 4882 } 6786 } 4883 } 6787 rcu_read_unlock(); !! 4884 rcu_read_unlock_sched(); 6788 } 4885 } 6789 out_unlock: 4886 out_unlock: 6790 mutex_unlock(&wq_pool_mutex); 4887 mutex_unlock(&wq_pool_mutex); 6791 return busy; 4888 return busy; 6792 } 4889 } 6793 4890 6794 /** 4891 /** 6795 * thaw_workqueues - thaw workqueues 4892 * thaw_workqueues - thaw workqueues 6796 * 4893 * 6797 * Thaw workqueues. Normal queueing is resto 4894 * Thaw workqueues. Normal queueing is restored and all collected 6798 * frozen works are transferred to their resp 4895 * frozen works are transferred to their respective pool worklists. 6799 * 4896 * 6800 * CONTEXT: 4897 * CONTEXT: 6801 * Grabs and releases wq_pool_mutex, wq->mute 4898 * Grabs and releases wq_pool_mutex, wq->mutex and pool->lock's. 6802 */ 4899 */ 6803 void thaw_workqueues(void) 4900 void thaw_workqueues(void) 6804 { 4901 { 6805 struct workqueue_struct *wq; 4902 struct workqueue_struct *wq; >> 4903 struct pool_workqueue *pwq; 6806 4904 6807 mutex_lock(&wq_pool_mutex); 4905 mutex_lock(&wq_pool_mutex); 6808 4906 6809 if (!workqueue_freezing) 4907 if (!workqueue_freezing) 6810 goto out_unlock; 4908 goto out_unlock; 6811 4909 6812 workqueue_freezing = false; 4910 workqueue_freezing = false; 6813 4911 6814 /* restore max_active and repopulate 4912 /* restore max_active and repopulate worklist */ 6815 list_for_each_entry(wq, &workqueues, 4913 list_for_each_entry(wq, &workqueues, list) { 6816 mutex_lock(&wq->mutex); 4914 mutex_lock(&wq->mutex); 6817 wq_adjust_max_active(wq); !! 4915 for_each_pwq(pwq, wq) >> 4916 pwq_adjust_max_active(pwq); 6818 mutex_unlock(&wq->mutex); 4917 mutex_unlock(&wq->mutex); 6819 } 4918 } 6820 4919 6821 out_unlock: 4920 out_unlock: 6822 mutex_unlock(&wq_pool_mutex); 4921 mutex_unlock(&wq_pool_mutex); 6823 } 4922 } 6824 #endif /* CONFIG_FREEZER */ 4923 #endif /* CONFIG_FREEZER */ 6825 4924 6826 static int workqueue_apply_unbound_cpumask(co !! 
4925 static int workqueue_apply_unbound_cpumask(void) 6827 { 4926 { 6828 LIST_HEAD(ctxs); 4927 LIST_HEAD(ctxs); 6829 int ret = 0; 4928 int ret = 0; 6830 struct workqueue_struct *wq; 4929 struct workqueue_struct *wq; 6831 struct apply_wqattrs_ctx *ctx, *n; 4930 struct apply_wqattrs_ctx *ctx, *n; 6832 4931 6833 lockdep_assert_held(&wq_pool_mutex); 4932 lockdep_assert_held(&wq_pool_mutex); 6834 4933 6835 list_for_each_entry(wq, &workqueues, 4934 list_for_each_entry(wq, &workqueues, list) { 6836 if (!(wq->flags & WQ_UNBOUND) !! 4935 if (!(wq->flags & WQ_UNBOUND)) >> 4936 continue; >> 4937 /* creating multiple pwqs breaks ordering guarantee */ >> 4938 if (wq->flags & __WQ_ORDERED) 6837 continue; 4939 continue; 6838 4940 6839 ctx = apply_wqattrs_prepare(w !! 4941 ctx = apply_wqattrs_prepare(wq, wq->unbound_attrs); 6840 if (IS_ERR(ctx)) { !! 4942 if (!ctx) { 6841 ret = PTR_ERR(ctx); !! 4943 ret = -ENOMEM; 6842 break; 4944 break; 6843 } 4945 } 6844 4946 6845 list_add_tail(&ctx->list, &ct 4947 list_add_tail(&ctx->list, &ctxs); 6846 } 4948 } 6847 4949 6848 list_for_each_entry_safe(ctx, n, &ctx 4950 list_for_each_entry_safe(ctx, n, &ctxs, list) { 6849 if (!ret) 4951 if (!ret) 6850 apply_wqattrs_commit( 4952 apply_wqattrs_commit(ctx); 6851 apply_wqattrs_cleanup(ctx); 4953 apply_wqattrs_cleanup(ctx); 6852 } 4954 } 6853 4955 6854 if (!ret) { << 6855 mutex_lock(&wq_pool_attach_mu << 6856 cpumask_copy(wq_unbound_cpuma << 6857 mutex_unlock(&wq_pool_attach_ << 6858 } << 6859 return ret; 4956 return ret; 6860 } 4957 } 6861 4958 6862 /** 4959 /** 6863 * workqueue_unbound_exclude_cpumask - Exclud !! 4960 * workqueue_set_unbound_cpumask - Set the low-level unbound cpumask 6864 * @exclude_cpumask: the cpumask to be exclud !! 4961 * @cpumask: the cpumask to set >> 4962 * >> 4963 * The low-level workqueues cpumask is a global cpumask that limits >> 4964 * the affinity of all unbound workqueues. This function check the @cpumask >> 4965 * and apply it to all unbound workqueues and updates all pwqs of them. 6865 * 4966 * 6866 * This function can be called from cpuset co !! 4967 * Retun: 0 - Success 6867 * CPUs that should be excluded from wq_unbou !! 4968 * -EINVAL - Invalid @cpumask >> 4969 * -ENOMEM - Failed to allocate memory for attrs or pwqs. 6868 */ 4970 */ 6869 int workqueue_unbound_exclude_cpumask(cpumask !! 4971 int workqueue_set_unbound_cpumask(cpumask_var_t cpumask) 6870 { 4972 { 6871 cpumask_var_t cpumask; !! 4973 int ret = -EINVAL; 6872 int ret = 0; !! 4974 cpumask_var_t saved_cpumask; 6873 4975 6874 if (!zalloc_cpumask_var(&cpumask, GFP !! 4976 if (!zalloc_cpumask_var(&saved_cpumask, GFP_KERNEL)) 6875 return -ENOMEM; 4977 return -ENOMEM; 6876 4978 6877 mutex_lock(&wq_pool_mutex); << 6878 << 6879 /* 4979 /* 6880 * If the operation fails, it will fa !! 4980 * Not excluding isolated cpus on purpose. 6881 * wq_requested_unbound_cpumask which !! 4981 * If the user wishes to include them, we allow that. 6882 * (HK_TYPE_WQ ∩ HK_TYPE_DOMAIN) ho !! 4982 */ 6883 * by any subsequent write to workque !! 4983 cpumask_and(cpumask, cpumask, cpu_possible_mask); 6884 */ !! 4984 if (!cpumask_empty(cpumask)) { 6885 if (!cpumask_andnot(cpumask, wq_reque !! 
4985 apply_wqattrs_lock(); 6886 cpumask_copy(cpumask, wq_requ << 6887 if (!cpumask_equal(cpumask, wq_unboun << 6888 ret = workqueue_apply_unbound << 6889 << 6890 /* Save the current isolated cpumask << 6891 if (!ret) << 6892 cpumask_copy(wq_isolated_cpum << 6893 << 6894 mutex_unlock(&wq_pool_mutex); << 6895 free_cpumask_var(cpumask); << 6896 return ret; << 6897 } << 6898 << 6899 static int parse_affn_scope(const char *val) << 6900 { << 6901 int i; << 6902 << 6903 for (i = 0; i < ARRAY_SIZE(wq_affn_na << 6904 if (!strncasecmp(val, wq_affn << 6905 return i; << 6906 } << 6907 return -EINVAL; << 6908 } << 6909 << 6910 static int wq_affn_dfl_set(const char *val, c << 6911 { << 6912 struct workqueue_struct *wq; << 6913 int affn, cpu; << 6914 << 6915 affn = parse_affn_scope(val); << 6916 if (affn < 0) << 6917 return affn; << 6918 if (affn == WQ_AFFN_DFL) << 6919 return -EINVAL; << 6920 4986 6921 cpus_read_lock(); !! 4987 /* save the old wq_unbound_cpumask. */ 6922 mutex_lock(&wq_pool_mutex); !! 4988 cpumask_copy(saved_cpumask, wq_unbound_cpumask); 6923 4989 6924 wq_affn_dfl = affn; !! 4990 /* update wq_unbound_cpumask at first and apply it to wqs. */ >> 4991 cpumask_copy(wq_unbound_cpumask, cpumask); >> 4992 ret = workqueue_apply_unbound_cpumask(); >> 4993 >> 4994 /* restore the wq_unbound_cpumask when failed. */ >> 4995 if (ret < 0) >> 4996 cpumask_copy(wq_unbound_cpumask, saved_cpumask); 6925 4997 6926 list_for_each_entry(wq, &workqueues, !! 4998 apply_wqattrs_unlock(); 6927 for_each_online_cpu(cpu) << 6928 unbound_wq_update_pwq << 6929 } 4999 } 6930 5000 6931 mutex_unlock(&wq_pool_mutex); !! 5001 free_cpumask_var(saved_cpumask); 6932 cpus_read_unlock(); !! 5002 return ret; 6933 << 6934 return 0; << 6935 } << 6936 << 6937 static int wq_affn_dfl_get(char *buffer, cons << 6938 { << 6939 return scnprintf(buffer, PAGE_SIZE, " << 6940 } 5003 } 6941 5004 6942 static const struct kernel_param_ops wq_affn_ << 6943 .set = wq_affn_dfl_set, << 6944 .get = wq_affn_dfl_get, << 6945 }; << 6946 << 6947 module_param_cb(default_affinity_scope, &wq_a << 6948 << 6949 #ifdef CONFIG_SYSFS 5005 #ifdef CONFIG_SYSFS 6950 /* 5006 /* 6951 * Workqueues with WQ_SYSFS flag set is visib 5007 * Workqueues with WQ_SYSFS flag set is visible to userland via 6952 * /sys/bus/workqueue/devices/WQ_NAME. All v 5008 * /sys/bus/workqueue/devices/WQ_NAME. All visible workqueues have the 6953 * following attributes. 5009 * following attributes. 6954 * 5010 * 6955 * per_cpu RO bool : whether the !! 5011 * per_cpu RO bool : whether the workqueue is per-cpu or unbound 6956 * max_active RW int : maximum num !! 5012 * max_active RW int : maximum number of in-flight work items 6957 * 5013 * 6958 * Unbound workqueues have the following extr 5014 * Unbound workqueues have the following extra attributes. 6959 * 5015 * 6960 * nice RW int : nice value !! 5016 * pool_ids RO int : the associated pool IDs for each node 6961 * cpumask RW mask : bitmask of !! 5017 * nice RW int : nice value of the workers 6962 * affinity_scope RW str : worker CPU !! 5018 * cpumask RW mask : bitmask of allowed CPUs for the workers 6963 * affinity_strict RW bool : worker CPU !! 
5019 * numa RW bool : whether enable NUMA affinity 6964 */ 5020 */ 6965 struct wq_device { 5021 struct wq_device { 6966 struct workqueue_struct *wq; 5022 struct workqueue_struct *wq; 6967 struct device dev; 5023 struct device dev; 6968 }; 5024 }; 6969 5025 6970 static struct workqueue_struct *dev_to_wq(str 5026 static struct workqueue_struct *dev_to_wq(struct device *dev) 6971 { 5027 { 6972 struct wq_device *wq_dev = container_ 5028 struct wq_device *wq_dev = container_of(dev, struct wq_device, dev); 6973 5029 6974 return wq_dev->wq; 5030 return wq_dev->wq; 6975 } 5031 } 6976 5032 6977 static ssize_t per_cpu_show(struct device *de 5033 static ssize_t per_cpu_show(struct device *dev, struct device_attribute *attr, 6978 char *buf) 5034 char *buf) 6979 { 5035 { 6980 struct workqueue_struct *wq = dev_to_ 5036 struct workqueue_struct *wq = dev_to_wq(dev); 6981 5037 6982 return scnprintf(buf, PAGE_SIZE, "%d\ 5038 return scnprintf(buf, PAGE_SIZE, "%d\n", (bool)!(wq->flags & WQ_UNBOUND)); 6983 } 5039 } 6984 static DEVICE_ATTR_RO(per_cpu); 5040 static DEVICE_ATTR_RO(per_cpu); 6985 5041 6986 static ssize_t max_active_show(struct device 5042 static ssize_t max_active_show(struct device *dev, 6987 struct device_ 5043 struct device_attribute *attr, char *buf) 6988 { 5044 { 6989 struct workqueue_struct *wq = dev_to_ 5045 struct workqueue_struct *wq = dev_to_wq(dev); 6990 5046 6991 return scnprintf(buf, PAGE_SIZE, "%d\ 5047 return scnprintf(buf, PAGE_SIZE, "%d\n", wq->saved_max_active); 6992 } 5048 } 6993 5049 6994 static ssize_t max_active_store(struct device 5050 static ssize_t max_active_store(struct device *dev, 6995 struct device 5051 struct device_attribute *attr, const char *buf, 6996 size_t count) 5052 size_t count) 6997 { 5053 { 6998 struct workqueue_struct *wq = dev_to_ 5054 struct workqueue_struct *wq = dev_to_wq(dev); 6999 int val; 5055 int val; 7000 5056 7001 if (sscanf(buf, "%d", &val) != 1 || v 5057 if (sscanf(buf, "%d", &val) != 1 || val <= 0) 7002 return -EINVAL; 5058 return -EINVAL; 7003 5059 7004 workqueue_set_max_active(wq, val); 5060 workqueue_set_max_active(wq, val); 7005 return count; 5061 return count; 7006 } 5062 } 7007 static DEVICE_ATTR_RW(max_active); 5063 static DEVICE_ATTR_RW(max_active); 7008 5064 7009 static struct attribute *wq_sysfs_attrs[] = { 5065 static struct attribute *wq_sysfs_attrs[] = { 7010 &dev_attr_per_cpu.attr, 5066 &dev_attr_per_cpu.attr, 7011 &dev_attr_max_active.attr, 5067 &dev_attr_max_active.attr, 7012 NULL, 5068 NULL, 7013 }; 5069 }; 7014 ATTRIBUTE_GROUPS(wq_sysfs); 5070 ATTRIBUTE_GROUPS(wq_sysfs); 7015 5071 >> 5072 static ssize_t wq_pool_ids_show(struct device *dev, >> 5073 struct device_attribute *attr, char *buf) >> 5074 { >> 5075 struct workqueue_struct *wq = dev_to_wq(dev); >> 5076 const char *delim = ""; >> 5077 int node, written = 0; >> 5078 >> 5079 rcu_read_lock_sched(); >> 5080 for_each_node(node) { >> 5081 written += scnprintf(buf + written, PAGE_SIZE - written, >> 5082 "%s%d:%d", delim, node, >> 5083 unbound_pwq_by_node(wq, node)->pool->id); >> 5084 delim = " "; >> 5085 } >> 5086 written += scnprintf(buf + written, PAGE_SIZE - written, "\n"); >> 5087 rcu_read_unlock_sched(); >> 5088 >> 5089 return written; >> 5090 } >> 5091 7016 static ssize_t wq_nice_show(struct device *de 5092 static ssize_t wq_nice_show(struct device *dev, struct device_attribute *attr, 7017 char *buf) 5093 char *buf) 7018 { 5094 { 7019 struct workqueue_struct *wq = dev_to_ 5095 struct workqueue_struct *wq = dev_to_wq(dev); 7020 int written; 5096 int written; 7021 5097 
7022 mutex_lock(&wq->mutex); 5098 mutex_lock(&wq->mutex); 7023 written = scnprintf(buf, PAGE_SIZE, " 5099 written = scnprintf(buf, PAGE_SIZE, "%d\n", wq->unbound_attrs->nice); 7024 mutex_unlock(&wq->mutex); 5100 mutex_unlock(&wq->mutex); 7025 5101 7026 return written; 5102 return written; 7027 } 5103 } 7028 5104 7029 /* prepare workqueue_attrs for sysfs store op 5105 /* prepare workqueue_attrs for sysfs store operations */ 7030 static struct workqueue_attrs *wq_sysfs_prep_ 5106 static struct workqueue_attrs *wq_sysfs_prep_attrs(struct workqueue_struct *wq) 7031 { 5107 { 7032 struct workqueue_attrs *attrs; 5108 struct workqueue_attrs *attrs; 7033 5109 7034 lockdep_assert_held(&wq_pool_mutex); 5110 lockdep_assert_held(&wq_pool_mutex); 7035 5111 7036 attrs = alloc_workqueue_attrs(); !! 5112 attrs = alloc_workqueue_attrs(GFP_KERNEL); 7037 if (!attrs) 5113 if (!attrs) 7038 return NULL; 5114 return NULL; 7039 5115 7040 copy_workqueue_attrs(attrs, wq->unbou 5116 copy_workqueue_attrs(attrs, wq->unbound_attrs); 7041 return attrs; 5117 return attrs; 7042 } 5118 } 7043 5119 7044 static ssize_t wq_nice_store(struct device *d 5120 static ssize_t wq_nice_store(struct device *dev, struct device_attribute *attr, 7045 const char *buf, 5121 const char *buf, size_t count) 7046 { 5122 { 7047 struct workqueue_struct *wq = dev_to_ 5123 struct workqueue_struct *wq = dev_to_wq(dev); 7048 struct workqueue_attrs *attrs; 5124 struct workqueue_attrs *attrs; 7049 int ret = -ENOMEM; 5125 int ret = -ENOMEM; 7050 5126 7051 apply_wqattrs_lock(); 5127 apply_wqattrs_lock(); 7052 5128 7053 attrs = wq_sysfs_prep_attrs(wq); 5129 attrs = wq_sysfs_prep_attrs(wq); 7054 if (!attrs) 5130 if (!attrs) 7055 goto out_unlock; 5131 goto out_unlock; 7056 5132 7057 if (sscanf(buf, "%d", &attrs->nice) = 5133 if (sscanf(buf, "%d", &attrs->nice) == 1 && 7058 attrs->nice >= MIN_NICE && attrs- 5134 attrs->nice >= MIN_NICE && attrs->nice <= MAX_NICE) 7059 ret = apply_workqueue_attrs_l 5135 ret = apply_workqueue_attrs_locked(wq, attrs); 7060 else 5136 else 7061 ret = -EINVAL; 5137 ret = -EINVAL; 7062 5138 7063 out_unlock: 5139 out_unlock: 7064 apply_wqattrs_unlock(); 5140 apply_wqattrs_unlock(); 7065 free_workqueue_attrs(attrs); 5141 free_workqueue_attrs(attrs); 7066 return ret ?: count; 5142 return ret ?: count; 7067 } 5143 } 7068 5144 7069 static ssize_t wq_cpumask_show(struct device 5145 static ssize_t wq_cpumask_show(struct device *dev, 7070 struct device_ 5146 struct device_attribute *attr, char *buf) 7071 { 5147 { 7072 struct workqueue_struct *wq = dev_to_ 5148 struct workqueue_struct *wq = dev_to_wq(dev); 7073 int written; 5149 int written; 7074 5150 7075 mutex_lock(&wq->mutex); 5151 mutex_lock(&wq->mutex); 7076 written = scnprintf(buf, PAGE_SIZE, " 5152 written = scnprintf(buf, PAGE_SIZE, "%*pb\n", 7077 cpumask_pr_args(w 5153 cpumask_pr_args(wq->unbound_attrs->cpumask)); 7078 mutex_unlock(&wq->mutex); 5154 mutex_unlock(&wq->mutex); 7079 return written; 5155 return written; 7080 } 5156 } 7081 5157 7082 static ssize_t wq_cpumask_store(struct device 5158 static ssize_t wq_cpumask_store(struct device *dev, 7083 struct device 5159 struct device_attribute *attr, 7084 const char *b 5160 const char *buf, size_t count) 7085 { 5161 { 7086 struct workqueue_struct *wq = dev_to_ 5162 struct workqueue_struct *wq = dev_to_wq(dev); 7087 struct workqueue_attrs *attrs; 5163 struct workqueue_attrs *attrs; 7088 int ret = -ENOMEM; 5164 int ret = -ENOMEM; 7089 5165 7090 apply_wqattrs_lock(); 5166 apply_wqattrs_lock(); 7091 5167 7092 attrs = 
wq_sysfs_prep_attrs(wq); 5168 attrs = wq_sysfs_prep_attrs(wq); 7093 if (!attrs) 5169 if (!attrs) 7094 goto out_unlock; 5170 goto out_unlock; 7095 5171 7096 ret = cpumask_parse(buf, attrs->cpuma 5172 ret = cpumask_parse(buf, attrs->cpumask); 7097 if (!ret) 5173 if (!ret) 7098 ret = apply_workqueue_attrs_l 5174 ret = apply_workqueue_attrs_locked(wq, attrs); 7099 5175 7100 out_unlock: 5176 out_unlock: 7101 apply_wqattrs_unlock(); 5177 apply_wqattrs_unlock(); 7102 free_workqueue_attrs(attrs); 5178 free_workqueue_attrs(attrs); 7103 return ret ?: count; 5179 return ret ?: count; 7104 } 5180 } 7105 5181 7106 static ssize_t wq_affn_scope_show(struct devi !! 5182 static ssize_t wq_numa_show(struct device *dev, struct device_attribute *attr, 7107 struct devi !! 5183 char *buf) 7108 { 5184 { 7109 struct workqueue_struct *wq = dev_to_ 5185 struct workqueue_struct *wq = dev_to_wq(dev); 7110 int written; 5186 int written; 7111 5187 7112 mutex_lock(&wq->mutex); 5188 mutex_lock(&wq->mutex); 7113 if (wq->unbound_attrs->affn_scope == !! 5189 written = scnprintf(buf, PAGE_SIZE, "%d\n", 7114 written = scnprintf(buf, PAGE !! 5190 !wq->unbound_attrs->no_numa); 7115 wq_affn_n << 7116 wq_affn_n << 7117 else << 7118 written = scnprintf(buf, PAGE << 7119 wq_affn_n << 7120 mutex_unlock(&wq->mutex); 5191 mutex_unlock(&wq->mutex); 7121 5192 7122 return written; 5193 return written; 7123 } 5194 } 7124 5195 7125 static ssize_t wq_affn_scope_store(struct dev !! 5196 static ssize_t wq_numa_store(struct device *dev, struct device_attribute *attr, 7126 struct dev !! 5197 const char *buf, size_t count) 7127 const char << 7128 { << 7129 struct workqueue_struct *wq = dev_to_ << 7130 struct workqueue_attrs *attrs; << 7131 int affn, ret = -ENOMEM; << 7132 << 7133 affn = parse_affn_scope(buf); << 7134 if (affn < 0) << 7135 return affn; << 7136 << 7137 apply_wqattrs_lock(); << 7138 attrs = wq_sysfs_prep_attrs(wq); << 7139 if (attrs) { << 7140 attrs->affn_scope = affn; << 7141 ret = apply_workqueue_attrs_l << 7142 } << 7143 apply_wqattrs_unlock(); << 7144 free_workqueue_attrs(attrs); << 7145 return ret ?: count; << 7146 } << 7147 << 7148 static ssize_t wq_affinity_strict_show(struct << 7149 struct << 7150 { << 7151 struct workqueue_struct *wq = dev_to_ << 7152 << 7153 return scnprintf(buf, PAGE_SIZE, "%d\ << 7154 wq->unbound_attrs->a << 7155 } << 7156 << 7157 static ssize_t wq_affinity_strict_store(struc << 7158 struc << 7159 const << 7160 { 5198 { 7161 struct workqueue_struct *wq = dev_to_ 5199 struct workqueue_struct *wq = dev_to_wq(dev); 7162 struct workqueue_attrs *attrs; 5200 struct workqueue_attrs *attrs; 7163 int v, ret = -ENOMEM; 5201 int v, ret = -ENOMEM; 7164 5202 7165 if (sscanf(buf, "%d", &v) != 1) << 7166 return -EINVAL; << 7167 << 7168 apply_wqattrs_lock(); 5203 apply_wqattrs_lock(); >> 5204 7169 attrs = wq_sysfs_prep_attrs(wq); 5205 attrs = wq_sysfs_prep_attrs(wq); 7170 if (attrs) { !! 5206 if (!attrs) 7171 attrs->affn_strict = (bool)v; !! 
5207 goto out_unlock; >> 5208 >> 5209 ret = -EINVAL; >> 5210 if (sscanf(buf, "%d", &v) == 1) { >> 5211 attrs->no_numa = !v; 7172 ret = apply_workqueue_attrs_l 5212 ret = apply_workqueue_attrs_locked(wq, attrs); 7173 } 5213 } >> 5214 >> 5215 out_unlock: 7174 apply_wqattrs_unlock(); 5216 apply_wqattrs_unlock(); 7175 free_workqueue_attrs(attrs); 5217 free_workqueue_attrs(attrs); 7176 return ret ?: count; 5218 return ret ?: count; 7177 } 5219 } 7178 5220 7179 static struct device_attribute wq_sysfs_unbou 5221 static struct device_attribute wq_sysfs_unbound_attrs[] = { >> 5222 __ATTR(pool_ids, 0444, wq_pool_ids_show, NULL), 7180 __ATTR(nice, 0644, wq_nice_show, wq_n 5223 __ATTR(nice, 0644, wq_nice_show, wq_nice_store), 7181 __ATTR(cpumask, 0644, wq_cpumask_show 5224 __ATTR(cpumask, 0644, wq_cpumask_show, wq_cpumask_store), 7182 __ATTR(affinity_scope, 0644, wq_affn_ !! 5225 __ATTR(numa, 0644, wq_numa_show, wq_numa_store), 7183 __ATTR(affinity_strict, 0644, wq_affi << 7184 __ATTR_NULL, 5226 __ATTR_NULL, 7185 }; 5227 }; 7186 5228 7187 static const struct bus_type wq_subsys = { !! 5229 static struct bus_type wq_subsys = { 7188 .name = "wo 5230 .name = "workqueue", 7189 .dev_groups = wq_ 5231 .dev_groups = wq_sysfs_groups, 7190 }; 5232 }; 7191 5233 7192 /** !! 5234 static ssize_t wq_unbound_cpumask_show(struct device *dev, 7193 * workqueue_set_unbound_cpumask - Set the l !! 5235 struct device_attribute *attr, char *buf) 7194 * @cpumask: the cpumask to set << 7195 * << 7196 * The low-level workqueues cpumask is a glo << 7197 * the affinity of all unbound workqueues. << 7198 * and apply it to all unbound workqueues an << 7199 * << 7200 * Return: 0 - Success << 7201 * -EINVAL - Invalid @cpumask << 7202 * -ENOMEM - Failed to allocate << 7203 */ << 7204 static int workqueue_set_unbound_cpumask(cpum << 7205 { << 7206 int ret = -EINVAL; << 7207 << 7208 /* << 7209 * Not excluding isolated cpus on pur << 7210 * If the user wishes to include them << 7211 */ << 7212 cpumask_and(cpumask, cpumask, cpu_pos << 7213 if (!cpumask_empty(cpumask)) { << 7214 ret = 0; << 7215 apply_wqattrs_lock(); << 7216 if (!cpumask_equal(cpumask, w << 7217 ret = workqueue_apply << 7218 if (!ret) << 7219 cpumask_copy(wq_reque << 7220 apply_wqattrs_unlock(); << 7221 } << 7222 << 7223 return ret; << 7224 } << 7225 << 7226 static ssize_t __wq_cpumask_show(struct devic << 7227 struct device_attribute *attr << 7228 { 5236 { 7229 int written; 5237 int written; 7230 5238 7231 mutex_lock(&wq_pool_mutex); 5239 mutex_lock(&wq_pool_mutex); 7232 written = scnprintf(buf, PAGE_SIZE, " !! 5240 written = scnprintf(buf, PAGE_SIZE, "%*pb\n", >> 5241 cpumask_pr_args(wq_unbound_cpumask)); 7233 mutex_unlock(&wq_pool_mutex); 5242 mutex_unlock(&wq_pool_mutex); 7234 5243 7235 return written; 5244 return written; 7236 } 5245 } 7237 5246 7238 static ssize_t cpumask_requested_show(struct !! 
5247 static ssize_t wq_unbound_cpumask_store(struct device *dev, 7239 struct device_attribute *attr << 7240 { << 7241 return __wq_cpumask_show(dev, attr, b << 7242 } << 7243 static DEVICE_ATTR_RO(cpumask_requested); << 7244 << 7245 static ssize_t cpumask_isolated_show(struct d << 7246 struct device_attribute *attr << 7247 { << 7248 return __wq_cpumask_show(dev, attr, b << 7249 } << 7250 static DEVICE_ATTR_RO(cpumask_isolated); << 7251 << 7252 static ssize_t cpumask_show(struct device *de << 7253 struct device_attribute *attr << 7254 { << 7255 return __wq_cpumask_show(dev, attr, b << 7256 } << 7257 << 7258 static ssize_t cpumask_store(struct device *d << 7259 struct device_attribute *attr 5248 struct device_attribute *attr, const char *buf, size_t count) 7260 { 5249 { 7261 cpumask_var_t cpumask; 5250 cpumask_var_t cpumask; 7262 int ret; 5251 int ret; 7263 5252 7264 if (!zalloc_cpumask_var(&cpumask, GFP 5253 if (!zalloc_cpumask_var(&cpumask, GFP_KERNEL)) 7265 return -ENOMEM; 5254 return -ENOMEM; 7266 5255 7267 ret = cpumask_parse(buf, cpumask); 5256 ret = cpumask_parse(buf, cpumask); 7268 if (!ret) 5257 if (!ret) 7269 ret = workqueue_set_unbound_c 5258 ret = workqueue_set_unbound_cpumask(cpumask); 7270 5259 7271 free_cpumask_var(cpumask); 5260 free_cpumask_var(cpumask); 7272 return ret ? ret : count; 5261 return ret ? ret : count; 7273 } 5262 } 7274 static DEVICE_ATTR_RW(cpumask); << 7275 5263 7276 static struct attribute *wq_sysfs_cpumask_att !! 5264 static struct device_attribute wq_sysfs_cpumask_attr = 7277 &dev_attr_cpumask.attr, !! 5265 __ATTR(cpumask, 0644, wq_unbound_cpumask_show, 7278 &dev_attr_cpumask_requested.attr, !! 5266 wq_unbound_cpumask_store); 7279 &dev_attr_cpumask_isolated.attr, << 7280 NULL, << 7281 }; << 7282 ATTRIBUTE_GROUPS(wq_sysfs_cpumask); << 7283 5267 7284 static int __init wq_sysfs_init(void) 5268 static int __init wq_sysfs_init(void) 7285 { 5269 { 7286 return subsys_virtual_register(&wq_su !! 5270 int err; >> 5271 >> 5272 err = subsys_virtual_register(&wq_subsys, NULL); >> 5273 if (err) >> 5274 return err; >> 5275 >> 5276 return device_create_file(wq_subsys.dev_root, &wq_sysfs_cpumask_attr); 7287 } 5277 } 7288 core_initcall(wq_sysfs_init); 5278 core_initcall(wq_sysfs_init); 7289 5279 7290 static void wq_device_release(struct device * 5280 static void wq_device_release(struct device *dev) 7291 { 5281 { 7292 struct wq_device *wq_dev = container_ 5282 struct wq_device *wq_dev = container_of(dev, struct wq_device, dev); 7293 5283 7294 kfree(wq_dev); 5284 kfree(wq_dev); 7295 } 5285 } 7296 5286 7297 /** 5287 /** 7298 * workqueue_sysfs_register - make a workqueu 5288 * workqueue_sysfs_register - make a workqueue visible in sysfs 7299 * @wq: the workqueue to register 5289 * @wq: the workqueue to register 7300 * 5290 * 7301 * Expose @wq in sysfs under /sys/bus/workque 5291 * Expose @wq in sysfs under /sys/bus/workqueue/devices. 7302 * alloc_workqueue*() automatically calls thi 5292 * alloc_workqueue*() automatically calls this function if WQ_SYSFS is set 7303 * which is the preferred method. 5293 * which is the preferred method. 7304 * 5294 * 7305 * Workqueue user should use this function di 5295 * Workqueue user should use this function directly iff it wants to apply 7306 * workqueue_attrs before making the workqueu 5296 * workqueue_attrs before making the workqueue visible in sysfs; otherwise, 7307 * apply_workqueue_attrs() may race against u 5297 * apply_workqueue_attrs() may race against userland updating the 7308 * attributes. 5298 * attributes. 
7309 * 5299 * 7310 * Return: 0 on success, -errno on failure. 5300 * Return: 0 on success, -errno on failure. 7311 */ 5301 */ 7312 int workqueue_sysfs_register(struct workqueue 5302 int workqueue_sysfs_register(struct workqueue_struct *wq) 7313 { 5303 { 7314 struct wq_device *wq_dev; 5304 struct wq_device *wq_dev; 7315 int ret; 5305 int ret; 7316 5306 7317 /* 5307 /* 7318 * Adjusting max_active breaks orderi !! 5308 * Adjusting max_active or creating new pwqs by applying 7319 * ordered workqueues. !! 5309 * attributes breaks ordering guarantee. Disallow exposing ordered >> 5310 * workqueues. 7320 */ 5311 */ 7321 if (WARN_ON(wq->flags & __WQ_ORDERED) !! 5312 if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT)) 7322 return -EINVAL; 5313 return -EINVAL; 7323 5314 7324 wq->wq_dev = wq_dev = kzalloc(sizeof( 5315 wq->wq_dev = wq_dev = kzalloc(sizeof(*wq_dev), GFP_KERNEL); 7325 if (!wq_dev) 5316 if (!wq_dev) 7326 return -ENOMEM; 5317 return -ENOMEM; 7327 5318 7328 wq_dev->wq = wq; 5319 wq_dev->wq = wq; 7329 wq_dev->dev.bus = &wq_subsys; 5320 wq_dev->dev.bus = &wq_subsys; 7330 wq_dev->dev.release = wq_device_relea 5321 wq_dev->dev.release = wq_device_release; 7331 dev_set_name(&wq_dev->dev, "%s", wq-> 5322 dev_set_name(&wq_dev->dev, "%s", wq->name); 7332 5323 7333 /* 5324 /* 7334 * unbound_attrs are created separate 5325 * unbound_attrs are created separately. Suppress uevent until 7335 * everything is ready. 5326 * everything is ready. 7336 */ 5327 */ 7337 dev_set_uevent_suppress(&wq_dev->dev, 5328 dev_set_uevent_suppress(&wq_dev->dev, true); 7338 5329 7339 ret = device_register(&wq_dev->dev); 5330 ret = device_register(&wq_dev->dev); 7340 if (ret) { 5331 if (ret) { 7341 put_device(&wq_dev->dev); 5332 put_device(&wq_dev->dev); 7342 wq->wq_dev = NULL; 5333 wq->wq_dev = NULL; 7343 return ret; 5334 return ret; 7344 } 5335 } 7345 5336 7346 if (wq->flags & WQ_UNBOUND) { 5337 if (wq->flags & WQ_UNBOUND) { 7347 struct device_attribute *attr 5338 struct device_attribute *attr; 7348 5339 7349 for (attr = wq_sysfs_unbound_ 5340 for (attr = wq_sysfs_unbound_attrs; attr->attr.name; attr++) { 7350 ret = device_create_f 5341 ret = device_create_file(&wq_dev->dev, attr); 7351 if (ret) { 5342 if (ret) { 7352 device_unregi 5343 device_unregister(&wq_dev->dev); 7353 wq->wq_dev = 5344 wq->wq_dev = NULL; 7354 return ret; 5345 return ret; 7355 } 5346 } 7356 } 5347 } 7357 } 5348 } 7358 5349 7359 dev_set_uevent_suppress(&wq_dev->dev, 5350 dev_set_uevent_suppress(&wq_dev->dev, false); 7360 kobject_uevent(&wq_dev->dev.kobj, KOB 5351 kobject_uevent(&wq_dev->dev.kobj, KOBJ_ADD); 7361 return 0; 5352 return 0; 7362 } 5353 } 7363 5354 7364 /** 5355 /** 7365 * workqueue_sysfs_unregister - undo workqueu 5356 * workqueue_sysfs_unregister - undo workqueue_sysfs_register() 7366 * @wq: the workqueue to unregister 5357 * @wq: the workqueue to unregister 7367 * 5358 * 7368 * If @wq is registered to sysfs by workqueue 5359 * If @wq is registered to sysfs by workqueue_sysfs_register(), unregister. 
7369 */ 5360 */ 7370 static void workqueue_sysfs_unregister(struct 5361 static void workqueue_sysfs_unregister(struct workqueue_struct *wq) 7371 { 5362 { 7372 struct wq_device *wq_dev = wq->wq_dev 5363 struct wq_device *wq_dev = wq->wq_dev; 7373 5364 7374 if (!wq->wq_dev) 5365 if (!wq->wq_dev) 7375 return; 5366 return; 7376 5367 7377 wq->wq_dev = NULL; 5368 wq->wq_dev = NULL; 7378 device_unregister(&wq_dev->dev); 5369 device_unregister(&wq_dev->dev); 7379 } 5370 } 7380 #else /* CONFIG_SYSFS */ 5371 #else /* CONFIG_SYSFS */ 7381 static void workqueue_sysfs_unregister(struct 5372 static void workqueue_sysfs_unregister(struct workqueue_struct *wq) { } 7382 #endif /* CONFIG_SYSFS */ 5373 #endif /* CONFIG_SYSFS */ 7383 5374 7384 /* 5375 /* 7385 * Workqueue watchdog. 5376 * Workqueue watchdog. 7386 * 5377 * 7387 * Stall may be caused by various bugs - miss 5378 * Stall may be caused by various bugs - missing WQ_MEM_RECLAIM, illegal 7388 * flush dependency, a concurrency managed wo 5379 * flush dependency, a concurrency managed work item which stays RUNNING 7389 * indefinitely. Workqueue stalls can be ver 5380 * indefinitely. Workqueue stalls can be very difficult to debug as the 7390 * usual warning mechanisms don't trigger and 5381 * usual warning mechanisms don't trigger and internal workqueue state is 7391 * largely opaque. 5382 * largely opaque. 7392 * 5383 * 7393 * Workqueue watchdog monitors all worker poo 5384 * Workqueue watchdog monitors all worker pools periodically and dumps 7394 * state if some pools failed to make forward 5385 * state if some pools failed to make forward progress for a while where 7395 * forward progress is defined as the first i 5386 * forward progress is defined as the first item on ->worklist changing. 7396 * 5387 * 7397 * This mechanism is controlled through the k 5388 * This mechanism is controlled through the kernel parameter 7398 * "workqueue.watchdog_thresh" which can be u 5389 * "workqueue.watchdog_thresh" which can be updated at runtime through the 7399 * corresponding sysfs parameter file. 5390 * corresponding sysfs parameter file. 7400 */ 5391 */ 7401 #ifdef CONFIG_WQ_WATCHDOG 5392 #ifdef CONFIG_WQ_WATCHDOG 7402 5393 7403 static unsigned long wq_watchdog_thresh = 30; 5394 static unsigned long wq_watchdog_thresh = 30; 7404 static struct timer_list wq_watchdog_timer; 5395 static struct timer_list wq_watchdog_timer; 7405 5396 7406 static unsigned long wq_watchdog_touched = IN 5397 static unsigned long wq_watchdog_touched = INITIAL_JIFFIES; 7407 static DEFINE_PER_CPU(unsigned long, wq_watch 5398 static DEFINE_PER_CPU(unsigned long, wq_watchdog_touched_cpu) = INITIAL_JIFFIES; 7408 5399 7409 /* << 7410 * Show workers that might prevent the proces << 7411 * The only candidates are CPU-bound workers << 7412 * Pending work items should be handled by an << 7413 * in all other situations. 
<< 7414 */ << 7415 static void show_cpu_pool_hog(struct worker_p << 7416 { << 7417 struct worker *worker; << 7418 unsigned long irq_flags; << 7419 int bkt; << 7420 << 7421 raw_spin_lock_irqsave(&pool->lock, ir << 7422 << 7423 hash_for_each(pool->busy_hash, bkt, w << 7424 if (task_is_running(worker->t << 7425 /* << 7426 * Defer printing to << 7427 * drivers that queue << 7428 * also taken in thei << 7429 */ << 7430 printk_deferred_enter << 7431 << 7432 pr_info("pool %d:\n", << 7433 sched_show_task(worke << 7434 << 7435 printk_deferred_exit( << 7436 } << 7437 } << 7438 << 7439 raw_spin_unlock_irqrestore(&pool->loc << 7440 } << 7441 << 7442 static void show_cpu_pools_hogs(void) << 7443 { << 7444 struct worker_pool *pool; << 7445 int pi; << 7446 << 7447 pr_info("Showing backtraces of runnin << 7448 << 7449 rcu_read_lock(); << 7450 << 7451 for_each_pool(pool, pi) { << 7452 if (pool->cpu_stall) << 7453 show_cpu_pool_hog(poo << 7454 << 7455 } << 7456 << 7457 rcu_read_unlock(); << 7458 } << 7459 << 7460 static void wq_watchdog_reset_touched(void) 5400 static void wq_watchdog_reset_touched(void) 7461 { 5401 { 7462 int cpu; 5402 int cpu; 7463 5403 7464 wq_watchdog_touched = jiffies; 5404 wq_watchdog_touched = jiffies; 7465 for_each_possible_cpu(cpu) 5405 for_each_possible_cpu(cpu) 7466 per_cpu(wq_watchdog_touched_c 5406 per_cpu(wq_watchdog_touched_cpu, cpu) = jiffies; 7467 } 5407 } 7468 5408 7469 static void wq_watchdog_timer_fn(struct timer 5409 static void wq_watchdog_timer_fn(struct timer_list *unused) 7470 { 5410 { 7471 unsigned long thresh = READ_ONCE(wq_w 5411 unsigned long thresh = READ_ONCE(wq_watchdog_thresh) * HZ; 7472 bool lockup_detected = false; 5412 bool lockup_detected = false; 7473 bool cpu_pool_stall = false; << 7474 unsigned long now = jiffies; << 7475 struct worker_pool *pool; 5413 struct worker_pool *pool; 7476 int pi; 5414 int pi; 7477 5415 7478 if (!thresh) 5416 if (!thresh) 7479 return; 5417 return; 7480 5418 7481 rcu_read_lock(); 5419 rcu_read_lock(); 7482 5420 7483 for_each_pool(pool, pi) { 5421 for_each_pool(pool, pi) { 7484 unsigned long pool_ts, touche 5422 unsigned long pool_ts, touched, ts; 7485 5423 7486 pool->cpu_stall = false; << 7487 if (list_empty(&pool->worklis 5424 if (list_empty(&pool->worklist)) 7488 continue; 5425 continue; 7489 5426 7490 /* << 7491 * If a virtual machine is st << 7492 * the watchdog like a stall. << 7493 */ << 7494 kvm_check_and_clear_guest_pau << 7495 << 7496 /* get the latest of pool and 5427 /* get the latest of pool and touched timestamps */ 7497 if (pool->cpu >= 0) << 7498 touched = READ_ONCE(p << 7499 else << 7500 touched = READ_ONCE(w << 7501 pool_ts = READ_ONCE(pool->wat 5428 pool_ts = READ_ONCE(pool->watchdog_ts); >> 5429 touched = READ_ONCE(wq_watchdog_touched); 7502 5430 7503 if (time_after(pool_ts, touch 5431 if (time_after(pool_ts, touched)) 7504 ts = pool_ts; 5432 ts = pool_ts; 7505 else 5433 else 7506 ts = touched; 5434 ts = touched; 7507 5435 >> 5436 if (pool->cpu >= 0) { >> 5437 unsigned long cpu_touched = >> 5438 READ_ONCE(per_cpu(wq_watchdog_touched_cpu, >> 5439 pool->cpu)); >> 5440 if (time_after(cpu_touched, ts)) >> 5441 ts = cpu_touched; >> 5442 } >> 5443 7508 /* did we stall? */ 5444 /* did we stall? */ 7509 if (time_after(now, ts + thre !! 
5445 if (time_after(jiffies, ts + thresh)) { 7510 lockup_detected = tru 5446 lockup_detected = true; 7511 if (pool->cpu >= 0 && << 7512 pool->cpu_sta << 7513 cpu_pool_stal << 7514 } << 7515 pr_emerg("BUG: workqu 5447 pr_emerg("BUG: workqueue lockup - pool"); 7516 pr_cont_pool_info(poo 5448 pr_cont_pool_info(pool); 7517 pr_cont(" stuck for % 5449 pr_cont(" stuck for %us!\n", 7518 jiffies_to_ms !! 5450 jiffies_to_msecs(jiffies - pool_ts) / 1000); 7519 } 5451 } 7520 << 7521 << 7522 } 5452 } 7523 5453 7524 rcu_read_unlock(); 5454 rcu_read_unlock(); 7525 5455 7526 if (lockup_detected) 5456 if (lockup_detected) 7527 show_all_workqueues(); !! 5457 show_workqueue_state(); 7528 << 7529 if (cpu_pool_stall) << 7530 show_cpu_pools_hogs(); << 7531 5458 7532 wq_watchdog_reset_touched(); 5459 wq_watchdog_reset_touched(); 7533 mod_timer(&wq_watchdog_timer, jiffies 5460 mod_timer(&wq_watchdog_timer, jiffies + thresh); 7534 } 5461 } 7535 5462 7536 notrace void wq_watchdog_touch(int cpu) !! 5463 void wq_watchdog_touch(int cpu) 7537 { 5464 { 7538 unsigned long thresh = READ_ONCE(wq_w << 7539 unsigned long touch_ts = READ_ONCE(wq << 7540 unsigned long now = jiffies; << 7541 << 7542 if (cpu >= 0) 5465 if (cpu >= 0) 7543 per_cpu(wq_watchdog_touched_c !! 5466 per_cpu(wq_watchdog_touched_cpu, cpu) = jiffies; 7544 else 5467 else 7545 WARN_ONCE(1, "%s should be ca !! 5468 wq_watchdog_touched = jiffies; 7546 << 7547 /* Don't unnecessarily store to globa << 7548 if (time_after(now, touch_ts + thresh << 7549 WRITE_ONCE(wq_watchdog_touche << 7550 } 5469 } 7551 5470 7552 static void wq_watchdog_set_thresh(unsigned l 5471 static void wq_watchdog_set_thresh(unsigned long thresh) 7553 { 5472 { 7554 wq_watchdog_thresh = 0; 5473 wq_watchdog_thresh = 0; 7555 del_timer_sync(&wq_watchdog_timer); 5474 del_timer_sync(&wq_watchdog_timer); 7556 5475 7557 if (thresh) { 5476 if (thresh) { 7558 wq_watchdog_thresh = thresh; 5477 wq_watchdog_thresh = thresh; 7559 wq_watchdog_reset_touched(); 5478 wq_watchdog_reset_touched(); 7560 mod_timer(&wq_watchdog_timer, 5479 mod_timer(&wq_watchdog_timer, jiffies + thresh * HZ); 7561 } 5480 } 7562 } 5481 } 7563 5482 7564 static int wq_watchdog_param_set_thresh(const 5483 static int wq_watchdog_param_set_thresh(const char *val, 7565 const 5484 const struct kernel_param *kp) 7566 { 5485 { 7567 unsigned long thresh; 5486 unsigned long thresh; 7568 int ret; 5487 int ret; 7569 5488 7570 ret = kstrtoul(val, 0, &thresh); 5489 ret = kstrtoul(val, 0, &thresh); 7571 if (ret) 5490 if (ret) 7572 return ret; 5491 return ret; 7573 5492 7574 if (system_wq) 5493 if (system_wq) 7575 wq_watchdog_set_thresh(thresh 5494 wq_watchdog_set_thresh(thresh); 7576 else 5495 else 7577 wq_watchdog_thresh = thresh; 5496 wq_watchdog_thresh = thresh; 7578 5497 7579 return 0; 5498 return 0; 7580 } 5499 } 7581 5500 7582 static const struct kernel_param_ops wq_watch 5501 static const struct kernel_param_ops wq_watchdog_thresh_ops = { 7583 .set = wq_watchdog_param_set_thres 5502 .set = wq_watchdog_param_set_thresh, 7584 .get = param_get_ulong, 5503 .get = param_get_ulong, 7585 }; 5504 }; 7586 5505 7587 module_param_cb(watchdog_thresh, &wq_watchdog 5506 module_param_cb(watchdog_thresh, &wq_watchdog_thresh_ops, &wq_watchdog_thresh, 7588 0644); 5507 0644); 7589 5508 7590 static void wq_watchdog_init(void) 5509 static void wq_watchdog_init(void) 7591 { 5510 { 7592 timer_setup(&wq_watchdog_timer, wq_wa 5511 timer_setup(&wq_watchdog_timer, wq_watchdog_timer_fn, TIMER_DEFERRABLE); 7593 wq_watchdog_set_thresh(wq_watchdog_th 5512 
wq_watchdog_set_thresh(wq_watchdog_thresh); 7594 } 5513 } 7595 5514 7596 #else /* CONFIG_WQ_WATCHDOG */ 5515 #else /* CONFIG_WQ_WATCHDOG */ 7597 5516 7598 static inline void wq_watchdog_init(void) { } 5517 static inline void wq_watchdog_init(void) { } 7599 5518 7600 #endif /* CONFIG_WQ_WATCHDOG */ 5519 #endif /* CONFIG_WQ_WATCHDOG */ 7601 5520 7602 static void bh_pool_kick_normal(struct irq_wo !! 5521 static void __init wq_numa_init(void) 7603 { 5522 { 7604 raise_softirq_irqoff(TASKLET_SOFTIRQ) !! 5523 cpumask_var_t *tbl; 7605 } !! 5524 int node, cpu; 7606 5525 7607 static void bh_pool_kick_highpri(struct irq_w !! 5526 if (num_possible_nodes() <= 1) 7608 { !! 5527 return; 7609 raise_softirq_irqoff(HI_SOFTIRQ); << 7610 } << 7611 5528 7612 static void __init restrict_unbound_cpumask(c !! 5529 if (wq_disable_numa) { 7613 { !! 5530 pr_info("workqueue: NUMA affinity support disabled\n"); 7614 if (!cpumask_intersects(wq_unbound_cp << 7615 pr_warn("workqueue: Restricti << 7616 cpumask_pr_args(wq_un << 7617 return; 5531 return; 7618 } 5532 } 7619 5533 7620 cpumask_and(wq_unbound_cpumask, wq_un !! 5534 wq_update_unbound_numa_attrs_buf = alloc_workqueue_attrs(GFP_KERNEL); 7621 } !! 5535 BUG_ON(!wq_update_unbound_numa_attrs_buf); 7622 5536 7623 static void __init init_cpu_worker_pool(struc !! 5537 /* 7624 { !! 5538 * We want masks of possible CPUs of each node which isn't readily 7625 BUG_ON(init_worker_pool(pool)); !! 5539 * available. Build one from cpu_to_node() which should have been 7626 pool->cpu = cpu; !! 5540 * fully initialized by now. 7627 cpumask_copy(pool->attrs->cpumask, cp !! 5541 */ 7628 cpumask_copy(pool->attrs->__pod_cpuma !! 5542 tbl = kzalloc(nr_node_ids * sizeof(tbl[0]), GFP_KERNEL); 7629 pool->attrs->nice = nice; !! 5543 BUG_ON(!tbl); 7630 pool->attrs->affn_strict = true; << 7631 pool->node = cpu_to_node(cpu); << 7632 5544 7633 /* alloc pool ID */ !! 5545 for_each_node(node) 7634 mutex_lock(&wq_pool_mutex); !! 5546 BUG_ON(!zalloc_cpumask_var_node(&tbl[node], GFP_KERNEL, 7635 BUG_ON(worker_pool_assign_id(pool)); !! 5547 node_online(node) ? node : NUMA_NO_NODE)); 7636 mutex_unlock(&wq_pool_mutex); !! 5548 >> 5549 for_each_possible_cpu(cpu) { >> 5550 node = cpu_to_node(cpu); >> 5551 if (WARN_ON(node == NUMA_NO_NODE)) { >> 5552 pr_warn("workqueue: NUMA node mapping not available for cpu%d, disabling NUMA support\n", cpu); >> 5553 /* happens iff arch is bonkers, let's just proceed */ >> 5554 return; >> 5555 } >> 5556 cpumask_set_cpu(cpu, tbl[node]); >> 5557 } >> 5558 >> 5559 wq_numa_possible_cpumask = tbl; >> 5560 wq_numa_enabled = true; 7637 } 5561 } 7638 5562 7639 /** 5563 /** 7640 * workqueue_init_early - early init for work 5564 * workqueue_init_early - early init for workqueue subsystem 7641 * 5565 * 7642 * This is the first step of three-staged wor !! 5566 * This is the first half of two-staged workqueue subsystem initialization 7643 * invoked as soon as the bare basics - memor !! 5567 * and invoked as soon as the bare basics - memory allocation, cpumasks and 7644 * up. It sets up all the data structures and !! 5568 * idr are up. It sets up all the data structures and system workqueues 7645 * boot code to create workqueues and queue/c !! 5569 * and allows early boot code to create workqueues and queue/cancel work 7646 * execution starts only after kthreads can b !! 5570 * items. Actual work item execution starts only after kthreads can be 7647 * before early initcalls. !! 5571 * created and scheduled right before early initcalls. 
7648 */ 5572 */ 7649 void __init workqueue_init_early(void) !! 5573 int __init workqueue_init_early(void) 7650 { 5574 { 7651 struct wq_pod_type *pt = &wq_pod_type << 7652 int std_nice[NR_STD_WORKER_POOLS] = { 5575 int std_nice[NR_STD_WORKER_POOLS] = { 0, HIGHPRI_NICE_LEVEL }; 7653 void (*irq_work_fns[2])(struct irq_wo << 7654 << 7655 int i, cpu; 5576 int i, cpu; 7656 5577 7657 BUILD_BUG_ON(__alignof__(struct pool_ !! 5578 WARN_ON(__alignof__(struct pool_workqueue) < __alignof__(long long)); 7658 5579 7659 BUG_ON(!alloc_cpumask_var(&wq_online_ << 7660 BUG_ON(!alloc_cpumask_var(&wq_unbound 5580 BUG_ON(!alloc_cpumask_var(&wq_unbound_cpumask, GFP_KERNEL)); 7661 BUG_ON(!alloc_cpumask_var(&wq_request !! 5581 cpumask_copy(wq_unbound_cpumask, housekeeping_cpumask(HK_FLAG_DOMAIN)); 7662 BUG_ON(!zalloc_cpumask_var(&wq_isolat << 7663 << 7664 cpumask_copy(wq_online_cpumask, cpu_o << 7665 cpumask_copy(wq_unbound_cpumask, cpu_ << 7666 restrict_unbound_cpumask("HK_TYPE_WQ" << 7667 restrict_unbound_cpumask("HK_TYPE_DOM << 7668 if (!cpumask_empty(&wq_cmdline_cpumas << 7669 restrict_unbound_cpumask("wor << 7670 << 7671 cpumask_copy(wq_requested_unbound_cpu << 7672 5582 7673 pwq_cache = KMEM_CACHE(pool_workqueue 5583 pwq_cache = KMEM_CACHE(pool_workqueue, SLAB_PANIC); 7674 5584 7675 unbound_wq_update_pwq_attrs_buf = all !! 5585 /* initialize CPU pools */ 7676 BUG_ON(!unbound_wq_update_pwq_attrs_b << 7677 << 7678 /* << 7679 * If nohz_full is enabled, set power << 7680 * This allows workqueue items to be << 7681 */ << 7682 if (housekeeping_enabled(HK_TYPE_TICK << 7683 wq_power_efficient = true; << 7684 << 7685 /* initialize WQ_AFFN_SYSTEM pods */ << 7686 pt->pod_cpus = kcalloc(1, sizeof(pt-> << 7687 pt->pod_node = kcalloc(1, sizeof(pt-> << 7688 pt->cpu_pod = kcalloc(nr_cpu_ids, siz << 7689 BUG_ON(!pt->pod_cpus || !pt->pod_node << 7690 << 7691 BUG_ON(!zalloc_cpumask_var_node(&pt-> << 7692 << 7693 pt->nr_pods = 1; << 7694 cpumask_copy(pt->pod_cpus[0], cpu_pos << 7695 pt->pod_node[0] = NUMA_NO_NODE; << 7696 pt->cpu_pod[0] = 0; << 7697 << 7698 /* initialize BH and CPU pools */ << 7699 for_each_possible_cpu(cpu) { 5586 for_each_possible_cpu(cpu) { 7700 struct worker_pool *pool; 5587 struct worker_pool *pool; 7701 5588 7702 i = 0; 5589 i = 0; 7703 for_each_bh_worker_pool(pool, !! 5590 for_each_cpu_worker_pool(pool, cpu) { 7704 init_cpu_worker_pool( !! 5591 BUG_ON(init_worker_pool(pool)); 7705 pool->flags |= POOL_B !! 5592 pool->cpu = cpu; 7706 init_irq_work(bh_pool !! 5593 cpumask_copy(pool->attrs->cpumask, cpumask_of(cpu)); 7707 i++; !! 5594 pool->attrs->nice = std_nice[i++]; 7708 } !! 5595 pool->node = cpu_to_node(cpu); 7709 5596 7710 i = 0; !! 5597 /* alloc pool ID */ 7711 for_each_cpu_worker_pool(pool !! 5598 mutex_lock(&wq_pool_mutex); 7712 init_cpu_worker_pool( !! 5599 BUG_ON(worker_pool_assign_id(pool)); >> 5600 mutex_unlock(&wq_pool_mutex); >> 5601 } 7713 } 5602 } 7714 5603 7715 /* create default unbound and ordered 5604 /* create default unbound and ordered wq attrs */ 7716 for (i = 0; i < NR_STD_WORKER_POOLS; 5605 for (i = 0; i < NR_STD_WORKER_POOLS; i++) { 7717 struct workqueue_attrs *attrs 5606 struct workqueue_attrs *attrs; 7718 5607 7719 BUG_ON(!(attrs = alloc_workqu !! 
5608 BUG_ON(!(attrs = alloc_workqueue_attrs(GFP_KERNEL))); 7720 attrs->nice = std_nice[i]; 5609 attrs->nice = std_nice[i]; 7721 unbound_std_wq_attrs[i] = att 5610 unbound_std_wq_attrs[i] = attrs; 7722 5611 7723 /* 5612 /* 7724 * An ordered wq should have 5613 * An ordered wq should have only one pwq as ordering is 7725 * guaranteed by max_active w 5614 * guaranteed by max_active which is enforced by pwqs. >> 5615 * Turn off NUMA so that dfl_pwq is used for all nodes. 7726 */ 5616 */ 7727 BUG_ON(!(attrs = alloc_workqu !! 5617 BUG_ON(!(attrs = alloc_workqueue_attrs(GFP_KERNEL))); 7728 attrs->nice = std_nice[i]; 5618 attrs->nice = std_nice[i]; 7729 attrs->ordered = true; !! 5619 attrs->no_numa = true; 7730 ordered_wq_attrs[i] = attrs; 5620 ordered_wq_attrs[i] = attrs; 7731 } 5621 } 7732 5622 7733 system_wq = alloc_workqueue("events", 5623 system_wq = alloc_workqueue("events", 0, 0); 7734 system_highpri_wq = alloc_workqueue(" 5624 system_highpri_wq = alloc_workqueue("events_highpri", WQ_HIGHPRI, 0); 7735 system_long_wq = alloc_workqueue("eve 5625 system_long_wq = alloc_workqueue("events_long", 0, 0); 7736 system_unbound_wq = alloc_workqueue(" 5626 system_unbound_wq = alloc_workqueue("events_unbound", WQ_UNBOUND, 7737 W !! 5627 WQ_UNBOUND_MAX_ACTIVE); 7738 system_freezable_wq = alloc_workqueue 5628 system_freezable_wq = alloc_workqueue("events_freezable", 7739 5629 WQ_FREEZABLE, 0); 7740 system_power_efficient_wq = alloc_wor 5630 system_power_efficient_wq = alloc_workqueue("events_power_efficient", 7741 5631 WQ_POWER_EFFICIENT, 0); 7742 system_freezable_power_efficient_wq = !! 5632 system_freezable_power_efficient_wq = alloc_workqueue("events_freezable_power_efficient", 7743 5633 WQ_FREEZABLE | WQ_POWER_EFFICIENT, 7744 5634 0); 7745 system_bh_wq = alloc_workqueue("event << 7746 system_bh_highpri_wq = alloc_workqueu << 7747 << 7748 BUG_ON(!system_wq || !system_highpri_ 5635 BUG_ON(!system_wq || !system_highpri_wq || !system_long_wq || 7749 !system_unbound_wq || !system_ 5636 !system_unbound_wq || !system_freezable_wq || 7750 !system_power_efficient_wq || 5637 !system_power_efficient_wq || 7751 !system_freezable_power_effici !! 5638 !system_freezable_power_efficient_wq); 7752 !system_bh_wq || !system_bh_hi << 7753 } << 7754 5639 7755 static void __init wq_cpu_intensive_thresh_in !! 5640 return 0; 7756 { << 7757 unsigned long thresh; << 7758 unsigned long bogo; << 7759 << 7760 pwq_release_worker = kthread_create_w << 7761 BUG_ON(IS_ERR(pwq_release_worker)); << 7762 << 7763 /* if the user set it to a specific v << 7764 if (wq_cpu_intensive_thresh_us != ULO << 7765 return; << 7766 << 7767 /* << 7768 * The default of 10ms is derived fro << 7769 * 2023) processors can do a lot in 1 << 7770 * most consider human-perceivable. H << 7771 * lot slower CPUs including microcon << 7772 * too low. << 7773 * << 7774 * Let's scale up the threshold upto << 7775 * This is by no means accurate but i << 7776 * is still useful even when the thre << 7777 * the reports would usually be appli << 7778 * operating on longer thresholds won << 7779 * usefulness. 
<< 7780 */ << 7781 thresh = 10 * USEC_PER_MSEC; << 7782 << 7783 /* see init/calibrate.c for lpj -> Bo << 7784 bogo = max_t(unsigned long, loops_per << 7785 if (bogo < 4000) << 7786 thresh = min_t(unsigned long, << 7787 << 7788 pr_debug("wq_cpu_intensive_thresh: lp << 7789 loops_per_jiffy, bogo, thres << 7790 << 7791 wq_cpu_intensive_thresh_us = thresh; << 7792 } 5641 } 7793 5642 7794 /** 5643 /** 7795 * workqueue_init - bring workqueue subsystem 5644 * workqueue_init - bring workqueue subsystem fully online 7796 * 5645 * 7797 * This is the second step of three-staged wo !! 5646 * This is the latter half of two-staged workqueue subsystem initialization 7798 * and invoked as soon as kthreads can be cre !! 5647 * and invoked as soon as kthreads can be created and scheduled. 7799 * been created and work items queued on them !! 5648 * Workqueues have been created and work items queued on them, but there 7800 * executing the work items yet. Populate the !! 5649 * are no kworkers executing the work items yet. Populate the worker pools 7801 * workers and enable future kworker creation !! 5650 * with the initial workers and enable future kworker creations. 7802 */ 5651 */ 7803 void __init workqueue_init(void) !! 5652 int __init workqueue_init(void) 7804 { 5653 { 7805 struct workqueue_struct *wq; 5654 struct workqueue_struct *wq; 7806 struct worker_pool *pool; 5655 struct worker_pool *pool; 7807 int cpu, bkt; 5656 int cpu, bkt; 7808 5657 7809 wq_cpu_intensive_thresh_init(); !! 5658 /* >> 5659 * It'd be simpler to initialize NUMA in workqueue_init_early() but >> 5660 * CPU to node mapping may not be available that early on some >> 5661 * archs such as power and arm64. As per-cpu pools created >> 5662 * previously could be missing node hint and unbound pools NUMA >> 5663 * affinity, fix them up. >> 5664 * >> 5665 * Also, while iterating workqueues, create rescuers if requested. >> 5666 */ >> 5667 wq_numa_init(); 7810 5668 7811 mutex_lock(&wq_pool_mutex); 5669 mutex_lock(&wq_pool_mutex); 7812 5670 7813 /* << 7814 * Per-cpu pools created earlier coul << 7815 * up. Also, create a rescuer for wor << 7816 */ << 7817 for_each_possible_cpu(cpu) { 5671 for_each_possible_cpu(cpu) { 7818 for_each_bh_worker_pool(pool, !! 5672 for_each_cpu_worker_pool(pool, cpu) { 7819 pool->node = cpu_to_n << 7820 for_each_cpu_worker_pool(pool << 7821 pool->node = cpu_to_n 5673 pool->node = cpu_to_node(cpu); >> 5674 } 7822 } 5675 } 7823 5676 7824 list_for_each_entry(wq, &workqueues, 5677 list_for_each_entry(wq, &workqueues, list) { >> 5678 wq_update_unbound_numa(wq, smp_processor_id(), true); 7825 WARN(init_rescuer(wq), 5679 WARN(init_rescuer(wq), 7826 "workqueue: failed to cr 5680 "workqueue: failed to create early rescuer for %s", 7827 wq->name); 5681 wq->name); 7828 } 5682 } 7829 5683 7830 mutex_unlock(&wq_pool_mutex); 5684 mutex_unlock(&wq_pool_mutex); 7831 5685 7832 /* !! 5686 /* create the initial workers */ 7833 * Create the initial workers. A BH p << 7834 * represents the shared BH execution << 7835 * affected by hotplug events. Create << 7836 * possible CPUs here. 
<< 7837 */ << 7838 for_each_possible_cpu(cpu) << 7839 for_each_bh_worker_pool(pool, << 7840 BUG_ON(!create_worker << 7841 << 7842 for_each_online_cpu(cpu) { 5687 for_each_online_cpu(cpu) { 7843 for_each_cpu_worker_pool(pool 5688 for_each_cpu_worker_pool(pool, cpu) { 7844 pool->flags &= ~POOL_ 5689 pool->flags &= ~POOL_DISASSOCIATED; 7845 BUG_ON(!create_worker 5690 BUG_ON(!create_worker(pool)); 7846 } 5691 } 7847 } 5692 } 7848 5693 7849 hash_for_each(unbound_pool_hash, bkt, 5694 hash_for_each(unbound_pool_hash, bkt, pool, hash_node) 7850 BUG_ON(!create_worker(pool)); 5695 BUG_ON(!create_worker(pool)); 7851 5696 7852 wq_online = true; 5697 wq_online = true; 7853 wq_watchdog_init(); 5698 wq_watchdog_init(); 7854 } << 7855 << 7856 /* << 7857 * Initialize @pt by first initializing @pt-> << 7858 * @cpu_shares_pod(). Each subset of CPUs tha << 7859 * and consecutive pod ID. The rest of @pt is << 7860 */ << 7861 static void __init init_pod_type(struct wq_po << 7862 bool (*cpus_ << 7863 { << 7864 int cur, pre, cpu, pod; << 7865 5699 7866 pt->nr_pods = 0; !! 5700 return 0; 7867 << 7868 /* init @pt->cpu_pod[] according to @ << 7869 pt->cpu_pod = kcalloc(nr_cpu_ids, siz << 7870 BUG_ON(!pt->cpu_pod); << 7871 << 7872 for_each_possible_cpu(cur) { << 7873 for_each_possible_cpu(pre) { << 7874 if (pre >= cur) { << 7875 pt->cpu_pod[c << 7876 break; << 7877 } << 7878 if (cpus_share_pod(cu << 7879 pt->cpu_pod[c << 7880 break; << 7881 } << 7882 } << 7883 } << 7884 << 7885 /* init the rest to match @pt->cpu_po << 7886 pt->pod_cpus = kcalloc(pt->nr_pods, s << 7887 pt->pod_node = kcalloc(pt->nr_pods, s << 7888 BUG_ON(!pt->pod_cpus || !pt->pod_node << 7889 << 7890 for (pod = 0; pod < pt->nr_pods; pod+ << 7891 BUG_ON(!zalloc_cpumask_var(&p << 7892 << 7893 for_each_possible_cpu(cpu) { << 7894 cpumask_set_cpu(cpu, pt->pod_ << 7895 pt->pod_node[pt->cpu_pod[cpu] << 7896 } << 7897 } << 7898 << 7899 static bool __init cpus_dont_share(int cpu0, << 7900 { << 7901 return false; << 7902 } << 7903 << 7904 static bool __init cpus_share_smt(int cpu0, i << 7905 { << 7906 #ifdef CONFIG_SCHED_SMT << 7907 return cpumask_test_cpu(cpu0, cpu_smt << 7908 #else << 7909 return false; << 7910 #endif << 7911 } << 7912 << 7913 static bool __init cpus_share_numa(int cpu0, << 7914 { << 7915 return cpu_to_node(cpu0) == cpu_to_no << 7916 } << 7917 << 7918 /** << 7919 * workqueue_init_topology - initialize CPU p << 7920 * << 7921 * This is the third step of three-staged wor << 7922 * invoked after SMP and topology information << 7923 * initializes the unbound CPU pods according << 7924 */ << 7925 void __init workqueue_init_topology(void) << 7926 { << 7927 struct workqueue_struct *wq; << 7928 int cpu; << 7929 << 7930 init_pod_type(&wq_pod_types[WQ_AFFN_C << 7931 init_pod_type(&wq_pod_types[WQ_AFFN_S << 7932 init_pod_type(&wq_pod_types[WQ_AFFN_C << 7933 init_pod_type(&wq_pod_types[WQ_AFFN_N << 7934 << 7935 wq_topo_initialized = true; << 7936 << 7937 mutex_lock(&wq_pool_mutex); << 7938 << 7939 /* << 7940 * Workqueues allocated earlier would << 7941 * worker pool. 
Explicitly call unbou << 7942 * and CPU combinations to apply per- << 7943 */ << 7944 list_for_each_entry(wq, &workqueues, << 7945 for_each_online_cpu(cpu) << 7946 unbound_wq_update_pwq << 7947 if (wq->flags & WQ_UNBOUND) { << 7948 mutex_lock(&wq->mutex << 7949 wq_update_node_max_ac << 7950 mutex_unlock(&wq->mut << 7951 } << 7952 } << 7953 << 7954 mutex_unlock(&wq_pool_mutex); << 7955 } << 7956 << 7957 void __warn_flushing_systemwide_wq(void) << 7958 { << 7959 pr_warn("WARNING: Flushing system-wid << 7960 dump_stack(); << 7961 } << 7962 EXPORT_SYMBOL(__warn_flushing_systemwide_wq); << 7963 << 7964 static int __init workqueue_unbound_cpus_setu << 7965 { << 7966 if (cpulist_parse(str, &wq_cmdline_cp << 7967 cpumask_clear(&wq_cmdline_cpu << 7968 pr_warn("workqueue.unbound_cp << 7969 } << 7970 << 7971 return 1; << 7972 } 5701 } 7973 __setup("workqueue.unbound_cpus=", workqueue_ << 7974 5702
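/*
 * Illustrative sketch, not part of workqueue.c: a minimal module showing the
 * driver-facing side of the interfaces described above.  It allocates an
 * unbound workqueue with WQ_SYSFS so the attributes documented in this file
 * (per_cpu, max_active, nice, cpumask, and the unbound affinity knobs) become
 * visible under /sys/bus/workqueue/devices/example_wq/.  The names
 * "example_wq", example_work_fn() and example_init()/example_exit() are
 * invented for the example.
 */
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/workqueue.h>

static struct workqueue_struct *example_wq;

static void example_work_fn(struct work_struct *work)
{
	pr_info("example_wq: work item executed\n");
}

static DECLARE_WORK(example_work, example_work_fn);

static int __init example_init(void)
{
	/* max_active of 0 selects the default; WQ_SYSFS registers the sysfs device */
	example_wq = alloc_workqueue("example_wq", WQ_UNBOUND | WQ_SYSFS, 0);
	if (!example_wq)
		return -ENOMEM;

	/*
	 * Userland can now tune the workqueue, e.g.:
	 *   echo 0-3 > /sys/bus/workqueue/devices/example_wq/cpumask
	 *   echo -5  > /sys/bus/workqueue/devices/example_wq/nice
	 */
	queue_work(example_wq, &example_work);
	return 0;
}

static void __exit example_exit(void)
{
	/* flushes pending work items before tearing the workqueue down */
	destroy_workqueue(example_wq);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");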