// SPDX-License-Identifier: GPL-2.0-only
/*
 * kernel/workqueue.c - generic async execution with shared worker pool
 *
 * Copyright (C) 2002		Ingo Molnar
 *
 *   Derived from the taskqueue/keventd code by:
 *     David Woodhouse <dwmw2@infradead.org>
 *     Andrew Morton
 *     Kai Petzke <wpp@marie.physik.tu-berlin.de>
 *     Theodore Ts'o <tytso@mit.edu>
 *
 * Made to use alloc_percpu by Christoph Lameter.
 *
 * Copyright (C) 2010		SUSE Linux Products GmbH
 * Copyright (C) 2010		Tejun Heo <tj@kernel.org>
 *
 * This is the generic async execution mechanism.  Work items are
 * executed in process context.  The worker pool is shared and
 * automatically managed.  There are two worker pools for each CPU (one for
 * normal work items and the other for high priority ones) and some extra
 * pools for workqueues which are not bound to any specific CPU - the
 * number of these backing pools is dynamic.
 *
 * Please read Documentation/core-api/workqueue.rst for details.
 */

#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/mempolicy.h>
#include <linux/freezer.h>
#include <linux/debug_locks.h>
#include <linux/lockdep.h>
#include <linux/idr.h>
#include <linux/jhash.h>
#include <linux/hashtable.h>
#include <linux/rculist.h>
#include <linux/nodemask.h>
#include <linux/moduleparam.h>
#include <linux/uaccess.h>
#include <linux/sched/isolation.h>
#include <linux/sched/debug.h>
#include <linux/nmi.h>
#include <linux/kvm_para.h>
#include <linux/delay.h>
#include <linux/irq_work.h>

#include "workqueue_internal.h"
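/*
 * Editorial example (not part of the original file): the user-facing API
 * that this file implements.  A work item is bound to a function at init
 * time and queued to the shared pools; the my_* names are hypothetical.
 */
static void my_work_fn(struct work_struct *work)
{
        pr_info("running in process context on a shared kworker\n");
}

static DECLARE_WORK(my_work, my_work_fn);

static void my_trigger(void)
{
        /* queue on system_wq; a pool worker will invoke my_work_fn() */
        schedule_work(&my_work);
}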
enum worker_pool_flags {
        /*
         * worker_pool flags
         *
         * A bound pool is either associated or disassociated with its CPU.
         * While associated (!DISASSOCIATED), all workers are bound to the
         * CPU and none has %WORKER_UNBOUND set and concurrency management
         * is in effect.
         *
         * While DISASSOCIATED, the cpu may be offline and all workers have
         * %WORKER_UNBOUND set and concurrency management disabled, and may
         * be executing on any CPU.  The pool behaves as an unbound one.
         *
         * Note that DISASSOCIATED should be flipped only while holding
         * wq_pool_attach_mutex to avoid changing binding state while
         * worker_attach_to_pool() is in progress.
         *
         * As there can only be one concurrent BH execution context per CPU, a
         * BH pool is per-CPU and always DISASSOCIATED.
         */
        POOL_BH                 = 1 << 0,       /* is a BH pool */
        POOL_MANAGER_ACTIVE     = 1 << 1,       /* being managed */
        POOL_DISASSOCIATED      = 1 << 2,       /* cpu can't serve workers */
        POOL_BH_DRAINING        = 1 << 3,       /* draining after CPU offline */
};

enum worker_flags {
        /* worker flags */
        WORKER_DIE              = 1 << 1,       /* die die die */
        WORKER_IDLE             = 1 << 2,       /* is idle */
        WORKER_PREP             = 1 << 3,       /* preparing to run works */
        WORKER_CPU_INTENSIVE    = 1 << 6,       /* cpu intensive */
        WORKER_UNBOUND          = 1 << 7,       /* worker is unbound */
        WORKER_REBOUND          = 1 << 8,       /* worker was rebound */

        WORKER_NOT_RUNNING      = WORKER_PREP | WORKER_CPU_INTENSIVE |
                                  WORKER_UNBOUND | WORKER_REBOUND,
};

enum work_cancel_flags {
        WORK_CANCEL_DELAYED     = 1 << 0,       /* canceling a delayed_work */
        WORK_CANCEL_DISABLE     = 1 << 1,       /* canceling to disable */
};

enum wq_internal_consts {
        NR_STD_WORKER_POOLS     = 2,            /* # standard pools per cpu */

        UNBOUND_POOL_HASH_ORDER = 6,            /* hashed by pool->attrs */
        BUSY_WORKER_HASH_ORDER  = 6,            /* 64 pointers */

        MAX_IDLE_WORKERS_RATIO  = 4,            /* 1/4 of busy can be idle */
        IDLE_WORKER_TIMEOUT     = 300 * HZ,     /* keep idle ones for 5 mins */

        MAYDAY_INITIAL_TIMEOUT  = HZ / 100 >= 2 ? HZ / 100 : 2,
                                                /* call for help after 10ms
                                                   (min two ticks) */
        MAYDAY_INTERVAL         = HZ / 10,      /* and then every 100ms */
        CREATE_COOLDOWN         = HZ,           /* time to breathe after fail */

        /*
         * Rescue workers are used only on emergencies and shared by
         * all cpus.  Give MIN_NICE.
         */
        RESCUER_NICE_LEVEL      = MIN_NICE,
        HIGHPRI_NICE_LEVEL      = MIN_NICE,

        WQ_NAME_LEN             = 32,
        WORKER_ID_LEN           = 10 + WQ_NAME_LEN, /* "kworker/R-" + WQ_NAME_LEN */
};
/*
 * We don't want to trap softirq for too long. See MAX_SOFTIRQ_TIME and
 * MAX_SOFTIRQ_RESTART in kernel/softirq.c. These are macros because
 * msecs_to_jiffies() can't be an initializer.
 */
#define BH_WORKER_JIFFIES       msecs_to_jiffies(2)
#define BH_WORKER_RESTARTS      10

/*
 * Structure fields follow one of the following exclusion rules.
 *
 * I: Modifiable by initialization/destruction paths and read-only for
 *    everyone else.
 *
 * P: Preemption protected.  Disabling preemption is enough and should
 *    only be modified and accessed from the local cpu.
 *
 * L: pool->lock protected.  Access with pool->lock held.
 *
 * LN: pool->lock and wq_node_nr_active->lock protected for writes. Either for
 *     reads.
 *
 * K: Only modified by worker while holding pool->lock. Can be safely read by
 *    self, while holding pool->lock or from IRQ context if %current is the
 *    kworker.
 *
 * S: Only modified by worker self.
 *
 * A: wq_pool_attach_mutex protected.
 *
 * PL: wq_pool_mutex protected.
 *
 * PR: wq_pool_mutex protected for writes.  RCU protected for reads.
 *
 * PW: wq_pool_mutex and wq->mutex protected for writes.  Either for reads.
 *
 * PWR: wq_pool_mutex and wq->mutex protected for writes.  Either or
 *      RCU for reads.
 *
 * WQ: wq->mutex protected.
 *
 * WR: wq->mutex protected for writes.  RCU protected for reads.
 *
 * WO: wq->mutex protected for writes. Updated with WRITE_ONCE() and can be read
 *     with READ_ONCE() without locking.
 *
 * MD: wq_mayday_lock protected.
 *
 * WD: Used internally by the watchdog.
 */

/* struct worker is defined in workqueue_internal.h */
struct worker_pool {
        raw_spinlock_t          lock;           /* the pool lock */
        int                     cpu;            /* I: the associated cpu */
        int                     node;           /* I: the associated node ID */
        int                     id;             /* I: pool ID */
        unsigned int            flags;          /* L: flags */

        unsigned long           watchdog_ts;    /* L: watchdog timestamp */
        bool                    cpu_stall;      /* WD: stalled cpu bound pool */

        /*
         * The counter is incremented in a process context on the associated CPU
         * w/ preemption disabled, and decremented or reset in the same context
         * but w/ pool->lock held. The readers grab pool->lock and are
         * guaranteed to see if the counter reached zero.
         */
        int                     nr_running;

        struct list_head        worklist;       /* L: list of pending works */

        int                     nr_workers;     /* L: total number of workers */
        int                     nr_idle;        /* L: currently idle workers */

        struct list_head        idle_list;      /* L: list of idle workers */
        struct timer_list       idle_timer;     /* L: worker idle timeout */
        struct work_struct      idle_cull_work; /* L: worker idle cleanup */

        struct timer_list       mayday_timer;   /* L: SOS timer for workers */

        /* a worker is either on busy_hash or idle_list, or the manager */
        DECLARE_HASHTABLE(busy_hash, BUSY_WORKER_HASH_ORDER);
                                                /* L: hash of busy workers */

        struct worker           *manager;       /* L: purely informational */
        struct list_head        workers;        /* A: attached workers */

        struct ida              worker_ida;     /* worker IDs for task name */

        struct workqueue_attrs  *attrs;         /* I: worker attributes */
        struct hlist_node       hash_node;      /* PL: unbound_pool_hash node */
        int                     refcnt;         /* PL: refcnt for unbound pools */

        /*
         * Destruction of pool is RCU protected to allow dereferences
         * from get_work_pool().
         */
        struct rcu_head         rcu;
};

/*
 * Per-pool_workqueue statistics. These can be monitored using
 * tools/workqueue/wq_monitor.py.
 */
enum pool_workqueue_stats {
        PWQ_STAT_STARTED,       /* work items started execution */
        PWQ_STAT_COMPLETED,     /* work items completed execution */
        PWQ_STAT_CPU_TIME,      /* total CPU time consumed */
        PWQ_STAT_CPU_INTENSIVE, /* wq_cpu_intensive_thresh_us violations */
        PWQ_STAT_CM_WAKEUP,     /* concurrency-management worker wakeups */
        PWQ_STAT_REPATRIATED,   /* unbound workers brought back into scope */
        PWQ_STAT_MAYDAY,        /* maydays to rescuer */
        PWQ_STAT_RESCUED,       /* linked work items executed by rescuer */

        PWQ_NR_STATS,
};
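/*
 * Editorial sketch (not in the original source) of the "L:" rule above:
 * fields such as pool->worklist may only be accessed with pool->lock held;
 * my_pool_has_work() is hypothetical.
 */
static bool my_pool_has_work(struct worker_pool *pool)
{
        bool busy;

        raw_spin_lock_irq(&pool->lock);
        busy = !list_empty(&pool->worklist);    /* L: needs pool->lock */
        raw_spin_unlock_irq(&pool->lock);

        return busy;
}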
/*
 * The per-pool workqueue.  While queued, bits below WORK_STRUCT_PWQ_SHIFT
 * of work_struct->data are used for flags and the remaining high bits
 * point to the pwq; thus, pwqs need to be aligned at two's power of the
 * number of flag bits.
 */
struct pool_workqueue {
        struct worker_pool      *pool;          /* I: the associated pool */
        struct workqueue_struct *wq;            /* I: the owning workqueue */
        int                     work_color;     /* L: current color */
        int                     flush_color;    /* L: flushing color */
        int                     refcnt;         /* L: reference count */
        int                     nr_in_flight[WORK_NR_COLORS];
                                                /* L: nr of in_flight works */
        bool                    plugged;        /* L: execution suspended */

        /*
         * nr_active management and WORK_STRUCT_INACTIVE:
         *
         * When pwq->nr_active >= max_active, new work item is queued to
         * pwq->inactive_works instead of pool->worklist and marked with
         * WORK_STRUCT_INACTIVE.
         *
         * All work items marked with WORK_STRUCT_INACTIVE do not participate in
         * nr_active and all work items in pwq->inactive_works are marked with
         * WORK_STRUCT_INACTIVE. But not all WORK_STRUCT_INACTIVE work items are
         * in pwq->inactive_works. Some of them are ready to run in
         * pool->worklist or worker->scheduled. Those work items are only struct
         * wq_barrier which is used for flush_work() and should not participate
         * in nr_active. For a non-barrier work item, it is marked with
         * WORK_STRUCT_INACTIVE iff it is in pwq->inactive_works.
         */
        int                     nr_active;      /* L: nr of active works */
        struct list_head        inactive_works; /* L: inactive works */
        struct list_head        pending_node;   /* LN: node on wq_node_nr_active->pending_pwqs */
        struct list_head        pwqs_node;      /* WR: node on wq->pwqs */
        struct list_head        mayday_node;    /* MD: node on wq->maydays */

        u64                     stats[PWQ_NR_STATS];

        /*
         * Release of unbound pwq is punted to a kthread_worker. See put_pwq()
         * and pwq_release_workfn() for details. pool_workqueue itself is also
         * RCU protected so that the first pwq can be determined without
         * grabbing wq->mutex.
         */
        struct kthread_work     release_work;
        struct rcu_head         rcu;
} __aligned(1 << WORK_STRUCT_PWQ_SHIFT);

/*
 * Structure used to wait for workqueue flush.
 */
struct wq_flusher {
        struct list_head        list;           /* WQ: list of flushers */
        int                     flush_color;    /* WQ: flush color waiting for */
        struct completion       done;           /* flush completion */
};

struct wq_device;

/*
 * Unlike in a per-cpu workqueue where max_active limits its concurrency level
 * on each CPU, in an unbound workqueue, max_active applies to the whole system.
 * As sharing a single nr_active across multiple sockets can be very expensive,
 * the counting and enforcement is per NUMA node.
 *
 * The following struct is used to enforce per-node max_active. When a pwq wants
 * to start executing a work item, it should increment ->nr using
 * tryinc_node_nr_active(). If acquisition fails due to ->nr already being over
 * ->max, the pwq is queued on ->pending_pwqs. As in-flight work items finish
 * and decrement ->nr, node_activate_pending_pwq() activates the pending pwqs in
 * round-robin order.
 */
struct wq_node_nr_active {
        int                     max;            /* per-node max_active */
        atomic_t                nr;             /* per-node nr_active */
        raw_spinlock_t          lock;           /* nests inside pool locks */
        struct list_head        pending_pwqs;   /* LN: pwqs with inactive works */
};
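/*
 * Editorial sketch (not in the original source) of the capped counter
 * described above: atomically take a slot in ->nr unless ->max is reached,
 * in which case the caller queues itself on ->pending_pwqs instead;
 * my_tryinc_nr_active() is hypothetical.
 */
static bool my_tryinc_nr_active(struct wq_node_nr_active *nna)
{
        int max = READ_ONCE(nna->max);
        int old = atomic_read(&nna->nr);

        do {
                if (old >= max)
                        return false;   /* over budget - go pend */
        } while (!atomic_try_cmpxchg_relaxed(&nna->nr, &old, old + 1));

        return true;
}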
/*
 * The externally visible workqueue.  It relays the issued work items to
 * the appropriate worker_pool through its pool_workqueues.
 */
struct workqueue_struct {
        struct list_head        pwqs;           /* WR: all pwqs of this wq */
        struct list_head        list;           /* PR: list of all workqueues */

        struct mutex            mutex;          /* protects this wq */
        int                     work_color;     /* WQ: current work color */
        int                     flush_color;    /* WQ: current flush color */
        atomic_t                nr_pwqs_to_flush; /* flush in progress */
        struct wq_flusher       *first_flusher; /* WQ: first flusher */
        struct list_head        flusher_queue;  /* WQ: flush waiters */
        struct list_head        flusher_overflow; /* WQ: flush overflow list */

        struct list_head        maydays;        /* MD: pwqs requesting rescue */
        struct worker           *rescuer;       /* MD: rescue worker */

        int                     nr_drainers;    /* WQ: drain in progress */

        /* See alloc_workqueue() function comment for info on min/max_active */
        int                     max_active;     /* WO: max active works */
        int                     min_active;     /* WO: min active works */
        int                     saved_max_active; /* WQ: saved max_active */
        int                     saved_min_active; /* WQ: saved min_active */

        struct workqueue_attrs  *unbound_attrs; /* PW: only for unbound wqs */
        struct pool_workqueue __rcu *dfl_pwq;   /* PW: only for unbound wqs */

#ifdef CONFIG_SYSFS
        struct wq_device        *wq_dev;        /* I: for sysfs interface */
#endif
#ifdef CONFIG_LOCKDEP
        char                    *lock_name;
        struct lock_class_key   key;
        struct lockdep_map      lockdep_map;
#endif
        char                    name[WQ_NAME_LEN]; /* I: workqueue name */

        /*
         * Destruction of workqueue_struct is RCU protected to allow walking
         * the workqueues list without grabbing wq_pool_mutex.
         * This is used to dump all workqueues from sysrq.
         */
        struct rcu_head         rcu;

        /* hot fields used during command issue, aligned to cacheline */
        unsigned int            flags ____cacheline_aligned; /* WQ: WQ_* flags */
        struct pool_workqueue __rcu * __percpu *cpu_pwq; /* I: per-cpu pwqs */
        struct wq_node_nr_active *node_nr_active[]; /* I: per-node nr_active */
};
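/*
 * Editorial example (not in the original source): how a workqueue backed by
 * these structures is created.  For an unbound workqueue, max_active caps
 * concurrency system-wide (enforced per NUMA node through wq_node_nr_active);
 * for a per-cpu workqueue it applies per CPU.  my_wq/my_init are hypothetical.
 */
static struct workqueue_struct *my_wq;

static int my_init(void)
{
        my_wq = alloc_workqueue("my_wq", WQ_UNBOUND, 16);
        return my_wq ? 0 : -ENOMEM;
}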
/*
 * Each pod type describes how CPUs should be grouped for unbound workqueues.
 * See the comment above workqueue_attrs->affn_scope.
 */
struct wq_pod_type {
        int                     nr_pods;        /* number of pods */
        cpumask_var_t           *pod_cpus;      /* pod -> cpus */
        int                     *pod_node;      /* pod -> node */
        int                     *cpu_pod;       /* cpu -> pod */
};

struct work_offq_data {
        u32                     pool_id;
        u32                     disable;
        u32                     flags;
};

static const char *wq_affn_names[WQ_AFFN_NR_TYPES] = {
        [WQ_AFFN_DFL]           = "default",
        [WQ_AFFN_CPU]           = "cpu",
        [WQ_AFFN_SMT]           = "smt",
        [WQ_AFFN_CACHE]         = "cache",
        [WQ_AFFN_NUMA]          = "numa",
        [WQ_AFFN_SYSTEM]        = "system",
};

/*
 * Per-cpu work items which run for longer than the following threshold are
 * automatically considered CPU intensive and excluded from concurrency
 * management to prevent them from noticeably delaying other per-cpu work items.
 * ULONG_MAX indicates that the user hasn't overridden it with a boot parameter.
 * The actual value is initialized in wq_cpu_intensive_thresh_init().
 */
static unsigned long wq_cpu_intensive_thresh_us = ULONG_MAX;
module_param_named(cpu_intensive_thresh_us, wq_cpu_intensive_thresh_us, ulong, 0644);
#ifdef CONFIG_WQ_CPU_INTENSIVE_REPORT
static unsigned int wq_cpu_intensive_warning_thresh = 4;
module_param_named(cpu_intensive_warning_thresh, wq_cpu_intensive_warning_thresh, uint, 0644);
#endif

/* see the comment above the definition of WQ_POWER_EFFICIENT */
static bool wq_power_efficient = IS_ENABLED(CONFIG_WQ_POWER_EFFICIENT_DEFAULT);
module_param_named(power_efficient, wq_power_efficient, bool, 0444);

static bool wq_online;                  /* can kworkers be created yet? */
static bool wq_topo_initialized __read_mostly = false;

static struct kmem_cache *pwq_cache;

static struct wq_pod_type wq_pod_types[WQ_AFFN_NR_TYPES];
static enum wq_affn_scope wq_affn_dfl = WQ_AFFN_CACHE;

/* buf for wq_update_unbound_pod_attrs(), protected by CPU hotplug exclusion */
static struct workqueue_attrs *unbound_wq_update_pwq_attrs_buf;

static DEFINE_MUTEX(wq_pool_mutex);     /* protects pools and workqueues list */
static DEFINE_MUTEX(wq_pool_attach_mutex); /* protects worker attach/detach */
static DEFINE_RAW_SPINLOCK(wq_mayday_lock);     /* protects wq->maydays list */
/* wait for manager to go away */
static struct rcuwait manager_wait = __RCUWAIT_INITIALIZER(manager_wait);

static LIST_HEAD(workqueues);           /* PR: list of all workqueues */
static bool workqueue_freezing;         /* PL: have wqs started freezing? */
/* PL: mirror the cpu_online_mask excluding the CPU in the midst of hotplugging */
static cpumask_var_t wq_online_cpumask;

/* PL&A: allowable cpus for unbound wqs and work items */
static cpumask_var_t wq_unbound_cpumask;

/* PL: user requested unbound cpumask via sysfs */
static cpumask_var_t wq_requested_unbound_cpumask;

/* PL: isolated cpumask to be excluded from unbound cpumask */
static cpumask_var_t wq_isolated_cpumask;

/* to further constrain wq_unbound_cpumask by the cmdline parameter */
static struct cpumask wq_cmdline_cpumask __initdata;

/* CPU where unbound work was last round robin scheduled from this CPU */
static DEFINE_PER_CPU(int, wq_rr_cpu_last);

/*
 * Local execution of unbound work items is no longer guaranteed.  The
 * following always forces round-robin CPU selection on unbound work items
 * to uncover usages which depend on it.
 */
#ifdef CONFIG_DEBUG_WQ_FORCE_RR_CPU
static bool wq_debug_force_rr_cpu = true;
#else
static bool wq_debug_force_rr_cpu = false;
#endif
module_param_named(debug_force_rr_cpu, wq_debug_force_rr_cpu, bool, 0644);

/* to raise softirq for the BH worker pools on other CPUs */
static DEFINE_PER_CPU_SHARED_ALIGNED(struct irq_work [NR_STD_WORKER_POOLS],
                                     bh_pool_irq_works);

/* the BH worker pools */
static DEFINE_PER_CPU_SHARED_ALIGNED(struct worker_pool [NR_STD_WORKER_POOLS],
                                     bh_worker_pools);

/* the per-cpu worker pools */
static DEFINE_PER_CPU_SHARED_ALIGNED(struct worker_pool [NR_STD_WORKER_POOLS],
                                     cpu_worker_pools);

static DEFINE_IDR(worker_pool_idr);     /* PR: idr of all pools */

/* PL: hash of all unbound pools keyed by pool->attrs */
static DEFINE_HASHTABLE(unbound_pool_hash, UNBOUND_POOL_HASH_ORDER);

/* I: attributes used when instantiating standard unbound pools on demand */
static struct workqueue_attrs *unbound_std_wq_attrs[NR_STD_WORKER_POOLS];

/* I: attributes used when instantiating ordered pools on demand */
static struct workqueue_attrs *ordered_wq_attrs[NR_STD_WORKER_POOLS];

/*
 * I: kthread_worker to release pwq's. pwq release needs to be bounced to a
 * process context while holding a pool lock. Bounce to a dedicated kthread
 * worker to avoid A-A deadlocks.
 */
static struct kthread_worker *pwq_release_worker __ro_after_init;

struct workqueue_struct *system_wq __ro_after_init;
EXPORT_SYMBOL(system_wq);
struct workqueue_struct *system_highpri_wq __ro_after_init;
EXPORT_SYMBOL_GPL(system_highpri_wq);
struct workqueue_struct *system_long_wq __ro_after_init;
EXPORT_SYMBOL_GPL(system_long_wq);
struct workqueue_struct *system_unbound_wq __ro_after_init;
EXPORT_SYMBOL_GPL(system_unbound_wq);
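/*
 * Editorial example (not in the original source): picking among the
 * system-wide workqueues declared here; my_queue() is hypothetical.
 */
static void my_queue(struct work_struct *short_work,
                     struct work_struct *slow_work)
{
        queue_work(system_wq, short_work);              /* short, per-cpu work */
        queue_work(system_unbound_wq, slow_work);       /* may run long, any CPU */
}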
struct workqueue_struct *system_freezable_wq __ro_after_init;
EXPORT_SYMBOL_GPL(system_freezable_wq);
struct workqueue_struct *system_power_efficient_wq __ro_after_init;
EXPORT_SYMBOL_GPL(system_power_efficient_wq);
struct workqueue_struct *system_freezable_power_efficient_wq __ro_after_init;
EXPORT_SYMBOL_GPL(system_freezable_power_efficient_wq);
struct workqueue_struct *system_bh_wq;
EXPORT_SYMBOL_GPL(system_bh_wq);
struct workqueue_struct *system_bh_highpri_wq;
EXPORT_SYMBOL_GPL(system_bh_highpri_wq);

static int worker_thread(void *__worker);
static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
static void show_pwq(struct pool_workqueue *pwq);
static void show_one_worker_pool(struct worker_pool *pool);

#define CREATE_TRACE_POINTS
#include <trace/events/workqueue.h>

#define assert_rcu_or_pool_mutex()                                      \
        RCU_LOCKDEP_WARN(!rcu_read_lock_any_held() &&                   \
                         !lockdep_is_held(&wq_pool_mutex),              \
                         "RCU or wq_pool_mutex should be held")

#define assert_rcu_or_wq_mutex_or_pool_mutex(wq)                        \
        RCU_LOCKDEP_WARN(!rcu_read_lock_any_held() &&                   \
                         !lockdep_is_held(&wq->mutex) &&                \
                         !lockdep_is_held(&wq_pool_mutex),              \
                         "RCU, wq->mutex or wq_pool_mutex should be held")

#define for_each_bh_worker_pool(pool, cpu)                              \
        for ((pool) = &per_cpu(bh_worker_pools, cpu)[0];                \
             (pool) < &per_cpu(bh_worker_pools, cpu)[NR_STD_WORKER_POOLS]; \
             (pool)++)

#define for_each_cpu_worker_pool(pool, cpu)                             \
        for ((pool) = &per_cpu(cpu_worker_pools, cpu)[0];               \
             (pool) < &per_cpu(cpu_worker_pools, cpu)[NR_STD_WORKER_POOLS]; \
             (pool)++)

/**
 * for_each_pool - iterate through all worker_pools in the system
 * @pool: iteration cursor
 * @pi: integer used for iteration
 *
 * This must be called either with wq_pool_mutex held or RCU read
 * locked.  If the pool needs to be used beyond the locking in effect, the
 * caller is responsible for guaranteeing that the pool stays online.
 *
 * The if/else clause exists only for the lockdep assertion and can be
 * ignored.
 */
#define for_each_pool(pool, pi)                                         \
        idr_for_each_entry(&worker_pool_idr, pool, pi)                  \
                if (({ assert_rcu_or_pool_mutex(); false; })) { }       \
                else
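/*
 * Editorial note (not in the original source): the "if (({ ...; false; })) { }
 * else" shape used by for_each_pool() and friends hangs an assertion off a
 * for-macro without consuming the loop body the user writes after the macro.
 * The same trick in isolation; my_lock and the entry member are placeholders:
 */
#define my_for_each_entry_checked(pos, head)                            \
        list_for_each_entry((pos), (head), entry)                       \
                if (({ lockdep_assert_held(&my_lock); false; })) { }    \
                else    /* the user's loop body binds to this else */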
/**
 * for_each_pool_worker - iterate through all workers of a worker_pool
 * @worker: iteration cursor
 * @pool: worker_pool to iterate workers of
 *
 * This must be called with wq_pool_attach_mutex held.
 *
 * The if/else clause exists only for the lockdep assertion and can be
 * ignored.
 */
#define for_each_pool_worker(worker, pool)                              \
        list_for_each_entry((worker), &(pool)->workers, node)           \
                if (({ lockdep_assert_held(&wq_pool_attach_mutex); false; })) { } \
                else

/**
 * for_each_pwq - iterate through all pool_workqueues of the specified workqueue
 * @pwq: iteration cursor
 * @wq: the target workqueue
 *
 * This must be called either with wq->mutex held or RCU read locked.
 * If the pwq needs to be used beyond the locking in effect, the caller is
 * responsible for guaranteeing that the pwq stays online.
 *
 * The if/else clause exists only for the lockdep assertion and can be
 * ignored.
 */
#define for_each_pwq(pwq, wq)                                           \
        list_for_each_entry_rcu((pwq), &(wq)->pwqs, pwqs_node,          \
                                lockdep_is_held(&(wq->mutex)))
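/*
 * Editorial example (not in the original source): typical reader-side use of
 * for_each_pwq().  RCU protects the traversal itself; using a pwq beyond the
 * read-side critical section needs wq->mutex as documented above.
 * my_count_pwqs() is hypothetical.
 */
static int my_count_pwqs(struct workqueue_struct *wq)
{
        struct pool_workqueue *pwq;
        int nr = 0;

        rcu_read_lock();
        for_each_pwq(pwq, wq)
                nr++;
        rcu_read_unlock();

        return nr;
}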
#ifdef CONFIG_DEBUG_OBJECTS_WORK

static const struct debug_obj_descr work_debug_descr;

static void *work_debug_hint(void *addr)
{
        return ((struct work_struct *) addr)->func;
}

static bool work_is_static_object(void *addr)
{
        struct work_struct *work = addr;

        return test_bit(WORK_STRUCT_STATIC_BIT, work_data_bits(work));
}

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static bool work_fixup_init(void *addr, enum debug_obj_state state)
{
        struct work_struct *work = addr;

        switch (state) {
        case ODEBUG_STATE_ACTIVE:
                cancel_work_sync(work);
                debug_object_init(work, &work_debug_descr);
                return true;
        default:
                return false;
        }
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static bool work_fixup_free(void *addr, enum debug_obj_state state)
{
        struct work_struct *work = addr;

        switch (state) {
        case ODEBUG_STATE_ACTIVE:
                cancel_work_sync(work);
                debug_object_free(work, &work_debug_descr);
                return true;
        default:
                return false;
        }
}
static const struct debug_obj_descr work_debug_descr = {
        .name           = "work_struct",
        .debug_hint     = work_debug_hint,
        .is_static_object = work_is_static_object,
        .fixup_init     = work_fixup_init,
        .fixup_free     = work_fixup_free,
};

static inline void debug_work_activate(struct work_struct *work)
{
        debug_object_activate(work, &work_debug_descr);
}

static inline void debug_work_deactivate(struct work_struct *work)
{
        debug_object_deactivate(work, &work_debug_descr);
}

void __init_work(struct work_struct *work, int onstack)
{
        if (onstack)
                debug_object_init_on_stack(work, &work_debug_descr);
        else
                debug_object_init(work, &work_debug_descr);
}
EXPORT_SYMBOL_GPL(__init_work);

void destroy_work_on_stack(struct work_struct *work)
{
        debug_object_free(work, &work_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_work_on_stack);

void destroy_delayed_work_on_stack(struct delayed_work *work)
{
        destroy_timer_on_stack(&work->timer);
        debug_object_free(&work->work, &work_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_delayed_work_on_stack);

#else
static inline void debug_work_activate(struct work_struct *work) { }
static inline void debug_work_deactivate(struct work_struct *work) { }
#endif
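/*
 * Editorial example (not in the original source): an on-stack work item
 * pairs INIT_WORK_ONSTACK() with destroy_work_on_stack() so the debugobjects
 * hooks above can track its lifetime; my_run_on_stack() is hypothetical.
 */
static void my_run_on_stack(work_func_t fn)
{
        struct work_struct work;

        INIT_WORK_ONSTACK(&work, fn);
        schedule_work(&work);
        flush_work(&work);      /* must finish before the frame goes away */
        destroy_work_on_stack(&work);
}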
/**
 * worker_pool_assign_id - allocate ID and assign it to @pool
 * @pool: the pool pointer of interest
 *
 * Returns 0 if ID in [0, WORK_OFFQ_POOL_NONE) is allocated and assigned
 * successfully, -errno on failure.
 */
static int worker_pool_assign_id(struct worker_pool *pool)
{
        int ret;

        lockdep_assert_held(&wq_pool_mutex);

        ret = idr_alloc(&worker_pool_idr, pool, 0, WORK_OFFQ_POOL_NONE,
                        GFP_KERNEL);
        if (ret >= 0) {
                pool->id = ret;
                return 0;
        }
        return ret;
}

static struct pool_workqueue __rcu **
unbound_pwq_slot(struct workqueue_struct *wq, int cpu)
{
        if (cpu >= 0)
                return per_cpu_ptr(wq->cpu_pwq, cpu);
        else
                return &wq->dfl_pwq;
}

/* @cpu < 0 for dfl_pwq */
static struct pool_workqueue *unbound_pwq(struct workqueue_struct *wq, int cpu)
{
        return rcu_dereference_check(*unbound_pwq_slot(wq, cpu),
                                     lockdep_is_held(&wq_pool_mutex) ||
                                     lockdep_is_held(&wq->mutex));
}

/**
 * unbound_effective_cpumask - effective cpumask of an unbound workqueue
 * @wq: workqueue of interest
 *
 * @wq->unbound_attrs->cpumask contains the cpumask requested by the user which
 * is masked with wq_unbound_cpumask to determine the effective cpumask. The
 * default pwq is always mapped to the pool with the current effective cpumask.
 */
static struct cpumask *unbound_effective_cpumask(struct workqueue_struct *wq)
{
        return unbound_pwq(wq, -1)->pool->attrs->__pod_cpumask;
}

static unsigned int work_color_to_flags(int color)
{
        return color << WORK_STRUCT_COLOR_SHIFT;
}

static int get_work_color(unsigned long work_data)
{
        return (work_data >> WORK_STRUCT_COLOR_SHIFT) &
                ((1 << WORK_STRUCT_COLOR_BITS) - 1);
}

static int work_next_color(int color)
{
        return (color + 1) % WORK_NR_COLORS;
}

static unsigned long pool_offq_flags(struct worker_pool *pool)
{
        return (pool->flags & POOL_BH) ? WORK_OFFQ_BH : 0;
}

/*
 * While queued, %WORK_STRUCT_PWQ is set and non flag bits of a work's data
 * contain the pointer to the queued pwq.  Once execution starts, the flag
 * is cleared and the high bits contain OFFQ flags and pool ID.
 *
 * set_work_pwq() and set_work_pool_and_clear_pending()
 * can be used to set the pwq, pool or clear work->data. These functions should
 * only be called while the work is owned - ie. while the PENDING bit is set.
 *
 * get_work_pool() and get_work_pwq() can be used to obtain the pool or pwq
 * corresponding to a work.  Pool is available once the work has been
 * queued anywhere after initialization until it is sync canceled.  pwq is
 * available only while the work item is queued.
 */
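/*
 * Editorial sketch (not in the original source) of the encoding described
 * above: pwqs are aligned to 1 << WORK_STRUCT_PWQ_SHIFT, so the low bits of
 * the pointer are known to be zero and can carry the flag bits; my_pack()
 * is hypothetical.
 */
static unsigned long my_pack(struct pool_workqueue *pwq, unsigned long flags)
{
        /* flags must fit below WORK_STRUCT_PWQ_SHIFT */
        WARN_ON_ONCE((unsigned long)pwq & ((1UL << WORK_STRUCT_PWQ_SHIFT) - 1));
        return (unsigned long)pwq | flags;
}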
static inline void set_work_data(struct work_struct *work, unsigned long data)
{
        WARN_ON_ONCE(!work_pending(work));
        atomic_long_set(&work->data, data | work_static(work));
}

static void set_work_pwq(struct work_struct *work, struct pool_workqueue *pwq,
                         unsigned long flags)
{
        set_work_data(work, (unsigned long)pwq | WORK_STRUCT_PENDING |
                      WORK_STRUCT_PWQ | flags);
}

static void set_work_pool_and_keep_pending(struct work_struct *work,
                                           int pool_id, unsigned long flags)
{
        set_work_data(work, ((unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT) |
                      WORK_STRUCT_PENDING | flags);
}

static void set_work_pool_and_clear_pending(struct work_struct *work,
                                            int pool_id, unsigned long flags)
{
        /*
         * The following wmb is paired with the implied mb in
         * test_and_set_bit(PENDING) and ensures all updates to @work made
         * here are visible to and precede any updates by the next PENDING
         * owner.
         */
        smp_wmb();
        set_work_data(work, ((unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT) |
                      flags);
        /*
         * The following mb guarantees that previous clear of a PENDING bit
         * will not be reordered with any speculative LOADS or STORES from
         * work->current_func, which is executed afterwards.  This possible
         * reordering can lead to a missed execution on attempt to queue
         * the same @work.  E.g. consider this case:
         *
         *   CPU#0                         CPU#1
         *   ----------------------------  --------------------------------
         *
         * 1  STORE event_indicated
         * 2  queue_work_on() {
         * 3    test_and_set_bit(PENDING)
         * 4 }                             set_..._and_clear_pending() {
         * 5                                 set_work_data() # clear bit
         * 6                                 smp_mb()
         * 7                               work->current_func() {
         * 8                                 LOAD event_indicated
         *                                 }
         *
         * Without an explicit full barrier speculative LOAD on line 8 can
         * be executed before CPU#0 does STORE on line 1.  If that happens,
         * CPU#0 observes the PENDING bit is still set and new execution of
         * a @work is not queued in the hope that CPU#1 will eventually
         * finish the queued @work.  Meanwhile CPU#1 does not see
         * event_indicated is set, because speculative LOAD was executed
         * before actual STORE.
         */
        smp_mb();
}
static inline struct pool_workqueue *work_struct_pwq(unsigned long data)
{
        return (struct pool_workqueue *)(data & WORK_STRUCT_PWQ_MASK);
}

static struct pool_workqueue *get_work_pwq(struct work_struct *work)
{
        unsigned long data = atomic_long_read(&work->data);

        if (data & WORK_STRUCT_PWQ)
                return work_struct_pwq(data);
        else
                return NULL;
}

/**
 * get_work_pool - return the worker_pool a given work was associated with
 * @work: the work item of interest
 *
 * Pools are created and destroyed under wq_pool_mutex, and allow read
 * access under RCU read lock.  As such, this function should be
 * called under wq_pool_mutex or inside of a rcu_read_lock() region.
 *
 * All fields of the returned pool are accessible as long as the above
 * mentioned locking is in effect.  If the returned pool needs to be used
 * beyond the critical section, the caller is responsible for ensuring the
 * returned pool is and stays online.
 *
 * Return: The worker_pool @work was last associated with.  %NULL if none.
 */
static struct worker_pool *get_work_pool(struct work_struct *work)
{
        unsigned long data = atomic_long_read(&work->data);
        int pool_id;

        assert_rcu_or_pool_mutex();

        if (data & WORK_STRUCT_PWQ)
                return work_struct_pwq(data)->pool;

        pool_id = data >> WORK_OFFQ_POOL_SHIFT;
        if (pool_id == WORK_OFFQ_POOL_NONE)
                return NULL;

        return idr_find(&worker_pool_idr, pool_id);
}

static unsigned long shift_and_mask(unsigned long v, u32 shift, u32 bits)
{
        return (v >> shift) & ((1U << bits) - 1);
}

static void work_offqd_unpack(struct work_offq_data *offqd, unsigned long data)
{
        WARN_ON_ONCE(data & WORK_STRUCT_PWQ);

        offqd->pool_id = shift_and_mask(data, WORK_OFFQ_POOL_SHIFT,
                                        WORK_OFFQ_POOL_BITS);
        offqd->disable = shift_and_mask(data, WORK_OFFQ_DISABLE_SHIFT,
                                        WORK_OFFQ_DISABLE_BITS);
        offqd->flags = data & WORK_OFFQ_FLAG_MASK;
}

static unsigned long work_offqd_pack_flags(struct work_offq_data *offqd)
{
        return ((unsigned long)offqd->disable << WORK_OFFQ_DISABLE_SHIFT) |
                ((unsigned long)offqd->flags);
}

/*
 * Policy functions.  These define the policies on how the global worker
 * pools are managed.  Unless noted otherwise, these functions assume that
 * they're being called with pool->lock held.
 */

/*
 * Need to wake up a worker?  Called from anything but currently
 * running workers.
 *
 * Note that, because unbound workers never contribute to nr_running, this
 * function will always return %true for unbound pools as long as the
 * worklist isn't empty.
 */
static bool need_more_worker(struct worker_pool *pool)
{
        return !list_empty(&pool->worklist) && !pool->nr_running;
}

/* Can I start working?  Called from busy but !running workers. */
static bool may_start_working(struct worker_pool *pool)
{
        return pool->nr_idle;
}

/* Do I need to keep working?  Called from currently running workers. */
static bool keep_working(struct worker_pool *pool)
{
        return !list_empty(&pool->worklist) && (pool->nr_running <= 1);
}

/* Do we need a new worker?  Called from manager. */
static bool need_to_create_worker(struct worker_pool *pool)
{
        return need_more_worker(pool) && !may_start_working(pool);
}
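/*
 * Editorial worked example (not in the original source): on an unbound pool,
 * every worker has WORKER_UNBOUND set and therefore never counts toward
 * nr_running, so need_more_worker() is true whenever the worklist is
 * non-empty and every queued item triggers a wakeup.  On a bound pool,
 * keep_working() with nr_running == 1 still returns true for a non-empty
 * worklist because the one running worker is the caller itself.
 */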
/* Do we have too many workers and should some go away? */
static bool too_many_workers(struct worker_pool *pool)
{
        bool managing = pool->flags & POOL_MANAGER_ACTIVE;
        int nr_idle = pool->nr_idle + managing; /* manager is considered idle */
        int nr_busy = pool->nr_workers - nr_idle;

        return nr_idle > 2 && (nr_idle - 2) * MAX_IDLE_WORKERS_RATIO >= nr_busy;
}
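/*
 * Editorial worked example (not in the original source): with
 * MAX_IDLE_WORKERS_RATIO == 4 and 16 busy workers, nr_idle == 5 is fine
 * ((5 - 2) * 4 = 12 < 16) but nr_idle == 6 is not ((6 - 2) * 4 = 16 >= 16),
 * so too_many_workers() turns true and idle workers start being retired
 * after IDLE_WORKER_TIMEOUT.
 */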
/**
 * worker_set_flags - set worker flags and adjust nr_running accordingly
 * @worker: self
 * @flags: flags to set
 *
 * Set @flags in @worker->flags and adjust nr_running accordingly.
 */
static inline void worker_set_flags(struct worker *worker, unsigned int flags)
{
        struct worker_pool *pool = worker->pool;

        lockdep_assert_held(&pool->lock);

        /* If transitioning into NOT_RUNNING, adjust nr_running. */
        if ((flags & WORKER_NOT_RUNNING) &&
            !(worker->flags & WORKER_NOT_RUNNING)) {
                pool->nr_running--;
        }

        worker->flags |= flags;
}

/**
 * worker_clr_flags - clear worker flags and adjust nr_running accordingly
 * @worker: self
 * @flags: flags to clear
 *
 * Clear @flags in @worker->flags and adjust nr_running accordingly.
 */
static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
{
        struct worker_pool *pool = worker->pool;
        unsigned int oflags = worker->flags;

        lockdep_assert_held(&pool->lock);

        worker->flags &= ~flags;

        /*
         * If transitioning out of NOT_RUNNING, increment nr_running.  Note
         * that the nested NOT_RUNNING is not a noop.  NOT_RUNNING is a mask
         * of multiple flags, not a single flag.
         */
        if ((flags & WORKER_NOT_RUNNING) && (oflags & WORKER_NOT_RUNNING))
                if (!(worker->flags & WORKER_NOT_RUNNING))
                        pool->nr_running++;
}

/* Return the first idle worker.  Called with pool->lock held. */
static struct worker *first_idle_worker(struct worker_pool *pool)
{
        if (unlikely(list_empty(&pool->idle_list)))
                return NULL;

        return list_first_entry(&pool->idle_list, struct worker, entry);
}

/**
 * worker_enter_idle - enter idle state
 * @worker: worker which is entering idle state
 *
 * @worker is entering idle state.  Update stats and idle timer if
 * necessary.
 *
 * LOCKING:
 * raw_spin_lock_irq(pool->lock).
 */
static void worker_enter_idle(struct worker *worker)
{
        struct worker_pool *pool = worker->pool;

        if (WARN_ON_ONCE(worker->flags & WORKER_IDLE) ||
            WARN_ON_ONCE(!list_empty(&worker->entry) &&
                         (worker->hentry.next || worker->hentry.pprev)))
                return;

        /* can't use worker_set_flags(), also called from create_worker() */
        worker->flags |= WORKER_IDLE;
        pool->nr_idle++;
        worker->last_active = jiffies;

        /* idle_list is LIFO */
        list_add(&worker->entry, &pool->idle_list);

        if (too_many_workers(pool) && !timer_pending(&pool->idle_timer))
                mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT);

        /* Sanity check nr_running. */
        WARN_ON_ONCE(pool->nr_workers == pool->nr_idle && pool->nr_running);
}
909 WARN_ON_ONCE(worker->task != current); 1039 WARN_ON_ONCE(!list_empty(&worker- << 1040 (worker->hentry.next << 1041 return; << 1042 << 1043 /* can't use worker_set_flags(), also << 1044 worker->flags |= WORKER_IDLE; << 1045 pool->nr_idle++; << 1046 worker->last_active = jiffies; << 1047 910 1048 /* idle_list is LIFO */ !! 911 /* 1049 list_add(&worker->entry, &pool->idle_ !! 912 * If transitioning into NOT_RUNNING, adjust nr_running and 1050 !! 913 * wake up an idle worker as necessary if requested by 1051 if (too_many_workers(pool) && !timer_ !! 914 * @wakeup. 1052 mod_timer(&pool->idle_timer, !! 915 */ >> 916 if ((flags & WORKER_NOT_RUNNING) && >> 917 !(worker->flags & WORKER_NOT_RUNNING)) { >> 918 if (wakeup) { >> 919 if (atomic_dec_and_test(&pool->nr_running) && >> 920 !list_empty(&pool->worklist)) >> 921 wake_up_worker(pool); >> 922 } else >> 923 atomic_dec(&pool->nr_running); >> 924 } 1053 925 1054 /* Sanity check nr_running. */ !! 926 worker->flags |= flags; 1055 WARN_ON_ONCE(pool->nr_workers == pool << 1056 } 927 } 1057 928 1058 /** 929 /** 1059 * worker_leave_idle - leave idle state !! 930 * worker_clr_flags - clear worker flags and adjust nr_running accordingly 1060 * @worker: worker which is leaving idle stat !! 931 * @worker: self >> 932 * @flags: flags to clear 1061 * 933 * 1062 * @worker is leaving idle state. Update sta !! 934 * Clear @flags in @worker->flags and adjust nr_running accordingly. 1063 * 935 * 1064 * LOCKING: !! 936 * CONTEXT: 1065 * raw_spin_lock_irq(pool->lock). !! 937 * spin_lock_irq(pool->lock) 1066 */ 938 */ 1067 static void worker_leave_idle(struct worker * !! 939 static inline void worker_clr_flags(struct worker *worker, unsigned int flags) 1068 { 940 { 1069 struct worker_pool *pool = worker->po 941 struct worker_pool *pool = worker->pool; >> 942 unsigned int oflags = worker->flags; 1070 943 1071 if (WARN_ON_ONCE(!(worker->flags & WO !! 944 WARN_ON_ONCE(worker->task != current); 1072 return; !! 945 1073 worker_clr_flags(worker, WORKER_IDLE) !! 946 worker->flags &= ~flags; 1074 pool->nr_idle--; !! 947 1075 list_del_init(&worker->entry); !! 948 /* >> 949 * If transitioning out of NOT_RUNNING, increment nr_running. Note >> 950 * that the nested NOT_RUNNING is not a noop. NOT_RUNNING is mask >> 951 * of multiple flags, not a single flag. >> 952 */ >> 953 if ((flags & WORKER_NOT_RUNNING) && (oflags & WORKER_NOT_RUNNING)) >> 954 if (!(worker->flags & WORKER_NOT_RUNNING)) >> 955 atomic_inc(&pool->nr_running); 1076 } 956 } 1077 957 1078 /** 958 /** 1079 * find_worker_executing_work - find worker w 959 * find_worker_executing_work - find worker which is executing a work 1080 * @pool: pool of interest 960 * @pool: pool of interest 1081 * @work: work to find worker for 961 * @work: work to find worker for 1082 * 962 * 1083 * Find a worker which is executing @work on 963 * Find a worker which is executing @work on @pool by searching 1084 * @pool->busy_hash which is keyed by the add 964 * @pool->busy_hash which is keyed by the address of @work. For a worker 1085 * to match, its current execution should mat 965 * to match, its current execution should match the address of @work and 1086 * its work function. This is to avoid unwan 966 * its work function. This is to avoid unwanted dependency between 1087 * unrelated work executions through a work i 967 * unrelated work executions through a work item being recycled while still 1088 * being executed. 968 * being executed. 1089 * 969 * 1090 * This is a bit tricky. A work item may be 970 * This is a bit tricky. 
A work item may be freed once its execution 1091 * starts and nothing prevents the freed area 971 * starts and nothing prevents the freed area from being recycled for 1092 * another work item. If the same work item 972 * another work item. If the same work item address ends up being reused 1093 * before the original execution finishes, wo 973 * before the original execution finishes, workqueue will identify the 1094 * recycled work item as currently executing 974 * recycled work item as currently executing and make it wait until the 1095 * current execution finishes, introducing an 975 * current execution finishes, introducing an unwanted dependency. 1096 * 976 * 1097 * This function checks the work item address 977 * This function checks the work item address and work function to avoid 1098 * false positives. Note that this isn't com 978 * false positives. Note that this isn't complete as one may construct a 1099 * work function which can introduce dependen 979 * work function which can introduce dependency onto itself through a 1100 * recycled work item. Well, if somebody wan 980 * recycled work item. Well, if somebody wants to shoot oneself in the 1101 * foot that badly, there's only so much we c 981 * foot that badly, there's only so much we can do, and if such deadlock 1102 * actually occurs, it should be easy to loca 982 * actually occurs, it should be easy to locate the culprit work function. 1103 * 983 * 1104 * CONTEXT: 984 * CONTEXT: 1105 * raw_spin_lock_irq(pool->lock). !! 985 * spin_lock_irq(pool->lock). 1106 * 986 * 1107 * Return: !! 987 * RETURNS: 1108 * Pointer to worker which is executing @work !! 988 * Pointer to worker which is executing @work if found, NULL 1109 * otherwise. 989 * otherwise. 1110 */ 990 */ 1111 static struct worker *find_worker_executing_w 991 static struct worker *find_worker_executing_work(struct worker_pool *pool, 1112 992 struct work_struct *work) 1113 { 993 { 1114 struct worker *worker; 994 struct worker *worker; 1115 995 1116 hash_for_each_possible(pool->busy_has 996 hash_for_each_possible(pool->busy_hash, worker, hentry, 1117 (unsigned long 997 (unsigned long)work) 1118 if (worker->current_work == w 998 if (worker->current_work == work && 1119 worker->current_func == w 999 worker->current_func == work->func) 1120 return worker; 1000 return worker; 1121 1001 1122 return NULL; 1002 return NULL; 1123 } 1003 } 1124 1004 1125 /** 1005 /** 1126 * move_linked_works - move linked works to a 1006 * move_linked_works - move linked works to a list 1127 * @work: start of series of works to be sche 1007 * @work: start of series of works to be scheduled 1128 * @head: target list to append @work to 1008 * @head: target list to append @work to 1129 * @nextp: out parameter for nested worklist !! 1009 * @nextp: out paramter for nested worklist walking 1130 * 1010 * 1131 * Schedule linked works starting from @work !! 1011 * Schedule linked works starting from @work to @head. Work series to 1132 * scheduled starts at @work and includes any !! 1012 * be scheduled starts at @work and includes any consecutive work with 1133 * WORK_STRUCT_LINKED set in its predecessor. !! 1013 * WORK_STRUCT_LINKED set in its predecessor. 1134 * @nextp. !! 1014 * >> 1015 * If @nextp is not NULL, it's updated to point to the next work of >> 1016 * the last scheduled work. This allows move_linked_works() to be >> 1017 * nested inside outer list_for_each_entry_safe(). 1135 * 1018 * 1136 * CONTEXT: 1019 * CONTEXT: 1137 * raw_spin_lock_irq(pool->lock). !! 1020 * spin_lock_irq(pool->lock). 
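/*
 * Annotation (hypothetical example, not part of workqueue.c): the
 * (address, function) match in find_worker_executing_work() above is what
 * keeps a freed-and-recycled work struct from being serialized behind the
 * execution that is still using its old address.  A self-freeing work
 * item makes the hazard concrete:
 */
#include <linux/slab.h>
#include <linux/workqueue.h>

static void example_self_freeing_fn(struct work_struct *work)
{
	kfree(work);	/* legal: workqueue no longer touches @work */
}

static void example_queue_once(void)
{
	struct work_struct *w = kmalloc(sizeof(*w), GFP_KERNEL);

	if (!w)
		return;
	INIT_WORK(w, example_self_freeing_fn);
	schedule_work(w);
	/*
	 * While example_self_freeing_fn() is still running, kmalloc() may
	 * hand the same address to an unrelated work item.  Because the
	 * busy_hash lookup also compares current_func, that new item (with
	 * a different callback) is not mistaken for this one.
	 */
}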
1138 */ 1021 */ 1139 static void move_linked_works(struct work_str 1022 static void move_linked_works(struct work_struct *work, struct list_head *head, 1140 struct work_str 1023 struct work_struct **nextp) 1141 { 1024 { 1142 struct work_struct *n; 1025 struct work_struct *n; 1143 1026 1144 /* 1027 /* 1145 * Linked worklist will always end be 1028 * Linked worklist will always end before the end of the list, 1146 * use NULL for list head. 1029 * use NULL for list head. 1147 */ 1030 */ 1148 list_for_each_entry_safe_from(work, n 1031 list_for_each_entry_safe_from(work, n, NULL, entry) { 1149 list_move_tail(&work->entry, 1032 list_move_tail(&work->entry, head); 1150 if (!(*work_data_bits(work) & 1033 if (!(*work_data_bits(work) & WORK_STRUCT_LINKED)) 1151 break; 1034 break; 1152 } 1035 } 1153 1036 1154 /* 1037 /* 1155 * If we're already inside safe list 1038 * If we're already inside safe list traversal and have moved 1156 * multiple works to the scheduled qu 1039 * multiple works to the scheduled queue, the next position 1157 * needs to be updated. 1040 * needs to be updated. 1158 */ 1041 */ 1159 if (nextp) 1042 if (nextp) 1160 *nextp = n; 1043 *nextp = n; 1161 } 1044 } 1162 1045 1163 /** 1046 /** 1164 * assign_work - assign a work item and its l << 1165 * @work: work to assign << 1166 * @worker: worker to assign to << 1167 * @nextp: out parameter for nested worklist << 1168 * << 1169 * Assign @work and its linked work items to << 1170 * executed by another worker in the same poo << 1171 * << 1172 * If @nextp is not NULL, it's updated to poi << 1173 * scheduled work. This allows assign_work() << 1174 * list_for_each_entry_safe(). << 1175 * << 1176 * Returns %true if @work was successfully as << 1177 * was punted to another worker already execu << 1178 */ << 1179 static bool assign_work(struct work_struct *w << 1180 struct work_struct ** << 1181 { << 1182 struct worker_pool *pool = worker->po << 1183 struct worker *collision; << 1184 << 1185 lockdep_assert_held(&pool->lock); << 1186 << 1187 /* << 1188 * A single work shouldn't be execute << 1189 * __queue_work() ensures that @work << 1190 * while still running in the previou << 1191 * @work is not executed concurrently << 1192 * pool. Check whether anyone is alre << 1193 * defer the work to the currently ex << 1194 */ << 1195 collision = find_worker_executing_wor << 1196 if (unlikely(collision)) { << 1197 move_linked_works(work, &coll << 1198 return false; << 1199 } << 1200 << 1201 move_linked_works(work, &worker->sche << 1202 return true; << 1203 } << 1204 << 1205 static struct irq_work *bh_pool_irq_work(stru << 1206 { << 1207 int high = pool->attrs->nice == HIGHP << 1208 << 1209 return &per_cpu(bh_pool_irq_works, po << 1210 } << 1211 << 1212 static void kick_bh_pool(struct worker_pool * << 1213 { << 1214 #ifdef CONFIG_SMP << 1215 /* see drain_dead_softirq_workfn() fo << 1216 if (unlikely(pool->cpu != smp_process << 1217 !(pool->flags & POOL_BH_ << 1218 irq_work_queue_on(bh_pool_irq << 1219 return; << 1220 } << 1221 #endif << 1222 if (pool->attrs->nice == HIGHPRI_NICE << 1223 raise_softirq_irqoff(HI_SOFTI << 1224 else << 1225 raise_softirq_irqoff(TASKLET_ << 1226 } << 1227 << 1228 /** << 1229 * kick_pool - wake up an idle worker if nece << 1230 * @pool: pool to kick << 1231 * << 1232 * @pool may have pending work items. Wake up << 1233 * whether a worker was woken up. 
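/*
 * Annotation (generic sketch, made-up types, not from workqueue.c):
 * move_linked_works() above may consume a whole WORK_STRUCT_LINKED series
 * in one call, so it reports the new cursor through @nextp and the
 * caller's list_for_each_entry_safe() keeps walking from the right place.
 * The bare pattern:
 */
#include <linux/list.h>

struct example_item {
	struct list_head entry;
	bool linked;		/* stands in for WORK_STRUCT_LINKED */
};

static void example_move_series(struct example_item *it,
				struct list_head *head,
				struct list_head *dst,
				struct example_item **nextp)
{
	struct example_item *n;

	list_for_each_entry_safe_from(it, n, head, entry) {
		list_move_tail(&it->entry, dst);
		if (!it->linked)
			break;
	}
	if (nextp)
		*nextp = n;	/* outer _safe loop resumes past the series */
}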
<< 1234 */ << 1235 static bool kick_pool(struct worker_pool *poo << 1236 { << 1237 struct worker *worker = first_idle_wo << 1238 struct task_struct *p; << 1239 << 1240 lockdep_assert_held(&pool->lock); << 1241 << 1242 if (!need_more_worker(pool) || !worke << 1243 return false; << 1244 << 1245 if (pool->flags & POOL_BH) { << 1246 kick_bh_pool(pool); << 1247 return true; << 1248 } << 1249 << 1250 p = worker->task; << 1251 << 1252 #ifdef CONFIG_SMP << 1253 /* << 1254 * Idle @worker is about to execute @ << 1255 * opportunity to migrate @worker at << 1256 * wake_cpu field. Let's see if we wa << 1257 * execution locality. << 1258 * << 1259 * We're waking the worker that went << 1260 * chance that @worker is marked idle << 1261 * so, setting the wake_cpu won't do << 1262 * optimization and the race window i << 1263 * now. If this becomes pronounced, w << 1264 * still on cpu when picking an idle << 1265 * << 1266 * If @pool has non-strict affinity, << 1267 * its affinity scope. Repatriate. << 1268 */ << 1269 if (!pool->attrs->affn_strict && << 1270 !cpumask_test_cpu(p->wake_cpu, po << 1271 struct work_struct *work = li << 1272 << 1273 int wake_cpu = cpumask_any_an << 1274 << 1275 if (wake_cpu < nr_cpu_ids) { << 1276 p->wake_cpu = wake_cp << 1277 get_work_pwq(work)->s << 1278 } << 1279 } << 1280 #endif << 1281 wake_up_process(p); << 1282 return true; << 1283 } << 1284 << 1285 #ifdef CONFIG_WQ_CPU_INTENSIVE_REPORT << 1286 << 1287 /* << 1288 * Concurrency-managed per-cpu work items tha << 1289 * wq_cpu_intensive_thresh_us trigger the aut << 1290 * which prevents them from stalling other co << 1291 * work function keeps triggering this mechan << 1292 * should be using an unbound workqueue inste << 1293 * << 1294 * wq_cpu_intensive_report() tracks work func << 1295 * and report them so that they can be examin << 1296 * workqueues as appropriate. To avoid floodi << 1297 * function is tracked and reported with expo << 1298 */ << 1299 #define WCI_MAX_ENTS 128 << 1300 << 1301 struct wci_ent { << 1302 work_func_t func; << 1303 atomic64_t cnt; << 1304 struct hlist_node hash_node; << 1305 }; << 1306 << 1307 static struct wci_ent wci_ents[WCI_MAX_ENTS]; << 1308 static int wci_nr_ents; << 1309 static DEFINE_RAW_SPINLOCK(wci_lock); << 1310 static DEFINE_HASHTABLE(wci_hash, ilog2(WCI_M << 1311 << 1312 static struct wci_ent *wci_find_ent(work_func << 1313 { << 1314 struct wci_ent *ent; << 1315 << 1316 hash_for_each_possible_rcu(wci_hash, << 1317 (unsigned << 1318 if (ent->func == func) << 1319 return ent; << 1320 } << 1321 return NULL; << 1322 } << 1323 << 1324 static void wq_cpu_intensive_report(work_func << 1325 { << 1326 struct wci_ent *ent; << 1327 << 1328 restart: << 1329 ent = wci_find_ent(func); << 1330 if (ent) { << 1331 u64 cnt; << 1332 << 1333 /* << 1334 * Start reporting from the w << 1335 * exponentially. << 1336 */ << 1337 cnt = atomic64_inc_return_rel << 1338 if (wq_cpu_intensive_warning_ << 1339 cnt >= wq_cpu_intensive_w << 1340 is_power_of_2(cnt + 1 - w << 1341 printk_deferred(KERN_ << 1342 ent-> << 1343 atomi << 1344 return; << 1345 } << 1346 << 1347 /* << 1348 * @func is a new violation. Allocate << 1349 * is exhausted, something went reall << 1350 * noise already. 
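/*
 * Worked example (annotation): the report throttling above fires when
 * cnt >= wq_cpu_intensive_warning_thresh and cnt + 1 - thresh is a power
 * of two.  With a threshold of 4 that means cnt = 4, 5, 7, 11, 19, 35...,
 * i.e. an immediate report followed by exponentially growing gaps, so a
 * repeat offender stays visible without flooding the log:
 */
#include <linux/log2.h>

static bool example_should_report(u64 cnt, u64 thresh)
{
	return thresh && cnt >= thresh && is_power_of_2(cnt + 1 - thresh);
}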
<< 1351 */ << 1352 if (wci_nr_ents >= WCI_MAX_ENTS) << 1353 return; << 1354 << 1355 raw_spin_lock(&wci_lock); << 1356 << 1357 if (wci_nr_ents >= WCI_MAX_ENTS) { << 1358 raw_spin_unlock(&wci_lock); << 1359 return; << 1360 } << 1361 << 1362 if (wci_find_ent(func)) { << 1363 raw_spin_unlock(&wci_lock); << 1364 goto restart; << 1365 } << 1366 << 1367 ent = &wci_ents[wci_nr_ents++]; << 1368 ent->func = func; << 1369 atomic64_set(&ent->cnt, 0); << 1370 hash_add_rcu(wci_hash, &ent->hash_nod << 1371 << 1372 raw_spin_unlock(&wci_lock); << 1373 << 1374 goto restart; << 1375 } << 1376 << 1377 #else /* CONFIG_WQ_CPU_INTENSIVE_REPORT */ << 1378 static void wq_cpu_intensive_report(work_func << 1379 #endif /* CONFIG_WQ_CPU_INTENSIVE_REPORT */ << 1380 << 1381 /** << 1382 * wq_worker_running - a worker is running ag << 1383 * @task: task waking up << 1384 * << 1385 * This function is called when a worker retu << 1386 */ << 1387 void wq_worker_running(struct task_struct *ta << 1388 { << 1389 struct worker *worker = kthread_data( << 1390 << 1391 if (!READ_ONCE(worker->sleeping)) << 1392 return; << 1393 << 1394 /* << 1395 * If preempted by unbind_workers() b << 1396 * and the nr_running increment below << 1397 * and leave with an unexpected pool- << 1398 * pool. Protect against such race. << 1399 */ << 1400 preempt_disable(); << 1401 if (!(worker->flags & WORKER_NOT_RUNN << 1402 worker->pool->nr_running++; << 1403 preempt_enable(); << 1404 << 1405 /* << 1406 * CPU intensive auto-detection cares << 1407 * CPU without sleeping. Reset the st << 1408 */ << 1409 worker->current_at = worker->task->se << 1410 << 1411 WRITE_ONCE(worker->sleeping, 0); << 1412 } << 1413 << 1414 /** << 1415 * wq_worker_sleeping - a worker is going to << 1416 * @task: task going to sleep << 1417 * << 1418 * This function is called from schedule() wh << 1419 * going to sleep. << 1420 */ << 1421 void wq_worker_sleeping(struct task_struct *t << 1422 { << 1423 struct worker *worker = kthread_data( << 1424 struct worker_pool *pool; << 1425 << 1426 /* << 1427 * Rescuers, which may not have all t << 1428 * workers, also reach here, let's no << 1429 * checking NOT_RUNNING. << 1430 */ << 1431 if (worker->flags & WORKER_NOT_RUNNIN << 1432 return; << 1433 << 1434 pool = worker->pool; << 1435 << 1436 /* Return if preempted before wq_work << 1437 if (READ_ONCE(worker->sleeping)) << 1438 return; << 1439 << 1440 WRITE_ONCE(worker->sleeping, 1); << 1441 raw_spin_lock_irq(&pool->lock); << 1442 << 1443 /* << 1444 * Recheck in case unbind_workers() p << 1445 * want to decrement nr_running after << 1446 * and nr_running has been reset. << 1447 */ << 1448 if (worker->flags & WORKER_NOT_RUNNIN << 1449 raw_spin_unlock_irq(&pool->lo << 1450 return; << 1451 } << 1452 << 1453 pool->nr_running--; << 1454 if (kick_pool(pool)) << 1455 worker->current_pwq->stats[PW << 1456 << 1457 raw_spin_unlock_irq(&pool->lock); << 1458 } << 1459 << 1460 /** << 1461 * wq_worker_tick - a scheduler tick occurred << 1462 * @task: task currently running << 1463 * << 1464 * Called from sched_tick(). 
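/*
 * Annotation (illustrative usage, names made up): the auto-detection
 * described here is a safety net.  Work known to burn CPU for a long time
 * should opt out of concurrency management up front with WQ_CPU_INTENSIVE,
 * so its worker carries WORKER_CPU_INTENSIVE (part of WORKER_NOT_RUNNING)
 * and never stalls other per-cpu work:
 */
#include <linux/workqueue.h>

static void example_crunch_fn(struct work_struct *work)
{
	/* long CPU-bound computation */
}
static DECLARE_WORK(example_crunch, example_crunch_fn);

static int example_setup(void)
{
	struct workqueue_struct *wq;

	wq = alloc_workqueue("example_crunch", WQ_CPU_INTENSIVE, 0);
	if (!wq)
		return -ENOMEM;
	queue_work(wq, &example_crunch);
	return 0;
}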
We're in the IRQ << 1465 * worker's fields which follow the 'K' locki << 1466 */ << 1467 void wq_worker_tick(struct task_struct *task) << 1468 { << 1469 struct worker *worker = kthread_data( << 1470 struct pool_workqueue *pwq = worker-> << 1471 struct worker_pool *pool = worker->po << 1472 << 1473 if (!pwq) << 1474 return; << 1475 << 1476 pwq->stats[PWQ_STAT_CPU_TIME] += TICK << 1477 << 1478 if (!wq_cpu_intensive_thresh_us) << 1479 return; << 1480 << 1481 /* << 1482 * If the current worker is concurren << 1483 * longer than wq_cpu_intensive_thres << 1484 * CPU_INTENSIVE to avoid stalling ot << 1485 * << 1486 * Set @worker->sleeping means that @ << 1487 * switching out voluntarily and won' << 1488 * @pool->nr_running until it wakes u << 1489 * decrements ->nr_running, setting C << 1490 * double decrements. The task is rel << 1491 * We probably want to make this pret << 1492 */ << 1493 if ((worker->flags & WORKER_NOT_RUNNI << 1494 worker->task->se.sum_exec_runtime << 1495 wq_cpu_intensive_thresh_us * NSEC << 1496 return; << 1497 << 1498 raw_spin_lock(&pool->lock); << 1499 << 1500 worker_set_flags(worker, WORKER_CPU_I << 1501 wq_cpu_intensive_report(worker->curre << 1502 pwq->stats[PWQ_STAT_CPU_INTENSIVE]++; << 1503 << 1504 if (kick_pool(pool)) << 1505 pwq->stats[PWQ_STAT_CM_WAKEUP << 1506 << 1507 raw_spin_unlock(&pool->lock); << 1508 } << 1509 << 1510 /** << 1511 * wq_worker_last_func - retrieve worker's la << 1512 * @task: Task to retrieve last work function << 1513 * << 1514 * Determine the last function a worker execu << 1515 * the scheduler to get a worker's last known << 1516 * << 1517 * CONTEXT: << 1518 * raw_spin_lock_irq(rq->lock) << 1519 * << 1520 * This function is called during schedule() << 1521 * to sleep. It's used by psi to identify agg << 1522 * dequeuing, to allow periodic aggregation t << 1523 * worker is the last task in the system or c << 1524 * << 1525 * As this function doesn't involve any workq << 1526 * only returns stable values when called fro << 1527 * queuing and dequeuing paths, when @task, w << 1528 * is guaranteed to not be processing any wor << 1529 * << 1530 * Return: << 1531 * The last work function %current executed a << 1532 * hasn't executed any work yet. << 1533 */ << 1534 work_func_t wq_worker_last_func(struct task_s << 1535 { << 1536 struct worker *worker = kthread_data( << 1537 << 1538 return worker->last_func; << 1539 } << 1540 << 1541 /** << 1542 * wq_node_nr_active - Determine wq_node_nr_a << 1543 * @wq: workqueue of interest << 1544 * @node: NUMA node, can be %NUMA_NO_NODE << 1545 * << 1546 * Determine wq_node_nr_active to use for @wq << 1547 * << 1548 * - %NULL for per-cpu workqueues as they don << 1549 * << 1550 * - node_nr_active[nr_node_ids] if @node is << 1551 * << 1552 * - Otherwise, node_nr_active[@node]. << 1553 */ << 1554 static struct wq_node_nr_active *wq_node_nr_a << 1555 << 1556 { << 1557 if (!(wq->flags & WQ_UNBOUND)) << 1558 return NULL; << 1559 << 1560 if (node == NUMA_NO_NODE) << 1561 node = nr_node_ids; << 1562 << 1563 return wq->node_nr_active[node]; << 1564 } << 1565 << 1566 /** << 1567 * wq_update_node_max_active - Update per-nod << 1568 * @wq: workqueue to update << 1569 * @off_cpu: CPU that's going down, -1 if a C << 1570 * << 1571 * Update @wq->node_nr_active[]->max. @wq mus << 1572 * distributed among nodes according to the p << 1573 * cpus. 
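/*
 * Worked example (annotation): for the per-node limits this comment
 * describes, suppose max_active = 16, min_active = 8, and the effective
 * cpumask covers 8 CPUs split 6 + 2 across two nodes.  The loop below
 * then computes:
 *
 *	node 0: clamp(DIV_ROUND_UP(16 * 6, 8), 8, 16) = 12
 *	node 1: clamp(DIV_ROUND_UP(16 * 2, 8), 8, 16) = 8  (lifted to min_active)
 *
 * so nodes get max_active in proportion to their share of the CPUs, but
 * never less than min_active, preserving per-node forward progress:
 */
#include <linux/math.h>
#include <linux/minmax.h>

static int example_node_max(int max_active, int min_active,
			    int node_cpus, int total_cpus)
{
	return clamp(DIV_ROUND_UP(max_active * node_cpus, total_cpus),
		     min_active, max_active);
}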
The result is always between @wq->mi << 1574 */ << 1575 static void wq_update_node_max_active(struct << 1576 { << 1577 struct cpumask *effective = unbound_e << 1578 int min_active = READ_ONCE(wq->min_ac << 1579 int max_active = READ_ONCE(wq->max_ac << 1580 int total_cpus, node; << 1581 << 1582 lockdep_assert_held(&wq->mutex); << 1583 << 1584 if (!wq_topo_initialized) << 1585 return; << 1586 << 1587 if (off_cpu >= 0 && !cpumask_test_cpu << 1588 off_cpu = -1; << 1589 << 1590 total_cpus = cpumask_weight_and(effec << 1591 if (off_cpu >= 0) << 1592 total_cpus--; << 1593 << 1594 /* If all CPUs of the wq get offline, << 1595 if (unlikely(!total_cpus)) { << 1596 for_each_node(node) << 1597 wq_node_nr_active(wq, << 1598 << 1599 wq_node_nr_active(wq, NUMA_NO << 1600 return; << 1601 } << 1602 << 1603 for_each_node(node) { << 1604 int node_cpus; << 1605 << 1606 node_cpus = cpumask_weight_an << 1607 if (off_cpu >= 0 && cpu_to_no << 1608 node_cpus--; << 1609 << 1610 wq_node_nr_active(wq, node)-> << 1611 clamp(DIV_ROUND_UP(ma << 1612 min_active, max << 1613 } << 1614 << 1615 wq_node_nr_active(wq, NUMA_NO_NODE)-> << 1616 } << 1617 << 1618 /** << 1619 * get_pwq - get an extra reference on the sp 1047 * get_pwq - get an extra reference on the specified pool_workqueue 1620 * @pwq: pool_workqueue to get 1048 * @pwq: pool_workqueue to get 1621 * 1049 * 1622 * Obtain an extra reference on @pwq. The ca 1050 * Obtain an extra reference on @pwq. The caller should guarantee that 1623 * @pwq has positive refcnt and be holding th 1051 * @pwq has positive refcnt and be holding the matching pool->lock. 1624 */ 1052 */ 1625 static void get_pwq(struct pool_workqueue *pw 1053 static void get_pwq(struct pool_workqueue *pwq) 1626 { 1054 { 1627 lockdep_assert_held(&pwq->pool->lock) 1055 lockdep_assert_held(&pwq->pool->lock); 1628 WARN_ON_ONCE(pwq->refcnt <= 0); 1056 WARN_ON_ONCE(pwq->refcnt <= 0); 1629 pwq->refcnt++; 1057 pwq->refcnt++; 1630 } 1058 } 1631 1059 1632 /** 1060 /** 1633 * put_pwq - put a pool_workqueue reference 1061 * put_pwq - put a pool_workqueue reference 1634 * @pwq: pool_workqueue to put 1062 * @pwq: pool_workqueue to put 1635 * 1063 * 1636 * Drop a reference of @pwq. If its refcnt r 1064 * Drop a reference of @pwq. If its refcnt reaches zero, schedule its 1637 * destruction. The caller should be holding 1065 * destruction. The caller should be holding the matching pool->lock. 1638 */ 1066 */ 1639 static void put_pwq(struct pool_workqueue *pw 1067 static void put_pwq(struct pool_workqueue *pwq) 1640 { 1068 { 1641 lockdep_assert_held(&pwq->pool->lock) 1069 lockdep_assert_held(&pwq->pool->lock); 1642 if (likely(--pwq->refcnt)) 1070 if (likely(--pwq->refcnt)) 1643 return; 1071 return; >> 1072 if (WARN_ON_ONCE(!(pwq->wq->flags & WQ_UNBOUND))) >> 1073 return; 1644 /* 1074 /* 1645 * @pwq can't be released under pool- !! 1075 * @pwq can't be released under pool->lock, bounce to 1646 * kthread_worker to avoid A-A deadlo !! 1076 * pwq_unbound_release_workfn(). This never recurses on the same >> 1077 * pool->lock as this path is taken only for unbound workqueues and >> 1078 * the release work item is scheduled on a per-cpu workqueue. To >> 1079 * avoid lockdep warning, unbound pool->locks are given lockdep >> 1080 * subclass of 1 in get_unbound_pool(). 1647 */ 1081 */ 1648 kthread_queue_work(pwq_release_worker !! 
1082 schedule_work(&pwq->unbound_release_work); 1649 } 1083 } 1650 1084 1651 /** 1085 /** 1652 * put_pwq_unlocked - put_pwq() with surround 1086 * put_pwq_unlocked - put_pwq() with surrounding pool lock/unlock 1653 * @pwq: pool_workqueue to put (can be %NULL) 1087 * @pwq: pool_workqueue to put (can be %NULL) 1654 * 1088 * 1655 * put_pwq() with locking. This function als 1089 * put_pwq() with locking. This function also allows %NULL @pwq. 1656 */ 1090 */ 1657 static void put_pwq_unlocked(struct pool_work 1091 static void put_pwq_unlocked(struct pool_workqueue *pwq) 1658 { 1092 { 1659 if (pwq) { 1093 if (pwq) { 1660 /* 1094 /* 1661 * As both pwqs and pools are !! 1095 * As both pwqs and pools are sched-RCU protected, the 1662 * following lock operations 1096 * following lock operations are safe. 1663 */ 1097 */ 1664 raw_spin_lock_irq(&pwq->pool- !! 1098 spin_lock_irq(&pwq->pool->lock); 1665 put_pwq(pwq); 1099 put_pwq(pwq); 1666 raw_spin_unlock_irq(&pwq->poo !! 1100 spin_unlock_irq(&pwq->pool->lock); 1667 } 1101 } 1668 } 1102 } 1669 1103 1670 static bool pwq_is_empty(struct pool_workqueu !! 1104 static void pwq_activate_delayed_work(struct work_struct *work) 1671 { << 1672 return !pwq->nr_active && list_empty( << 1673 } << 1674 << 1675 static void __pwq_activate_work(struct pool_w << 1676 struct work_s << 1677 { 1105 { 1678 unsigned long *wdb = work_data_bits(w !! 1106 struct pool_workqueue *pwq = get_work_pwq(work); 1679 1107 1680 WARN_ON_ONCE(!(*wdb & WORK_STRUCT_INA << 1681 trace_workqueue_activate_work(work); 1108 trace_workqueue_activate_work(work); 1682 if (list_empty(&pwq->pool->worklist)) << 1683 pwq->pool->watchdog_ts = jiff << 1684 move_linked_works(work, &pwq->pool->w 1109 move_linked_works(work, &pwq->pool->worklist, NULL); 1685 __clear_bit(WORK_STRUCT_INACTIVE_BIT, !! 1110 __clear_bit(WORK_STRUCT_DELAYED_BIT, work_data_bits(work)); 1686 } !! 1111 pwq->nr_active++; 1687 << 1688 static bool tryinc_node_nr_active(struct wq_n << 1689 { << 1690 int max = READ_ONCE(nna->max); << 1691 << 1692 while (true) { << 1693 int old, tmp; << 1694 << 1695 old = atomic_read(&nna->nr); << 1696 if (old >= max) << 1697 return false; << 1698 tmp = atomic_cmpxchg_relaxed( << 1699 if (tmp == old) << 1700 return true; << 1701 } << 1702 } 1112 } 1703 1113 1704 /** !! 1114 static void pwq_activate_first_delayed(struct pool_workqueue *pwq) 1705 * pwq_tryinc_nr_active - Try to increment nr << 1706 * @pwq: pool_workqueue of interest << 1707 * @fill: max_active may have increased, try << 1708 * << 1709 * Try to increment nr_active for @pwq. Retur << 1710 * successfully obtained. %false otherwise. << 1711 */ << 1712 static bool pwq_tryinc_nr_active(struct pool_ << 1713 { 1115 { 1714 struct workqueue_struct *wq = pwq->wq !! 1116 struct work_struct *work = list_first_entry(&pwq->delayed_works, 1715 struct worker_pool *pool = pwq->pool; !! 1117 struct work_struct, entry); 1716 struct wq_node_nr_active *nna = wq_no << 1717 bool obtained = false; << 1718 << 1719 lockdep_assert_held(&pool->lock); << 1720 << 1721 if (!nna) { << 1722 /* BH or per-cpu workqueue, p << 1723 obtained = pwq->nr_active < R << 1724 goto out; << 1725 } << 1726 << 1727 if (unlikely(pwq->plugged)) << 1728 return false; << 1729 << 1730 /* << 1731 * Unbound workqueue uses per-node sh << 1732 * already waiting on $nna, pwq_dec_n << 1733 * concurrency level. Don't jump the << 1734 * << 1735 * We need to ignore the pending test << 1736 * pwq_dec_nr_active() can only maint << 1737 * increase it. 
This is indicated by << 1738 */ << 1739 if (!list_empty(&pwq->pending_node) & << 1740 goto out; << 1741 << 1742 obtained = tryinc_node_nr_active(nna) << 1743 if (obtained) << 1744 goto out; << 1745 << 1746 /* << 1747 * Lockless acquisition failed. Lock, << 1748 * and try again. The smp_mb() is pai << 1749 * of atomic_dec_return() in pwq_dec_ << 1750 * we see the decremented $nna->nr or << 1751 * $nna->pending_pwqs. << 1752 */ << 1753 raw_spin_lock(&nna->lock); << 1754 << 1755 if (list_empty(&pwq->pending_node)) << 1756 list_add_tail(&pwq->pending_n << 1757 else if (likely(!fill)) << 1758 goto out_unlock; << 1759 << 1760 smp_mb(); << 1761 1118 1762 obtained = tryinc_node_nr_active(nna) !! 1119 pwq_activate_delayed_work(work); 1763 << 1764 /* << 1765 * If @fill, @pwq might have already << 1766 * pending in cold paths doesn't affe << 1767 */ << 1768 if (obtained && likely(!fill)) << 1769 list_del_init(&pwq->pending_n << 1770 << 1771 out_unlock: << 1772 raw_spin_unlock(&nna->lock); << 1773 out: << 1774 if (obtained) << 1775 pwq->nr_active++; << 1776 return obtained; << 1777 } << 1778 << 1779 /** << 1780 * pwq_activate_first_inactive - Activate the << 1781 * @pwq: pool_workqueue of interest << 1782 * @fill: max_active may have increased, try << 1783 * << 1784 * Activate the first inactive work item of @ << 1785 * max_active limit. << 1786 * << 1787 * Returns %true if an inactive work item has << 1788 * inactive work item is found or max_active << 1789 */ << 1790 static bool pwq_activate_first_inactive(struc << 1791 { << 1792 struct work_struct *work = << 1793 list_first_entry_or_null(&pwq << 1794 stru << 1795 << 1796 if (work && pwq_tryinc_nr_active(pwq, << 1797 __pwq_activate_work(pwq, work << 1798 return true; << 1799 } else { << 1800 return false; << 1801 } << 1802 } << 1803 << 1804 /** << 1805 * unplug_oldest_pwq - unplug the oldest pool << 1806 * @wq: workqueue_struct where its oldest pwq << 1807 * << 1808 * This function should only be called for or << 1809 * oldest pwq is unplugged, the others are pl << 1810 * ensure proper work item ordering:: << 1811 * << 1812 * dfl_pwq --------------+ [P] - plugg << 1813 * | << 1814 * v << 1815 * pwqs -> A -> B [P] -> C [P] (newest) << 1816 * | | | << 1817 * 1 3 5 << 1818 * | | | << 1819 * 2 4 6 << 1820 * << 1821 * When the oldest pwq is drained and removed << 1822 * to unplug the next oldest one to start its << 1823 * pwq's are linked into wq->pwqs with the ol << 1824 * the list is the oldest. << 1825 */ << 1826 static void unplug_oldest_pwq(struct workqueu << 1827 { << 1828 struct pool_workqueue *pwq; << 1829 << 1830 lockdep_assert_held(&wq->mutex); << 1831 << 1832 /* Caller should make sure that pwqs << 1833 pwq = list_first_entry_or_null(&wq->p << 1834 pwqs_n << 1835 raw_spin_lock_irq(&pwq->pool->lock); << 1836 if (pwq->plugged) { << 1837 pwq->plugged = false; << 1838 if (pwq_activate_first_inacti << 1839 kick_pool(pwq->pool); << 1840 } << 1841 raw_spin_unlock_irq(&pwq->pool->lock) << 1842 } << 1843 << 1844 /** << 1845 * node_activate_pending_pwq - Activate a pen << 1846 * @nna: wq_node_nr_active to activate a pend << 1847 * @caller_pool: worker_pool the caller is lo << 1848 * << 1849 * Activate a pwq in @nna->pending_pwqs. 
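/*
 * Annotation (generic sketch): tryinc_node_nr_active() above is the usual
 * bounded-atomic-increment idiom: read, test against the limit, cmpxchg,
 * and retry only on contention.  The same shape written with the
 * try_cmpxchg flavor of the primitive:
 */
#include <linux/atomic.h>

static bool example_tryinc_limited(atomic_t *cnt, int max)
{
	int old = atomic_read(cnt);

	do {
		if (old >= max)
			return false;	/* limit reached, permanent failure */
	} while (!atomic_try_cmpxchg_relaxed(cnt, &old, old + 1));

	return true;			/* we own one unit of the budget */
}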
Call << 1850 * @caller_pool may be unlocked and relocked << 1851 */ << 1852 static void node_activate_pending_pwq(struct << 1853 struct << 1854 { << 1855 struct worker_pool *locked_pool = cal << 1856 struct pool_workqueue *pwq; << 1857 struct work_struct *work; << 1858 << 1859 lockdep_assert_held(&caller_pool->loc << 1860 << 1861 raw_spin_lock(&nna->lock); << 1862 retry: << 1863 pwq = list_first_entry_or_null(&nna-> << 1864 struct << 1865 if (!pwq) << 1866 goto out_unlock; << 1867 << 1868 /* << 1869 * If @pwq is for a different pool th << 1870 * @pwq->pool->lock. Let's trylock fi << 1871 * / lock dance. For that, we also ne << 1872 * nested inside pool locks. << 1873 */ << 1874 if (pwq->pool != locked_pool) { << 1875 raw_spin_unlock(&locked_pool- << 1876 locked_pool = pwq->pool; << 1877 if (!raw_spin_trylock(&locked << 1878 raw_spin_unlock(&nna- << 1879 raw_spin_lock(&locked << 1880 raw_spin_lock(&nna->l << 1881 goto retry; << 1882 } << 1883 } << 1884 << 1885 /* << 1886 * $pwq may not have any inactive wor << 1887 * Drop it from pending_pwqs and see << 1888 */ << 1889 work = list_first_entry_or_null(&pwq- << 1890 struc << 1891 if (!work) { << 1892 list_del_init(&pwq->pending_n << 1893 goto retry; << 1894 } << 1895 << 1896 /* << 1897 * Acquire an nr_active count and act << 1898 * $pwq still has inactive work items << 1899 * pending_pwqs so that we round-robi << 1900 * inactive work items are not activa << 1901 * given that there has never been an << 1902 */ << 1903 if (likely(tryinc_node_nr_active(nna) << 1904 pwq->nr_active++; << 1905 __pwq_activate_work(pwq, work << 1906 << 1907 if (list_empty(&pwq->inactive << 1908 list_del_init(&pwq->p << 1909 else << 1910 list_move_tail(&pwq-> << 1911 << 1912 /* if activating a foreign po << 1913 if (pwq->pool != caller_pool) << 1914 kick_pool(pwq->pool); << 1915 } << 1916 << 1917 out_unlock: << 1918 raw_spin_unlock(&nna->lock); << 1919 if (locked_pool != caller_pool) { << 1920 raw_spin_unlock(&locked_pool- << 1921 raw_spin_lock(&caller_pool->l << 1922 } << 1923 } << 1924 << 1925 /** << 1926 * pwq_dec_nr_active - Retire an active count << 1927 * @pwq: pool_workqueue of interest << 1928 * << 1929 * Decrement @pwq's nr_active and try to acti << 1930 * For unbound workqueues, this function may << 1931 */ << 1932 static void pwq_dec_nr_active(struct pool_wor << 1933 { << 1934 struct worker_pool *pool = pwq->pool; << 1935 struct wq_node_nr_active *nna = wq_no << 1936 << 1937 lockdep_assert_held(&pool->lock); << 1938 << 1939 /* << 1940 * @pwq->nr_active should be decremen << 1941 * workqueues. << 1942 */ << 1943 pwq->nr_active--; << 1944 << 1945 /* << 1946 * For a percpu workqueue, it's simpl << 1947 * inactive work item on @pwq itself. << 1948 */ << 1949 if (!nna) { << 1950 pwq_activate_first_inactive(p << 1951 return; << 1952 } << 1953 << 1954 /* << 1955 * If @pwq is for an unbound workqueu << 1956 * multiple pwqs and pools may be sha << 1957 * pwq needs to wait for an nr_active << 1958 * $nna->pending_pwqs. The following << 1959 * memory barrier is paired with smp_ << 1960 * guarantee that either we see non-e << 1961 * decremented $nna->nr. << 1962 * << 1963 * $nna->max may change as CPUs come << 1964 * max_active gets updated. 
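/*
 * Annotation (generic sketch, made-up locks): the trylock dance in
 * node_activate_pending_pwq() above is the standard way to take a lock
 * that nests *outside* one already held: try it opportunistically, and on
 * failure drop the inner lock, acquire both in the correct order, and
 * re-validate everything observed before the drop.
 */
#include <linux/spinlock.h>

static void example_lock_both(raw_spinlock_t *outer, raw_spinlock_t *inner)
{
	/* caller already holds @inner, but the lock order is outer, inner */
	if (!raw_spin_trylock(outer)) {
		raw_spin_unlock(inner);
		raw_spin_lock(outer);
		raw_spin_lock(inner);
		/* state derived under @inner is stale; caller must recheck */
	}
	/* both locks held */
}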
However, << 1965 * larger than @pwq->wq->min_active w << 1966 * This maintains the forward progres << 1967 */ << 1968 if (atomic_dec_return(&nna->nr) >= RE << 1969 return; << 1970 << 1971 if (!list_empty(&nna->pending_pwqs)) << 1972 node_activate_pending_pwq(nna << 1973 } 1120 } 1974 1121 1975 /** 1122 /** 1976 * pwq_dec_nr_in_flight - decrement pwq's nr_ 1123 * pwq_dec_nr_in_flight - decrement pwq's nr_in_flight 1977 * @pwq: pwq of interest 1124 * @pwq: pwq of interest 1978 * @work_data: work_data of work which left t !! 1125 * @color: color of work which left the queue 1979 * 1126 * 1980 * A work either has completed or is removed 1127 * A work either has completed or is removed from pending queue, 1981 * decrement nr_in_flight of its pwq and hand 1128 * decrement nr_in_flight of its pwq and handle workqueue flushing. 1982 * 1129 * 1983 * NOTE: << 1984 * For unbound workqueues, this function may << 1985 * and thus should be called after all other << 1986 * work item is complete. << 1987 * << 1988 * CONTEXT: 1130 * CONTEXT: 1989 * raw_spin_lock_irq(pool->lock). !! 1131 * spin_lock_irq(pool->lock). 1990 */ 1132 */ 1991 static void pwq_dec_nr_in_flight(struct pool_ !! 1133 static void pwq_dec_nr_in_flight(struct pool_workqueue *pwq, int color) 1992 { 1134 { 1993 int color = get_work_color(work_data) !! 1135 /* uncolored work items don't participate in flushing or nr_active */ 1994 !! 1136 if (color == WORK_NO_COLOR) 1995 if (!(work_data & WORK_STRUCT_INACTIV !! 1137 goto out_put; 1996 pwq_dec_nr_active(pwq); << 1997 1138 1998 pwq->nr_in_flight[color]--; 1139 pwq->nr_in_flight[color]--; 1999 1140 >> 1141 pwq->nr_active--; >> 1142 if (!list_empty(&pwq->delayed_works)) { >> 1143 /* one down, submit a delayed one */ >> 1144 if (pwq->nr_active < pwq->max_active) >> 1145 pwq_activate_first_delayed(pwq); >> 1146 } >> 1147 2000 /* is flush in progress and are we at 1148 /* is flush in progress and are we at the flushing tip? */ 2001 if (likely(pwq->flush_color != color) 1149 if (likely(pwq->flush_color != color)) 2002 goto out_put; 1150 goto out_put; 2003 1151 2004 /* are there still in-flight works? * 1152 /* are there still in-flight works? */ 2005 if (pwq->nr_in_flight[color]) 1153 if (pwq->nr_in_flight[color]) 2006 goto out_put; 1154 goto out_put; 2007 1155 2008 /* this pwq is done, clear flush_colo 1156 /* this pwq is done, clear flush_color */ 2009 pwq->flush_color = -1; 1157 pwq->flush_color = -1; 2010 1158 2011 /* 1159 /* 2012 * If this was the last pwq, wake up 1160 * If this was the last pwq, wake up the first flusher. It 2013 * will handle the rest. 1161 * will handle the rest. 2014 */ 1162 */ 2015 if (atomic_dec_and_test(&pwq->wq->nr_ 1163 if (atomic_dec_and_test(&pwq->wq->nr_pwqs_to_flush)) 2016 complete(&pwq->wq->first_flus 1164 complete(&pwq->wq->first_flusher->done); 2017 out_put: 1165 out_put: 2018 put_pwq(pwq); 1166 put_pwq(pwq); 2019 } 1167 } 2020 1168 2021 /** 1169 /** 2022 * try_to_grab_pending - steal work item from 1170 * try_to_grab_pending - steal work item from worklist and disable irq 2023 * @work: work item to steal 1171 * @work: work item to steal 2024 * @cflags: %WORK_CANCEL_ flags !! 1172 * @is_dwork: @work is a delayed_work 2025 * @irq_flags: place to store irq state !! 1173 * @flags: place to store irq state 2026 * 1174 * 2027 * Try to grab PENDING bit of @work. This fu 1175 * Try to grab PENDING bit of @work. This function can handle @work in any 2028 * stable state - idle, on timer or on workli !! 1176 * stable state - idle, on timer or on worklist. 
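/*
 * Annotation (illustrative): the flush_color/nr_in_flight accounting above
 * is what lets flushing wait on whole generations of work with a handful
 * of per-color counters instead of tracking individual items.  From the
 * caller's side the contract is simply:
 */
#include <linux/workqueue.h>

static void example_queue_and_flush(struct workqueue_struct *wq,
				    struct work_struct *w)
{
	queue_work(wq, w);
	flush_workqueue(wq);	/* returns once every item queued so far,
				 * accounted by color, has completed */
}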
Return values are 2029 * 1177 * 2030 * Return: << 2031 * << 2032 * ======== ============================= << 2033 * 1 if @work was pending and we s 1178 * 1 if @work was pending and we successfully stole PENDING 2034 * 0 if @work was idle and we clai 1179 * 0 if @work was idle and we claimed PENDING 2035 * -EAGAIN if PENDING couldn't be grabbe 1180 * -EAGAIN if PENDING couldn't be grabbed at the moment, safe to busy-retry 2036 * ======== ============================= !! 1181 * -ENOENT if someone else is canceling @work, this state may persist >> 1182 * for arbitrarily long 2037 * 1183 * 2038 * Note: << 2039 * On >= 0 return, the caller owns @work's PE 1184 * On >= 0 return, the caller owns @work's PENDING bit. To avoid getting 2040 * interrupted while holding PENDING and @wor 1185 * interrupted while holding PENDING and @work off queue, irq must be 2041 * disabled on entry. This, combined with de 1186 * disabled on entry. This, combined with delayed_work->timer being 2042 * irqsafe, ensures that we return -EAGAIN fo 1187 * irqsafe, ensures that we return -EAGAIN for finite short period of time. 2043 * 1188 * 2044 * On successful return, >= 0, irq is disable 1189 * On successful return, >= 0, irq is disabled and the caller is 2045 * responsible for releasing it using local_i !! 1190 * responsible for releasing it using local_irq_restore(*@flags). 2046 * 1191 * 2047 * This function is safe to call from any con 1192 * This function is safe to call from any context including IRQ handler. 2048 */ 1193 */ 2049 static int try_to_grab_pending(struct work_st !! 1194 static int try_to_grab_pending(struct work_struct *work, bool is_dwork, 2050 unsigned long !! 1195 unsigned long *flags) 2051 { 1196 { 2052 struct worker_pool *pool; 1197 struct worker_pool *pool; 2053 struct pool_workqueue *pwq; 1198 struct pool_workqueue *pwq; 2054 1199 2055 local_irq_save(*irq_flags); !! 1200 local_irq_save(*flags); 2056 1201 2057 /* try to steal the timer if it exist 1202 /* try to steal the timer if it exists */ 2058 if (cflags & WORK_CANCEL_DELAYED) { !! 1203 if (is_dwork) { 2059 struct delayed_work *dwork = 1204 struct delayed_work *dwork = to_delayed_work(work); 2060 1205 2061 /* 1206 /* 2062 * dwork->timer is irqsafe. 1207 * dwork->timer is irqsafe. If del_timer() fails, it's 2063 * guaranteed that the timer 1208 * guaranteed that the timer is not queued anywhere and not 2064 * running on the local CPU. 1209 * running on the local CPU. 2065 */ 1210 */ 2066 if (likely(del_timer(&dwork-> 1211 if (likely(del_timer(&dwork->timer))) 2067 return 1; 1212 return 1; 2068 } 1213 } 2069 1214 2070 /* try to claim PENDING the normal wa 1215 /* try to claim PENDING the normal way */ 2071 if (!test_and_set_bit(WORK_STRUCT_PEN 1216 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) 2072 return 0; 1217 return 0; 2073 1218 2074 rcu_read_lock(); << 2075 /* 1219 /* 2076 * The queueing is in progress, or it 1220 * The queueing is in progress, or it is already queued. Try to 2077 * steal it from ->worklist without c 1221 * steal it from ->worklist without clearing WORK_STRUCT_PENDING. 2078 */ 1222 */ 2079 pool = get_work_pool(work); 1223 pool = get_work_pool(work); 2080 if (!pool) 1224 if (!pool) 2081 goto fail; 1225 goto fail; 2082 1226 2083 raw_spin_lock(&pool->lock); !! 
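/*
 * Annotation (illustrative): this grab-PENDING protocol is the engine
 * behind the public cancel/mod helpers, and the guaranteed-finite -EAGAIN
 * window is why they may be used from atomic context.  Typical caller-side
 * use:
 */
#include <linux/workqueue.h>

static void example_cancel_from_irq(struct delayed_work *dw)
{
	/*
	 * Steals the pending timer/queue entry if there is one; does not
	 * wait for a callback that is already running.  Safe in hardirq.
	 */
	if (cancel_delayed_work(dw))
		return;
	/*
	 * cancel_delayed_work_sync() would also wait for a running
	 * callback, but it may sleep and so is not allowed here.
	 */
}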
1227 spin_lock(&pool->lock); 2084 /* 1228 /* 2085 * work->data is guaranteed to point 1229 * work->data is guaranteed to point to pwq only while the work 2086 * item is queued on pwq->wq, and bot 1230 * item is queued on pwq->wq, and both updating work->data to point 2087 * to pwq on queueing and to pool on 1231 * to pwq on queueing and to pool on dequeueing are done under 2088 * pwq->pool->lock. This in turn gua 1232 * pwq->pool->lock. This in turn guarantees that, if work->data 2089 * points to pwq which is associated 1233 * points to pwq which is associated with a locked pool, the work 2090 * item is currently queued on that p 1234 * item is currently queued on that pool. 2091 */ 1235 */ 2092 pwq = get_work_pwq(work); 1236 pwq = get_work_pwq(work); 2093 if (pwq && pwq->pool == pool) { 1237 if (pwq && pwq->pool == pool) { 2094 unsigned long work_data = *wo << 2095 << 2096 debug_work_deactivate(work); 1238 debug_work_deactivate(work); 2097 1239 2098 /* 1240 /* 2099 * A cancelable inactive work !! 1241 * A delayed work item cannot be grabbed directly because 2100 * pwq->inactive_works since !! 1242 * it might have linked NO_COLOR work items which, if left 2101 * canceled (see the comments !! 1243 * on the delayed_list, will confuse pwq->nr_active 2102 * !! 1244 * management later on and cause stall. Make sure the work 2103 * An inactive work item cann !! 1245 * item is activated before grabbing. 2104 * it might have linked barri << 2105 * on the inactive_works list << 2106 * management later on and ca << 2107 * barrier work items to the << 2108 * item. Also keep WORK_STRUC << 2109 * it doesn't participate in << 2110 * pwq_dec_nr_in_flight(). << 2111 */ 1246 */ 2112 if (work_data & WORK_STRUCT_I !! 1247 if (*work_data_bits(work) & WORK_STRUCT_DELAYED) 2113 move_linked_works(wor !! 1248 pwq_activate_delayed_work(work); 2114 1249 2115 list_del_init(&work->entry); 1250 list_del_init(&work->entry); >> 1251 pwq_dec_nr_in_flight(get_work_pwq(work), get_work_color(work)); 2116 1252 2117 /* !! 1253 /* work->data points to pwq iff queued, point to pool */ 2118 * work->data points to pwq i !! 1254 set_work_pool_and_keep_pending(work, pool->id); 2119 * this destroys work->data n << 2120 */ << 2121 set_work_pool_and_keep_pendin << 2122 << 2123 << 2124 /* must be the last step, see << 2125 pwq_dec_nr_in_flight(pwq, wor << 2126 1255 2127 raw_spin_unlock(&pool->lock); !! 1256 spin_unlock(&pool->lock); 2128 rcu_read_unlock(); << 2129 return 1; 1257 return 1; 2130 } 1258 } 2131 raw_spin_unlock(&pool->lock); !! 1259 spin_unlock(&pool->lock); 2132 fail: 1260 fail: 2133 rcu_read_unlock(); !! 1261 local_irq_restore(*flags); 2134 local_irq_restore(*irq_flags); !! 1262 if (work_is_canceling(work)) >> 1263 return -ENOENT; >> 1264 cpu_relax(); 2135 return -EAGAIN; 1265 return -EAGAIN; 2136 } 1266 } 2137 1267 2138 /** 1268 /** 2139 * work_grab_pending - steal work item from w << 2140 * @work: work item to steal << 2141 * @cflags: %WORK_CANCEL_ flags << 2142 * @irq_flags: place to store IRQ state << 2143 * << 2144 * Grab PENDING bit of @work. @work can be in << 2145 * or on worklist. << 2146 * << 2147 * Can be called from any context. IRQ is dis << 2148 * stored in *@irq_flags. The caller is respo << 2149 * local_irq_restore(). << 2150 * << 2151 * Returns %true if @work was pending. 
%false << 2152 */ << 2153 static bool work_grab_pending(struct work_str << 2154 unsigned long * << 2155 { << 2156 int ret; << 2157 << 2158 while (true) { << 2159 ret = try_to_grab_pending(wor << 2160 if (ret >= 0) << 2161 return ret; << 2162 cpu_relax(); << 2163 } << 2164 } << 2165 << 2166 /** << 2167 * insert_work - insert a work into a pool 1269 * insert_work - insert a work into a pool 2168 * @pwq: pwq @work belongs to 1270 * @pwq: pwq @work belongs to 2169 * @work: work to insert 1271 * @work: work to insert 2170 * @head: insertion point 1272 * @head: insertion point 2171 * @extra_flags: extra WORK_STRUCT_* flags to 1273 * @extra_flags: extra WORK_STRUCT_* flags to set 2172 * 1274 * 2173 * Insert @work which belongs to @pwq after @ 1275 * Insert @work which belongs to @pwq after @head. @extra_flags is or'd to 2174 * work_struct flags. 1276 * work_struct flags. 2175 * 1277 * 2176 * CONTEXT: 1278 * CONTEXT: 2177 * raw_spin_lock_irq(pool->lock). !! 1279 * spin_lock_irq(pool->lock). 2178 */ 1280 */ 2179 static void insert_work(struct pool_workqueue 1281 static void insert_work(struct pool_workqueue *pwq, struct work_struct *work, 2180 struct list_head *hea 1282 struct list_head *head, unsigned int extra_flags) 2181 { 1283 { 2182 debug_work_activate(work); !! 1284 struct worker_pool *pool = pwq->pool; 2183 << 2184 /* record the work call stack in orde << 2185 kasan_record_aux_stack_noalloc(work); << 2186 1285 2187 /* we own @work, set data and link */ 1286 /* we own @work, set data and link */ 2188 set_work_pwq(work, pwq, extra_flags); 1287 set_work_pwq(work, pwq, extra_flags); 2189 list_add_tail(&work->entry, head); 1288 list_add_tail(&work->entry, head); 2190 get_pwq(pwq); 1289 get_pwq(pwq); >> 1290 >> 1291 /* >> 1292 * Ensure either wq_worker_sleeping() sees the above >> 1293 * list_add_tail() or we see zero nr_running to avoid workers lying >> 1294 * around lazily while there are works to be processed. >> 1295 */ >> 1296 smp_mb(); >> 1297 >> 1298 if (__need_more_worker(pool)) >> 1299 wake_up_worker(pool); 2191 } 1300 } 2192 1301 2193 /* 1302 /* 2194 * Test whether @work is being queued from an 1303 * Test whether @work is being queued from another work executing on the 2195 * same workqueue. 1304 * same workqueue. 2196 */ 1305 */ 2197 static bool is_chained_work(struct workqueue_ 1306 static bool is_chained_work(struct workqueue_struct *wq) 2198 { 1307 { 2199 struct worker *worker; 1308 struct worker *worker; 2200 1309 2201 worker = current_wq_worker(); 1310 worker = current_wq_worker(); 2202 /* 1311 /* 2203 * Return %true iff I'm a worker exec !! 1312 * Return %true iff I'm a worker execuing a work item on @wq. If 2204 * I'm @worker, it's safe to derefere 1313 * I'm @worker, it's safe to dereference it without locking. 2205 */ 1314 */ 2206 return worker && worker->current_pwq- 1315 return worker && worker->current_pwq->wq == wq; 2207 } 1316 } 2208 1317 2209 /* << 2210 * When queueing an unbound work item to a wq << 2211 * by wq_unbound_cpumask. Otherwise, round r << 2212 * avoid perturbing sensitive tasks. 
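/*
 * Annotation (generic sketch, made-up names): wq_select_unbound_cpu()
 * below round-robins over a cpumask using a per-CPU cursor: advance past
 * the last pick with cpumask_next_and() and wrap with cpumask_first_and().
 * The bare pattern (caller must have preemption disabled, as the real
 * caller does by running with IRQs off):
 */
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/smp.h>

static DEFINE_PER_CPU(int, example_rr_last) = -1;

static int example_pick_cpu(const struct cpumask *allowed)
{
	int cpu = __this_cpu_read(example_rr_last);

	cpu = cpumask_next_and(cpu, allowed, cpu_online_mask);
	if (cpu >= nr_cpu_ids)
		cpu = cpumask_first_and(allowed, cpu_online_mask);
	if (cpu >= nr_cpu_ids)
		return raw_smp_processor_id();	/* mask empty: stay local */
	__this_cpu_write(example_rr_last, cpu);
	return cpu;
}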
<< 2213 */ << 2214 static int wq_select_unbound_cpu(int cpu) << 2215 { << 2216 int new_cpu; << 2217 << 2218 if (likely(!wq_debug_force_rr_cpu)) { << 2219 if (cpumask_test_cpu(cpu, wq_ << 2220 return cpu; << 2221 } else { << 2222 pr_warn_once("workqueue: roun << 2223 } << 2224 << 2225 new_cpu = __this_cpu_read(wq_rr_cpu_l << 2226 new_cpu = cpumask_next_and(new_cpu, w << 2227 if (unlikely(new_cpu >= nr_cpu_ids)) << 2228 new_cpu = cpumask_first_and(w << 2229 if (unlikely(new_cpu >= nr_cp << 2230 return cpu; << 2231 } << 2232 __this_cpu_write(wq_rr_cpu_last, new_ << 2233 << 2234 return new_cpu; << 2235 } << 2236 << 2237 static void __queue_work(int cpu, struct work 1318 static void __queue_work(int cpu, struct workqueue_struct *wq, 2238 struct work_struct * 1319 struct work_struct *work) 2239 { 1320 { 2240 struct pool_workqueue *pwq; 1321 struct pool_workqueue *pwq; 2241 struct worker_pool *last_pool, *pool; !! 1322 struct worker_pool *last_pool; >> 1323 struct list_head *worklist; 2242 unsigned int work_flags; 1324 unsigned int work_flags; 2243 unsigned int req_cpu = cpu; 1325 unsigned int req_cpu = cpu; 2244 1326 2245 /* 1327 /* 2246 * While a work item is PENDING && of 1328 * While a work item is PENDING && off queue, a task trying to 2247 * steal the PENDING will busy-loop w 1329 * steal the PENDING will busy-loop waiting for it to either get 2248 * queued or lose PENDING. Grabbing 1330 * queued or lose PENDING. Grabbing PENDING and queueing should 2249 * happen with IRQ disabled. 1331 * happen with IRQ disabled. 2250 */ 1332 */ 2251 lockdep_assert_irqs_disabled(); !! 1333 WARN_ON_ONCE(!irqs_disabled()); 2252 1334 2253 /* !! 1335 debug_work_activate(work); 2254 * For a draining wq, only works from !! 1336 2255 * allowed. The __WQ_DESTROYING helps !! 1337 /* if dying, only works from the same workqueue are allowed */ 2256 * queues a new work item to a wq aft !! 1338 if (unlikely(wq->flags & __WQ_DRAINING) && 2257 */ !! 1339 WARN_ON_ONCE(!is_chained_work(wq))) 2258 if (unlikely(wq->flags & (__WQ_DESTRO << 2259 WARN_ON_ONCE(!is_chained << 2260 return; 1340 return; 2261 rcu_read_lock(); << 2262 retry: 1341 retry: 2263 /* pwq which will be used unless @wor !! 1342 if (req_cpu == WORK_CPU_UNBOUND) 2264 if (req_cpu == WORK_CPU_UNBOUND) { !! 1343 cpu = raw_smp_processor_id(); 2265 if (wq->flags & WQ_UNBOUND) << 2266 cpu = wq_select_unbou << 2267 else << 2268 cpu = raw_smp_process << 2269 } << 2270 1344 2271 pwq = rcu_dereference(*per_cpu_ptr(wq !! 1345 /* pwq which will be used unless @work is executing elsewhere */ 2272 pool = pwq->pool; !! 1346 if (!(wq->flags & WQ_UNBOUND)) >> 1347 pwq = per_cpu_ptr(wq->cpu_pwqs, cpu); >> 1348 else >> 1349 pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu)); 2273 1350 2274 /* 1351 /* 2275 * If @work was previously on a diffe 1352 * If @work was previously on a different pool, it might still be 2276 * running there, in which case the w 1353 * running there, in which case the work needs to be queued on that 2277 * pool to guarantee non-reentrancy. 1354 * pool to guarantee non-reentrancy. 2278 * << 2279 * For ordered workqueue, work items << 2280 * for accurate order management. Gu << 2281 * non-reentrancy. See the comments << 2282 */ 1355 */ 2283 last_pool = get_work_pool(work); 1356 last_pool = get_work_pool(work); 2284 if (last_pool && last_pool != pool && !! 1357 if (last_pool && last_pool != pwq->pool) { 2285 struct worker *worker; 1358 struct worker *worker; 2286 1359 2287 raw_spin_lock(&last_pool->loc !! 
1360 spin_lock(&last_pool->lock); 2288 1361 2289 worker = find_worker_executin 1362 worker = find_worker_executing_work(last_pool, work); 2290 1363 2291 if (worker && worker->current 1364 if (worker && worker->current_pwq->wq == wq) { 2292 pwq = worker->current 1365 pwq = worker->current_pwq; 2293 pool = pwq->pool; << 2294 WARN_ON_ONCE(pool != << 2295 } else { 1366 } else { 2296 /* meh... not running 1367 /* meh... not running there, queue here */ 2297 raw_spin_unlock(&last !! 1368 spin_unlock(&last_pool->lock); 2298 raw_spin_lock(&pool-> !! 1369 spin_lock(&pwq->pool->lock); 2299 } 1370 } 2300 } else { 1371 } else { 2301 raw_spin_lock(&pool->lock); !! 1372 spin_lock(&pwq->pool->lock); 2302 } 1373 } 2303 1374 2304 /* 1375 /* 2305 * pwq is determined and locked. For !! 1376 * pwq is determined and locked. For unbound pools, we could have 2306 * with pwq release and it could alre !! 1377 * raced with pwq release and it could already be dead. If its 2307 * repeat pwq selection. Note that un !! 1378 * refcnt is zero, repeat pwq selection. Note that pwqs never die 2308 * another pwq replacing it in cpu_pw !! 1379 * without another pwq replacing it in the numa_pwq_tbl or while 2309 * on it, so the retrying is guarante !! 1380 * work items are executing on it, so the retrying is guaranteed to >> 1381 * make forward-progress. 2310 */ 1382 */ 2311 if (unlikely(!pwq->refcnt)) { 1383 if (unlikely(!pwq->refcnt)) { 2312 if (wq->flags & WQ_UNBOUND) { 1384 if (wq->flags & WQ_UNBOUND) { 2313 raw_spin_unlock(&pool !! 1385 spin_unlock(&pwq->pool->lock); 2314 cpu_relax(); 1386 cpu_relax(); 2315 goto retry; 1387 goto retry; 2316 } 1388 } 2317 /* oops */ 1389 /* oops */ 2318 WARN_ONCE(true, "workqueue: p 1390 WARN_ONCE(true, "workqueue: per-cpu pwq for %s on cpu%d has 0 refcnt", 2319 wq->name, cpu); 1391 wq->name, cpu); 2320 } 1392 } 2321 1393 2322 /* pwq determined, queue */ 1394 /* pwq determined, queue */ 2323 trace_workqueue_queue_work(req_cpu, p 1395 trace_workqueue_queue_work(req_cpu, pwq, work); 2324 1396 2325 if (WARN_ON(!list_empty(&work->entry) !! 1397 if (WARN_ON(!list_empty(&work->entry))) { 2326 goto out; !! 1398 spin_unlock(&pwq->pool->lock); >> 1399 return; >> 1400 } 2327 1401 2328 pwq->nr_in_flight[pwq->work_color]++; 1402 pwq->nr_in_flight[pwq->work_color]++; 2329 work_flags = work_color_to_flags(pwq- 1403 work_flags = work_color_to_flags(pwq->work_color); 2330 1404 2331 /* !! 1405 if (likely(pwq->nr_active < pwq->max_active)) { 2332 * Limit the number of concurrently a << 2333 * @work must also queue behind exist << 2334 * ordering when max_active changes. << 2335 */ << 2336 if (list_empty(&pwq->inactive_works) << 2337 if (list_empty(&pool->worklis << 2338 pool->watchdog_ts = j << 2339 << 2340 trace_workqueue_activate_work 1406 trace_workqueue_activate_work(work); 2341 insert_work(pwq, work, &pool- !! 1407 pwq->nr_active++; 2342 kick_pool(pool); !! 1408 worklist = &pwq->pool->worklist; 2343 } else { 1409 } else { 2344 work_flags |= WORK_STRUCT_INA !! 1410 work_flags |= WORK_STRUCT_DELAYED; 2345 insert_work(pwq, work, &pwq-> !! 1411 worklist = &pwq->delayed_works; 2346 } 1412 } 2347 1413 2348 out: !! 1414 insert_work(pwq, work, worklist, work_flags); 2349 raw_spin_unlock(&pool->lock); << 2350 rcu_read_unlock(); << 2351 } << 2352 << 2353 static bool clear_pending_if_disabled(struct << 2354 { << 2355 unsigned long data = *work_data_bits( << 2356 struct work_offq_data offqd; << 2357 1415 2358 if (likely((data & WORK_STRUCT_PWQ) | !! 
1416 spin_unlock(&pwq->pool->lock); 2359 !(data & WORK_OFFQ_DISABLE << 2360 return false; << 2361 << 2362 work_offqd_unpack(&offqd, data); << 2363 set_work_pool_and_clear_pending(work, << 2364 work_ << 2365 return true; << 2366 } 1417 } 2367 1418 2368 /** 1419 /** 2369 * queue_work_on - queue work on specific cpu 1420 * queue_work_on - queue work on specific cpu 2370 * @cpu: CPU number to execute work on 1421 * @cpu: CPU number to execute work on 2371 * @wq: workqueue to use 1422 * @wq: workqueue to use 2372 * @work: work to queue 1423 * @work: work to queue 2373 * 1424 * 2374 * We queue the work to a specific CPU, the c !! 1425 * Returns %false if @work was already on a queue, %true otherwise. 2375 * can't go away. Callers that fail to ensur << 2376 * CPU cannot go away will execute on a rando << 2377 * But note well that callers specifying a CP << 2378 * online will get a splat. << 2379 * 1426 * 2380 * Return: %false if @work was already on a q !! 1427 * We queue the work to a specific CPU, the caller must ensure it >> 1428 * can't go away. 2381 */ 1429 */ 2382 bool queue_work_on(int cpu, struct workqueue_ 1430 bool queue_work_on(int cpu, struct workqueue_struct *wq, 2383 struct work_struct *work) 1431 struct work_struct *work) 2384 { 1432 { 2385 bool ret = false; 1433 bool ret = false; 2386 unsigned long irq_flags; !! 1434 unsigned long flags; 2387 1435 2388 local_irq_save(irq_flags); !! 1436 local_irq_save(flags); 2389 1437 2390 if (!test_and_set_bit(WORK_STRUCT_PEN !! 1438 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) { 2391 !clear_pending_if_disabled(work)) << 2392 __queue_work(cpu, wq, work); 1439 __queue_work(cpu, wq, work); 2393 ret = true; 1440 ret = true; 2394 } 1441 } 2395 1442 2396 local_irq_restore(irq_flags); !! 1443 local_irq_restore(flags); 2397 return ret; 1444 return ret; 2398 } 1445 } 2399 EXPORT_SYMBOL(queue_work_on); 1446 EXPORT_SYMBOL(queue_work_on); 2400 1447 2401 /** !! 1448 void delayed_work_timer_fn(unsigned long __data) 2402 * select_numa_node_cpu - Select a CPU based << 2403 * @node: NUMA node ID that we want to select << 2404 * << 2405 * This function will attempt to find a "rand << 2406 * node. If there are no CPUs available on th << 2407 * WORK_CPU_UNBOUND indicating that we should << 2408 * available CPU if we need to schedule this << 2409 */ << 2410 static int select_numa_node_cpu(int node) << 2411 { << 2412 int cpu; << 2413 << 2414 /* Delay binding to CPU if node is no << 2415 if (node < 0 || node >= MAX_NUMNODES << 2416 return WORK_CPU_UNBOUND; << 2417 << 2418 /* Use local node/cpu if we are alrea << 2419 cpu = raw_smp_processor_id(); << 2420 if (node == cpu_to_node(cpu)) << 2421 return cpu; << 2422 << 2423 /* Use "random" otherwise know as "fi << 2424 cpu = cpumask_any_and(cpumask_of_node << 2425 << 2426 /* If CPU is valid return that, other << 2427 return cpu < nr_cpu_ids ? cpu : WORK_ << 2428 } << 2429 << 2430 /** << 2431 * queue_work_node - queue work on a "random" << 2432 * @node: NUMA node that we are targeting the << 2433 * @wq: workqueue to use << 2434 * @work: work to queue << 2435 * << 2436 * We queue the work to a "random" CPU within << 2437 * idea here is to provide a way to somehow a << 2438 * NUMA node. << 2439 * << 2440 * This function will only make a best effort << 2441 * the right NUMA node. If no node is request << 2442 * offline then we just fall back to standard << 2443 * << 2444 * Currently the "random" CPU ends up being t << 2445 * intersection of cpu_online_mask and the cp << 2446 * are running on the node. 
In that case we j << 2447 * << 2448 * Return: %false if @work was already on a q << 2449 */ << 2450 bool queue_work_node(int node, struct workque << 2451 struct work_struct *work << 2452 { << 2453 unsigned long irq_flags; << 2454 bool ret = false; << 2455 << 2456 /* << 2457 * This current implementation is spe << 2458 * Specifically we only return the fi << 2459 * node instead of cycling through in << 2460 * << 2461 * If this is used with a per-cpu wor << 2462 * workqueue_select_cpu_near would ne << 2463 * some round robin type logic. << 2464 */ << 2465 WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND << 2466 << 2467 local_irq_save(irq_flags); << 2468 << 2469 if (!test_and_set_bit(WORK_STRUCT_PEN << 2470 !clear_pending_if_disabled(work)) << 2471 int cpu = select_numa_node_cp << 2472 << 2473 __queue_work(cpu, wq, work); << 2474 ret = true; << 2475 } << 2476 << 2477 local_irq_restore(irq_flags); << 2478 return ret; << 2479 } << 2480 EXPORT_SYMBOL_GPL(queue_work_node); << 2481 << 2482 void delayed_work_timer_fn(struct timer_list << 2483 { 1449 { 2484 struct delayed_work *dwork = from_tim !! 1450 struct delayed_work *dwork = (struct delayed_work *)__data; 2485 1451 2486 /* should have been called from irqsa 1452 /* should have been called from irqsafe timer with irq already off */ 2487 __queue_work(dwork->cpu, dwork->wq, & 1453 __queue_work(dwork->cpu, dwork->wq, &dwork->work); 2488 } 1454 } 2489 EXPORT_SYMBOL(delayed_work_timer_fn); 1455 EXPORT_SYMBOL(delayed_work_timer_fn); 2490 1456 2491 static void __queue_delayed_work(int cpu, str 1457 static void __queue_delayed_work(int cpu, struct workqueue_struct *wq, 2492 struct delaye 1458 struct delayed_work *dwork, unsigned long delay) 2493 { 1459 { 2494 struct timer_list *timer = &dwork->ti 1460 struct timer_list *timer = &dwork->timer; 2495 struct work_struct *work = &dwork->wo 1461 struct work_struct *work = &dwork->work; 2496 1462 2497 WARN_ON_ONCE(!wq); !! 1463 WARN_ON_ONCE(timer->function != delayed_work_timer_fn || 2498 WARN_ON_ONCE(timer->function != delay !! 1464 timer->data != (unsigned long)dwork); 2499 WARN_ON_ONCE(timer_pending(timer)); 1465 WARN_ON_ONCE(timer_pending(timer)); 2500 WARN_ON_ONCE(!list_empty(&work->entry 1466 WARN_ON_ONCE(!list_empty(&work->entry)); 2501 1467 2502 /* 1468 /* 2503 * If @delay is 0, queue @dwork->work 1469 * If @delay is 0, queue @dwork->work immediately. This is for 2504 * both optimization and correctness. 1470 * both optimization and correctness. The earliest @timer can 2505 * expire is on the closest next tick 1471 * expire is on the closest next tick and delayed_work users depend 2506 * on that there's no such delay when 1472 * on that there's no such delay when @delay is 0. 2507 */ 1473 */ 2508 if (!delay) { 1474 if (!delay) { 2509 __queue_work(cpu, wq, &dwork- 1475 __queue_work(cpu, wq, &dwork->work); 2510 return; 1476 return; 2511 } 1477 } 2512 1478 >> 1479 timer_stats_timer_set_start_info(&dwork->timer); >> 1480 2513 dwork->wq = wq; 1481 dwork->wq = wq; 2514 dwork->cpu = cpu; 1482 dwork->cpu = cpu; 2515 timer->expires = jiffies + delay; 1483 timer->expires = jiffies + delay; 2516 1484 2517 if (housekeeping_enabled(HK_TYPE_TIME !! 1485 if (unlikely(cpu != WORK_CPU_UNBOUND)) 2518 /* If the current cpu is a ho << 2519 cpu = smp_processor_id(); << 2520 if (!housekeeping_test_cpu(cp << 2521 cpu = housekeeping_an << 2522 add_timer_on(timer, cpu); 1486 add_timer_on(timer, cpu); 2523 } else { !! 1487 else 2524 if (likely(cpu == WORK_CPU_UN !! 
1488 add_timer(timer); 2525 add_timer_global(time << 2526 else << 2527 add_timer_on(timer, c << 2528 } << 2529 } 1489 } 2530 1490 2531 /** 1491 /** 2532 * queue_delayed_work_on - queue work on spec 1492 * queue_delayed_work_on - queue work on specific CPU after delay 2533 * @cpu: CPU number to execute work on 1493 * @cpu: CPU number to execute work on 2534 * @wq: workqueue to use 1494 * @wq: workqueue to use 2535 * @dwork: work to queue 1495 * @dwork: work to queue 2536 * @delay: number of jiffies to wait before q 1496 * @delay: number of jiffies to wait before queueing 2537 * 1497 * 2538 * Return: %false if @work was already on a q !! 1498 * Returns %false if @work was already on a queue, %true otherwise. If 2539 * @delay is zero and @dwork is idle, it will 1499 * @delay is zero and @dwork is idle, it will be scheduled for immediate 2540 * execution. 1500 * execution. 2541 */ 1501 */ 2542 bool queue_delayed_work_on(int cpu, struct wo 1502 bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq, 2543 struct delayed_wor 1503 struct delayed_work *dwork, unsigned long delay) 2544 { 1504 { 2545 struct work_struct *work = &dwork->wo 1505 struct work_struct *work = &dwork->work; 2546 bool ret = false; 1506 bool ret = false; 2547 unsigned long irq_flags; !! 1507 unsigned long flags; 2548 1508 2549 /* read the comment in __queue_work() 1509 /* read the comment in __queue_work() */ 2550 local_irq_save(irq_flags); !! 1510 local_irq_save(flags); 2551 1511 2552 if (!test_and_set_bit(WORK_STRUCT_PEN !! 1512 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) { 2553 !clear_pending_if_disabled(work)) << 2554 __queue_delayed_work(cpu, wq, 1513 __queue_delayed_work(cpu, wq, dwork, delay); 2555 ret = true; 1514 ret = true; 2556 } 1515 } 2557 1516 2558 local_irq_restore(irq_flags); !! 1517 local_irq_restore(flags); 2559 return ret; 1518 return ret; 2560 } 1519 } 2561 EXPORT_SYMBOL(queue_delayed_work_on); 1520 EXPORT_SYMBOL(queue_delayed_work_on); 2562 1521 2563 /** 1522 /** 2564 * mod_delayed_work_on - modify delay of or q 1523 * mod_delayed_work_on - modify delay of or queue a delayed work on specific CPU 2565 * @cpu: CPU number to execute work on 1524 * @cpu: CPU number to execute work on 2566 * @wq: workqueue to use 1525 * @wq: workqueue to use 2567 * @dwork: work to queue 1526 * @dwork: work to queue 2568 * @delay: number of jiffies to wait before q 1527 * @delay: number of jiffies to wait before queueing 2569 * 1528 * 2570 * If @dwork is idle, equivalent to queue_del 1529 * If @dwork is idle, equivalent to queue_delayed_work_on(); otherwise, 2571 * modify @dwork's timer so that it expires a 1530 * modify @dwork's timer so that it expires after @delay. If @delay is 2572 * zero, @work is guaranteed to be scheduled 1531 * zero, @work is guaranteed to be scheduled immediately regardless of its 2573 * current state. 1532 * current state. 2574 * 1533 * 2575 * Return: %false if @dwork was idle and queu !! 1534 * Returns %false if @dwork was idle and queued, %true if @dwork was 2576 * pending and its timer was modified. 1535 * pending and its timer was modified. 2577 * 1536 * 2578 * This function is safe to call from any con 1537 * This function is safe to call from any context including IRQ handler. 2579 * See try_to_grab_pending() for details. 1538 * See try_to_grab_pending() for details. 
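 *
 * A minimal usage sketch, assuming a hypothetical struct my_dev whose
 * ->wq and ->flush_dwork members are not part of this file. Because a
 * call on a pending @dwork simply re-arms its timer, this works as a
 * debounce primitive: each call pushes the deadline out, so the
 * handler runs once, 100ms after the last event.
 *
 *	static void my_event_notify(struct my_dev *dev)
 *	{
 *		// hypothetical helper: pending or idle, (re)arm
 *		// the work to fire 100ms from now
 *		mod_delayed_work_on(WORK_CPU_UNBOUND, dev->wq,
 *				    &dev->flush_dwork,
 *				    msecs_to_jiffies(100));
 *	}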
2580 */ 1539 */ 2581 bool mod_delayed_work_on(int cpu, struct work 1540 bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq, 2582 struct delayed_work 1541 struct delayed_work *dwork, unsigned long delay) 2583 { 1542 { 2584 unsigned long irq_flags; !! 1543 unsigned long flags; 2585 bool ret; !! 1544 int ret; 2586 1545 2587 ret = work_grab_pending(&dwork->work, !! 1546 do { >> 1547 ret = try_to_grab_pending(&dwork->work, true, &flags); >> 1548 } while (unlikely(ret == -EAGAIN)); 2588 1549 2589 if (!clear_pending_if_disabled(&dwork !! 1550 if (likely(ret >= 0)) { 2590 __queue_delayed_work(cpu, wq, 1551 __queue_delayed_work(cpu, wq, dwork, delay); >> 1552 local_irq_restore(flags); >> 1553 } 2591 1554 2592 local_irq_restore(irq_flags); !! 1555 /* -ENOENT from try_to_grab_pending() becomes %true */ 2593 return ret; 1556 return ret; 2594 } 1557 } 2595 EXPORT_SYMBOL_GPL(mod_delayed_work_on); 1558 EXPORT_SYMBOL_GPL(mod_delayed_work_on); 2596 1559 2597 static void rcu_work_rcufn(struct rcu_head *r << 2598 { << 2599 struct rcu_work *rwork = container_of << 2600 << 2601 /* read the comment in __queue_work() << 2602 local_irq_disable(); << 2603 __queue_work(WORK_CPU_UNBOUND, rwork- << 2604 local_irq_enable(); << 2605 } << 2606 << 2607 /** 1560 /** 2608 * queue_rcu_work - queue work after a RCU gr !! 1561 * worker_enter_idle - enter idle state 2609 * @wq: workqueue to use !! 1562 * @worker: worker which is entering idle state 2610 * @rwork: work to queue << 2611 * 1563 * 2612 * Return: %false if @rwork was already pendi !! 1564 * @worker is entering idle state. Update stats and idle timer if 2613 * that a full RCU grace period is guaranteed !! 1565 * necessary. 2614 * While @rwork is guaranteed to be executed !! 1566 * 2615 * execution may happen before a full RCU gra !! 1567 * LOCKING: >> 1568 * spin_lock_irq(pool->lock). 2616 */ 1569 */ 2617 bool queue_rcu_work(struct workqueue_struct * !! 1570 static void worker_enter_idle(struct worker *worker) 2618 { 1571 { 2619 struct work_struct *work = &rwork->wo !! 1572 struct worker_pool *pool = worker->pool; 2620 1573 2621 /* !! 1574 if (WARN_ON_ONCE(worker->flags & WORKER_IDLE) || 2622 * rcu_work can't be canceled or disa !! 1575 WARN_ON_ONCE(!list_empty(&worker->entry) && 2623 * inside @rwork and disabled the inn !! 1576 (worker->hentry.next || worker->hentry.pprev))) 2624 */ !! 1577 return; 2625 if (!test_and_set_bit(WORK_STRUCT_PEN << 2626 !WARN_ON_ONCE(clear_pending_if_di << 2627 rwork->wq = wq; << 2628 call_rcu_hurry(&rwork->rcu, r << 2629 return true; << 2630 } << 2631 1578 2632 return false; !! 1579 /* can't use worker_set_flags(), also called from start_worker() */ 2633 } !! 1580 worker->flags |= WORKER_IDLE; 2634 EXPORT_SYMBOL(queue_rcu_work); !! 1581 pool->nr_idle++; >> 1582 worker->last_active = jiffies; 2635 1583 2636 static struct worker *alloc_worker(int node) !! 1584 /* idle_list is LIFO */ 2637 { !! 1585 list_add(&worker->entry, &pool->idle_list); 2638 struct worker *worker; << 2639 1586 2640 worker = kzalloc_node(sizeof(*worker) !! 1587 if (too_many_workers(pool) && !timer_pending(&pool->idle_timer)) 2641 if (worker) { !! 1588 mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT); 2642 INIT_LIST_HEAD(&worker->entry << 2643 INIT_LIST_HEAD(&worker->sched << 2644 INIT_LIST_HEAD(&worker->node) << 2645 /* on creation a worker is in << 2646 worker->flags = WORKER_PREP; << 2647 } << 2648 return worker; << 2649 } << 2650 1589 2651 static cpumask_t *pool_allowed_cpus(struct wo !! 1590 /* 2652 { !! 1591 * Sanity check nr_running. 
Because wq_unbind_fn() releases 2653 if (pool->cpu < 0 && pool->attrs->aff !! 1592 * pool->lock between setting %WORKER_UNBOUND and zapping 2654 return pool->attrs->__pod_cpu !! 1593 * nr_running, the warning may trigger spuriously. Check iff 2655 else !! 1594 * unbind is not in progress. 2656 return pool->attrs->cpumask; !! 1595 */ >> 1596 WARN_ON_ONCE(!(pool->flags & POOL_DISASSOCIATED) && >> 1597 pool->nr_workers == pool->nr_idle && >> 1598 atomic_read(&pool->nr_running)); 2657 } 1599 } 2658 1600 2659 /** 1601 /** 2660 * worker_attach_to_pool() - attach a worker !! 1602 * worker_leave_idle - leave idle state 2661 * @worker: worker to be attached !! 1603 * @worker: worker which is leaving idle state 2662 * @pool: the target pool !! 1604 * >> 1605 * @worker is leaving idle state. Update stats. 2663 * 1606 * 2664 * Attach @worker to @pool. Once attached, t !! 1607 * LOCKING: 2665 * cpu-binding of @worker are kept coordinate !! 1608 * spin_lock_irq(pool->lock). 2666 * cpu-[un]hotplugs. << 2667 */ 1609 */ 2668 static void worker_attach_to_pool(struct work !! 1610 static void worker_leave_idle(struct worker *worker) 2669 struct work << 2670 { << 2671 mutex_lock(&wq_pool_attach_mutex); << 2672 << 2673 /* << 2674 * The wq_pool_attach_mutex ensures % << 2675 * across this function. See the comm << 2676 * details. BH workers are, while per << 2677 */ << 2678 if (pool->flags & POOL_DISASSOCIATED) << 2679 worker->flags |= WORKER_UNBOU << 2680 } else { << 2681 WARN_ON_ONCE(pool->flags & PO << 2682 kthread_set_per_cpu(worker->t << 2683 } << 2684 << 2685 if (worker->rescue_wq) << 2686 set_cpus_allowed_ptr(worker-> << 2687 << 2688 list_add_tail(&worker->node, &pool->w << 2689 worker->pool = pool; << 2690 << 2691 mutex_unlock(&wq_pool_attach_mutex); << 2692 } << 2693 << 2694 static void unbind_worker(struct worker *work << 2695 { << 2696 lockdep_assert_held(&wq_pool_attach_m << 2697 << 2698 kthread_set_per_cpu(worker->task, -1) << 2699 if (cpumask_intersects(wq_unbound_cpu << 2700 WARN_ON_ONCE(set_cpus_allowed << 2701 else << 2702 WARN_ON_ONCE(set_cpus_allowed << 2703 } << 2704 << 2705 << 2706 static void detach_worker(struct worker *work << 2707 { 1611 { 2708 lockdep_assert_held(&wq_pool_attach_m !! 1612 struct worker_pool *pool = worker->pool; 2709 1613 2710 unbind_worker(worker); !! 1614 if (WARN_ON_ONCE(!(worker->flags & WORKER_IDLE))) 2711 list_del(&worker->node); !! 1615 return; >> 1616 worker_clr_flags(worker, WORKER_IDLE); >> 1617 pool->nr_idle--; >> 1618 list_del_init(&worker->entry); 2712 } 1619 } 2713 1620 2714 /** 1621 /** 2715 * worker_detach_from_pool() - detach a worke !! 1622 * worker_maybe_bind_and_lock - try to bind %current to worker_pool and lock it 2716 * @worker: worker which is attached to its p !! 1623 * @pool: target worker_pool 2717 * 1624 * 2718 * Undo the attaching which had been done in !! 1625 * Bind %current to the cpu of @pool if it is associated and lock @pool. 2719 * caller worker shouldn't access to the pool !! 1626 * 2720 * other reference to the pool. !! 1627 * Works which are scheduled while the cpu is online must at least be >> 1628 * scheduled to a worker which is bound to the cpu so that if they are >> 1629 * flushed from cpu callbacks while cpu is going down, they are >> 1630 * guaranteed to execute on the cpu. >> 1631 * >> 1632 * This function is to be used by unbound workers and rescuers to bind >> 1633 * themselves to the target cpu and may race with cpu going down or >> 1634 * coming online. 
kthread_bind() can't be used because it may put the >> 1635 * worker to already dead cpu and set_cpus_allowed_ptr() can't be used >> 1636 * verbatim as it's best effort and blocking and pool may be >> 1637 * [dis]associated in the meantime. >> 1638 * >> 1639 * This function tries set_cpus_allowed() and locks pool and verifies the >> 1640 * binding against %POOL_DISASSOCIATED which is set during >> 1641 * %CPU_DOWN_PREPARE and cleared during %CPU_ONLINE, so if the worker >> 1642 * enters idle state or fetches works without dropping lock, it can >> 1643 * guarantee the scheduling requirement described in the first paragraph. >> 1644 * >> 1645 * CONTEXT: >> 1646 * Might sleep. Called without any lock but returns with pool->lock >> 1647 * held. >> 1648 * >> 1649 * RETURNS: >> 1650 * %true if the associated pool is online (@worker is successfully >> 1651 * bound), %false if offline. 2721 */ 1652 */ 2722 static void worker_detach_from_pool(struct wo !! 1653 static bool worker_maybe_bind_and_lock(struct worker_pool *pool) >> 1654 __acquires(&pool->lock) 2723 { 1655 { 2724 struct worker_pool *pool = worker->po !! 1656 while (true) { 2725 !! 1657 /* 2726 /* there is one permanent BH worker p !! 1658 * The following call may fail, succeed or succeed 2727 WARN_ON_ONCE(pool->flags & POOL_BH); !! 1659 * without actually migrating the task to the cpu if >> 1660 * it races with cpu hotunplug operation. Verify >> 1661 * against POOL_DISASSOCIATED. >> 1662 */ >> 1663 if (!(pool->flags & POOL_DISASSOCIATED)) >> 1664 set_cpus_allowed_ptr(current, pool->attrs->cpumask); 2728 1665 2729 mutex_lock(&wq_pool_attach_mutex); !! 1666 spin_lock_irq(&pool->lock); 2730 detach_worker(worker); !! 1667 if (pool->flags & POOL_DISASSOCIATED) 2731 worker->pool = NULL; !! 1668 return false; 2732 mutex_unlock(&wq_pool_attach_mutex); !! 1669 if (task_cpu(current) == pool->cpu && >> 1670 cpumask_equal(¤t->cpus_allowed, pool->attrs->cpumask)) >> 1671 return true; >> 1672 spin_unlock_irq(&pool->lock); 2733 1673 2734 /* clear leftover flags without pool- !! 1674 /* 2735 worker->flags &= ~(WORKER_UNBOUND | W !! 1675 * We've raced with CPU hot[un]plug. Give it a breather >> 1676 * and retry migration. cond_resched() is required here; >> 1677 * otherwise, we might deadlock against cpu_stop trying to >> 1678 * bring down the CPU on non-preemptive kernel. >> 1679 */ >> 1680 cpu_relax(); >> 1681 cond_resched(); >> 1682 } 2736 } 1683 } 2737 1684 2738 static int format_worker_id(char *buf, size_t !! 1685 static struct worker *alloc_worker(void) 2739 struct worker_poo << 2740 { 1686 { 2741 if (worker->rescue_wq) !! 1687 struct worker *worker; 2742 return scnprintf(buf, size, " << 2743 worker->resc << 2744 1688 2745 if (pool) { !! 1689 worker = kzalloc(sizeof(*worker), GFP_KERNEL); 2746 if (pool->cpu >= 0) !! 1690 if (worker) { 2747 return scnprintf(buf, !! 1691 INIT_LIST_HEAD(&worker->entry); 2748 pool !! 1692 INIT_LIST_HEAD(&worker->scheduled); 2749 pool !! 1693 /* on creation a worker is in !idle && prep state */ 2750 else !! 1694 worker->flags = WORKER_PREP; 2751 return scnprintf(buf, << 2752 pool << 2753 } else { << 2754 return scnprintf(buf, size, " << 2755 } 1695 } >> 1696 return worker; 2756 } 1697 } 2757 1698 2758 /** 1699 /** 2759 * create_worker - create a new workqueue wor 1700 * create_worker - create a new workqueue worker 2760 * @pool: pool the new worker will belong to 1701 * @pool: pool the new worker will belong to 2761 * 1702 * 2762 * Create and start a new worker which is att !! 
1703 * Create a new worker which is bound to @pool. The returned worker >> 1704 * can be started by calling start_worker() or destroyed using >> 1705 * destroy_worker(). 2763 * 1706 * 2764 * CONTEXT: 1707 * CONTEXT: 2765 * Might sleep. Does GFP_KERNEL allocations. 1708 * Might sleep. Does GFP_KERNEL allocations. 2766 * 1709 * 2767 * Return: !! 1710 * RETURNS: 2768 * Pointer to the newly created worker. 1711 * Pointer to the newly created worker. 2769 */ 1712 */ 2770 static struct worker *create_worker(struct wo 1713 static struct worker *create_worker(struct worker_pool *pool) 2771 { 1714 { 2772 struct worker *worker; !! 1715 struct worker *worker = NULL; 2773 int id; !! 1716 int id = -1; >> 1717 char id_buf[16]; 2774 1718 2775 /* ID is needed to determine kthread !! 1719 lockdep_assert_held(&pool->manager_mutex); 2776 id = ida_alloc(&pool->worker_ida, GFP << 2777 if (id < 0) { << 2778 pr_err_once("workqueue: Faile << 2779 ERR_PTR(id)); << 2780 return NULL; << 2781 } << 2782 1720 2783 worker = alloc_worker(pool->node); !! 1721 /* 2784 if (!worker) { !! 1722 * ID is needed to determine kthread name. Allocate ID first 2785 pr_err_once("workqueue: Faile !! 1723 * without installing the pointer. 2786 goto fail; !! 1724 */ 2787 } !! 1725 idr_preload(GFP_KERNEL); >> 1726 spin_lock_irq(&pool->lock); 2788 1727 2789 worker->id = id; !! 1728 id = idr_alloc(&pool->worker_idr, NULL, 0, 0, GFP_NOWAIT); 2790 1729 2791 if (!(pool->flags & POOL_BH)) { !! 1730 spin_unlock_irq(&pool->lock); 2792 char id_buf[WORKER_ID_LEN]; !! 1731 idr_preload_end(); >> 1732 if (id < 0) >> 1733 goto fail; 2793 1734 2794 format_worker_id(id_buf, size !! 1735 worker = alloc_worker(); 2795 worker->task = kthread_create !! 1736 if (!worker) 2796 !! 1737 goto fail; 2797 if (IS_ERR(worker->task)) { << 2798 if (PTR_ERR(worker->t << 2799 pr_err("workq << 2800 id_buf << 2801 } else { << 2802 pr_err_once(" << 2803 w << 2804 } << 2805 goto fail; << 2806 } << 2807 1738 2808 set_user_nice(worker->task, p !! 1739 worker->pool = pool; 2809 kthread_bind_mask(worker->tas !! 1740 worker->id = id; 2810 } !! 1741 >> 1742 if (pool->cpu >= 0) >> 1743 snprintf(id_buf, sizeof(id_buf), "%d:%d%s", pool->cpu, id, >> 1744 pool->attrs->nice < 0 ? "H" : ""); >> 1745 else >> 1746 snprintf(id_buf, sizeof(id_buf), "u%d:%d", pool->id, id); 2811 1747 2812 /* successful, attach the worker to t !! 1748 worker->task = kthread_create_on_node(worker_thread, worker, pool->node, 2813 worker_attach_to_pool(worker, pool); !! 1749 "kworker/%s", id_buf); >> 1750 if (IS_ERR(worker->task)) >> 1751 goto fail; 2814 1752 2815 /* start the newly created worker */ !! 1753 /* 2816 raw_spin_lock_irq(&pool->lock); !! 1754 * set_cpus_allowed_ptr() will fail if the cpumask doesn't have any >> 1755 * online CPUs. It'll be re-applied when any of the CPUs come up. >> 1756 */ >> 1757 set_user_nice(worker->task, pool->attrs->nice); >> 1758 set_cpus_allowed_ptr(worker->task, pool->attrs->cpumask); 2817 1759 2818 worker->pool->nr_workers++; !! 1760 /* prevent userland from meddling with cpumask of workqueue workers */ 2819 worker_enter_idle(worker); !! 1761 worker->task->flags |= PF_NO_SETAFFINITY; 2820 1762 2821 /* 1763 /* 2822 * @worker is waiting on a completion !! 1764 * The caller is responsible for ensuring %POOL_DISASSOCIATED 2823 * check if not woken up soon. As kic !! 1765 * remains stable across this function. See the comments above the 2824 * wake it up explicitly. !! 1766 * flag definition for details. 2825 */ 1767 */ 2826 if (worker->task) !! 
1768 if (pool->flags & POOL_DISASSOCIATED) 2827 wake_up_process(worker->task) !! 1769 worker->flags |= WORKER_UNBOUND; 2828 1770 2829 raw_spin_unlock_irq(&pool->lock); !! 1771 /* successful, commit the pointer to idr */ >> 1772 spin_lock_irq(&pool->lock); >> 1773 idr_replace(&pool->worker_idr, worker, worker->id); >> 1774 spin_unlock_irq(&pool->lock); 2830 1775 2831 return worker; 1776 return worker; 2832 1777 2833 fail: 1778 fail: 2834 ida_free(&pool->worker_ida, id); !! 1779 if (id >= 0) { >> 1780 spin_lock_irq(&pool->lock); >> 1781 idr_remove(&pool->worker_idr, id); >> 1782 spin_unlock_irq(&pool->lock); >> 1783 } 2835 kfree(worker); 1784 kfree(worker); 2836 return NULL; 1785 return NULL; 2837 } 1786 } 2838 1787 2839 static void detach_dying_workers(struct list_ !! 1788 /** >> 1789 * start_worker - start a newly created worker >> 1790 * @worker: worker to start >> 1791 * >> 1792 * Make the pool aware of @worker and start it. >> 1793 * >> 1794 * CONTEXT: >> 1795 * spin_lock_irq(pool->lock). >> 1796 */ >> 1797 static void start_worker(struct worker *worker) 2840 { 1798 { 2841 struct worker *worker; !! 1799 worker->flags |= WORKER_STARTED; 2842 !! 1800 worker->pool->nr_workers++; 2843 list_for_each_entry(worker, cull_list !! 1801 worker_enter_idle(worker); 2844 detach_worker(worker); !! 1802 wake_up_process(worker->task); 2845 } 1803 } 2846 1804 2847 static void reap_dying_workers(struct list_he !! 1805 /** >> 1806 * create_and_start_worker - create and start a worker for a pool >> 1807 * @pool: the target pool >> 1808 * >> 1809 * Grab the managership of @pool and create and start a new worker for it. >> 1810 */ >> 1811 static int create_and_start_worker(struct worker_pool *pool) 2848 { 1812 { 2849 struct worker *worker, *tmp; !! 1813 struct worker *worker; >> 1814 >> 1815 mutex_lock(&pool->manager_mutex); 2850 1816 2851 list_for_each_entry_safe(worker, tmp, !! 1817 worker = create_worker(pool); 2852 list_del_init(&worker->entry) !! 1818 if (worker) { 2853 kthread_stop_put(worker->task !! 1819 spin_lock_irq(&pool->lock); 2854 kfree(worker); !! 1820 start_worker(worker); >> 1821 spin_unlock_irq(&pool->lock); 2855 } 1822 } >> 1823 >> 1824 mutex_unlock(&pool->manager_mutex); >> 1825 >> 1826 return worker ? 0 : -ENOMEM; 2856 } 1827 } 2857 1828 2858 /** 1829 /** 2859 * set_worker_dying - Tag a worker for destru !! 1830 * destroy_worker - destroy a workqueue worker 2860 * @worker: worker to be destroyed 1831 * @worker: worker to be destroyed 2861 * @list: transfer worker away from its pool- << 2862 * 1832 * 2863 * Tag @worker for destruction and adjust @po !! 1833 * Destroy @worker and adjust @pool stats accordingly. 2864 * should be idle. << 2865 * 1834 * 2866 * CONTEXT: 1835 * CONTEXT: 2867 * raw_spin_lock_irq(pool->lock). !! 1836 * spin_lock_irq(pool->lock) which is released and regrabbed. 2868 */ 1837 */ 2869 static void set_worker_dying(struct worker *w !! 1838 static void destroy_worker(struct worker *worker) 2870 { 1839 { 2871 struct worker_pool *pool = worker->po 1840 struct worker_pool *pool = worker->pool; 2872 1841 >> 1842 lockdep_assert_held(&pool->manager_mutex); 2873 lockdep_assert_held(&pool->lock); 1843 lockdep_assert_held(&pool->lock); 2874 lockdep_assert_held(&wq_pool_attach_m << 2875 1844 2876 /* sanity check frenzy */ 1845 /* sanity check frenzy */ 2877 if (WARN_ON(worker->current_work) || 1846 if (WARN_ON(worker->current_work) || 2878 WARN_ON(!list_empty(&worker->sche !! 
1847 WARN_ON(!list_empty(&worker->scheduled))) 2879 WARN_ON(!(worker->flags & WORKER_ << 2880 return; 1848 return; 2881 1849 2882 pool->nr_workers--; !! 1850 if (worker->flags & WORKER_STARTED) 2883 pool->nr_idle--; !! 1851 pool->nr_workers--; >> 1852 if (worker->flags & WORKER_IDLE) >> 1853 pool->nr_idle--; 2884 1854 2885 worker->flags |= WORKER_DIE; !! 1855 /* 2886 !! 1856 * Once WORKER_DIE is set, the kworker may destroy itself at any 2887 list_move(&worker->entry, list); !! 1857 * point. Pin to ensure the task stays until we're done with it. 2888 !! 1858 */ 2889 /* get an extra task struct reference << 2890 get_task_struct(worker->task); 1859 get_task_struct(worker->task); 2891 } << 2892 << 2893 /** << 2894 * idle_worker_timeout - check if some idle w << 2895 * @t: The pool's idle_timer that just expire << 2896 * << 2897 * The timer is armed in worker_enter_idle(). << 2898 * worker_leave_idle(), as a worker flicking << 2899 * pool is at the too_many_workers() tipping << 2900 * housekeeping overhead. Since IDLE_WORKER_T << 2901 * it expire and re-evaluate things from ther << 2902 */ << 2903 static void idle_worker_timeout(struct timer_ << 2904 { << 2905 struct worker_pool *pool = from_timer << 2906 bool do_cull = false; << 2907 1860 2908 if (work_pending(&pool->idle_cull_wor !! 1861 list_del_init(&worker->entry); 2909 return; !! 1862 worker->flags |= WORKER_DIE; 2910 << 2911 raw_spin_lock_irq(&pool->lock); << 2912 1863 2913 if (too_many_workers(pool)) { !! 1864 idr_remove(&pool->worker_idr, worker->id); 2914 struct worker *worker; << 2915 unsigned long expires; << 2916 1865 2917 /* idle_list is kept in LIFO !! 1866 spin_unlock_irq(&pool->lock); 2918 worker = list_last_entry(&poo << 2919 expires = worker->last_active << 2920 do_cull = !time_before(jiffie << 2921 1867 2922 if (!do_cull) !! 1868 kthread_stop(worker->task); 2923 mod_timer(&pool->idle !! 1869 put_task_struct(worker->task); 2924 } !! 1870 kfree(worker); 2925 raw_spin_unlock_irq(&pool->lock); << 2926 1871 2927 if (do_cull) !! 1872 spin_lock_irq(&pool->lock); 2928 queue_work(system_unbound_wq, << 2929 } 1873 } 2930 1874 2931 /** !! 1875 static void idle_worker_timeout(unsigned long __pool) 2932 * idle_cull_fn - cull workers that have been << 2933 * @work: the pool's work for handling these << 2934 * << 2935 * This goes through a pool's idle workers an << 2936 * idle for at least IDLE_WORKER_TIMEOUT seco << 2937 * << 2938 * We don't want to disturb isolated CPUs bec << 2939 * culled, so this also resets worker affinit << 2940 * context, hence the split between timer cal << 2941 */ << 2942 static void idle_cull_fn(struct work_struct * << 2943 { 1876 { 2944 struct worker_pool *pool = container_ !! 1877 struct worker_pool *pool = (void *)__pool; 2945 LIST_HEAD(cull_list); << 2946 1878 2947 /* !! 1879 spin_lock_irq(&pool->lock); 2948 * Grabbing wq_pool_attach_mutex here << 2949 * cannot proceed beyong set_pf_worke << 2950 * This is required as a previously-p << 2951 * set_worker_dying() has happened bu << 2952 */ << 2953 mutex_lock(&wq_pool_attach_mutex); << 2954 raw_spin_lock_irq(&pool->lock); << 2955 1880 2956 while (too_many_workers(pool)) { !! 1881 if (too_many_workers(pool)) { 2957 struct worker *worker; 1882 struct worker *worker; 2958 unsigned long expires; 1883 unsigned long expires; 2959 1884 2960 worker = list_last_entry(&poo !! 
1885 /* idle_list is kept in LIFO order, check the last one */ >> 1886 worker = list_entry(pool->idle_list.prev, struct worker, entry); 2961 expires = worker->last_active 1887 expires = worker->last_active + IDLE_WORKER_TIMEOUT; 2962 1888 2963 if (time_before(jiffies, expi !! 1889 if (time_before(jiffies, expires)) 2964 mod_timer(&pool->idle 1890 mod_timer(&pool->idle_timer, expires); 2965 break; !! 1891 else { >> 1892 /* it's been idle for too long, wake up manager */ >> 1893 pool->flags |= POOL_MANAGE_WORKERS; >> 1894 wake_up_worker(pool); 2966 } 1895 } 2967 << 2968 set_worker_dying(worker, &cul << 2969 } 1896 } 2970 1897 2971 raw_spin_unlock_irq(&pool->lock); !! 1898 spin_unlock_irq(&pool->lock); 2972 detach_dying_workers(&cull_list); << 2973 mutex_unlock(&wq_pool_attach_mutex); << 2974 << 2975 reap_dying_workers(&cull_list); << 2976 } 1899 } 2977 1900 2978 static void send_mayday(struct work_struct *w 1901 static void send_mayday(struct work_struct *work) 2979 { 1902 { 2980 struct pool_workqueue *pwq = get_work 1903 struct pool_workqueue *pwq = get_work_pwq(work); 2981 struct workqueue_struct *wq = pwq->wq 1904 struct workqueue_struct *wq = pwq->wq; 2982 1905 2983 lockdep_assert_held(&wq_mayday_lock); 1906 lockdep_assert_held(&wq_mayday_lock); 2984 1907 2985 if (!wq->rescuer) 1908 if (!wq->rescuer) 2986 return; 1909 return; 2987 1910 2988 /* mayday mayday mayday */ 1911 /* mayday mayday mayday */ 2989 if (list_empty(&pwq->mayday_node)) { 1912 if (list_empty(&pwq->mayday_node)) { 2990 /* 1913 /* 2991 * If @pwq is for an unbound 1914 * If @pwq is for an unbound wq, its base ref may be put at 2992 * any time due to an attribu 1915 * any time due to an attribute change. Pin @pwq until the 2993 * rescuer is done with it. 1916 * rescuer is done with it. 2994 */ 1917 */ 2995 get_pwq(pwq); 1918 get_pwq(pwq); 2996 list_add_tail(&pwq->mayday_no 1919 list_add_tail(&pwq->mayday_node, &wq->maydays); 2997 wake_up_process(wq->rescuer-> 1920 wake_up_process(wq->rescuer->task); 2998 pwq->stats[PWQ_STAT_MAYDAY]++ << 2999 } 1921 } 3000 } 1922 } 3001 1923 3002 static void pool_mayday_timeout(struct timer_ !! 1924 static void pool_mayday_timeout(unsigned long __pool) 3003 { 1925 { 3004 struct worker_pool *pool = from_timer !! 1926 struct worker_pool *pool = (void *)__pool; 3005 struct work_struct *work; 1927 struct work_struct *work; 3006 1928 3007 raw_spin_lock_irq(&pool->lock); !! 1929 spin_lock_irq(&wq_mayday_lock); /* for wq->maydays */ 3008 raw_spin_lock(&wq_mayday_lock); !! 1930 spin_lock(&pool->lock); 3009 1931 3010 if (need_to_create_worker(pool)) { 1932 if (need_to_create_worker(pool)) { 3011 /* 1933 /* 3012 * We've been trying to creat 1934 * We've been trying to create a new worker but 3013 * haven't been successful. 1935 * haven't been successful. We might be hitting an 3014 * allocation deadlock. Send 1936 * allocation deadlock. Send distress signals to 3015 * rescuers. 1937 * rescuers. 3016 */ 1938 */ 3017 list_for_each_entry(work, &po 1939 list_for_each_entry(work, &pool->worklist, entry) 3018 send_mayday(work); 1940 send_mayday(work); 3019 } 1941 } 3020 1942 3021 raw_spin_unlock(&wq_mayday_lock); !! 1943 spin_unlock(&pool->lock); 3022 raw_spin_unlock_irq(&pool->lock); !! 
1944 spin_unlock_irq(&wq_mayday_lock); 3023 1945 3024 mod_timer(&pool->mayday_timer, jiffie 1946 mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INTERVAL); 3025 } 1947 } 3026 1948 3027 /** 1949 /** 3028 * maybe_create_worker - create a new worker 1950 * maybe_create_worker - create a new worker if necessary 3029 * @pool: pool to create a new worker for 1951 * @pool: pool to create a new worker for 3030 * 1952 * 3031 * Create a new worker for @pool if necessary 1953 * Create a new worker for @pool if necessary. @pool is guaranteed to 3032 * have at least one idle worker on return fr 1954 * have at least one idle worker on return from this function. If 3033 * creating a new worker takes longer than MA 1955 * creating a new worker takes longer than MAYDAY_INTERVAL, mayday is 3034 * sent to all rescuers with works scheduled 1956 * sent to all rescuers with works scheduled on @pool to resolve 3035 * possible allocation deadlock. 1957 * possible allocation deadlock. 3036 * 1958 * 3037 * On return, need_to_create_worker() is guar 1959 * On return, need_to_create_worker() is guaranteed to be %false and 3038 * may_start_working() %true. 1960 * may_start_working() %true. 3039 * 1961 * 3040 * LOCKING: 1962 * LOCKING: 3041 * raw_spin_lock_irq(pool->lock) which may be !! 1963 * spin_lock_irq(pool->lock) which may be released and regrabbed 3042 * multiple times. Does GFP_KERNEL allocatio 1964 * multiple times. Does GFP_KERNEL allocations. Called only from 3043 * manager. 1965 * manager. 3044 */ 1966 */ 3045 static void maybe_create_worker(struct worker 1967 static void maybe_create_worker(struct worker_pool *pool) 3046 __releases(&pool->lock) 1968 __releases(&pool->lock) 3047 __acquires(&pool->lock) 1969 __acquires(&pool->lock) 3048 { 1970 { >> 1971 if (!need_to_create_worker(pool)) >> 1972 return; 3049 restart: 1973 restart: 3050 raw_spin_unlock_irq(&pool->lock); !! 1974 spin_unlock_irq(&pool->lock); 3051 1975 3052 /* if we don't make progress in MAYDA 1976 /* if we don't make progress in MAYDAY_INITIAL_TIMEOUT, call for help */ 3053 mod_timer(&pool->mayday_timer, jiffie 1977 mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT); 3054 1978 3055 while (true) { 1979 while (true) { 3056 if (create_worker(pool) || !n !! 1980 struct worker *worker; >> 1981 >> 1982 worker = create_worker(pool); >> 1983 if (worker) { >> 1984 del_timer_sync(&pool->mayday_timer); >> 1985 spin_lock_irq(&pool->lock); >> 1986 start_worker(worker); >> 1987 if (WARN_ON_ONCE(need_to_create_worker(pool))) >> 1988 goto restart; >> 1989 return; >> 1990 } >> 1991 >> 1992 if (!need_to_create_worker(pool)) 3057 break; 1993 break; 3058 1994 3059 schedule_timeout_interruptibl !! 1995 __set_current_state(TASK_INTERRUPTIBLE); >> 1996 schedule_timeout(CREATE_COOLDOWN); 3060 1997 3061 if (!need_to_create_worker(po 1998 if (!need_to_create_worker(pool)) 3062 break; 1999 break; 3063 } 2000 } 3064 2001 3065 del_timer_sync(&pool->mayday_timer); 2002 del_timer_sync(&pool->mayday_timer); 3066 raw_spin_lock_irq(&pool->lock); !! 2003 spin_lock_irq(&pool->lock); 3067 /* << 3068 * This is necessary even after a new << 3069 * created as @pool->lock was dropped << 3070 * already become busy. 
<< 3071 */ << 3072 if (need_to_create_worker(pool)) 2004 if (need_to_create_worker(pool)) 3073 goto restart; 2005 goto restart; >> 2006 return; >> 2007 } >> 2008 >> 2009 /** >> 2010 * maybe_destroy_worker - destroy workers which have been idle for a while >> 2011 * @pool: pool to destroy workers for >> 2012 * >> 2013 * Destroy @pool workers which have been idle for longer than >> 2014 * IDLE_WORKER_TIMEOUT. >> 2015 * >> 2016 * LOCKING: >> 2017 * spin_lock_irq(pool->lock) which may be released and regrabbed >> 2018 * multiple times. Called only from manager. >> 2019 */ >> 2020 static void maybe_destroy_workers(struct worker_pool *pool) >> 2021 { >> 2022 while (too_many_workers(pool)) { >> 2023 struct worker *worker; >> 2024 unsigned long expires; >> 2025 >> 2026 worker = list_entry(pool->idle_list.prev, struct worker, entry); >> 2027 expires = worker->last_active + IDLE_WORKER_TIMEOUT; >> 2028 >> 2029 if (time_before(jiffies, expires)) { >> 2030 mod_timer(&pool->idle_timer, expires); >> 2031 break; >> 2032 } >> 2033 >> 2034 destroy_worker(worker); >> 2035 } 3074 } 2036 } 3075 2037 3076 /** 2038 /** 3077 * manage_workers - manage worker pool 2039 * manage_workers - manage worker pool 3078 * @worker: self 2040 * @worker: self 3079 * 2041 * 3080 * Assume the manager role and manage the wor 2042 * Assume the manager role and manage the worker pool @worker belongs 3081 * to. At any given time, there can be only 2043 * to. At any given time, there can be only zero or one manager per 3082 * pool. The exclusion is handled automatica 2044 * pool. The exclusion is handled automatically by this function. 3083 * 2045 * 3084 * The caller can safely start processing wor 2046 * The caller can safely start processing works on false return. On 3085 * true return, it's guaranteed that need_to_ 2047 * true return, it's guaranteed that need_to_create_worker() is false 3086 * and may_start_working() is true. 2048 * and may_start_working() is true. 3087 * 2049 * 3088 * CONTEXT: 2050 * CONTEXT: 3089 * raw_spin_lock_irq(pool->lock) which may be !! 2051 * spin_lock_irq(pool->lock) which may be released and regrabbed 3090 * multiple times. Does GFP_KERNEL allocatio 2052 * multiple times. Does GFP_KERNEL allocations. 3091 * 2053 * 3092 * Return: !! 2054 * RETURNS: 3093 * %false if the pool doesn't need management 2055 * %false if the pool doesn't need management and the caller can safely 3094 * start processing works, %true if managemen 2056 * start processing works, %true if management function was performed and 3095 * the conditions that the caller verified be 2057 * the conditions that the caller verified before calling the function may 3096 * no longer be true. 2058 * no longer be true. 3097 */ 2059 */ 3098 static bool manage_workers(struct worker *wor 2060 static bool manage_workers(struct worker *worker) 3099 { 2061 { 3100 struct worker_pool *pool = worker->po 2062 struct worker_pool *pool = worker->pool; 3101 2063 3102 if (pool->flags & POOL_MANAGER_ACTIVE !! 2064 /* >> 2065 * Managership is governed by two mutexes - manager_arb and >> 2066 * manager_mutex. manager_arb handles arbitration of manager role. >> 2067 * Anyone who successfully grabs manager_arb wins the arbitration >> 2068 * and becomes the manager. mutex_trylock() on pool->manager_arb >> 2069 * failure while holding pool->lock reliably indicates that someone >> 2070 * else is managing the pool and the worker which failed trylock >> 2071 * can proceed to executing work items. 
This means that anyone >> 2072 * grabbing manager_arb is responsible for actually performing >> 2073 * manager duties. If manager_arb is grabbed and released without >> 2074 * actual management, the pool may stall indefinitely. >> 2075 * >> 2076 * manager_mutex is used for exclusion of actual management >> 2077 * operations. The holder of manager_mutex can be sure that none >> 2078 * of management operations, including creation and destruction of >> 2079 * workers, won't take place until the mutex is released. Because >> 2080 * manager_mutex doesn't interfere with manager role arbitration, >> 2081 * it is guaranteed that the pool's management, while may be >> 2082 * delayed, won't be disturbed by someone else grabbing >> 2083 * manager_mutex. >> 2084 */ >> 2085 if (!mutex_trylock(&pool->manager_arb)) 3103 return false; 2086 return false; 3104 2087 3105 pool->flags |= POOL_MANAGER_ACTIVE; !! 2088 /* 3106 pool->manager = worker; !! 2089 * With manager arbitration won, manager_mutex would be free in >> 2090 * most cases. trylock first without dropping @pool->lock. >> 2091 */ >> 2092 if (unlikely(!mutex_trylock(&pool->manager_mutex))) { >> 2093 spin_unlock_irq(&pool->lock); >> 2094 mutex_lock(&pool->manager_mutex); >> 2095 spin_lock_irq(&pool->lock); >> 2096 } >> 2097 >> 2098 pool->flags &= ~POOL_MANAGE_WORKERS; 3107 2099 >> 2100 /* >> 2101 * Destroy and then create so that may_start_working() is true >> 2102 * on return. >> 2103 */ >> 2104 maybe_destroy_workers(pool); 3108 maybe_create_worker(pool); 2105 maybe_create_worker(pool); 3109 2106 3110 pool->manager = NULL; !! 2107 mutex_unlock(&pool->manager_mutex); 3111 pool->flags &= ~POOL_MANAGER_ACTIVE; !! 2108 mutex_unlock(&pool->manager_arb); 3112 rcuwait_wake_up(&manager_wait); << 3113 return true; 2109 return true; 3114 } 2110 } 3115 2111 3116 /** 2112 /** 3117 * process_one_work - process single work 2113 * process_one_work - process single work 3118 * @worker: self 2114 * @worker: self 3119 * @work: work to process 2115 * @work: work to process 3120 * 2116 * 3121 * Process @work. This function contains all 2117 * Process @work. This function contains all the logics necessary to 3122 * process a single work including synchroniz 2118 * process a single work including synchronization against and 3123 * interaction with other workers on the same 2119 * interaction with other workers on the same cpu, queueing and 3124 * flushing. As long as context requirement 2120 * flushing. As long as context requirement is met, any worker can 3125 * call this function to process a work. 2121 * call this function to process a work. 3126 * 2122 * 3127 * CONTEXT: 2123 * CONTEXT: 3128 * raw_spin_lock_irq(pool->lock) which is rel !! 2124 * spin_lock_irq(pool->lock) which is released and regrabbed. 3129 */ 2125 */ 3130 static void process_one_work(struct worker *w 2126 static void process_one_work(struct worker *worker, struct work_struct *work) 3131 __releases(&pool->lock) 2127 __releases(&pool->lock) 3132 __acquires(&pool->lock) 2128 __acquires(&pool->lock) 3133 { 2129 { 3134 struct pool_workqueue *pwq = get_work 2130 struct pool_workqueue *pwq = get_work_pwq(work); 3135 struct worker_pool *pool = worker->po 2131 struct worker_pool *pool = worker->pool; 3136 unsigned long work_data; !! 2132 bool cpu_intensive = pwq->wq->flags & WQ_CPU_INTENSIVE; 3137 int lockdep_start_depth, rcu_start_de !! 2133 int work_color; 3138 bool bh_draining = pool->flags & POOL !! 
2134 struct worker *collision; 3139 #ifdef CONFIG_LOCKDEP 2135 #ifdef CONFIG_LOCKDEP 3140 /* 2136 /* 3141 * It is permissible to free the stru 2137 * It is permissible to free the struct work_struct from 3142 * inside the function that is called 2138 * inside the function that is called from it, this we need to 3143 * take into account for lockdep too. 2139 * take into account for lockdep too. To avoid bogus "held 3144 * lock freed" warnings as well as pr 2140 * lock freed" warnings as well as problems when looking into 3145 * work->lockdep_map, make a copy and 2141 * work->lockdep_map, make a copy and use that here. 3146 */ 2142 */ 3147 struct lockdep_map lockdep_map; 2143 struct lockdep_map lockdep_map; 3148 2144 3149 lockdep_copy_map(&lockdep_map, &work- 2145 lockdep_copy_map(&lockdep_map, &work->lockdep_map); 3150 #endif 2146 #endif 3151 /* ensure we're on the correct CPU */ !! 2147 /* 3152 WARN_ON_ONCE(!(pool->flags & POOL_DIS !! 2148 * Ensure we're on the correct CPU. DISASSOCIATED test is >> 2149 * necessary to avoid spurious warnings from rescuers servicing the >> 2150 * unbound or a disassociated pool. >> 2151 */ >> 2152 WARN_ON_ONCE(!(worker->flags & WORKER_UNBOUND) && >> 2153 !(pool->flags & POOL_DISASSOCIATED) && 3153 raw_smp_processor_id() ! 2154 raw_smp_processor_id() != pool->cpu); 3154 2155 >> 2156 /* >> 2157 * A single work shouldn't be executed concurrently by >> 2158 * multiple workers on a single cpu. Check whether anyone is >> 2159 * already processing the work. If so, defer the work to the >> 2160 * currently executing one. >> 2161 */ >> 2162 collision = find_worker_executing_work(pool, work); >> 2163 if (unlikely(collision)) { >> 2164 move_linked_works(work, &collision->scheduled, NULL); >> 2165 return; >> 2166 } >> 2167 3155 /* claim and dequeue */ 2168 /* claim and dequeue */ 3156 debug_work_deactivate(work); 2169 debug_work_deactivate(work); 3157 hash_add(pool->busy_hash, &worker->he 2170 hash_add(pool->busy_hash, &worker->hentry, (unsigned long)work); 3158 worker->current_work = work; 2171 worker->current_work = work; 3159 worker->current_func = work->func; 2172 worker->current_func = work->func; 3160 worker->current_pwq = pwq; 2173 worker->current_pwq = pwq; 3161 if (worker->task) !! 2174 work_color = get_work_color(work); 3162 worker->current_at = worker-> << 3163 work_data = *work_data_bits(work); << 3164 worker->current_color = get_work_colo << 3165 << 3166 /* << 3167 * Record wq name for cmdline and deb << 3168 * overridden through set_worker_desc << 3169 */ << 3170 strscpy(worker->desc, pwq->wq->name, << 3171 2175 3172 list_del_init(&work->entry); 2176 list_del_init(&work->entry); 3173 2177 3174 /* 2178 /* 3175 * CPU intensive works don't particip !! 2179 * CPU intensive works don't participate in concurrency 3176 * They're the scheduler's responsibi !! 2180 * management. They're the scheduler's responsibility. 3177 * of concurrency management and the << 3178 * execution of the pending work item << 3179 */ 2181 */ 3180 if (unlikely(pwq->wq->flags & WQ_CPU_ !! 2182 if (unlikely(cpu_intensive)) 3181 worker_set_flags(worker, WORK !! 2183 worker_set_flags(worker, WORKER_CPU_INTENSIVE, true); 3182 2184 3183 /* 2185 /* 3184 * Kick @pool if necessary. It's alwa !! 2186 * Unbound pool isn't concurrency managed and work items should be 3185 * since nr_running would always be > !! 2187 * executed ASAP. Wake up another worker if necessary. 3186 * chain execution of the pending wor << 3187 * workers such as the UNBOUND and CP << 3188 */ 2188 */ 3189 kick_pool(pool); !! 
2189 if ((worker->flags & WORKER_UNBOUND) && need_more_worker(pool)) >> 2190 wake_up_worker(pool); 3190 2191 3191 /* 2192 /* 3192 * Record the last pool and clear PEN 2193 * Record the last pool and clear PENDING which should be the last 3193 * update to @work. Also, do this in 2194 * update to @work. Also, do this inside @pool->lock so that 3194 * PENDING and queued state changes h 2195 * PENDING and queued state changes happen together while IRQ is 3195 * disabled. 2196 * disabled. 3196 */ 2197 */ 3197 set_work_pool_and_clear_pending(work, !! 2198 set_work_pool_and_clear_pending(work, pool->id); 3198 2199 3199 pwq->stats[PWQ_STAT_STARTED]++; !! 2200 spin_unlock_irq(&pool->lock); 3200 raw_spin_unlock_irq(&pool->lock); << 3201 2201 3202 rcu_start_depth = rcu_preempt_depth() !! 2202 lock_map_acquire_read(&pwq->wq->lockdep_map); 3203 lockdep_start_depth = lockdep_depth(c << 3204 /* see drain_dead_softirq_workfn() */ << 3205 if (!bh_draining) << 3206 lock_map_acquire(&pwq->wq->lo << 3207 lock_map_acquire(&lockdep_map); 2203 lock_map_acquire(&lockdep_map); 3208 /* << 3209 * Strictly speaking we should mark t << 3210 * any locks, that is, before these t << 3211 * << 3212 * However, that would result in: << 3213 * << 3214 * A(W1) << 3215 * WFC(C) << 3216 * A(W1) << 3217 * C(C) << 3218 * << 3219 * Which would create W1->C->W1 depen << 3220 * actual deadlock possible. There ar << 3221 * read-recursive acquire on the work << 3222 * hit the lockdep limitation on recu << 3223 * these locks. << 3224 * << 3225 * AFAICT there is no possible deadlo << 3226 * flush_work() and complete() primit << 3227 * workqueues), so hiding them isn't << 3228 */ << 3229 lockdep_invariant_state(true); << 3230 trace_workqueue_execute_start(work); 2204 trace_workqueue_execute_start(work); 3231 worker->current_func(work); 2205 worker->current_func(work); 3232 /* 2206 /* 3233 * While we must be careful to not us 2207 * While we must be careful to not use "work" after this, the trace 3234 * point will only record its address 2208 * point will only record its address. 3235 */ 2209 */ 3236 trace_workqueue_execute_end(work, wor !! 2210 trace_workqueue_execute_end(work); 3237 pwq->stats[PWQ_STAT_COMPLETED]++; << 3238 lock_map_release(&lockdep_map); 2211 lock_map_release(&lockdep_map); 3239 if (!bh_draining) !! 2212 lock_map_release(&pwq->wq->lockdep_map); 3240 lock_map_release(&pwq->wq->lo << 3241 2213 3242 if (unlikely((worker->task && in_atom !! 2214 if (unlikely(in_atomic() || lockdep_depth(current) > 0)) { 3243 lockdep_depth(current) ! !! 2215 pr_err("BUG: workqueue leaked lock or atomic: %s/0x%08x/%d\n" 3244 rcu_preempt_depth() != r !! 2216 " last function: %pf\n", 3245 pr_err("BUG: workqueue leaked !! 2217 current->comm, preempt_count(), task_pid_nr(current), 3246 " preempt=0x%08x l << 3247 current->comm, task_pi << 3248 lockdep_start_depth, l << 3249 rcu_start_depth, rcu_p << 3250 worker->current_func); 2218 worker->current_func); 3251 debug_show_held_locks(current 2219 debug_show_held_locks(current); 3252 dump_stack(); 2220 dump_stack(); 3253 } 2221 } 3254 2222 3255 /* 2223 /* 3256 * The following prevents a kworker f !! 
2224 * The following prevents a kworker from hogging CPU on !PREEMPT 3257 * kernels, where a requeueing work i 2225 * kernels, where a requeueing work item waiting for something to 3258 * happen could deadlock with stop_ma 2226 * happen could deadlock with stop_machine as such work item could 3259 * indefinitely requeue itself while 2227 * indefinitely requeue itself while all other CPUs are trapped in 3260 * stop_machine. At the same time, re !! 2228 * stop_machine. 3261 * the same condition doesn't freeze << 3262 */ 2229 */ 3263 if (worker->task) !! 2230 cond_resched(); 3264 cond_resched(); << 3265 << 3266 raw_spin_lock_irq(&pool->lock); << 3267 2231 3268 /* !! 2232 spin_lock_irq(&pool->lock); 3269 * In addition to %WQ_CPU_INTENSIVE, << 3270 * CPU intensive by wq_worker_tick() << 3271 * wq_cpu_intensive_thresh_us. Clear << 3272 */ << 3273 worker_clr_flags(worker, WORKER_CPU_I << 3274 2233 3275 /* tag the worker for identification !! 2234 /* clear cpu intensive status */ 3276 worker->last_func = worker->current_f !! 2235 if (unlikely(cpu_intensive)) >> 2236 worker_clr_flags(worker, WORKER_CPU_INTENSIVE); 3277 2237 3278 /* we're done with it, release */ 2238 /* we're done with it, release */ 3279 hash_del(&worker->hentry); 2239 hash_del(&worker->hentry); 3280 worker->current_work = NULL; 2240 worker->current_work = NULL; 3281 worker->current_func = NULL; 2241 worker->current_func = NULL; 3282 worker->current_pwq = NULL; 2242 worker->current_pwq = NULL; 3283 worker->current_color = INT_MAX; !! 2243 worker->desc_valid = false; 3284 !! 2244 pwq_dec_nr_in_flight(pwq, work_color); 3285 /* must be the last step, see the fun << 3286 pwq_dec_nr_in_flight(pwq, work_data); << 3287 } 2245 } 3288 2246 3289 /** 2247 /** 3290 * process_scheduled_works - process schedule 2248 * process_scheduled_works - process scheduled works 3291 * @worker: self 2249 * @worker: self 3292 * 2250 * 3293 * Process all scheduled works. Please note 2251 * Process all scheduled works. Please note that the scheduled list 3294 * may change while processing a work, so thi 2252 * may change while processing a work, so this function repeatedly 3295 * fetches a work from the top and executes i 2253 * fetches a work from the top and executes it. 3296 * 2254 * 3297 * CONTEXT: 2255 * CONTEXT: 3298 * raw_spin_lock_irq(pool->lock) which may be !! 2256 * spin_lock_irq(pool->lock) which may be released and regrabbed 3299 * multiple times. 2257 * multiple times. 3300 */ 2258 */ 3301 static void process_scheduled_works(struct wo 2259 static void process_scheduled_works(struct worker *worker) 3302 { 2260 { 3303 struct work_struct *work; !! 2261 while (!list_empty(&worker->scheduled)) { 3304 bool first = true; !! 2262 struct work_struct *work = list_first_entry(&worker->scheduled, 3305 !! 2263 struct work_struct, entry); 3306 while ((work = list_first_entry_or_nu << 3307 << 3308 if (first) { << 3309 worker->pool->watchdo << 3310 first = false; << 3311 } << 3312 process_one_work(worker, work 2264 process_one_work(worker, work); 3313 } 2265 } 3314 } 2266 } 3315 2267 3316 static void set_pf_worker(bool val) << 3317 { << 3318 mutex_lock(&wq_pool_attach_mutex); << 3319 if (val) << 3320 current->flags |= PF_WQ_WORKE << 3321 else << 3322 current->flags &= ~PF_WQ_WORK << 3323 mutex_unlock(&wq_pool_attach_mutex); << 3324 } << 3325 << 3326 /** 2268 /** 3327 * worker_thread - the worker thread function 2269 * worker_thread - the worker thread function 3328 * @__worker: self 2270 * @__worker: self 3329 * 2271 * 3330 * The worker thread function. 
All workers b 2272 * The worker thread function. All workers belong to a worker_pool - 3331 * either a per-cpu one or dynamic unbound on 2273 * either a per-cpu one or dynamic unbound one. These workers process all 3332 * work items regardless of their specific ta 2274 * work items regardless of their specific target workqueue. The only 3333 * exception is work items which belong to wo 2275 * exception is work items which belong to workqueues with a rescuer which 3334 * will be explained in rescuer_thread(). 2276 * will be explained in rescuer_thread(). 3335 * << 3336 * Return: 0 << 3337 */ 2277 */ 3338 static int worker_thread(void *__worker) 2278 static int worker_thread(void *__worker) 3339 { 2279 { 3340 struct worker *worker = __worker; 2280 struct worker *worker = __worker; 3341 struct worker_pool *pool = worker->po 2281 struct worker_pool *pool = worker->pool; 3342 2282 3343 /* tell the scheduler that this is a 2283 /* tell the scheduler that this is a workqueue worker */ 3344 set_pf_worker(true); !! 2284 worker->task->flags |= PF_WQ_WORKER; 3345 woke_up: 2285 woke_up: 3346 raw_spin_lock_irq(&pool->lock); !! 2286 spin_lock_irq(&pool->lock); 3347 2287 3348 /* am I supposed to die? */ 2288 /* am I supposed to die? */ 3349 if (unlikely(worker->flags & WORKER_D 2289 if (unlikely(worker->flags & WORKER_DIE)) { 3350 raw_spin_unlock_irq(&pool->lo !! 2290 spin_unlock_irq(&pool->lock); 3351 set_pf_worker(false); !! 2291 WARN_ON_ONCE(!list_empty(&worker->entry)); 3352 /* !! 2292 worker->task->flags &= ~PF_WQ_WORKER; 3353 * The worker is dead and PF_ << 3354 * shouldn't be accessed, res << 3355 */ << 3356 worker->pool = NULL; << 3357 ida_free(&pool->worker_ida, w << 3358 return 0; 2293 return 0; 3359 } 2294 } 3360 2295 3361 worker_leave_idle(worker); 2296 worker_leave_idle(worker); 3362 recheck: 2297 recheck: 3363 /* no more worker necessary? */ 2298 /* no more worker necessary? */ 3364 if (!need_more_worker(pool)) 2299 if (!need_more_worker(pool)) 3365 goto sleep; 2300 goto sleep; 3366 2301 3367 /* do we need to manage? */ 2302 /* do we need to manage? */ 3368 if (unlikely(!may_start_working(pool) 2303 if (unlikely(!may_start_working(pool)) && manage_workers(worker)) 3369 goto recheck; 2304 goto recheck; 3370 2305 3371 /* 2306 /* 3372 * ->scheduled list can only be fille 2307 * ->scheduled list can only be filled while a worker is 3373 * preparing to process a work or act 2308 * preparing to process a work or actually processing it. 3374 * Make sure nobody diddled with it w 2309 * Make sure nobody diddled with it while I was sleeping. 3375 */ 2310 */ 3376 WARN_ON_ONCE(!list_empty(&worker->sch 2311 WARN_ON_ONCE(!list_empty(&worker->scheduled)); 3377 2312 3378 /* 2313 /* 3379 * Finish PREP stage. We're guarante 2314 * Finish PREP stage. We're guaranteed to have at least one idle 3380 * worker or that someone else has al 2315 * worker or that someone else has already assumed the manager 3381 * role. This is where @worker start 2316 * role. This is where @worker starts participating in concurrency 3382 * management if applicable and concu 2317 * management if applicable and concurrency management is restored 3383 * after being rebound. See rebind_w 2318 * after being rebound. See rebind_workers() for details. 
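	 * (Both flags are part of WORKER_NOT_RUNNING; dropping the last
	 * such flag is what makes this worker count toward
	 * pool->nr_running again.)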
3384 */ 2319 */ 3385 worker_clr_flags(worker, WORKER_PREP 2320 worker_clr_flags(worker, WORKER_PREP | WORKER_REBOUND); 3386 2321 3387 do { 2322 do { 3388 struct work_struct *work = 2323 struct work_struct *work = 3389 list_first_entry(&poo 2324 list_first_entry(&pool->worklist, 3390 stru 2325 struct work_struct, entry); 3391 2326 3392 if (assign_work(work, worker, !! 2327 if (likely(!(*work_data_bits(work) & WORK_STRUCT_LINKED))) { >> 2328 /* optimization path, not strictly necessary */ >> 2329 process_one_work(worker, work); >> 2330 if (unlikely(!list_empty(&worker->scheduled))) >> 2331 process_scheduled_works(worker); >> 2332 } else { >> 2333 move_linked_works(work, &worker->scheduled, NULL); 3393 process_scheduled_wor 2334 process_scheduled_works(worker); >> 2335 } 3394 } while (keep_working(pool)); 2336 } while (keep_working(pool)); 3395 2337 3396 worker_set_flags(worker, WORKER_PREP) !! 2338 worker_set_flags(worker, WORKER_PREP, false); 3397 sleep: 2339 sleep: >> 2340 if (unlikely(need_to_manage_workers(pool)) && manage_workers(worker)) >> 2341 goto recheck; >> 2342 3398 /* 2343 /* 3399 * pool->lock is held and there's no 2344 * pool->lock is held and there's no work to process and no need to 3400 * manage, sleep. Workers are woken 2345 * manage, sleep. Workers are woken up only while holding 3401 * pool->lock or from local cpu, so s 2346 * pool->lock or from local cpu, so setting the current state 3402 * before releasing pool->lock is eno 2347 * before releasing pool->lock is enough to prevent losing any 3403 * event. 2348 * event. 3404 */ 2349 */ 3405 worker_enter_idle(worker); 2350 worker_enter_idle(worker); 3406 __set_current_state(TASK_IDLE); !! 2351 __set_current_state(TASK_INTERRUPTIBLE); 3407 raw_spin_unlock_irq(&pool->lock); !! 2352 spin_unlock_irq(&pool->lock); 3408 schedule(); 2353 schedule(); 3409 goto woke_up; 2354 goto woke_up; 3410 } 2355 } 3411 2356 3412 /** 2357 /** 3413 * rescuer_thread - the rescuer thread functi 2358 * rescuer_thread - the rescuer thread function 3414 * @__rescuer: self 2359 * @__rescuer: self 3415 * 2360 * 3416 * Workqueue rescuer thread function. There' 2361 * Workqueue rescuer thread function. There's one rescuer for each 3417 * workqueue which has WQ_MEM_RECLAIM set. 2362 * workqueue which has WQ_MEM_RECLAIM set. 3418 * 2363 * 3419 * Regular work processing on a pool may bloc 2364 * Regular work processing on a pool may block trying to create a new 3420 * worker which uses GFP_KERNEL allocation wh 2365 * worker which uses GFP_KERNEL allocation which has slight chance of 3421 * developing into deadlock if some works cur 2366 * developing into deadlock if some works currently on the same queue 3422 * need to be processed to satisfy the GFP_KE 2367 * need to be processed to satisfy the GFP_KERNEL allocation. This is 3423 * the problem rescuer solves. 2368 * the problem rescuer solves. 3424 * 2369 * 3425 * When such condition is possible, the pool 2370 * When such condition is possible, the pool summons rescuers of all 3426 * workqueues which have works queued on the 2371 * workqueues which have works queued on the pool and let them process 3427 * those works so that forward progress can b 2372 * those works so that forward progress can be guaranteed. 3428 * 2373 * 3429 * This should happen rarely. 2374 * This should happen rarely. 
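 *
 * For context, a rescuer exists only for workqueues created with
 * WQ_MEM_RECLAIM. A minimal sketch, where the name "my-reclaim-wq" is
 * only an illustrative placeholder:
 *
 *	wq = alloc_workqueue("my-reclaim-wq", WQ_MEM_RECLAIM, 0);
 *
 * Work items on such a workqueue can make forward progress even under
 * memory pressure, at the cost of one rescuer kthread per workqueue.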
3430 * << 3431 * Return: 0 << 3432 */ 2375 */ 3433 static int rescuer_thread(void *__rescuer) 2376 static int rescuer_thread(void *__rescuer) 3434 { 2377 { 3435 struct worker *rescuer = __rescuer; 2378 struct worker *rescuer = __rescuer; 3436 struct workqueue_struct *wq = rescuer 2379 struct workqueue_struct *wq = rescuer->rescue_wq; >> 2380 struct list_head *scheduled = &rescuer->scheduled; 3437 bool should_stop; 2381 bool should_stop; 3438 2382 3439 set_user_nice(current, RESCUER_NICE_L 2383 set_user_nice(current, RESCUER_NICE_LEVEL); 3440 2384 3441 /* 2385 /* 3442 * Mark rescuer as worker too. As WO 2386 * Mark rescuer as worker too. As WORKER_PREP is never cleared, it 3443 * doesn't participate in concurrency 2387 * doesn't participate in concurrency management. 3444 */ 2388 */ 3445 set_pf_worker(true); !! 2389 rescuer->task->flags |= PF_WQ_WORKER; 3446 repeat: 2390 repeat: 3447 set_current_state(TASK_IDLE); !! 2391 set_current_state(TASK_INTERRUPTIBLE); 3448 2392 3449 /* 2393 /* 3450 * By the time the rescuer is request 2394 * By the time the rescuer is requested to stop, the workqueue 3451 * shouldn't have any work pending, b 2395 * shouldn't have any work pending, but @wq->maydays may still have 3452 * pwq(s) queued. This can happen by 2396 * pwq(s) queued. This can happen by non-rescuer workers consuming 3453 * all the work items before the resc 2397 * all the work items before the rescuer got to them. Go through 3454 * @wq->maydays processing before act 2398 * @wq->maydays processing before acting on should_stop so that the 3455 * list is always empty on exit. 2399 * list is always empty on exit. 3456 */ 2400 */ 3457 should_stop = kthread_should_stop(); 2401 should_stop = kthread_should_stop(); 3458 2402 3459 /* see whether any pwq is asking for 2403 /* see whether any pwq is asking for help */ 3460 raw_spin_lock_irq(&wq_mayday_lock); !! 2404 spin_lock_irq(&wq_mayday_lock); 3461 2405 3462 while (!list_empty(&wq->maydays)) { 2406 while (!list_empty(&wq->maydays)) { 3463 struct pool_workqueue *pwq = 2407 struct pool_workqueue *pwq = list_first_entry(&wq->maydays, 3464 struc 2408 struct pool_workqueue, mayday_node); 3465 struct worker_pool *pool = pw 2409 struct worker_pool *pool = pwq->pool; 3466 struct work_struct *work, *n; 2410 struct work_struct *work, *n; 3467 2411 3468 __set_current_state(TASK_RUNN 2412 __set_current_state(TASK_RUNNING); 3469 list_del_init(&pwq->mayday_no 2413 list_del_init(&pwq->mayday_node); 3470 2414 3471 raw_spin_unlock_irq(&wq_mayda !! 2415 spin_unlock_irq(&wq_mayday_lock); 3472 2416 3473 worker_attach_to_pool(rescuer !! 2417 /* migrate to the target cpu if possible */ 3474 !! 2418 worker_maybe_bind_and_lock(pool); 3475 raw_spin_lock_irq(&pool->lock !! 2419 rescuer->pool = pool; 3476 2420 3477 /* 2421 /* 3478 * Slurp in all works issued 2422 * Slurp in all works issued via this workqueue and 3479 * process'em. 2423 * process'em. 3480 */ 2424 */ 3481 WARN_ON_ONCE(!list_empty(&res 2425 WARN_ON_ONCE(!list_empty(&rescuer->scheduled)); 3482 list_for_each_entry_safe(work !! 2426 list_for_each_entry_safe(work, n, &pool->worklist, entry) 3483 if (get_work_pwq(work !! 2427 if (get_work_pwq(work) == pwq) 3484 assign_work(work, !! 2428 move_linked_works(work, scheduled, &n); 3485 pwq->stats[PW << 3486 } << 3487 2429 3488 if (!list_empty(&rescuer->sch !! 2430 process_scheduled_works(rescuer); 3489 process_scheduled_wor << 3490 << 3491 /* << 3492 * The above executio << 3493 * have created more << 3494 * pwq_activate_first << 3495 * queueing. 
Let's p << 3496 * that such back-to- << 3497 * being used to reli << 3498 * incur MAYDAY_INTER << 3499 */ << 3500 if (pwq->nr_active && << 3501 raw_spin_lock << 3502 /* << 3503 * Queue iff << 3504 * and somebo << 3505 */ << 3506 if (wq->rescu << 3507 get_p << 3508 list_ << 3509 } << 3510 raw_spin_unlo << 3511 } << 3512 } << 3513 2431 3514 /* 2432 /* 3515 * Put the reference grabbed 2433 * Put the reference grabbed by send_mayday(). @pool won't 3516 * go away while we're still !! 2434 * go away while we're holding its lock. 3517 */ 2435 */ 3518 put_pwq(pwq); 2436 put_pwq(pwq); 3519 2437 3520 /* 2438 /* 3521 * Leave this pool. Notify re !! 2439 * Leave this pool. If keep_working() is %true, notify a 3522 * with 0 concurrency and sta !! 2440 * regular worker; otherwise, we end up with 0 concurrency >> 2441 * and stalling the execution. 3523 */ 2442 */ 3524 kick_pool(pool); !! 2443 if (keep_working(pool)) 3525 !! 2444 wake_up_worker(pool); 3526 raw_spin_unlock_irq(&pool->lo << 3527 << 3528 worker_detach_from_pool(rescu << 3529 2445 3530 raw_spin_lock_irq(&wq_mayday_ !! 2446 rescuer->pool = NULL; >> 2447 spin_unlock(&pool->lock); >> 2448 spin_lock(&wq_mayday_lock); 3531 } 2449 } 3532 2450 3533 raw_spin_unlock_irq(&wq_mayday_lock); !! 2451 spin_unlock_irq(&wq_mayday_lock); 3534 2452 3535 if (should_stop) { 2453 if (should_stop) { 3536 __set_current_state(TASK_RUNN 2454 __set_current_state(TASK_RUNNING); 3537 set_pf_worker(false); !! 2455 rescuer->task->flags &= ~PF_WQ_WORKER; 3538 return 0; 2456 return 0; 3539 } 2457 } 3540 2458 3541 /* rescuers should never participate 2459 /* rescuers should never participate in concurrency management */ 3542 WARN_ON_ONCE(!(rescuer->flags & WORKE 2460 WARN_ON_ONCE(!(rescuer->flags & WORKER_NOT_RUNNING)); 3543 schedule(); 2461 schedule(); 3544 goto repeat; 2462 goto repeat; 3545 } 2463 } 3546 2464 3547 static void bh_worker(struct worker *worker) << 3548 { << 3549 struct worker_pool *pool = worker->po << 3550 int nr_restarts = BH_WORKER_RESTARTS; << 3551 unsigned long end = jiffies + BH_WORK << 3552 << 3553 raw_spin_lock_irq(&pool->lock); << 3554 worker_leave_idle(worker); << 3555 << 3556 /* << 3557 * This function follows the structur << 3558 * explanations on each step. << 3559 */ << 3560 if (!need_more_worker(pool)) << 3561 goto done; << 3562 << 3563 WARN_ON_ONCE(!list_empty(&worker->sch << 3564 worker_clr_flags(worker, WORKER_PREP << 3565 << 3566 do { << 3567 struct work_struct *work = << 3568 list_first_entry(&poo << 3569 stru << 3570 << 3571 if (assign_work(work, worker, << 3572 process_scheduled_wor << 3573 } while (keep_working(pool) && << 3574 --nr_restarts && time_before << 3575 << 3576 worker_set_flags(worker, WORKER_PREP) << 3577 done: << 3578 worker_enter_idle(worker); << 3579 kick_pool(pool); << 3580 raw_spin_unlock_irq(&pool->lock); << 3581 } << 3582 << 3583 /* << 3584 * TODO: Convert all tasklet users to workque << 3585 * << 3586 * This is currently called from tasklet[_hi] << 3587 * whenever there are tasklets to run. Let's << 3588 * queued. Once conversion from tasklet is co << 3589 * can be dropped. 
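/*
 * Aside (illustrative sketch, hypothetical driver): from a user's
 * point of view the tasklet conversion mentioned in the TODO above is
 * mechanical - the handler body keeps running in BH context, only the
 * queueing changes.  my_bh_fn(), my_bh_work and my_irq_handler() are
 * made up.
 */
static void my_bh_fn(struct work_struct *work)
{
	/* runs in softirq context, like the old tasklet body did */
}
static DECLARE_WORK(my_bh_work, my_bh_fn);

static irqreturn_t my_irq_handler(int irq, void *data)
{
	queue_work(system_bh_wq, &my_bh_work);	/* was tasklet_schedule() */
	return IRQ_HANDLED;
}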
<< 3590 * << 3591 * After full conversion, we'll add worker->s << 3592 * softirq action and obtain the worker point << 3593 */ << 3594 void workqueue_softirq_action(bool highpri) << 3595 { << 3596 struct worker_pool *pool = << 3597 &per_cpu(bh_worker_pools, smp << 3598 if (need_more_worker(pool)) << 3599 bh_worker(list_first_entry(&p << 3600 } << 3601 << 3602 struct wq_drain_dead_softirq_work { << 3603 struct work_struct work; << 3604 struct worker_pool *pool; << 3605 struct completion done; << 3606 }; << 3607 << 3608 static void drain_dead_softirq_workfn(struct << 3609 { << 3610 struct wq_drain_dead_softirq_work *de << 3611 container_of(work, struct wq_ << 3612 struct worker_pool *pool = dead_work- << 3613 bool repeat; << 3614 << 3615 /* << 3616 * @pool's CPU is dead and we want to << 3617 * items from this BH work item which << 3618 * its CPU is dead, @pool can't be ki << 3619 * will be nested, a lockdep annotati << 3620 * @pool with %POOL_BH_DRAINING for t << 3621 */ << 3622 raw_spin_lock_irq(&pool->lock); << 3623 pool->flags |= POOL_BH_DRAINING; << 3624 raw_spin_unlock_irq(&pool->lock); << 3625 << 3626 bh_worker(list_first_entry(&pool->wor << 3627 << 3628 raw_spin_lock_irq(&pool->lock); << 3629 pool->flags &= ~POOL_BH_DRAINING; << 3630 repeat = need_more_worker(pool); << 3631 raw_spin_unlock_irq(&pool->lock); << 3632 << 3633 /* << 3634 * bh_worker() might hit consecutive << 3635 * still are pending work items, resc << 3636 * don't hog this CPU's BH. << 3637 */ << 3638 if (repeat) { << 3639 if (pool->attrs->nice == HIGH << 3640 queue_work(system_bh_ << 3641 else << 3642 queue_work(system_bh_ << 3643 } else { << 3644 complete(&dead_work->done); << 3645 } << 3646 } << 3647 << 3648 /* << 3649 * @cpu is dead. Drain the remaining BH work << 3650 * possible to allocate dead_work per CPU and << 3651 * have to worry about draining overlapping w << 3652 * nesting (one CPU's dead_work queued on ano << 3653 * on). Let's keep it simple and drain them s << 3654 * items which shouldn't be requeued on the s << 3655 */ << 3656 void workqueue_softirq_dead(unsigned int cpu) << 3657 { << 3658 int i; << 3659 << 3660 for (i = 0; i < NR_STD_WORKER_POOLS; << 3661 struct worker_pool *pool = &p << 3662 struct wq_drain_dead_softirq_ << 3663 << 3664 if (!need_more_worker(pool)) << 3665 continue; << 3666 << 3667 INIT_WORK_ONSTACK(&dead_work. << 3668 dead_work.pool = pool; << 3669 init_completion(&dead_work.do << 3670 << 3671 if (pool->attrs->nice == HIGH << 3672 queue_work(system_bh_ << 3673 else << 3674 queue_work(system_bh_ << 3675 << 3676 wait_for_completion(&dead_wor << 3677 destroy_work_on_stack(&dead_w << 3678 } << 3679 } << 3680 << 3681 /** << 3682 * check_flush_dependency - check for flush d << 3683 * @target_wq: workqueue being flushed << 3684 * @target_work: work item being flushed (NUL << 3685 * << 3686 * %current is trying to flush the whole @tar << 3687 * If @target_wq doesn't have %WQ_MEM_RECLAIM << 3688 * reclaiming memory or running on a workqueu << 3689 * %WQ_MEM_RECLAIM as that can break forward- << 3690 * a deadlock. 
<< 3691 */ << 3692 static void check_flush_dependency(struct wor << 3693 struct wor << 3694 { << 3695 work_func_t target_func = target_work << 3696 struct worker *worker; << 3697 << 3698 if (target_wq->flags & WQ_MEM_RECLAIM << 3699 return; << 3700 << 3701 worker = current_wq_worker(); << 3702 << 3703 WARN_ONCE(current->flags & PF_MEMALLO << 3704 "workqueue: PF_MEMALLOC tas << 3705 current->pid, current->comm << 3706 WARN_ONCE(worker && ((worker->current << 3707 (WQ_MEM_RECLAIM << 3708 "workqueue: WQ_MEM_RECLAIM << 3709 worker->current_pwq->wq->na << 3710 target_wq->name, target_fun << 3711 } << 3712 << 3713 struct wq_barrier { 2465 struct wq_barrier { 3714 struct work_struct work; 2466 struct work_struct work; 3715 struct completion done; 2467 struct completion done; 3716 struct task_struct *task; /* pu << 3717 }; 2468 }; 3718 2469 3719 static void wq_barrier_func(struct work_struc 2470 static void wq_barrier_func(struct work_struct *work) 3720 { 2471 { 3721 struct wq_barrier *barr = container_o 2472 struct wq_barrier *barr = container_of(work, struct wq_barrier, work); 3722 complete(&barr->done); 2473 complete(&barr->done); 3723 } 2474 } 3724 2475 3725 /** 2476 /** 3726 * insert_wq_barrier - insert a barrier work 2477 * insert_wq_barrier - insert a barrier work 3727 * @pwq: pwq to insert barrier into 2478 * @pwq: pwq to insert barrier into 3728 * @barr: wq_barrier to insert 2479 * @barr: wq_barrier to insert 3729 * @target: target work to attach @barr to 2480 * @target: target work to attach @barr to 3730 * @worker: worker currently executing @targe 2481 * @worker: worker currently executing @target, NULL if @target is not executing 3731 * 2482 * 3732 * @barr is linked to @target such that @barr 2483 * @barr is linked to @target such that @barr is completed only after 3733 * @target finishes execution. Please note t 2484 * @target finishes execution. Please note that the ordering 3734 * guarantee is observed only with respect to 2485 * guarantee is observed only with respect to @target and on the local 3735 * cpu. 2486 * cpu. 3736 * 2487 * 3737 * Currently, a queued barrier can't be cance 2488 * Currently, a queued barrier can't be canceled. This is because 3738 * try_to_grab_pending() can't determine whet 2489 * try_to_grab_pending() can't determine whether the work to be 3739 * grabbed is at the head of the queue and th 2490 * grabbed is at the head of the queue and thus can't clear LINKED 3740 * flag of the previous work while there must 2491 * flag of the previous work while there must be a valid next work 3741 * after a work with LINKED flag set. 2492 * after a work with LINKED flag set. 3742 * 2493 * 3743 * Note that when @worker is non-NULL, @targe 2494 * Note that when @worker is non-NULL, @target may be modified 3744 * underneath us, so we can't reliably determ 2495 * underneath us, so we can't reliably determine pwq from @target. 3745 * 2496 * 3746 * CONTEXT: 2497 * CONTEXT: 3747 * raw_spin_lock_irq(pool->lock). !! 2498 * spin_lock_irq(pool->lock). 
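/*
 * Aside (illustrative sketch, hypothetical names): the dependency that
 * check_flush_dependency() above warns about.  If a work item running
 * on a WQ_MEM_RECLAIM workqueue flushes a workqueue without that flag,
 * reclaim's forward progress hinges on a queue that may itself need
 * memory to run:
 */
static struct workqueue_struct *plain_wq;	/* no WQ_MEM_RECLAIM */

static void my_reclaim_fn(struct work_struct *work)
{
	/* queued on a WQ_MEM_RECLAIM workqueue; this flush is the bug */
	flush_workqueue(plain_wq);
}

/*
 * Aside (illustrative sketch): wq_barrier above is a private instance
 * of the generic on-stack work + completion idiom, which open-coded
 * user code would write roughly as:
 */
struct my_sync {
	struct work_struct	work;
	struct completion	done;
};

static void my_sync_fn(struct work_struct *work)
{
	struct my_sync *s = container_of(work, struct my_sync, work);

	complete(&s->done);
}

static void my_wait_for_wq(struct workqueue_struct *wq)
{
	struct my_sync s;

	INIT_WORK_ONSTACK(&s.work, my_sync_fn);
	init_completion(&s.done);
	queue_work(wq, &s.work);
	wait_for_completion(&s.done);
	destroy_work_on_stack(&s.work);
}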
3748 */ 2499 */ 3749 static void insert_wq_barrier(struct pool_wor 2500 static void insert_wq_barrier(struct pool_workqueue *pwq, 3750 struct wq_barri 2501 struct wq_barrier *barr, 3751 struct work_str 2502 struct work_struct *target, struct worker *worker) 3752 { 2503 { 3753 static __maybe_unused struct lock_cla << 3754 unsigned int work_flags = 0; << 3755 unsigned int work_color; << 3756 struct list_head *head; 2504 struct list_head *head; >> 2505 unsigned int linked = 0; 3757 2506 3758 /* 2507 /* 3759 * debugobject calls are safe here ev 2508 * debugobject calls are safe here even with pool->lock locked 3760 * as we know for sure that this will 2509 * as we know for sure that this will not trigger any of the 3761 * checks and call back into the fixu 2510 * checks and call back into the fixup functions where we 3762 * might deadlock. 2511 * might deadlock. 3763 * << 3764 * BH and threaded workqueues need se << 3765 * spuriously triggering "inconsisten << 3766 * usage". << 3767 */ 2512 */ 3768 INIT_WORK_ONSTACK_KEY(&barr->work, wq !! 2513 INIT_WORK_ONSTACK(&barr->work, wq_barrier_func); 3769 (pwq->wq->flags << 3770 __set_bit(WORK_STRUCT_PENDING_BIT, wo 2514 __set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work)); 3771 !! 2515 init_completion(&barr->done); 3772 init_completion_map(&barr->done, &tar << 3773 << 3774 barr->task = current; << 3775 << 3776 /* The barrier work item does not par << 3777 work_flags |= WORK_STRUCT_INACTIVE; << 3778 2516 3779 /* 2517 /* 3780 * If @target is currently being exec 2518 * If @target is currently being executed, schedule the 3781 * barrier to the worker; otherwise, 2519 * barrier to the worker; otherwise, put it after @target. 3782 */ 2520 */ 3783 if (worker) { !! 2521 if (worker) 3784 head = worker->scheduled.next 2522 head = worker->scheduled.next; 3785 work_color = worker->current_ !! 2523 else { 3786 } else { << 3787 unsigned long *bits = work_da 2524 unsigned long *bits = work_data_bits(target); 3788 2525 3789 head = target->entry.next; 2526 head = target->entry.next; 3790 /* there can already be other 2527 /* there can already be other linked works, inherit and set */ 3791 work_flags |= *bits & WORK_ST !! 2528 linked = *bits & WORK_STRUCT_LINKED; 3792 work_color = get_work_color(* << 3793 __set_bit(WORK_STRUCT_LINKED_ 2529 __set_bit(WORK_STRUCT_LINKED_BIT, bits); 3794 } 2530 } 3795 2531 3796 pwq->nr_in_flight[work_color]++; !! 2532 debug_work_activate(&barr->work); 3797 work_flags |= work_color_to_flags(wor !! 2533 insert_work(pwq, &barr->work, head, 3798 !! 2534 work_color_to_flags(WORK_NO_COLOR) | linked); 3799 insert_work(pwq, &barr->work, head, w << 3800 } 2535 } 3801 2536 3802 /** 2537 /** 3803 * flush_workqueue_prep_pwqs - prepare pwqs f 2538 * flush_workqueue_prep_pwqs - prepare pwqs for workqueue flushing 3804 * @wq: workqueue being flushed 2539 * @wq: workqueue being flushed 3805 * @flush_color: new flush color, < 0 for no- 2540 * @flush_color: new flush color, < 0 for no-op 3806 * @work_color: new work color, < 0 for no-op 2541 * @work_color: new work color, < 0 for no-op 3807 * 2542 * 3808 * Prepare pwqs for workqueue flushing. 2543 * Prepare pwqs for workqueue flushing. 3809 * 2544 * 3810 * If @flush_color is non-negative, flush_col 2545 * If @flush_color is non-negative, flush_color on all pwqs should be 3811 * -1. If no pwq has in-flight commands at t 2546 * -1. 
If no pwq has in-flight commands at the specified color, all 3812 * pwq->flush_color's stay at -1 and %false i 2547 * pwq->flush_color's stay at -1 and %false is returned. If any pwq 3813 * has in flight commands, its pwq->flush_col 2548 * has in flight commands, its pwq->flush_color is set to 3814 * @flush_color, @wq->nr_pwqs_to_flush is upd 2549 * @flush_color, @wq->nr_pwqs_to_flush is updated accordingly, pwq 3815 * wakeup logic is armed and %true is returne 2550 * wakeup logic is armed and %true is returned. 3816 * 2551 * 3817 * The caller should have initialized @wq->fi 2552 * The caller should have initialized @wq->first_flusher prior to 3818 * calling this function with non-negative @f 2553 * calling this function with non-negative @flush_color. If 3819 * @flush_color is negative, no flush color u 2554 * @flush_color is negative, no flush color update is done and %false 3820 * is returned. 2555 * is returned. 3821 * 2556 * 3822 * If @work_color is non-negative, all pwqs s 2557 * If @work_color is non-negative, all pwqs should have the same 3823 * work_color which is previous to @work_colo 2558 * work_color which is previous to @work_color and all will be 3824 * advanced to @work_color. 2559 * advanced to @work_color. 3825 * 2560 * 3826 * CONTEXT: 2561 * CONTEXT: 3827 * mutex_lock(wq->mutex). 2562 * mutex_lock(wq->mutex). 3828 * 2563 * 3829 * Return: !! 2564 * RETURNS: 3830 * %true if @flush_color >= 0 and there's som 2565 * %true if @flush_color >= 0 and there's something to flush. %false 3831 * otherwise. 2566 * otherwise. 3832 */ 2567 */ 3833 static bool flush_workqueue_prep_pwqs(struct 2568 static bool flush_workqueue_prep_pwqs(struct workqueue_struct *wq, 3834 int flu 2569 int flush_color, int work_color) 3835 { 2570 { 3836 bool wait = false; 2571 bool wait = false; 3837 struct pool_workqueue *pwq; 2572 struct pool_workqueue *pwq; 3838 2573 3839 if (flush_color >= 0) { 2574 if (flush_color >= 0) { 3840 WARN_ON_ONCE(atomic_read(&wq- 2575 WARN_ON_ONCE(atomic_read(&wq->nr_pwqs_to_flush)); 3841 atomic_set(&wq->nr_pwqs_to_fl 2576 atomic_set(&wq->nr_pwqs_to_flush, 1); 3842 } 2577 } 3843 2578 3844 for_each_pwq(pwq, wq) { 2579 for_each_pwq(pwq, wq) { 3845 struct worker_pool *pool = pw 2580 struct worker_pool *pool = pwq->pool; 3846 2581 3847 raw_spin_lock_irq(&pool->lock !! 2582 spin_lock_irq(&pool->lock); 3848 2583 3849 if (flush_color >= 0) { 2584 if (flush_color >= 0) { 3850 WARN_ON_ONCE(pwq->flu 2585 WARN_ON_ONCE(pwq->flush_color != -1); 3851 2586 3852 if (pwq->nr_in_flight 2587 if (pwq->nr_in_flight[flush_color]) { 3853 pwq->flush_co 2588 pwq->flush_color = flush_color; 3854 atomic_inc(&w 2589 atomic_inc(&wq->nr_pwqs_to_flush); 3855 wait = true; 2590 wait = true; 3856 } 2591 } 3857 } 2592 } 3858 2593 3859 if (work_color >= 0) { 2594 if (work_color >= 0) { 3860 WARN_ON_ONCE(work_col 2595 WARN_ON_ONCE(work_color != work_next_color(pwq->work_color)); 3861 pwq->work_color = wor 2596 pwq->work_color = work_color; 3862 } 2597 } 3863 2598 3864 raw_spin_unlock_irq(&pool->lo !! 
2599 spin_unlock_irq(&pool->lock); 3865 } 2600 } 3866 2601 3867 if (flush_color >= 0 && atomic_dec_an 2602 if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_pwqs_to_flush)) 3868 complete(&wq->first_flusher-> 2603 complete(&wq->first_flusher->done); 3869 2604 3870 return wait; 2605 return wait; 3871 } 2606 } 3872 2607 3873 static void touch_wq_lockdep_map(struct workq << 3874 { << 3875 #ifdef CONFIG_LOCKDEP << 3876 if (wq->flags & WQ_BH) << 3877 local_bh_disable(); << 3878 << 3879 lock_map_acquire(&wq->lockdep_map); << 3880 lock_map_release(&wq->lockdep_map); << 3881 << 3882 if (wq->flags & WQ_BH) << 3883 local_bh_enable(); << 3884 #endif << 3885 } << 3886 << 3887 static void touch_work_lockdep_map(struct wor << 3888 struct wor << 3889 { << 3890 #ifdef CONFIG_LOCKDEP << 3891 if (wq->flags & WQ_BH) << 3892 local_bh_disable(); << 3893 << 3894 lock_map_acquire(&work->lockdep_map); << 3895 lock_map_release(&work->lockdep_map); << 3896 << 3897 if (wq->flags & WQ_BH) << 3898 local_bh_enable(); << 3899 #endif << 3900 } << 3901 << 3902 /** 2608 /** 3903 * __flush_workqueue - ensure that any schedu !! 2609 * flush_workqueue - ensure that any scheduled work has run to completion. 3904 * @wq: workqueue to flush 2610 * @wq: workqueue to flush 3905 * 2611 * 3906 * This function sleeps until all work items 2612 * This function sleeps until all work items which were queued on entry 3907 * have finished execution, but it is not liv 2613 * have finished execution, but it is not livelocked by new incoming ones. 3908 */ 2614 */ 3909 void __flush_workqueue(struct workqueue_struc !! 2615 void flush_workqueue(struct workqueue_struct *wq) 3910 { 2616 { 3911 struct wq_flusher this_flusher = { 2617 struct wq_flusher this_flusher = { 3912 .list = LIST_HEAD_INIT(this_f 2618 .list = LIST_HEAD_INIT(this_flusher.list), 3913 .flush_color = -1, 2619 .flush_color = -1, 3914 .done = COMPLETION_INITIALIZE !! 2620 .done = COMPLETION_INITIALIZER_ONSTACK(this_flusher.done), 3915 }; 2621 }; 3916 int next_color; 2622 int next_color; 3917 2623 3918 if (WARN_ON(!wq_online)) !! 2624 lock_map_acquire(&wq->lockdep_map); 3919 return; !! 2625 lock_map_release(&wq->lockdep_map); 3920 << 3921 touch_wq_lockdep_map(wq); << 3922 2626 3923 mutex_lock(&wq->mutex); 2627 mutex_lock(&wq->mutex); 3924 2628 3925 /* 2629 /* 3926 * Start-to-wait phase 2630 * Start-to-wait phase 3927 */ 2631 */ 3928 next_color = work_next_color(wq->work 2632 next_color = work_next_color(wq->work_color); 3929 2633 3930 if (next_color != wq->flush_color) { 2634 if (next_color != wq->flush_color) { 3931 /* 2635 /* 3932 * Color space is not full. 2636 * Color space is not full. The current work_color 3933 * becomes our flush_color an 2637 * becomes our flush_color and work_color is advanced 3934 * by one. 2638 * by one. 
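/*
 * Aside (illustrative sketch, hypothetical names): what the color
 * machinery here guarantees to a caller - everything queued before the
 * flush has finished when it returns; items queued afterwards are not
 * waited for.
 */
static void my_quiesce(struct workqueue_struct *wq, struct work_struct *w)
{
	queue_work(wq, w);
	flush_workqueue(wq);	/* *w and all earlier items have completed */
}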
3935 */ 2639 */ 3936 WARN_ON_ONCE(!list_empty(&wq- 2640 WARN_ON_ONCE(!list_empty(&wq->flusher_overflow)); 3937 this_flusher.flush_color = wq 2641 this_flusher.flush_color = wq->work_color; 3938 wq->work_color = next_color; 2642 wq->work_color = next_color; 3939 2643 3940 if (!wq->first_flusher) { 2644 if (!wq->first_flusher) { 3941 /* no flush in progre 2645 /* no flush in progress, become the first flusher */ 3942 WARN_ON_ONCE(wq->flus 2646 WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color); 3943 2647 3944 wq->first_flusher = & 2648 wq->first_flusher = &this_flusher; 3945 2649 3946 if (!flush_workqueue_ 2650 if (!flush_workqueue_prep_pwqs(wq, wq->flush_color, 3947 2651 wq->work_color)) { 3948 /* nothing to 2652 /* nothing to flush, done */ 3949 wq->flush_col 2653 wq->flush_color = next_color; 3950 wq->first_flu 2654 wq->first_flusher = NULL; 3951 goto out_unlo 2655 goto out_unlock; 3952 } 2656 } 3953 } else { 2657 } else { 3954 /* wait in queue */ 2658 /* wait in queue */ 3955 WARN_ON_ONCE(wq->flus 2659 WARN_ON_ONCE(wq->flush_color == this_flusher.flush_color); 3956 list_add_tail(&this_f 2660 list_add_tail(&this_flusher.list, &wq->flusher_queue); 3957 flush_workqueue_prep_ 2661 flush_workqueue_prep_pwqs(wq, -1, wq->work_color); 3958 } 2662 } 3959 } else { 2663 } else { 3960 /* 2664 /* 3961 * Oops, color space is full, 2665 * Oops, color space is full, wait on overflow queue. 3962 * The next flush completion 2666 * The next flush completion will assign us 3963 * flush_color and transfer t 2667 * flush_color and transfer to flusher_queue. 3964 */ 2668 */ 3965 list_add_tail(&this_flusher.l 2669 list_add_tail(&this_flusher.list, &wq->flusher_overflow); 3966 } 2670 } 3967 2671 3968 check_flush_dependency(wq, NULL); << 3969 << 3970 mutex_unlock(&wq->mutex); 2672 mutex_unlock(&wq->mutex); 3971 2673 3972 wait_for_completion(&this_flusher.don 2674 wait_for_completion(&this_flusher.done); 3973 2675 3974 /* 2676 /* 3975 * Wake-up-and-cascade phase 2677 * Wake-up-and-cascade phase 3976 * 2678 * 3977 * First flushers are responsible for 2679 * First flushers are responsible for cascading flushes and 3978 * handling overflow. Non-first flus 2680 * handling overflow. Non-first flushers can simply return. 3979 */ 2681 */ 3980 if (READ_ONCE(wq->first_flusher) != & !! 2682 if (wq->first_flusher != &this_flusher) 3981 return; 2683 return; 3982 2684 3983 mutex_lock(&wq->mutex); 2685 mutex_lock(&wq->mutex); 3984 2686 3985 /* we might have raced, check again w 2687 /* we might have raced, check again with mutex held */ 3986 if (wq->first_flusher != &this_flushe 2688 if (wq->first_flusher != &this_flusher) 3987 goto out_unlock; 2689 goto out_unlock; 3988 2690 3989 WRITE_ONCE(wq->first_flusher, NULL); !! 
2691 wq->first_flusher = NULL; 3990 2692 3991 WARN_ON_ONCE(!list_empty(&this_flushe 2693 WARN_ON_ONCE(!list_empty(&this_flusher.list)); 3992 WARN_ON_ONCE(wq->flush_color != this_ 2694 WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color); 3993 2695 3994 while (true) { 2696 while (true) { 3995 struct wq_flusher *next, *tmp 2697 struct wq_flusher *next, *tmp; 3996 2698 3997 /* complete all the flushers 2699 /* complete all the flushers sharing the current flush color */ 3998 list_for_each_entry_safe(next 2700 list_for_each_entry_safe(next, tmp, &wq->flusher_queue, list) { 3999 if (next->flush_color 2701 if (next->flush_color != wq->flush_color) 4000 break; 2702 break; 4001 list_del_init(&next-> 2703 list_del_init(&next->list); 4002 complete(&next->done) 2704 complete(&next->done); 4003 } 2705 } 4004 2706 4005 WARN_ON_ONCE(!list_empty(&wq- 2707 WARN_ON_ONCE(!list_empty(&wq->flusher_overflow) && 4006 wq->flush_color 2708 wq->flush_color != work_next_color(wq->work_color)); 4007 2709 4008 /* this flush_color is finish 2710 /* this flush_color is finished, advance by one */ 4009 wq->flush_color = work_next_c 2711 wq->flush_color = work_next_color(wq->flush_color); 4010 2712 4011 /* one color has been freed, 2713 /* one color has been freed, handle overflow queue */ 4012 if (!list_empty(&wq->flusher_ 2714 if (!list_empty(&wq->flusher_overflow)) { 4013 /* 2715 /* 4014 * Assign the same co 2716 * Assign the same color to all overflowed 4015 * flushers, advance 2717 * flushers, advance work_color and append to 4016 * flusher_queue. Th 2718 * flusher_queue. This is the start-to-wait 4017 * phase for these ov 2719 * phase for these overflowed flushers. 4018 */ 2720 */ 4019 list_for_each_entry(t 2721 list_for_each_entry(tmp, &wq->flusher_overflow, list) 4020 tmp->flush_co 2722 tmp->flush_color = wq->work_color; 4021 2723 4022 wq->work_color = work 2724 wq->work_color = work_next_color(wq->work_color); 4023 2725 4024 list_splice_tail_init 2726 list_splice_tail_init(&wq->flusher_overflow, 4025 2727 &wq->flusher_queue); 4026 flush_workqueue_prep_ 2728 flush_workqueue_prep_pwqs(wq, -1, wq->work_color); 4027 } 2729 } 4028 2730 4029 if (list_empty(&wq->flusher_q 2731 if (list_empty(&wq->flusher_queue)) { 4030 WARN_ON_ONCE(wq->flus 2732 WARN_ON_ONCE(wq->flush_color != wq->work_color); 4031 break; 2733 break; 4032 } 2734 } 4033 2735 4034 /* 2736 /* 4035 * Need to flush more colors. 2737 * Need to flush more colors. Make the next flusher 4036 * the new first flusher and 2738 * the new first flusher and arm pwqs. 4037 */ 2739 */ 4038 WARN_ON_ONCE(wq->flush_color 2740 WARN_ON_ONCE(wq->flush_color == wq->work_color); 4039 WARN_ON_ONCE(wq->flush_color 2741 WARN_ON_ONCE(wq->flush_color != next->flush_color); 4040 2742 4041 list_del_init(&next->list); 2743 list_del_init(&next->list); 4042 wq->first_flusher = next; 2744 wq->first_flusher = next; 4043 2745 4044 if (flush_workqueue_prep_pwqs 2746 if (flush_workqueue_prep_pwqs(wq, wq->flush_color, -1)) 4045 break; 2747 break; 4046 2748 4047 /* 2749 /* 4048 * Meh... this color is alrea 2750 * Meh... this color is already done, clear first 4049 * flusher and repeat cascadi 2751 * flusher and repeat cascading. 4050 */ 2752 */ 4051 wq->first_flusher = NULL; 2753 wq->first_flusher = NULL; 4052 } 2754 } 4053 2755 4054 out_unlock: 2756 out_unlock: 4055 mutex_unlock(&wq->mutex); 2757 mutex_unlock(&wq->mutex); 4056 } 2758 } 4057 EXPORT_SYMBOL(__flush_workqueue); !! 
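/*
 * Aside (illustrative sketch, hypothetical names): drain_workqueue()
 * below permits "chain queueing" - a running item may requeue itself
 * on the same workqueue, and the drain loop keeps reflushing until
 * that stops:
 */
static struct workqueue_struct *my_wq;
static atomic_t my_remaining = ATOMIC_INIT(8);

static void my_selfreq_fn(struct work_struct *work)
{
	if (atomic_dec_return(&my_remaining) > 0)
		queue_work(my_wq, work);	/* legal while draining */
}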
2759 EXPORT_SYMBOL_GPL(flush_workqueue); 4058 2760 4059 /** 2761 /** 4060 * drain_workqueue - drain a workqueue 2762 * drain_workqueue - drain a workqueue 4061 * @wq: workqueue to drain 2763 * @wq: workqueue to drain 4062 * 2764 * 4063 * Wait until the workqueue becomes empty. W 2765 * Wait until the workqueue becomes empty. While draining is in progress, 4064 * only chain queueing is allowed. IOW, only 2766 * only chain queueing is allowed. IOW, only currently pending or running 4065 * work items on @wq can queue further work i 2767 * work items on @wq can queue further work items on it. @wq is flushed 4066 * repeatedly until it becomes empty. The nu !! 2768 * repeatedly until it becomes empty. The number of flushing is detemined 4067 * by the depth of chaining and should be rel 2769 * by the depth of chaining and should be relatively short. Whine if it 4068 * takes too long. 2770 * takes too long. 4069 */ 2771 */ 4070 void drain_workqueue(struct workqueue_struct 2772 void drain_workqueue(struct workqueue_struct *wq) 4071 { 2773 { 4072 unsigned int flush_cnt = 0; 2774 unsigned int flush_cnt = 0; 4073 struct pool_workqueue *pwq; 2775 struct pool_workqueue *pwq; 4074 2776 4075 /* 2777 /* 4076 * __queue_work() needs to test wheth 2778 * __queue_work() needs to test whether there are drainers, is much 4077 * hotter than drain_workqueue() and 2779 * hotter than drain_workqueue() and already looks at @wq->flags. 4078 * Use __WQ_DRAINING so that queue do 2780 * Use __WQ_DRAINING so that queue doesn't have to check nr_drainers. 4079 */ 2781 */ 4080 mutex_lock(&wq->mutex); 2782 mutex_lock(&wq->mutex); 4081 if (!wq->nr_drainers++) 2783 if (!wq->nr_drainers++) 4082 wq->flags |= __WQ_DRAINING; 2784 wq->flags |= __WQ_DRAINING; 4083 mutex_unlock(&wq->mutex); 2785 mutex_unlock(&wq->mutex); 4084 reflush: 2786 reflush: 4085 __flush_workqueue(wq); !! 2787 flush_workqueue(wq); 4086 2788 4087 mutex_lock(&wq->mutex); 2789 mutex_lock(&wq->mutex); 4088 2790 4089 for_each_pwq(pwq, wq) { 2791 for_each_pwq(pwq, wq) { 4090 bool drained; 2792 bool drained; 4091 2793 4092 raw_spin_lock_irq(&pwq->pool- !! 2794 spin_lock_irq(&pwq->pool->lock); 4093 drained = pwq_is_empty(pwq); !! 2795 drained = !pwq->nr_active && list_empty(&pwq->delayed_works); 4094 raw_spin_unlock_irq(&pwq->poo !! 2796 spin_unlock_irq(&pwq->pool->lock); 4095 2797 4096 if (drained) 2798 if (drained) 4097 continue; 2799 continue; 4098 2800 4099 if (++flush_cnt == 10 || 2801 if (++flush_cnt == 10 || 4100 (flush_cnt % 100 == 0 && 2802 (flush_cnt % 100 == 0 && flush_cnt <= 1000)) 4101 pr_warn("workqueue %s !! 2803 pr_warn("workqueue %s: drain_workqueue() isn't complete after %u tries\n", 4102 wq->name, __f !! 2804 wq->name, flush_cnt); 4103 2805 4104 mutex_unlock(&wq->mutex); 2806 mutex_unlock(&wq->mutex); 4105 goto reflush; 2807 goto reflush; 4106 } 2808 } 4107 2809 4108 if (!--wq->nr_drainers) 2810 if (!--wq->nr_drainers) 4109 wq->flags &= ~__WQ_DRAINING; 2811 wq->flags &= ~__WQ_DRAINING; 4110 mutex_unlock(&wq->mutex); 2812 mutex_unlock(&wq->mutex); 4111 } 2813 } 4112 EXPORT_SYMBOL_GPL(drain_workqueue); 2814 EXPORT_SYMBOL_GPL(drain_workqueue); 4113 2815 4114 static bool start_flush_work(struct work_stru !! 
2816 static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr) 4115 bool from_cancel << 4116 { 2817 { 4117 struct worker *worker = NULL; 2818 struct worker *worker = NULL; 4118 struct worker_pool *pool; 2819 struct worker_pool *pool; 4119 struct pool_workqueue *pwq; 2820 struct pool_workqueue *pwq; 4120 struct workqueue_struct *wq; << 4121 2821 4122 rcu_read_lock(); !! 2822 might_sleep(); >> 2823 >> 2824 local_irq_disable(); 4123 pool = get_work_pool(work); 2825 pool = get_work_pool(work); 4124 if (!pool) { 2826 if (!pool) { 4125 rcu_read_unlock(); !! 2827 local_irq_enable(); 4126 return false; 2828 return false; 4127 } 2829 } 4128 2830 4129 raw_spin_lock_irq(&pool->lock); !! 2831 spin_lock(&pool->lock); 4130 /* see the comment in try_to_grab_pen 2832 /* see the comment in try_to_grab_pending() with the same code */ 4131 pwq = get_work_pwq(work); 2833 pwq = get_work_pwq(work); 4132 if (pwq) { 2834 if (pwq) { 4133 if (unlikely(pwq->pool != poo 2835 if (unlikely(pwq->pool != pool)) 4134 goto already_gone; 2836 goto already_gone; 4135 } else { 2837 } else { 4136 worker = find_worker_executin 2838 worker = find_worker_executing_work(pool, work); 4137 if (!worker) 2839 if (!worker) 4138 goto already_gone; 2840 goto already_gone; 4139 pwq = worker->current_pwq; 2841 pwq = worker->current_pwq; 4140 } 2842 } 4141 2843 4142 wq = pwq->wq; << 4143 check_flush_dependency(wq, work); << 4144 << 4145 insert_wq_barrier(pwq, barr, work, wo 2844 insert_wq_barrier(pwq, barr, work, worker); 4146 raw_spin_unlock_irq(&pool->lock); !! 2845 spin_unlock_irq(&pool->lock); 4147 << 4148 touch_work_lockdep_map(work, wq); << 4149 2846 4150 /* 2847 /* 4151 * Force a lock recursion deadlock wh !! 2848 * If @max_active is 1 or rescuer is in use, flushing another work 4152 * single-threaded or rescuer equippe !! 2849 * item on the same workqueue may lead to deadlock. Make sure the 4153 * !! 2850 * flusher is not running on the same workqueue by verifying write 4154 * For single threaded workqueues the !! 2851 * access. 4155 * is after the work issuing the flus << 4156 * workqueues the deadlock happens wh << 4157 * forward progress. << 4158 */ 2852 */ 4159 if (!from_cancel && (wq->saved_max_ac !! 2853 if (pwq->wq->saved_max_active == 1 || pwq->wq->rescuer) 4160 touch_wq_lockdep_map(wq); !! 2854 lock_map_acquire(&pwq->wq->lockdep_map); >> 2855 else >> 2856 lock_map_acquire_read(&pwq->wq->lockdep_map); >> 2857 lock_map_release(&pwq->wq->lockdep_map); 4161 2858 4162 rcu_read_unlock(); << 4163 return true; 2859 return true; 4164 already_gone: 2860 already_gone: 4165 raw_spin_unlock_irq(&pool->lock); !! 2861 spin_unlock_irq(&pool->lock); 4166 rcu_read_unlock(); << 4167 return false; 2862 return false; 4168 } 2863 } 4169 2864 4170 static bool __flush_work(struct work_struct * << 4171 { << 4172 struct wq_barrier barr; << 4173 << 4174 if (WARN_ON(!wq_online)) << 4175 return false; << 4176 << 4177 if (WARN_ON(!work->func)) << 4178 return false; << 4179 << 4180 if (!start_flush_work(work, &barr, fr << 4181 return false; << 4182 << 4183 /* << 4184 * start_flush_work() returned %true. << 4185 * that @work must have been executin << 4186 * can't currently be queued. 
Its dat << 4187 * was queued on a BH workqueue, we a << 4188 * BH context and thus can be busy-wa << 4189 */ << 4190 if (from_cancel) { << 4191 unsigned long data = *work_da << 4192 << 4193 if (!WARN_ON_ONCE(data & WORK << 4194 (data & WORK_OFFQ_BH)) { << 4195 /* << 4196 * On RT, prevent a l << 4197 * soft interrupt pro << 4198 * running by keeping << 4199 * runs on a differen << 4200 * than doing the BH << 4201 * This is copied fro << 4202 * kernel/softirq.c:: << 4203 */ << 4204 while (!try_wait_for_ << 4205 if (IS_ENABLE << 4206 local << 4207 local << 4208 } else { << 4209 cpu_r << 4210 } << 4211 } << 4212 goto out_destroy; << 4213 } << 4214 } << 4215 << 4216 wait_for_completion(&barr.done); << 4217 << 4218 out_destroy: << 4219 destroy_work_on_stack(&barr.work); << 4220 return true; << 4221 } << 4222 << 4223 /** 2865 /** 4224 * flush_work - wait for a work to finish exe 2866 * flush_work - wait for a work to finish executing the last queueing instance 4225 * @work: the work to flush 2867 * @work: the work to flush 4226 * 2868 * 4227 * Wait until @work has finished execution. 2869 * Wait until @work has finished execution. @work is guaranteed to be idle 4228 * on return if it hasn't been requeued since 2870 * on return if it hasn't been requeued since flush started. 4229 * 2871 * 4230 * Return: !! 2872 * RETURNS: 4231 * %true if flush_work() waited for the work 2873 * %true if flush_work() waited for the work to finish execution, 4232 * %false if it was already idle. 2874 * %false if it was already idle. 4233 */ 2875 */ 4234 bool flush_work(struct work_struct *work) 2876 bool flush_work(struct work_struct *work) 4235 { 2877 { 4236 might_sleep(); !! 2878 struct wq_barrier barr; 4237 return __flush_work(work, false); << 4238 } << 4239 EXPORT_SYMBOL_GPL(flush_work); << 4240 2879 4241 /** !! 2880 lock_map_acquire(&work->lockdep_map); 4242 * flush_delayed_work - wait for a dwork to f !! 2881 lock_map_release(&work->lockdep_map); 4243 * @dwork: the delayed work to flush << 4244 * << 4245 * Delayed timer is cancelled and the pending << 4246 * immediate execution. Like flush_work(), t << 4247 * considers the last queueing instance of @d << 4248 * << 4249 * Return: << 4250 * %true if flush_work() waited for the work << 4251 * %false if it was already idle. << 4252 */ << 4253 bool flush_delayed_work(struct delayed_work * << 4254 { << 4255 local_irq_disable(); << 4256 if (del_timer_sync(&dwork->timer)) << 4257 __queue_work(dwork->cpu, dwor << 4258 local_irq_enable(); << 4259 return flush_work(&dwork->work); << 4260 } << 4261 EXPORT_SYMBOL(flush_delayed_work); << 4262 2882 4263 /** !! 2883 if (start_flush_work(work, &barr)) { 4264 * flush_rcu_work - wait for a rwork to finis !! 2884 wait_for_completion(&barr.done); 4265 * @rwork: the rcu work to flush !! 2885 destroy_work_on_stack(&barr.work); 4266 * << 4267 * Return: << 4268 * %true if flush_rcu_work() waited for the w << 4269 * %false if it was already idle. << 4270 */ << 4271 bool flush_rcu_work(struct rcu_work *rwork) << 4272 { << 4273 if (test_bit(WORK_STRUCT_PENDING_BIT, << 4274 rcu_barrier(); << 4275 flush_work(&rwork->work); << 4276 return true; 2886 return true; 4277 } else { 2887 } else { 4278 return flush_work(&rwork->wor !! 2888 return false; 4279 } 2889 } 4280 } 2890 } 4281 EXPORT_SYMBOL(flush_rcu_work); !! 2891 EXPORT_SYMBOL_GPL(flush_work); 4282 << 4283 static void work_offqd_disable(struct work_of << 4284 { << 4285 const unsigned long max = (1lu << WOR << 4286 2892 4287 if (likely(offqd->disable < max)) !! 
2893 struct cwt_wait { 4288 offqd->disable++; !! 2894 wait_queue_t wait; 4289 else !! 2895 struct work_struct *work; 4290 WARN_ONCE(true, "workqueue: w !! 2896 }; 4291 } << 4292 2897 4293 static void work_offqd_enable(struct work_off !! 2898 static int cwt_wakefn(wait_queue_t *wait, unsigned mode, int sync, void *key) 4294 { 2899 { 4295 if (likely(offqd->disable > 0)) !! 2900 struct cwt_wait *cwait = container_of(wait, struct cwt_wait, wait); 4296 offqd->disable--; !! 2901 4297 else !! 2902 if (cwait->work != key) 4298 WARN_ONCE(true, "workqueue: w !! 2903 return 0; >> 2904 return autoremove_wake_function(wait, mode, sync, key); 4299 } 2905 } 4300 2906 4301 static bool __cancel_work(struct work_struct !! 2907 static bool __cancel_work_timer(struct work_struct *work, bool is_dwork) 4302 { 2908 { 4303 struct work_offq_data offqd; !! 2909 static DECLARE_WAIT_QUEUE_HEAD(cancel_waitq); 4304 unsigned long irq_flags; !! 2910 unsigned long flags; 4305 int ret; 2911 int ret; 4306 2912 4307 ret = work_grab_pending(work, cflags, !! 2913 do { 4308 !! 2914 ret = try_to_grab_pending(work, is_dwork, &flags); 4309 work_offqd_unpack(&offqd, *work_data_ !! 2915 /* 4310 !! 2916 * If someone else is already canceling, wait for it to 4311 if (cflags & WORK_CANCEL_DISABLE) !! 2917 * finish. flush_work() doesn't work for PREEMPT_NONE 4312 work_offqd_disable(&offqd); !! 2918 * because we may get scheduled between @work's completion >> 2919 * and the other canceling task resuming and clearing >> 2920 * CANCELING - flush_work() will return false immediately >> 2921 * as @work is no longer busy, try_to_grab_pending() will >> 2922 * return -ENOENT as @work is still being canceled and the >> 2923 * other canceling task won't be able to clear CANCELING as >> 2924 * we're hogging the CPU. >> 2925 * >> 2926 * Let's wait for completion using a waitqueue. As this >> 2927 * may lead to the thundering herd problem, use a custom >> 2928 * wake function which matches @work along with exclusive >> 2929 * wait and wakeup. >> 2930 */ >> 2931 if (unlikely(ret == -ENOENT)) { >> 2932 struct cwt_wait cwait; 4313 2933 4314 set_work_pool_and_clear_pending(work, !! 2934 init_wait(&cwait.wait); 4315 work_ !! 2935 cwait.wait.func = cwt_wakefn; 4316 local_irq_restore(irq_flags); !! 2936 cwait.work = work; 4317 return ret; << 4318 } << 4319 2937 4320 static bool __cancel_work_sync(struct work_st !! 2938 prepare_to_wait_exclusive(&cancel_waitq, &cwait.wait, 4321 { !! 2939 TASK_UNINTERRUPTIBLE); 4322 bool ret; !! 2940 if (work_is_canceling(work)) >> 2941 schedule(); >> 2942 finish_wait(&cancel_waitq, &cwait.wait); >> 2943 } >> 2944 } while (unlikely(ret < 0)); 4323 2945 4324 ret = __cancel_work(work, cflags | WO !! 2946 /* tell other tasks trying to grab @work to back off */ >> 2947 mark_work_canceling(work); >> 2948 local_irq_restore(flags); 4325 2949 4326 if (*work_data_bits(work) & WORK_OFFQ !! 2950 flush_work(work); 4327 WARN_ON_ONCE(in_hardirq()); !! 2951 clear_work_data(work); 4328 else << 4329 might_sleep(); << 4330 2952 4331 /* 2953 /* 4332 * Skip __flush_work() during early b !! 2954 * Paired with prepare_to_wait() above so that either 4333 * executing. This allows canceling d !! 2955 * waitqueue_active() is visible here or !work_is_canceling() is >> 2956 * visible there. 4334 */ 2957 */ 4335 if (wq_online) !! 2958 smp_mb(); 4336 __flush_work(work, true); !! 2959 if (waitqueue_active(&cancel_waitq)) 4337 !! 
2960 __wake_up(&cancel_waitq, TASK_NORMAL, 1, work); 4338 if (!(cflags & WORK_CANCEL_DISABLE)) << 4339 enable_work(work); << 4340 2961 4341 return ret; 2962 return ret; 4342 } 2963 } 4343 2964 4344 /* << 4345 * See cancel_delayed_work() << 4346 */ << 4347 bool cancel_work(struct work_struct *work) << 4348 { << 4349 return __cancel_work(work, 0); << 4350 } << 4351 EXPORT_SYMBOL(cancel_work); << 4352 << 4353 /** 2965 /** 4354 * cancel_work_sync - cancel a work and wait 2966 * cancel_work_sync - cancel a work and wait for it to finish 4355 * @work: the work to cancel 2967 * @work: the work to cancel 4356 * 2968 * 4357 * Cancel @work and wait for its execution to !! 2969 * Cancel @work and wait for its execution to finish. This function 4358 * even if the work re-queues itself or migra !! 2970 * can be used even if the work re-queues itself or migrates to 4359 * from this function, @work is guaranteed to !! 2971 * another workqueue. On return from this function, @work is 4360 * CPU as long as there aren't racing enqueue !! 2972 * guaranteed to be not pending or executing on any CPU. 4361 * !! 2973 * 4362 * cancel_work_sync(&delayed_work->work) must !! 2974 * cancel_work_sync(&delayed_work->work) must not be used for 4363 * Use cancel_delayed_work_sync() instead. !! 2975 * delayed_work's. Use cancel_delayed_work_sync() instead. 4364 * << 4365 * Must be called from a sleepable context if << 4366 * workqueue. Can also be called from non-har << 4367 * if @work was last queued on a BH workqueue << 4368 * 2976 * 4369 * Returns %true if @work was pending, %false !! 2977 * The caller must ensure that the workqueue on which @work was last >> 2978 * queued can't be destroyed before this function returns. >> 2979 * >> 2980 * RETURNS: >> 2981 * %true if @work was pending, %false otherwise. 4370 */ 2982 */ 4371 bool cancel_work_sync(struct work_struct *wor 2983 bool cancel_work_sync(struct work_struct *work) 4372 { 2984 { 4373 return __cancel_work_sync(work, 0); !! 2985 return __cancel_work_timer(work, false); 4374 } 2986 } 4375 EXPORT_SYMBOL_GPL(cancel_work_sync); 2987 EXPORT_SYMBOL_GPL(cancel_work_sync); 4376 2988 4377 /** 2989 /** 4378 * cancel_delayed_work - cancel a delayed wor !! 2990 * flush_delayed_work - wait for a dwork to finish executing the last queueing 4379 * @dwork: delayed_work to cancel !! 2991 * @dwork: the delayed work to flush 4380 * 2992 * 4381 * Kill off a pending delayed_work. !! 2993 * Delayed timer is cancelled and the pending work is queued for >> 2994 * immediate execution. Like flush_work(), this function only >> 2995 * considers the last queueing instance of @dwork. 4382 * 2996 * 4383 * Return: %true if @dwork was pending and ca !! 2997 * RETURNS: 4384 * pending. !! 2998 * %true if flush_work() waited for the work to finish execution, >> 2999 * %false if it was already idle. >> 3000 */ >> 3001 bool flush_delayed_work(struct delayed_work *dwork) >> 3002 { >> 3003 local_irq_disable(); >> 3004 if (del_timer_sync(&dwork->timer)) >> 3005 __queue_work(dwork->cpu, dwork->wq, &dwork->work); >> 3006 local_irq_enable(); >> 3007 return flush_work(&dwork->work); >> 3008 } >> 3009 EXPORT_SYMBOL(flush_delayed_work); >> 3010 >> 3011 /** >> 3012 * cancel_delayed_work - cancel a delayed work >> 3013 * @dwork: delayed_work to cancel 4385 * 3014 * 4386 * Note: !! 3015 * Kill off a pending delayed_work. Returns %true if @dwork was pending 4387 * The work callback function may still be ru !! 3016 * and canceled; %false if wasn't pending. 
Note that the work callback 4388 * it returns %true and the work doesn't re-a !! 3017 * function may still be running on return, unless it returns %true and the 4389 * use cancel_delayed_work_sync() to wait on !! 3018 * work doesn't re-arm itself. Explicitly flush or use >> 3019 * cancel_delayed_work_sync() to wait on it. 4390 * 3020 * 4391 * This function is safe to call from any con 3021 * This function is safe to call from any context including IRQ handler. 4392 */ 3022 */ 4393 bool cancel_delayed_work(struct delayed_work 3023 bool cancel_delayed_work(struct delayed_work *dwork) 4394 { 3024 { 4395 return __cancel_work(&dwork->work, WO !! 3025 unsigned long flags; >> 3026 int ret; >> 3027 >> 3028 do { >> 3029 ret = try_to_grab_pending(&dwork->work, true, &flags); >> 3030 } while (unlikely(ret == -EAGAIN)); >> 3031 >> 3032 if (unlikely(ret < 0)) >> 3033 return false; >> 3034 >> 3035 set_work_pool_and_clear_pending(&dwork->work, >> 3036 get_work_pool_id(&dwork->work)); >> 3037 local_irq_restore(flags); >> 3038 return ret; 4396 } 3039 } 4397 EXPORT_SYMBOL(cancel_delayed_work); 3040 EXPORT_SYMBOL(cancel_delayed_work); 4398 3041 4399 /** 3042 /** 4400 * cancel_delayed_work_sync - cancel a delaye 3043 * cancel_delayed_work_sync - cancel a delayed work and wait for it to finish 4401 * @dwork: the delayed work cancel 3044 * @dwork: the delayed work cancel 4402 * 3045 * 4403 * This is cancel_work_sync() for delayed wor 3046 * This is cancel_work_sync() for delayed works. 4404 * 3047 * 4405 * Return: !! 3048 * RETURNS: 4406 * %true if @dwork was pending, %false otherw 3049 * %true if @dwork was pending, %false otherwise. 4407 */ 3050 */ 4408 bool cancel_delayed_work_sync(struct delayed_ 3051 bool cancel_delayed_work_sync(struct delayed_work *dwork) 4409 { 3052 { 4410 return __cancel_work_sync(&dwork->wor !! 3053 return __cancel_work_timer(&dwork->work, true); 4411 } 3054 } 4412 EXPORT_SYMBOL(cancel_delayed_work_sync); 3055 EXPORT_SYMBOL(cancel_delayed_work_sync); 4413 3056 4414 /** 3057 /** 4415 * disable_work - Disable and cancel a work i << 4416 * @work: work item to disable << 4417 * << 4418 * Disable @work by incrementing its disable << 4419 * pending. As long as the disable count is n << 4420 * will fail and return %false. The maximum s << 4421 * power of %WORK_OFFQ_DISABLE_BITS, currentl << 4422 * << 4423 * Can be called from any context. Returns %t << 4424 * otherwise. << 4425 */ << 4426 bool disable_work(struct work_struct *work) << 4427 { << 4428 return __cancel_work(work, WORK_CANCE << 4429 } << 4430 EXPORT_SYMBOL_GPL(disable_work); << 4431 << 4432 /** << 4433 * disable_work_sync - Disable, cancel and dr << 4434 * @work: work item to disable << 4435 * << 4436 * Similar to disable_work() but also wait fo << 4437 * executing. << 4438 * << 4439 * Must be called from a sleepable context if << 4440 * workqueue. Can also be called from non-har << 4441 * if @work was last queued on a BH workqueue << 4442 * << 4443 * Returns %true if @work was pending, %false << 4444 */ << 4445 bool disable_work_sync(struct work_struct *wo << 4446 { << 4447 return __cancel_work_sync(work, WORK_ << 4448 } << 4449 EXPORT_SYMBOL_GPL(disable_work_sync); << 4450 << 4451 /** << 4452 * enable_work - Enable a work item << 4453 * @work: work item to enable << 4454 * << 4455 * Undo disable_work[_sync]() by decrementing << 4456 * only be queued if its disable count is 0. << 4457 * << 4458 * Can be called from any context. Returns %t << 4459 * Otherwise, %false. 
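/*
 * Aside (illustrative sketch, hypothetical names): the disable/enable
 * pair above suits reconfiguration windows during which no new
 * executions may start.  Unlike a bare cancel_work_sync(), queueing
 * stays blocked until the matching enable:
 */
struct my_dev {
	struct work_struct	work;
	/* ... */
};

static void my_dev_reconfig(struct my_dev *dev)
{
	disable_work_sync(&dev->work);	/* cancel, drain and block requeueing */
	/* ... apply new settings ... */
	enable_work(&dev->work);	/* queueing is allowed again */
}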
<< 4460 */ << 4461 bool enable_work(struct work_struct *work) << 4462 { << 4463 struct work_offq_data offqd; << 4464 unsigned long irq_flags; << 4465 << 4466 work_grab_pending(work, 0, &irq_flags << 4467 << 4468 work_offqd_unpack(&offqd, *work_data_ << 4469 work_offqd_enable(&offqd); << 4470 set_work_pool_and_clear_pending(work, << 4471 work_ << 4472 local_irq_restore(irq_flags); << 4473 << 4474 return !offqd.disable; << 4475 } << 4476 EXPORT_SYMBOL_GPL(enable_work); << 4477 << 4478 /** << 4479 * disable_delayed_work - Disable and cancel << 4480 * @dwork: delayed work item to disable << 4481 * << 4482 * disable_work() for delayed work items. << 4483 */ << 4484 bool disable_delayed_work(struct delayed_work << 4485 { << 4486 return __cancel_work(&dwork->work, << 4487 WORK_CANCEL_DELA << 4488 } << 4489 EXPORT_SYMBOL_GPL(disable_delayed_work); << 4490 << 4491 /** << 4492 * disable_delayed_work_sync - Disable, cance << 4493 * @dwork: delayed work item to disable << 4494 * << 4495 * disable_work_sync() for delayed work items << 4496 */ << 4497 bool disable_delayed_work_sync(struct delayed << 4498 { << 4499 return __cancel_work_sync(&dwork->wor << 4500 WORK_CANCEL << 4501 } << 4502 EXPORT_SYMBOL_GPL(disable_delayed_work_sync); << 4503 << 4504 /** << 4505 * enable_delayed_work - Enable a delayed wor << 4506 * @dwork: delayed work item to enable << 4507 * << 4508 * enable_work() for delayed work items. << 4509 */ << 4510 bool enable_delayed_work(struct delayed_work << 4511 { << 4512 return enable_work(&dwork->work); << 4513 } << 4514 EXPORT_SYMBOL_GPL(enable_delayed_work); << 4515 << 4516 /** << 4517 * schedule_on_each_cpu - execute a function 3058 * schedule_on_each_cpu - execute a function synchronously on each online CPU 4518 * @func: the function to call 3059 * @func: the function to call 4519 * 3060 * 4520 * schedule_on_each_cpu() executes @func on e 3061 * schedule_on_each_cpu() executes @func on each online CPU using the 4521 * system workqueue and blocks until all CPUs 3062 * system workqueue and blocks until all CPUs have completed. 4522 * schedule_on_each_cpu() is very slow. 3063 * schedule_on_each_cpu() is very slow. 4523 * 3064 * 4524 * Return: !! 3065 * RETURNS: 4525 * 0 on success, -errno on failure. 3066 * 0 on success, -errno on failure. 4526 */ 3067 */ 4527 int schedule_on_each_cpu(work_func_t func) 3068 int schedule_on_each_cpu(work_func_t func) 4528 { 3069 { 4529 int cpu; 3070 int cpu; 4530 struct work_struct __percpu *works; 3071 struct work_struct __percpu *works; 4531 3072 4532 works = alloc_percpu(struct work_stru 3073 works = alloc_percpu(struct work_struct); 4533 if (!works) 3074 if (!works) 4534 return -ENOMEM; 3075 return -ENOMEM; 4535 3076 4536 cpus_read_lock(); !! 3077 get_online_cpus(); 4537 3078 4538 for_each_online_cpu(cpu) { 3079 for_each_online_cpu(cpu) { 4539 struct work_struct *work = pe 3080 struct work_struct *work = per_cpu_ptr(works, cpu); 4540 3081 4541 INIT_WORK(work, func); 3082 INIT_WORK(work, func); 4542 schedule_work_on(cpu, work); 3083 schedule_work_on(cpu, work); 4543 } 3084 } 4544 3085 4545 for_each_online_cpu(cpu) 3086 for_each_online_cpu(cpu) 4546 flush_work(per_cpu_ptr(works, 3087 flush_work(per_cpu_ptr(works, cpu)); 4547 3088 4548 cpus_read_unlock(); !! 3089 put_online_cpus(); 4549 free_percpu(works); 3090 free_percpu(works); 4550 return 0; 3091 return 0; 4551 } 3092 } 4552 3093 4553 /** 3094 /** >> 3095 * flush_scheduled_work - ensure that any scheduled work has run to completion. 
>> 3096 * >> 3097 * Forces execution of the kernel-global workqueue and blocks until its >> 3098 * completion. >> 3099 * >> 3100 * Think twice before calling this function! It's very easy to get into >> 3101 * trouble if you don't take great care. Either of the following situations >> 3102 * will lead to deadlock: >> 3103 * >> 3104 * One of the work items currently on the workqueue needs to acquire >> 3105 * a lock held by your code or its caller. >> 3106 * >> 3107 * Your code is running in the context of a work routine. >> 3108 * >> 3109 * They will be detected by lockdep when they occur, but the first might not >> 3110 * occur very often. It depends on what work items are on the workqueue and >> 3111 * what locks they need, which you have no control over. >> 3112 * >> 3113 * In most situations flushing the entire workqueue is overkill; you merely >> 3114 * need to know that a particular work item isn't queued and isn't running. >> 3115 * In such cases you should use cancel_delayed_work_sync() or >> 3116 * cancel_work_sync() instead. >> 3117 */ >> 3118 void flush_scheduled_work(void) >> 3119 { >> 3120 flush_workqueue(system_wq); >> 3121 } >> 3122 EXPORT_SYMBOL(flush_scheduled_work); >> 3123 >> 3124 /** 4554 * execute_in_process_context - reliably exec 3125 * execute_in_process_context - reliably execute the routine with user context 4555 * @fn: the function to execute 3126 * @fn: the function to execute 4556 * @ew: guaranteed storage for the ex 3127 * @ew: guaranteed storage for the execute work structure (must 4557 * be available when the work ex 3128 * be available when the work executes) 4558 * 3129 * 4559 * Executes the function immediately if proce 3130 * Executes the function immediately if process context is available, 4560 * otherwise schedules the function for delay 3131 * otherwise schedules the function for delayed execution. 4561 * 3132 * 4562 * Return: 0 - function was executed !! 3133 * Returns: 0 - function was executed 4563 * 1 - function was scheduled fo 3134 * 1 - function was scheduled for execution 4564 */ 3135 */ 4565 int execute_in_process_context(work_func_t fn 3136 int execute_in_process_context(work_func_t fn, struct execute_work *ew) 4566 { 3137 { 4567 if (!in_interrupt()) { 3138 if (!in_interrupt()) { 4568 fn(&ew->work); 3139 fn(&ew->work); 4569 return 0; 3140 return 0; 4570 } 3141 } 4571 3142 4572 INIT_WORK(&ew->work, fn); 3143 INIT_WORK(&ew->work, fn); 4573 schedule_work(&ew->work); 3144 schedule_work(&ew->work); 4574 3145 4575 return 1; 3146 return 1; 4576 } 3147 } 4577 EXPORT_SYMBOL_GPL(execute_in_process_context) 3148 EXPORT_SYMBOL_GPL(execute_in_process_context); 4578 3149 >> 3150 #ifdef CONFIG_SYSFS >> 3151 /* >> 3152 * Workqueues with WQ_SYSFS flag set is visible to userland via >> 3153 * /sys/bus/workqueue/devices/WQ_NAME. All visible workqueues have the >> 3154 * following attributes. >> 3155 * >> 3156 * per_cpu RO bool : whether the workqueue is per-cpu or unbound >> 3157 * max_active RW int : maximum number of in-flight work items >> 3158 * >> 3159 * Unbound workqueues have the following extra attributes. 
>> 3160 * >> 3161 * id RO int : the associated pool ID >> 3162 * nice RW int : nice value of the workers >> 3163 * cpumask RW mask : bitmask of allowed CPUs for the workers >> 3164 */ >> 3165 struct wq_device { >> 3166 struct workqueue_struct *wq; >> 3167 struct device dev; >> 3168 }; >> 3169 >> 3170 static struct workqueue_struct *dev_to_wq(struct device *dev) >> 3171 { >> 3172 struct wq_device *wq_dev = container_of(dev, struct wq_device, dev); >> 3173 >> 3174 return wq_dev->wq; >> 3175 } >> 3176 >> 3177 static ssize_t wq_per_cpu_show(struct device *dev, >> 3178 struct device_attribute *attr, char *buf) >> 3179 { >> 3180 struct workqueue_struct *wq = dev_to_wq(dev); >> 3181 >> 3182 return scnprintf(buf, PAGE_SIZE, "%d\n", (bool)!(wq->flags & WQ_UNBOUND)); >> 3183 } >> 3184 >> 3185 static ssize_t wq_max_active_show(struct device *dev, >> 3186 struct device_attribute *attr, char *buf) >> 3187 { >> 3188 struct workqueue_struct *wq = dev_to_wq(dev); >> 3189 >> 3190 return scnprintf(buf, PAGE_SIZE, "%d\n", wq->saved_max_active); >> 3191 } >> 3192 >> 3193 static ssize_t wq_max_active_store(struct device *dev, >> 3194 struct device_attribute *attr, >> 3195 const char *buf, size_t count) >> 3196 { >> 3197 struct workqueue_struct *wq = dev_to_wq(dev); >> 3198 int val; >> 3199 >> 3200 if (sscanf(buf, "%d", &val) != 1 || val <= 0) >> 3201 return -EINVAL; >> 3202 >> 3203 workqueue_set_max_active(wq, val); >> 3204 return count; >> 3205 } >> 3206 >> 3207 static struct device_attribute wq_sysfs_attrs[] = { >> 3208 __ATTR(per_cpu, 0444, wq_per_cpu_show, NULL), >> 3209 __ATTR(max_active, 0644, wq_max_active_show, wq_max_active_store), >> 3210 __ATTR_NULL, >> 3211 }; >> 3212 >> 3213 static ssize_t wq_pool_ids_show(struct device *dev, >> 3214 struct device_attribute *attr, char *buf) >> 3215 { >> 3216 struct workqueue_struct *wq = dev_to_wq(dev); >> 3217 const char *delim = ""; >> 3218 int node, written = 0; >> 3219 >> 3220 rcu_read_lock_sched(); >> 3221 for_each_node(node) { >> 3222 written += scnprintf(buf + written, PAGE_SIZE - written, >> 3223 "%s%d:%d", delim, node, >> 3224 unbound_pwq_by_node(wq, node)->pool->id); >> 3225 delim = " "; >> 3226 } >> 3227 written += scnprintf(buf + written, PAGE_SIZE - written, "\n"); >> 3228 rcu_read_unlock_sched(); >> 3229 >> 3230 return written; >> 3231 } >> 3232 >> 3233 static ssize_t wq_nice_show(struct device *dev, struct device_attribute *attr, >> 3234 char *buf) >> 3235 { >> 3236 struct workqueue_struct *wq = dev_to_wq(dev); >> 3237 int written; >> 3238 >> 3239 mutex_lock(&wq->mutex); >> 3240 written = scnprintf(buf, PAGE_SIZE, "%d\n", wq->unbound_attrs->nice); >> 3241 mutex_unlock(&wq->mutex); >> 3242 >> 3243 return written; >> 3244 } >> 3245 >> 3246 /* prepare workqueue_attrs for sysfs store operations */ >> 3247 static struct workqueue_attrs *wq_sysfs_prep_attrs(struct workqueue_struct *wq) >> 3248 { >> 3249 struct workqueue_attrs *attrs; >> 3250 >> 3251 attrs = alloc_workqueue_attrs(GFP_KERNEL); >> 3252 if (!attrs) >> 3253 return NULL; >> 3254 >> 3255 mutex_lock(&wq->mutex); >> 3256 copy_workqueue_attrs(attrs, wq->unbound_attrs); >> 3257 mutex_unlock(&wq->mutex); >> 3258 return attrs; >> 3259 } >> 3260 >> 3261 static ssize_t wq_nice_store(struct device *dev, struct device_attribute *attr, >> 3262 const char *buf, size_t count) >> 3263 { >> 3264 struct workqueue_struct *wq = dev_to_wq(dev); >> 3265 struct workqueue_attrs *attrs; >> 3266 int ret; >> 3267 >> 3268 attrs = wq_sysfs_prep_attrs(wq); >> 3269 if (!attrs) >> 3270 return -ENOMEM; >> 3271 >> 3272 if 
(sscanf(buf, "%d", &attrs->nice) == 1 && >> 3273 attrs->nice >= -20 && attrs->nice <= 19) >> 3274 ret = apply_workqueue_attrs(wq, attrs); >> 3275 else >> 3276 ret = -EINVAL; >> 3277 >> 3278 free_workqueue_attrs(attrs); >> 3279 return ret ?: count; >> 3280 } >> 3281 >> 3282 static ssize_t wq_cpumask_show(struct device *dev, >> 3283 struct device_attribute *attr, char *buf) >> 3284 { >> 3285 struct workqueue_struct *wq = dev_to_wq(dev); >> 3286 int written; >> 3287 >> 3288 mutex_lock(&wq->mutex); >> 3289 written = cpumask_scnprintf(buf, PAGE_SIZE, wq->unbound_attrs->cpumask); >> 3290 mutex_unlock(&wq->mutex); >> 3291 >> 3292 written += scnprintf(buf + written, PAGE_SIZE - written, "\n"); >> 3293 return written; >> 3294 } >> 3295 >> 3296 static ssize_t wq_cpumask_store(struct device *dev, >> 3297 struct device_attribute *attr, >> 3298 const char *buf, size_t count) >> 3299 { >> 3300 struct workqueue_struct *wq = dev_to_wq(dev); >> 3301 struct workqueue_attrs *attrs; >> 3302 int ret; >> 3303 >> 3304 attrs = wq_sysfs_prep_attrs(wq); >> 3305 if (!attrs) >> 3306 return -ENOMEM; >> 3307 >> 3308 ret = cpumask_parse(buf, attrs->cpumask); >> 3309 if (!ret) >> 3310 ret = apply_workqueue_attrs(wq, attrs); >> 3311 >> 3312 free_workqueue_attrs(attrs); >> 3313 return ret ?: count; >> 3314 } >> 3315 >> 3316 static ssize_t wq_numa_show(struct device *dev, struct device_attribute *attr, >> 3317 char *buf) >> 3318 { >> 3319 struct workqueue_struct *wq = dev_to_wq(dev); >> 3320 int written; >> 3321 >> 3322 mutex_lock(&wq->mutex); >> 3323 written = scnprintf(buf, PAGE_SIZE, "%d\n", >> 3324 !wq->unbound_attrs->no_numa); >> 3325 mutex_unlock(&wq->mutex); >> 3326 >> 3327 return written; >> 3328 } >> 3329 >> 3330 static ssize_t wq_numa_store(struct device *dev, struct device_attribute *attr, >> 3331 const char *buf, size_t count) >> 3332 { >> 3333 struct workqueue_struct *wq = dev_to_wq(dev); >> 3334 struct workqueue_attrs *attrs; >> 3335 int v, ret; >> 3336 >> 3337 attrs = wq_sysfs_prep_attrs(wq); >> 3338 if (!attrs) >> 3339 return -ENOMEM; >> 3340 >> 3341 ret = -EINVAL; >> 3342 if (sscanf(buf, "%d", &v) == 1) { >> 3343 attrs->no_numa = !v; >> 3344 ret = apply_workqueue_attrs(wq, attrs); >> 3345 } >> 3346 >> 3347 free_workqueue_attrs(attrs); >> 3348 return ret ?: count; >> 3349 } >> 3350 >> 3351 static struct device_attribute wq_sysfs_unbound_attrs[] = { >> 3352 __ATTR(pool_ids, 0444, wq_pool_ids_show, NULL), >> 3353 __ATTR(nice, 0644, wq_nice_show, wq_nice_store), >> 3354 __ATTR(cpumask, 0644, wq_cpumask_show, wq_cpumask_store), >> 3355 __ATTR(numa, 0644, wq_numa_show, wq_numa_store), >> 3356 __ATTR_NULL, >> 3357 }; >> 3358 >> 3359 static struct bus_type wq_subsys = { >> 3360 .name = "workqueue", >> 3361 .dev_attrs = wq_sysfs_attrs, >> 3362 }; >> 3363 >> 3364 static int __init wq_sysfs_init(void) >> 3365 { >> 3366 return subsys_virtual_register(&wq_subsys, NULL); >> 3367 } >> 3368 core_initcall(wq_sysfs_init); >> 3369 >> 3370 static void wq_device_release(struct device *dev) >> 3371 { >> 3372 struct wq_device *wq_dev = container_of(dev, struct wq_device, dev); >> 3373 >> 3374 kfree(wq_dev); >> 3375 } >> 3376 >> 3377 /** >> 3378 * workqueue_sysfs_register - make a workqueue visible in sysfs >> 3379 * @wq: the workqueue to register >> 3380 * >> 3381 * Expose @wq in sysfs under /sys/bus/workqueue/devices. >> 3382 * alloc_workqueue*() automatically calls this function if WQ_SYSFS is set >> 3383 * which is the preferred method. 
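/*
 * Editor's sketch, not part of the source above: because the attributes
 * registered here are ordinary sysfs files, an unbound workqueue can be
 * retuned from userspace with no kernel changes.  A minimal userspace C
 * example; "writeback" is assumed to be a WQ_SYSFS workqueue visible
 * under /sys/bus/workqueue/devices, and note that cpumask takes a hex
 * bitmap (per cpumask_parse() above), not a range list.
 */
#include <stdio.h>

int main(void)
{
	/* restrict the workers to CPUs 0-3 (hex mask 0xf) ... */
	FILE *f = fopen("/sys/bus/workqueue/devices/writeback/cpumask", "w");

	if (!f || fprintf(f, "f\n") < 0)
		return 1;
	fclose(f);

	/* ... and raise their priority (nice must be within -20..19) */
	f = fopen("/sys/bus/workqueue/devices/writeback/nice", "w");
	if (!f || fprintf(f, "-10\n") < 0)
		return 1;
	fclose(f);
	return 0;
}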
>> 3384 * >> 3385 * Workqueue user should use this function directly iff it wants to apply >> 3386 * workqueue_attrs before making the workqueue visible in sysfs; otherwise, >> 3387 * apply_workqueue_attrs() may race against userland updating the >> 3388 * attributes. >> 3389 * >> 3390 * Returns 0 on success, -errno on failure. >> 3391 */ >> 3392 int workqueue_sysfs_register(struct workqueue_struct *wq) >> 3393 { >> 3394 struct wq_device *wq_dev; >> 3395 int ret; >> 3396 >> 3397 /* >> 3398 * Adjusting max_active or creating new pwqs by applyting >> 3399 * attributes breaks ordering guarantee. Disallow exposing ordered >> 3400 * workqueues. >> 3401 */ >> 3402 if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT)) >> 3403 return -EINVAL; >> 3404 >> 3405 wq->wq_dev = wq_dev = kzalloc(sizeof(*wq_dev), GFP_KERNEL); >> 3406 if (!wq_dev) >> 3407 return -ENOMEM; >> 3408 >> 3409 wq_dev->wq = wq; >> 3410 wq_dev->dev.bus = &wq_subsys; >> 3411 wq_dev->dev.init_name = wq->name; >> 3412 wq_dev->dev.release = wq_device_release; >> 3413 >> 3414 /* >> 3415 * unbound_attrs are created separately. Suppress uevent until >> 3416 * everything is ready. >> 3417 */ >> 3418 dev_set_uevent_suppress(&wq_dev->dev, true); >> 3419 >> 3420 ret = device_register(&wq_dev->dev); >> 3421 if (ret) { >> 3422 kfree(wq_dev); >> 3423 wq->wq_dev = NULL; >> 3424 return ret; >> 3425 } >> 3426 >> 3427 if (wq->flags & WQ_UNBOUND) { >> 3428 struct device_attribute *attr; >> 3429 >> 3430 for (attr = wq_sysfs_unbound_attrs; attr->attr.name; attr++) { >> 3431 ret = device_create_file(&wq_dev->dev, attr); >> 3432 if (ret) { >> 3433 device_unregister(&wq_dev->dev); >> 3434 wq->wq_dev = NULL; >> 3435 return ret; >> 3436 } >> 3437 } >> 3438 } >> 3439 >> 3440 dev_set_uevent_suppress(&wq_dev->dev, false); >> 3441 kobject_uevent(&wq_dev->dev.kobj, KOBJ_ADD); >> 3442 return 0; >> 3443 } >> 3444 >> 3445 /** >> 3446 * workqueue_sysfs_unregister - undo workqueue_sysfs_register() >> 3447 * @wq: the workqueue to unregister >> 3448 * >> 3449 * If @wq is registered to sysfs by workqueue_sysfs_register(), unregister. >> 3450 */ >> 3451 static void workqueue_sysfs_unregister(struct workqueue_struct *wq) >> 3452 { >> 3453 struct wq_device *wq_dev = wq->wq_dev; >> 3454 >> 3455 if (!wq->wq_dev) >> 3456 return; >> 3457 >> 3458 wq->wq_dev = NULL; >> 3459 device_unregister(&wq_dev->dev); >> 3460 } >> 3461 #else /* CONFIG_SYSFS */ >> 3462 static void workqueue_sysfs_unregister(struct workqueue_struct *wq) { } >> 3463 #endif /* CONFIG_SYSFS */ >> 3464 4579 /** 3465 /** 4580 * free_workqueue_attrs - free a workqueue_at 3466 * free_workqueue_attrs - free a workqueue_attrs 4581 * @attrs: workqueue_attrs to free 3467 * @attrs: workqueue_attrs to free 4582 * 3468 * 4583 * Undo alloc_workqueue_attrs(). 3469 * Undo alloc_workqueue_attrs(). 4584 */ 3470 */ 4585 void free_workqueue_attrs(struct workqueue_at 3471 void free_workqueue_attrs(struct workqueue_attrs *attrs) 4586 { 3472 { 4587 if (attrs) { 3473 if (attrs) { 4588 free_cpumask_var(attrs->cpuma 3474 free_cpumask_var(attrs->cpumask); 4589 free_cpumask_var(attrs->__pod << 4590 kfree(attrs); 3475 kfree(attrs); 4591 } 3476 } 4592 } 3477 } 4593 3478 4594 /** 3479 /** 4595 * alloc_workqueue_attrs - allocate a workque 3480 * alloc_workqueue_attrs - allocate a workqueue_attrs >> 3481 * @gfp_mask: allocation mask to use 4596 * 3482 * 4597 * Allocate a new workqueue_attrs, initialize 3483 * Allocate a new workqueue_attrs, initialize with default settings and 4598 * return it. !! 3484 * return it. Returns NULL on failure. 
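/*
 * Editor's sketch, not part of the source above: the "apply attrs
 * before exposing" pattern that the comment above recommends.  Names
 * are hypothetical.  WQ_SYSFS is deliberately left out of the flags so
 * userspace cannot race against the initial apply_workqueue_attrs();
 * the workqueue is registered manually once its attributes are final.
 */
static int my_wq_setup(struct workqueue_struct **wqp,
		       const struct workqueue_attrs *attrs)
{
	struct workqueue_struct *wq;
	int ret;

	wq = alloc_workqueue("my_wq", WQ_UNBOUND, 0);
	if (!wq)
		return -ENOMEM;

	ret = apply_workqueue_attrs(wq, attrs);
	if (!ret)
		ret = workqueue_sysfs_register(wq);	/* now visible */
	if (ret) {
		destroy_workqueue(wq);
		return ret;
	}
	*wqp = wq;
	return 0;
}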
4599 * << 4600 * Return: The allocated new workqueue_attr o << 4601 */ 3485 */ 4602 struct workqueue_attrs *alloc_workqueue_attrs !! 3486 struct workqueue_attrs *alloc_workqueue_attrs(gfp_t gfp_mask) 4603 { 3487 { 4604 struct workqueue_attrs *attrs; 3488 struct workqueue_attrs *attrs; 4605 3489 4606 attrs = kzalloc(sizeof(*attrs), GFP_K !! 3490 attrs = kzalloc(sizeof(*attrs), gfp_mask); 4607 if (!attrs) 3491 if (!attrs) 4608 goto fail; 3492 goto fail; 4609 if (!alloc_cpumask_var(&attrs->cpumas !! 3493 if (!alloc_cpumask_var(&attrs->cpumask, gfp_mask)) 4610 goto fail; << 4611 if (!alloc_cpumask_var(&attrs->__pod_ << 4612 goto fail; 3494 goto fail; 4613 3495 4614 cpumask_copy(attrs->cpumask, cpu_poss 3496 cpumask_copy(attrs->cpumask, cpu_possible_mask); 4615 attrs->affn_scope = WQ_AFFN_DFL; << 4616 return attrs; 3497 return attrs; 4617 fail: 3498 fail: 4618 free_workqueue_attrs(attrs); 3499 free_workqueue_attrs(attrs); 4619 return NULL; 3500 return NULL; 4620 } 3501 } 4621 3502 4622 static void copy_workqueue_attrs(struct workq 3503 static void copy_workqueue_attrs(struct workqueue_attrs *to, 4623 const struct 3504 const struct workqueue_attrs *from) 4624 { 3505 { 4625 to->nice = from->nice; 3506 to->nice = from->nice; 4626 cpumask_copy(to->cpumask, from->cpuma 3507 cpumask_copy(to->cpumask, from->cpumask); 4627 cpumask_copy(to->__pod_cpumask, from- << 4628 to->affn_strict = from->affn_strict; << 4629 << 4630 /* 3508 /* 4631 * Unlike hash and equality test, cop !! 3509 * Unlike hash and equality test, this function doesn't ignore 4632 * fields as copying is used for both !! 3510 * ->no_numa as it is used for both pool and wq attrs. Instead, 4633 * get_unbound_pool() explicitly clea !! 3511 * get_unbound_pool() explicitly clears ->no_numa after copying. 4634 */ 3512 */ 4635 to->affn_scope = from->affn_scope; !! 3513 to->no_numa = from->no_numa; 4636 to->ordered = from->ordered; << 4637 } << 4638 << 4639 /* << 4640 * Some attrs fields are workqueue-only. Clea << 4641 * comments in 'struct workqueue_attrs' defin << 4642 */ << 4643 static void wqattrs_clear_for_pool(struct wor << 4644 { << 4645 attrs->affn_scope = WQ_AFFN_NR_TYPES; << 4646 attrs->ordered = false; << 4647 if (attrs->affn_strict) << 4648 cpumask_copy(attrs->cpumask, << 4649 } 3514 } 4650 3515 4651 /* hash value of the content of @attr */ 3516 /* hash value of the content of @attr */ 4652 static u32 wqattrs_hash(const struct workqueu 3517 static u32 wqattrs_hash(const struct workqueue_attrs *attrs) 4653 { 3518 { 4654 u32 hash = 0; 3519 u32 hash = 0; 4655 3520 4656 hash = jhash_1word(attrs->nice, hash) 3521 hash = jhash_1word(attrs->nice, hash); 4657 hash = jhash_1word(attrs->affn_strict !! 3522 hash = jhash(cpumask_bits(attrs->cpumask), 4658 hash = jhash(cpumask_bits(attrs->__po << 4659 BITS_TO_LONGS(nr_cpumask 3523 BITS_TO_LONGS(nr_cpumask_bits) * sizeof(long), hash); 4660 if (!attrs->affn_strict) << 4661 hash = jhash(cpumask_bits(att << 4662 BITS_TO_LONGS(nr << 4663 return hash; 3524 return hash; 4664 } 3525 } 4665 3526 4666 /* content equality test */ 3527 /* content equality test */ 4667 static bool wqattrs_equal(const struct workqu 3528 static bool wqattrs_equal(const struct workqueue_attrs *a, 4668 const struct workqu 3529 const struct workqueue_attrs *b) 4669 { 3530 { 4670 if (a->nice != b->nice) 3531 if (a->nice != b->nice) 4671 return false; 3532 return false; 4672 if (a->affn_strict != b->affn_strict) !! 
3533 if (!cpumask_equal(a->cpumask, b->cpumask)) 4673 return false; << 4674 if (!cpumask_equal(a->__pod_cpumask, << 4675 return false; << 4676 if (!a->affn_strict && !cpumask_equal << 4677 return false; 3534 return false; 4678 return true; 3535 return true; 4679 } 3536 } 4680 3537 4681 /* Update @attrs with actually available CPUs << 4682 static void wqattrs_actualize_cpumask(struct << 4683 const c << 4684 { << 4685 /* << 4686 * Calculate the effective CPU mask o << 4687 * @attrs->cpumask doesn't overlap wi << 4688 * @unbound_cpumask. << 4689 */ << 4690 cpumask_and(attrs->cpumask, attrs->cp << 4691 if (unlikely(cpumask_empty(attrs->cpu << 4692 cpumask_copy(attrs->cpumask, << 4693 } << 4694 << 4695 /* find wq_pod_type to use for @attrs */ << 4696 static const struct wq_pod_type * << 4697 wqattrs_pod_type(const struct workqueue_attrs << 4698 { << 4699 enum wq_affn_scope scope; << 4700 struct wq_pod_type *pt; << 4701 << 4702 /* to synchronize access to wq_affn_d << 4703 lockdep_assert_held(&wq_pool_mutex); << 4704 << 4705 if (attrs->affn_scope == WQ_AFFN_DFL) << 4706 scope = wq_affn_dfl; << 4707 else << 4708 scope = attrs->affn_scope; << 4709 << 4710 pt = &wq_pod_types[scope]; << 4711 << 4712 if (!WARN_ON_ONCE(attrs->affn_scope = << 4713 likely(pt->nr_pods)) << 4714 return pt; << 4715 << 4716 /* << 4717 * Before workqueue_init_topology(), << 4718 * initialized in workqueue_init_earl << 4719 */ << 4720 pt = &wq_pod_types[WQ_AFFN_SYSTEM]; << 4721 BUG_ON(!pt->nr_pods); << 4722 return pt; << 4723 } << 4724 << 4725 /** 3538 /** 4726 * init_worker_pool - initialize a newly zall 3539 * init_worker_pool - initialize a newly zalloc'd worker_pool 4727 * @pool: worker_pool to initialize 3540 * @pool: worker_pool to initialize 4728 * 3541 * 4729 * Initialize a newly zalloc'd @pool. It als !! 3542 * Initiailize a newly zalloc'd @pool. It also allocates @pool->attrs. 4730 * !! 3543 * Returns 0 on success, -errno on failure. Even on failure, all fields 4731 * Return: 0 on success, -errno on failure. << 4732 * inside @pool proper are initialized and pu 3544 * inside @pool proper are initialized and put_unbound_pool() can be called 4733 * on @pool safely to release it. 3545 * on @pool safely to release it. 4734 */ 3546 */ 4735 static int init_worker_pool(struct worker_poo 3547 static int init_worker_pool(struct worker_pool *pool) 4736 { 3548 { 4737 raw_spin_lock_init(&pool->lock); !! 3549 spin_lock_init(&pool->lock); 4738 pool->id = -1; 3550 pool->id = -1; 4739 pool->cpu = -1; 3551 pool->cpu = -1; 4740 pool->node = NUMA_NO_NODE; 3552 pool->node = NUMA_NO_NODE; 4741 pool->flags |= POOL_DISASSOCIATED; 3553 pool->flags |= POOL_DISASSOCIATED; 4742 pool->watchdog_ts = jiffies; << 4743 INIT_LIST_HEAD(&pool->worklist); 3554 INIT_LIST_HEAD(&pool->worklist); 4744 INIT_LIST_HEAD(&pool->idle_list); 3555 INIT_LIST_HEAD(&pool->idle_list); 4745 hash_init(pool->busy_hash); 3556 hash_init(pool->busy_hash); 4746 3557 4747 timer_setup(&pool->idle_timer, idle_w !! 3558 init_timer_deferrable(&pool->idle_timer); 4748 INIT_WORK(&pool->idle_cull_work, idle !! 3559 pool->idle_timer.function = idle_worker_timeout; 4749 !! 3560 pool->idle_timer.data = (unsigned long)pool; 4750 timer_setup(&pool->mayday_timer, pool !! 3561 4751 !! 3562 setup_timer(&pool->mayday_timer, pool_mayday_timeout, 4752 INIT_LIST_HEAD(&pool->workers); !! 
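/*
 * Editor's sketch, not part of the source above: the effective-cpumask
 * rule of wqattrs_actualize_cpumask(), written out on its own.  The
 * requested mask is intersected with the global unbound mask, and an
 * empty intersection falls back to the whole unbound mask rather than
 * leaving the workqueue with no CPU to run on.
 */
static void effective_cpumask(struct cpumask *dst,
			      const struct cpumask *requested,
			      const struct cpumask *unbound_allowed)
{
	/* cpumask_and() returns false when the resulting mask is empty */
	if (!cpumask_and(dst, requested, unbound_allowed))
		cpumask_copy(dst, unbound_allowed);
}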
3563 (unsigned long)pool); >> 3564 >> 3565 mutex_init(&pool->manager_arb); >> 3566 mutex_init(&pool->manager_mutex); >> 3567 idr_init(&pool->worker_idr); 4753 3568 4754 ida_init(&pool->worker_ida); << 4755 INIT_HLIST_NODE(&pool->hash_node); 3569 INIT_HLIST_NODE(&pool->hash_node); 4756 pool->refcnt = 1; 3570 pool->refcnt = 1; 4757 3571 4758 /* shouldn't fail above this point */ 3572 /* shouldn't fail above this point */ 4759 pool->attrs = alloc_workqueue_attrs() !! 3573 pool->attrs = alloc_workqueue_attrs(GFP_KERNEL); 4760 if (!pool->attrs) 3574 if (!pool->attrs) 4761 return -ENOMEM; 3575 return -ENOMEM; 4762 << 4763 wqattrs_clear_for_pool(pool->attrs); << 4764 << 4765 return 0; << 4766 } << 4767 << 4768 #ifdef CONFIG_LOCKDEP << 4769 static void wq_init_lockdep(struct workqueue_ << 4770 { << 4771 char *lock_name; << 4772 << 4773 lockdep_register_key(&wq->key); << 4774 lock_name = kasprintf(GFP_KERNEL, "%s << 4775 if (!lock_name) << 4776 lock_name = wq->name; << 4777 << 4778 wq->lock_name = lock_name; << 4779 lockdep_init_map(&wq->lockdep_map, lo << 4780 } << 4781 << 4782 static void wq_unregister_lockdep(struct work << 4783 { << 4784 lockdep_unregister_key(&wq->key); << 4785 } << 4786 << 4787 static void wq_free_lockdep(struct workqueue_ << 4788 { << 4789 if (wq->lock_name != wq->name) << 4790 kfree(wq->lock_name); << 4791 } << 4792 #else << 4793 static void wq_init_lockdep(struct workqueue_ << 4794 { << 4795 } << 4796 << 4797 static void wq_unregister_lockdep(struct work << 4798 { << 4799 } << 4800 << 4801 static void wq_free_lockdep(struct workqueue_ << 4802 { << 4803 } << 4804 #endif << 4805 << 4806 static void free_node_nr_active(struct wq_nod << 4807 { << 4808 int node; << 4809 << 4810 for_each_node(node) { << 4811 kfree(nna_ar[node]); << 4812 nna_ar[node] = NULL; << 4813 } << 4814 << 4815 kfree(nna_ar[nr_node_ids]); << 4816 nna_ar[nr_node_ids] = NULL; << 4817 } << 4818 << 4819 static void init_node_nr_active(struct wq_nod << 4820 { << 4821 nna->max = WQ_DFL_MIN_ACTIVE; << 4822 atomic_set(&nna->nr, 0); << 4823 raw_spin_lock_init(&nna->lock); << 4824 INIT_LIST_HEAD(&nna->pending_pwqs); << 4825 } << 4826 << 4827 /* << 4828 * Each node's nr_active counter will be acce << 4829 * should be allocated in the node. 
<< 4830 */ << 4831 static int alloc_node_nr_active(struct wq_nod << 4832 { << 4833 struct wq_node_nr_active *nna; << 4834 int node; << 4835 << 4836 for_each_node(node) { << 4837 nna = kzalloc_node(sizeof(*nn << 4838 if (!nna) << 4839 goto err_free; << 4840 init_node_nr_active(nna); << 4841 nna_ar[node] = nna; << 4842 } << 4843 << 4844 /* [nr_node_ids] is used as the fallb << 4845 nna = kzalloc_node(sizeof(*nna), GFP_ << 4846 if (!nna) << 4847 goto err_free; << 4848 init_node_nr_active(nna); << 4849 nna_ar[nr_node_ids] = nna; << 4850 << 4851 return 0; 3576 return 0; 4852 << 4853 err_free: << 4854 free_node_nr_active(nna_ar); << 4855 return -ENOMEM; << 4856 } << 4857 << 4858 static void rcu_free_wq(struct rcu_head *rcu) << 4859 { << 4860 struct workqueue_struct *wq = << 4861 container_of(rcu, struct work << 4862 << 4863 if (wq->flags & WQ_UNBOUND) << 4864 free_node_nr_active(wq->node_ << 4865 << 4866 wq_free_lockdep(wq); << 4867 free_percpu(wq->cpu_pwq); << 4868 free_workqueue_attrs(wq->unbound_attr << 4869 kfree(wq); << 4870 } 3577 } 4871 3578 4872 static void rcu_free_pool(struct rcu_head *rc 3579 static void rcu_free_pool(struct rcu_head *rcu) 4873 { 3580 { 4874 struct worker_pool *pool = container_ 3581 struct worker_pool *pool = container_of(rcu, struct worker_pool, rcu); 4875 3582 4876 ida_destroy(&pool->worker_ida); !! 3583 idr_destroy(&pool->worker_idr); 4877 free_workqueue_attrs(pool->attrs); 3584 free_workqueue_attrs(pool->attrs); 4878 kfree(pool); 3585 kfree(pool); 4879 } 3586 } 4880 3587 4881 /** 3588 /** 4882 * put_unbound_pool - put a worker_pool 3589 * put_unbound_pool - put a worker_pool 4883 * @pool: worker_pool to put 3590 * @pool: worker_pool to put 4884 * 3591 * 4885 * Put @pool. If its refcnt reaches zero, it !! 3592 * Put @pool. If its refcnt reaches zero, it gets destroyed in sched-RCU 4886 * safe manner. get_unbound_pool() calls thi 3593 * safe manner. get_unbound_pool() calls this function on its failure path 4887 * and this function should be able to releas 3594 * and this function should be able to release pools which went through, 4888 * successfully or not, init_worker_pool(). 3595 * successfully or not, init_worker_pool(). 4889 * 3596 * 4890 * Should be called with wq_pool_mutex held. 3597 * Should be called with wq_pool_mutex held. 4891 */ 3598 */ 4892 static void put_unbound_pool(struct worker_po 3599 static void put_unbound_pool(struct worker_pool *pool) 4893 { 3600 { 4894 struct worker *worker; 3601 struct worker *worker; 4895 LIST_HEAD(cull_list); << 4896 3602 4897 lockdep_assert_held(&wq_pool_mutex); 3603 lockdep_assert_held(&wq_pool_mutex); 4898 3604 4899 if (--pool->refcnt) 3605 if (--pool->refcnt) 4900 return; 3606 return; 4901 3607 4902 /* sanity checks */ 3608 /* sanity checks */ 4903 if (WARN_ON(!(pool->cpu < 0)) || !! 3609 if (WARN_ON(!(pool->flags & POOL_DISASSOCIATED)) || 4904 WARN_ON(!list_empty(&pool->workli 3610 WARN_ON(!list_empty(&pool->worklist))) 4905 return; 3611 return; 4906 3612 4907 /* release id and unhash */ 3613 /* release id and unhash */ 4908 if (pool->id >= 0) 3614 if (pool->id >= 0) 4909 idr_remove(&worker_pool_idr, 3615 idr_remove(&worker_pool_idr, pool->id); 4910 hash_del(&pool->hash_node); 3616 hash_del(&pool->hash_node); 4911 3617 4912 /* 3618 /* 4913 * Become the manager and destroy all !! 3619 * Become the manager and destroy all workers. Grabbing 4914 * @pool's workers from blocking on a !! 3620 * manager_arb prevents @pool's workers from blocking on 4915 * manager and @pool gets freed with !! 3621 * manager_mutex. 
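/*
 * Editor's sketch, not part of the source above: the allocation idiom
 * of alloc_node_nr_active(), reduced to its shape.  One object lands on
 * each node's local memory via kzalloc_node(), slot [nr_node_ids] is a
 * node-agnostic fallback, and @ar must arrive zeroed (the real array is
 * embedded in a kzalloc'd workqueue_struct) so the error path may
 * blindly kfree() every slot.
 */
static int alloc_per_node_array(void *ar[])
{
	int node;

	for_each_node(node) {
		ar[node] = kzalloc_node(sizeof(long), GFP_KERNEL, node);
		if (!ar[node])
			goto err_free;
	}
	ar[nr_node_ids] = kzalloc(sizeof(long), GFP_KERNEL);	/* fallback */
	if (!ar[nr_node_ids])
		goto err_free;
	return 0;

err_free:
	for (node = 0; node <= nr_node_ids; node++) {
		kfree(ar[node]);	/* kfree(NULL) is a no-op */
		ar[node] = NULL;
	}
	return -ENOMEM;
}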
4916 * !! 3622 */ 4917 * Having a concurrent manager is qui !! 3623 mutex_lock(&pool->manager_arb); 4918 * only get here with !! 3624 mutex_lock(&pool->manager_mutex); 4919 * pwq->refcnt == pool->refcnt == 0 !! 3625 spin_lock_irq(&pool->lock); 4920 * which implies no work queued to th << 4921 * become the manager. However a work << 4922 * manager before the refcnts dropped << 4923 * drops pool->lock << 4924 */ << 4925 while (true) { << 4926 rcuwait_wait_event(&manager_w << 4927 !(pool->fl << 4928 TASK_UNINT << 4929 << 4930 mutex_lock(&wq_pool_attach_mu << 4931 raw_spin_lock_irq(&pool->lock << 4932 if (!(pool->flags & POOL_MANA << 4933 pool->flags |= POOL_M << 4934 break; << 4935 } << 4936 raw_spin_unlock_irq(&pool->lo << 4937 mutex_unlock(&wq_pool_attach_ << 4938 } << 4939 3626 4940 while ((worker = first_idle_worker(po !! 3627 while ((worker = first_worker(pool))) 4941 set_worker_dying(worker, &cul !! 3628 destroy_worker(worker); 4942 WARN_ON(pool->nr_workers || pool->nr_ 3629 WARN_ON(pool->nr_workers || pool->nr_idle); 4943 raw_spin_unlock_irq(&pool->lock); << 4944 << 4945 detach_dying_workers(&cull_list); << 4946 3630 4947 mutex_unlock(&wq_pool_attach_mutex); !! 3631 spin_unlock_irq(&pool->lock); 4948 !! 3632 mutex_unlock(&pool->manager_mutex); 4949 reap_dying_workers(&cull_list); !! 3633 mutex_unlock(&pool->manager_arb); 4950 3634 4951 /* shut down the timers */ 3635 /* shut down the timers */ 4952 del_timer_sync(&pool->idle_timer); 3636 del_timer_sync(&pool->idle_timer); 4953 cancel_work_sync(&pool->idle_cull_wor << 4954 del_timer_sync(&pool->mayday_timer); 3637 del_timer_sync(&pool->mayday_timer); 4955 3638 4956 /* RCU protected to allow dereference !! 3639 /* sched-RCU protected to allow dereferences from get_work_pool() */ 4957 call_rcu(&pool->rcu, rcu_free_pool); !! 3640 call_rcu_sched(&pool->rcu, rcu_free_pool); 4958 } 3641 } 4959 3642 4960 /** 3643 /** 4961 * get_unbound_pool - get a worker_pool with 3644 * get_unbound_pool - get a worker_pool with the specified attributes 4962 * @attrs: the attributes of the worker_pool 3645 * @attrs: the attributes of the worker_pool to get 4963 * 3646 * 4964 * Obtain a worker_pool which has the same at 3647 * Obtain a worker_pool which has the same attributes as @attrs, bump the 4965 * reference count and return it. If there a 3648 * reference count and return it. If there already is a matching 4966 * worker_pool, it will be used; otherwise, t 3649 * worker_pool, it will be used; otherwise, this function attempts to 4967 * create a new one. !! 3650 * create a new one. On failure, returns NULL. 4968 * 3651 * 4969 * Should be called with wq_pool_mutex held. 3652 * Should be called with wq_pool_mutex held. 4970 * << 4971 * Return: On success, a worker_pool with the << 4972 * On failure, %NULL. << 4973 */ 3653 */ 4974 static struct worker_pool *get_unbound_pool(c 3654 static struct worker_pool *get_unbound_pool(const struct workqueue_attrs *attrs) 4975 { 3655 { 4976 struct wq_pod_type *pt = &wq_pod_type << 4977 u32 hash = wqattrs_hash(attrs); 3656 u32 hash = wqattrs_hash(attrs); 4978 struct worker_pool *pool; 3657 struct worker_pool *pool; 4979 int pod, node = NUMA_NO_NODE; !! 3658 int node; 4980 3659 4981 lockdep_assert_held(&wq_pool_mutex); 3660 lockdep_assert_held(&wq_pool_mutex); 4982 3661 4983 /* do we already have a matching pool 3662 /* do we already have a matching pool? 
*/ 4984 hash_for_each_possible(unbound_pool_h 3663 hash_for_each_possible(unbound_pool_hash, pool, hash_node, hash) { 4985 if (wqattrs_equal(pool->attrs 3664 if (wqattrs_equal(pool->attrs, attrs)) { 4986 pool->refcnt++; 3665 pool->refcnt++; 4987 return pool; !! 3666 goto out_unlock; 4988 } << 4989 } << 4990 << 4991 /* If __pod_cpumask is contained insi << 4992 for (pod = 0; pod < pt->nr_pods; pod+ << 4993 if (cpumask_subset(attrs->__p << 4994 node = pt->pod_node[p << 4995 break; << 4996 } 3667 } 4997 } 3668 } 4998 3669 4999 /* nope, create a new one */ 3670 /* nope, create a new one */ 5000 pool = kzalloc_node(sizeof(*pool), GF !! 3671 pool = kzalloc(sizeof(*pool), GFP_KERNEL); 5001 if (!pool || init_worker_pool(pool) < 3672 if (!pool || init_worker_pool(pool) < 0) 5002 goto fail; 3673 goto fail; 5003 3674 5004 pool->node = node; !! 3675 if (workqueue_freezing) >> 3676 pool->flags |= POOL_FREEZING; >> 3677 >> 3678 lockdep_set_subclass(&pool->lock, 1); /* see put_pwq() */ 5005 copy_workqueue_attrs(pool->attrs, att 3679 copy_workqueue_attrs(pool->attrs, attrs); 5006 wqattrs_clear_for_pool(pool->attrs); !! 3680 >> 3681 /* >> 3682 * no_numa isn't a worker_pool attribute, always clear it. See >> 3683 * 'struct workqueue_attrs' comments for detail. >> 3684 */ >> 3685 pool->attrs->no_numa = false; >> 3686 >> 3687 /* if cpumask is contained inside a NUMA node, we belong to that node */ >> 3688 if (wq_numa_enabled) { >> 3689 for_each_node(node) { >> 3690 if (cpumask_subset(pool->attrs->cpumask, >> 3691 wq_numa_possible_cpumask[node])) { >> 3692 pool->node = node; >> 3693 break; >> 3694 } >> 3695 } >> 3696 } 5007 3697 5008 if (worker_pool_assign_id(pool) < 0) 3698 if (worker_pool_assign_id(pool) < 0) 5009 goto fail; 3699 goto fail; 5010 3700 5011 /* create and start the initial worke 3701 /* create and start the initial worker */ 5012 if (wq_online && !create_worker(pool) !! 3702 if (create_and_start_worker(pool) < 0) 5013 goto fail; 3703 goto fail; 5014 3704 5015 /* install */ 3705 /* install */ 5016 hash_add(unbound_pool_hash, &pool->ha 3706 hash_add(unbound_pool_hash, &pool->hash_node, hash); 5017 !! 3707 out_unlock: 5018 return pool; 3708 return pool; 5019 fail: 3709 fail: 5020 if (pool) 3710 if (pool) 5021 put_unbound_pool(pool); 3711 put_unbound_pool(pool); 5022 return NULL; 3712 return NULL; 5023 } 3713 } 5024 3714 >> 3715 static void rcu_free_pwq(struct rcu_head *rcu) >> 3716 { >> 3717 kmem_cache_free(pwq_cache, >> 3718 container_of(rcu, struct pool_workqueue, rcu)); >> 3719 } >> 3720 5025 /* 3721 /* 5026 * Scheduled on pwq_release_worker by put_pwq !! 3722 * Scheduled on system_wq by put_pwq() when an unbound pwq hits zero refcnt 5027 * refcnt and needs to be destroyed. !! 3723 * and needs to be destroyed. 5028 */ 3724 */ 5029 static void pwq_release_workfn(struct kthread !! 3725 static void pwq_unbound_release_workfn(struct work_struct *work) 5030 { 3726 { 5031 struct pool_workqueue *pwq = containe 3727 struct pool_workqueue *pwq = container_of(work, struct pool_workqueue, 5032 !! 3728 unbound_release_work); 5033 struct workqueue_struct *wq = pwq->wq 3729 struct workqueue_struct *wq = pwq->wq; 5034 struct worker_pool *pool = pwq->pool; 3730 struct worker_pool *pool = pwq->pool; 5035 bool is_last = false; !! 3731 bool is_last; >> 3732 >> 3733 if (WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND))) >> 3734 return; 5036 3735 5037 /* 3736 /* 5038 * When @pwq is not linked, it doesn' !! 3737 * Unlink @pwq. Synchronization against wq->mutex isn't strictly 5039 * @wq, and @wq is invalid to access. !! 
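/*
 * Editor's sketch, not part of the source above: the get-or-create
 * idiom of get_unbound_pool(), with a plain int key standing in for
 * workqueue_attrs.  All names are hypothetical.  Lookup, refcount bump
 * and insertion share one mutex, so equal keys always resolve to one
 * shared object -- which is how workqueues with equal attributes end
 * up sharing a worker_pool.
 */
struct cached_obj {
	struct hlist_node hash_node;
	int key;
	int refcnt;
};

static DEFINE_HASHTABLE(obj_hash, 6);
static DEFINE_MUTEX(obj_mutex);

static struct cached_obj *get_cached_obj(int key)
{
	u32 hash = jhash_1word(key, 0);
	struct cached_obj *obj;

	mutex_lock(&obj_mutex);
	hash_for_each_possible(obj_hash, obj, hash_node, hash) {
		if (obj->key == key) {
			obj->refcnt++;		/* reuse the match */
			goto out;
		}
	}

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (obj) {
		obj->key = key;
		obj->refcnt = 1;
		hash_add(obj_hash, &obj->hash_node, hash);
	}
out:
	mutex_unlock(&obj_mutex);
	return obj;
}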
3738 * necessary on release but do it anyway. It's easier to verify >> 3739 * and consistent with the linking path. 5040 */ 3740 */ 5041 if (!list_empty(&pwq->pwqs_node)) { !! 3741 mutex_lock(&wq->mutex); 5042 mutex_lock(&wq->mutex); !! 3742 list_del_rcu(&pwq->pwqs_node); 5043 list_del_rcu(&pwq->pwqs_node) !! 3743 is_last = list_empty(&wq->pwqs); 5044 is_last = list_empty(&wq->pwq !! 3744 mutex_unlock(&wq->mutex); 5045 3745 5046 /* !! 3746 mutex_lock(&wq_pool_mutex); 5047 * For ordered workqueue with !! 3747 put_unbound_pool(pool); 5048 */ !! 3748 mutex_unlock(&wq_pool_mutex); 5049 if (!is_last && (wq->flags & << 5050 unplug_oldest_pwq(wq) << 5051 3749 5052 mutex_unlock(&wq->mutex); !! 3750 call_rcu_sched(&pwq->rcu, rcu_free_pwq); 5053 } << 5054 3751 5055 if (wq->flags & WQ_UNBOUND) { !! 3752 /* 5056 mutex_lock(&wq_pool_mutex); !! 3753 * If we're the last pwq going away, @wq is already dead and no one 5057 put_unbound_pool(pool); !! 3754 * is gonna access it anymore. Free it. 5058 mutex_unlock(&wq_pool_mutex); !! 3755 */ >> 3756 if (is_last) { >> 3757 free_workqueue_attrs(wq->unbound_attrs); >> 3758 kfree(wq); 5059 } 3759 } >> 3760 } 5060 3761 5061 if (!list_empty(&pwq->pending_node)) !! 3762 /** 5062 struct wq_node_nr_active *nna !! 3763 * pwq_adjust_max_active - update a pwq's max_active to the current setting 5063 wq_node_nr_active(pwq !! 3764 * @pwq: target pool_workqueue >> 3765 * >> 3766 * If @pwq isn't freezing, set @pwq->max_active to the associated >> 3767 * workqueue's saved_max_active and activate delayed work items >> 3768 * accordingly. If @pwq is freezing, clear @pwq->max_active to zero. >> 3769 */ >> 3770 static void pwq_adjust_max_active(struct pool_workqueue *pwq) >> 3771 { >> 3772 struct workqueue_struct *wq = pwq->wq; >> 3773 bool freezable = wq->flags & WQ_FREEZABLE; 5064 3774 5065 raw_spin_lock_irq(&nna->lock) !! 3775 /* for @wq->saved_max_active */ 5066 list_del_init(&pwq->pending_n !! 3776 lockdep_assert_held(&wq->mutex); 5067 raw_spin_unlock_irq(&nna->loc << 5068 } << 5069 3777 5070 kfree_rcu(pwq, rcu); !! 3778 /* fast exit for non-freezable wqs */ >> 3779 if (!freezable && pwq->max_active == wq->saved_max_active) >> 3780 return; 5071 3781 5072 /* !! 3782 spin_lock_irq(&pwq->pool->lock); 5073 * If we're the last pwq going away, !! 3783 5074 * is gonna access it anymore. Sched !! 3784 if (!freezable || !(pwq->pool->flags & POOL_FREEZING)) { 5075 */ !! 3785 pwq->max_active = wq->saved_max_active; 5076 if (is_last) { !! 3786 5077 wq_unregister_lockdep(wq); !! 3787 while (!list_empty(&pwq->delayed_works) && 5078 call_rcu(&wq->rcu, rcu_free_w !! 3788 pwq->nr_active < pwq->max_active) >> 3789 pwq_activate_first_delayed(pwq); >> 3790 >> 3791 /* >> 3792 * Need to kick a worker after thawed or an unbound wq's >> 3793 * max_active is bumped. It's a slow path. Do it always. >> 3794 */ >> 3795 wake_up_worker(pwq->pool); >> 3796 } else { >> 3797 pwq->max_active = 0; 5079 } 3798 } >> 3799 >> 3800 spin_unlock_irq(&pwq->pool->lock); 5080 } 3801 } 5081 3802 5082 /* initialize newly allocated @pwq which is a !! 3803 /* initialize newly alloced @pwq which is associated with @wq and @pool */ 5083 static void init_pwq(struct pool_workqueue *p 3804 static void init_pwq(struct pool_workqueue *pwq, struct workqueue_struct *wq, 5084 struct worker_pool *pool 3805 struct worker_pool *pool) 5085 { 3806 { 5086 BUG_ON((unsigned long)pwq & ~WORK_STR !! 
3807 BUG_ON((unsigned long)pwq & WORK_STRUCT_FLAG_MASK); 5087 3808 5088 memset(pwq, 0, sizeof(*pwq)); 3809 memset(pwq, 0, sizeof(*pwq)); 5089 3810 5090 pwq->pool = pool; 3811 pwq->pool = pool; 5091 pwq->wq = wq; 3812 pwq->wq = wq; 5092 pwq->flush_color = -1; 3813 pwq->flush_color = -1; 5093 pwq->refcnt = 1; 3814 pwq->refcnt = 1; 5094 INIT_LIST_HEAD(&pwq->inactive_works); !! 3815 INIT_LIST_HEAD(&pwq->delayed_works); 5095 INIT_LIST_HEAD(&pwq->pending_node); << 5096 INIT_LIST_HEAD(&pwq->pwqs_node); 3816 INIT_LIST_HEAD(&pwq->pwqs_node); 5097 INIT_LIST_HEAD(&pwq->mayday_node); 3817 INIT_LIST_HEAD(&pwq->mayday_node); 5098 kthread_init_work(&pwq->release_work, !! 3818 INIT_WORK(&pwq->unbound_release_work, pwq_unbound_release_workfn); 5099 } 3819 } 5100 3820 5101 /* sync @pwq with the current state of its as 3821 /* sync @pwq with the current state of its associated wq and link it */ 5102 static void link_pwq(struct pool_workqueue *p 3822 static void link_pwq(struct pool_workqueue *pwq) 5103 { 3823 { 5104 struct workqueue_struct *wq = pwq->wq 3824 struct workqueue_struct *wq = pwq->wq; 5105 3825 5106 lockdep_assert_held(&wq->mutex); 3826 lockdep_assert_held(&wq->mutex); 5107 3827 5108 /* may be called multiple times, igno 3828 /* may be called multiple times, ignore if already linked */ 5109 if (!list_empty(&pwq->pwqs_node)) 3829 if (!list_empty(&pwq->pwqs_node)) 5110 return; 3830 return; 5111 3831 5112 /* set the matching work_color */ !! 3832 /* >> 3833 * Set the matching work_color. This is synchronized with >> 3834 * wq->mutex to avoid confusing flush_workqueue(). >> 3835 */ 5113 pwq->work_color = wq->work_color; 3836 pwq->work_color = wq->work_color; 5114 3837 >> 3838 /* sync max_active to the current setting */ >> 3839 pwq_adjust_max_active(pwq); >> 3840 5115 /* link in @pwq */ 3841 /* link in @pwq */ 5116 list_add_tail_rcu(&pwq->pwqs_node, &w !! 3842 list_add_rcu(&pwq->pwqs_node, &wq->pwqs); 5117 } 3843 } 5118 3844 5119 /* obtain a pool matching @attr and create a 3845 /* obtain a pool matching @attr and create a pwq associating the pool and @wq */ 5120 static struct pool_workqueue *alloc_unbound_p 3846 static struct pool_workqueue *alloc_unbound_pwq(struct workqueue_struct *wq, 5121 const 3847 const struct workqueue_attrs *attrs) 5122 { 3848 { 5123 struct worker_pool *pool; 3849 struct worker_pool *pool; 5124 struct pool_workqueue *pwq; 3850 struct pool_workqueue *pwq; 5125 3851 5126 lockdep_assert_held(&wq_pool_mutex); 3852 lockdep_assert_held(&wq_pool_mutex); 5127 3853 5128 pool = get_unbound_pool(attrs); 3854 pool = get_unbound_pool(attrs); 5129 if (!pool) 3855 if (!pool) 5130 return NULL; 3856 return NULL; 5131 3857 5132 pwq = kmem_cache_alloc_node(pwq_cache 3858 pwq = kmem_cache_alloc_node(pwq_cache, GFP_KERNEL, pool->node); 5133 if (!pwq) { 3859 if (!pwq) { 5134 put_unbound_pool(pool); 3860 put_unbound_pool(pool); 5135 return NULL; 3861 return NULL; 5136 } 3862 } 5137 3863 5138 init_pwq(pwq, wq, pool); 3864 init_pwq(pwq, wq, pool); 5139 return pwq; 3865 return pwq; 5140 } 3866 } 5141 3867 5142 static void apply_wqattrs_lock(void) !! 3868 /* undo alloc_unbound_pwq(), used only in the error path */ >> 3869 static void free_unbound_pwq(struct pool_workqueue *pwq) 5143 { 3870 { 5144 mutex_lock(&wq_pool_mutex); !! 3871 lockdep_assert_held(&wq_pool_mutex); 5145 } << 5146 3872 5147 static void apply_wqattrs_unlock(void) !! 3873 if (pwq) { 5148 { !! 3874 put_unbound_pool(pwq->pool); 5149 mutex_unlock(&wq_pool_mutex); !! 
3875 kmem_cache_free(pwq_cache, pwq); >> 3876 } 5150 } 3877 } 5151 3878 5152 /** 3879 /** 5153 * wq_calc_pod_cpumask - calculate a wq_attrs !! 3880 * wq_calc_node_mask - calculate a wq_attrs' cpumask for the specified node 5154 * @attrs: the wq_attrs of the default pwq of !! 3881 * @attrs: the wq_attrs of interest 5155 * @cpu: the target CPU !! 3882 * @node: the target NUMA node >> 3883 * @cpu_going_down: if >= 0, the CPU to consider as offline >> 3884 * @cpumask: outarg, the resulting cpumask 5156 * 3885 * 5157 * Calculate the cpumask a workqueue with @at !! 3886 * Calculate the cpumask a workqueue with @attrs should use on @node. If 5158 * The result is stored in @attrs->__pod_cpum !! 3887 * @cpu_going_down is >= 0, that cpu is considered offline during >> 3888 * calculation. The result is stored in @cpumask. This function returns >> 3889 * %true if the resulting @cpumask is different from @attrs->cpumask, >> 3890 * %false if equal. 5159 * 3891 * 5160 * If pod affinity is not enabled, @attrs->cp !! 3892 * If NUMA affinity is not enabled, @attrs->cpumask is always used. If 5161 * and @pod has online CPUs requested by @att !! 3893 * enabled and @node has online CPUs requested by @attrs, the returned 5162 * intersection of the possible CPUs of @pod !! 3894 * cpumask is the intersection of the possible CPUs of @node and >> 3895 * @attrs->cpumask. 5163 * 3896 * 5164 * The caller is responsible for ensuring tha !! 3897 * The caller is responsible for ensuring that the cpumask of @node stays >> 3898 * stable. 5165 */ 3899 */ 5166 static void wq_calc_pod_cpumask(struct workqu !! 3900 static bool wq_calc_node_cpumask(const struct workqueue_attrs *attrs, int node, >> 3901 int cpu_going_down, cpumask_t *cpumask) 5167 { 3902 { 5168 const struct wq_pod_type *pt = wqattr !! 3903 if (!wq_numa_enabled || attrs->no_numa) 5169 int pod = pt->cpu_pod[cpu]; !! 3904 goto use_dfl; 5170 3905 5171 /* calculate possible CPUs in @pod th !! 3906 /* does @node have any online CPUs @attrs wants? */ 5172 cpumask_and(attrs->__pod_cpumask, pt- !! 3907 cpumask_and(cpumask, cpumask_of_node(node), attrs->cpumask); 5173 /* does @pod have any online CPUs @at !! 3908 if (cpu_going_down >= 0) 5174 if (!cpumask_intersects(attrs->__pod_ !! 3909 cpumask_clear_cpu(cpu_going_down, cpumask); 5175 cpumask_copy(attrs->__pod_cpu !! 3910 5176 return; !! 3911 if (cpumask_empty(cpumask)) 5177 } !! 3912 goto use_dfl; >> 3913 >> 3914 /* yeap, return possible CPUs in @node that @attrs wants */ >> 3915 cpumask_and(cpumask, attrs->cpumask, wq_numa_possible_cpumask[node]); >> 3916 return !cpumask_equal(cpumask, attrs->cpumask); >> 3917 >> 3918 use_dfl: >> 3919 cpumask_copy(cpumask, attrs->cpumask); >> 3920 return false; 5178 } 3921 } 5179 3922 5180 /* install @pwq into @wq and return the old p !! 3923 /* install @pwq into @wq's numa_pwq_tbl[] for @node and return the old pwq */ 5181 static struct pool_workqueue *install_unbound !! 3924 static struct pool_workqueue *numa_pwq_tbl_install(struct workqueue_struct *wq, 5182 int c !! 3925 int node, >> 3926 struct pool_workqueue *pwq) 5183 { 3927 { 5184 struct pool_workqueue __rcu **slot = << 5185 struct pool_workqueue *old_pwq; 3928 struct pool_workqueue *old_pwq; 5186 3929 5187 lockdep_assert_held(&wq_pool_mutex); << 5188 lockdep_assert_held(&wq->mutex); 3930 lockdep_assert_held(&wq->mutex); 5189 3931 5190 /* link_pwq() can handle duplicate ca 3932 /* link_pwq() can handle duplicate calls */ 5191 link_pwq(pwq); 3933 link_pwq(pwq); 5192 3934 5193 old_pwq = rcu_access_pointer(*slot); !! 
3935 old_pwq = rcu_access_pointer(wq->numa_pwq_tbl[node]); 5194 rcu_assign_pointer(*slot, pwq); !! 3936 rcu_assign_pointer(wq->numa_pwq_tbl[node], pwq); 5195 return old_pwq; 3937 return old_pwq; 5196 } 3938 } 5197 3939 5198 /* context to store the prepared attrs & pwqs !! 3940 /** 5199 struct apply_wqattrs_ctx { !! 3941 * apply_workqueue_attrs - apply new workqueue_attrs to an unbound workqueue 5200 struct workqueue_struct *wq; !! 3942 * @wq: the target workqueue 5201 struct workqueue_attrs *attrs; !! 3943 * @attrs: the workqueue_attrs to apply, allocated with alloc_workqueue_attrs() 5202 struct list_head list; !! 3944 * 5203 struct pool_workqueue *dfl_pwq; !! 3945 * Apply @attrs to an unbound workqueue @wq. Unless disabled, on NUMA 5204 struct pool_workqueue *pwq_tbl[]; !! 3946 * machines, this function maps a separate pwq to each NUMA node with 5205 }; !! 3947 * possibles CPUs in @attrs->cpumask so that work items are affine to the 5206 !! 3948 * NUMA node it was issued on. Older pwqs are released as in-flight work 5207 /* free the resources after success or abort !! 3949 * items finish. Note that a work item which repeatedly requeues itself 5208 static void apply_wqattrs_cleanup(struct appl !! 3950 * back-to-back will stay on its current pwq. >> 3951 * >> 3952 * Performs GFP_KERNEL allocations. Returns 0 on success and -errno on >> 3953 * failure. >> 3954 */ >> 3955 int apply_workqueue_attrs(struct workqueue_struct *wq, >> 3956 const struct workqueue_attrs *attrs) 5209 { 3957 { 5210 if (ctx) { !! 3958 struct workqueue_attrs *new_attrs, *tmp_attrs; 5211 int cpu; !! 3959 struct pool_workqueue **pwq_tbl, *dfl_pwq; >> 3960 int node, ret; 5212 3961 5213 for_each_possible_cpu(cpu) !! 3962 /* only unbound workqueues can change attributes */ 5214 put_pwq_unlocked(ctx- !! 3963 if (WARN_ON(!(wq->flags & WQ_UNBOUND))) 5215 put_pwq_unlocked(ctx->dfl_pwq !! 3964 return -EINVAL; 5216 << 5217 free_workqueue_attrs(ctx->att << 5218 3965 5219 kfree(ctx); !! 3966 /* creating multiple pwqs breaks ordering guarantee */ >> 3967 if (!list_empty(&wq->pwqs)) { >> 3968 if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT)) >> 3969 return -EINVAL; >> 3970 >> 3971 wq->flags &= ~__WQ_ORDERED; 5220 } 3972 } 5221 } << 5222 3973 5223 /* allocate the attrs and pwqs for later inst !! 3974 pwq_tbl = kzalloc(wq_numa_tbl_len * sizeof(pwq_tbl[0]), GFP_KERNEL); 5224 static struct apply_wqattrs_ctx * !! 3975 new_attrs = alloc_workqueue_attrs(GFP_KERNEL); 5225 apply_wqattrs_prepare(struct workqueue_struct !! 3976 tmp_attrs = alloc_workqueue_attrs(GFP_KERNEL); 5226 const struct workqueue_ !! 3977 if (!pwq_tbl || !new_attrs || !tmp_attrs) 5227 const cpumask_var_t unb !! 3978 goto enomem; 5228 { << 5229 struct apply_wqattrs_ctx *ctx; << 5230 struct workqueue_attrs *new_attrs; << 5231 int cpu; << 5232 3979 5233 lockdep_assert_held(&wq_pool_mutex); !! 3980 /* make a copy of @attrs and sanitize it */ >> 3981 copy_workqueue_attrs(new_attrs, attrs); >> 3982 cpumask_and(new_attrs->cpumask, new_attrs->cpumask, cpu_possible_mask); >> 3983 >> 3984 /* >> 3985 * We may create multiple pwqs with differing cpumasks. Make a >> 3986 * copy of @new_attrs which will be modified and used to obtain >> 3987 * pools. >> 3988 */ >> 3989 copy_workqueue_attrs(tmp_attrs, new_attrs); >> 3990 >> 3991 /* >> 3992 * CPUs should stay stable across pwq creations and installations. >> 3993 * Pin CPUs, determine the target cpumask for each node and create >> 3994 * pwqs accordingly. 
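/*
 * Editor's sketch, not part of the source above: typical driver-side
 * use of apply_workqueue_attrs().  Names are hypothetical, and the
 * alloc_workqueue_attrs() signature differs between the two versions
 * shown (the older side takes a gfp_t, which is what this follows).
 * In-flight work items keep their old pwqs; only newly queued items
 * see the updated attributes.
 */
static int my_wq_tune(struct workqueue_struct *wq)
{
	struct workqueue_attrs *attrs;
	int ret;

	attrs = alloc_workqueue_attrs(GFP_KERNEL);
	if (!attrs)
		return -ENOMEM;

	attrs->nice = -10;				/* boost the workers */
	cpumask_copy(attrs->cpumask, cpumask_of_node(0)); /* node 0 only */

	ret = apply_workqueue_attrs(wq, attrs);		/* old pwqs drain away */
	free_workqueue_attrs(attrs);
	return ret;
}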
>> 3995 */ >> 3996 get_online_cpus(); 5234 3997 5235 if (WARN_ON(attrs->affn_scope < 0 || !! 3998 mutex_lock(&wq_pool_mutex); 5236 attrs->affn_scope >= WQ_A << 5237 return ERR_PTR(-EINVAL); << 5238 << 5239 ctx = kzalloc(struct_size(ctx, pwq_tb << 5240 << 5241 new_attrs = alloc_workqueue_attrs(); << 5242 if (!ctx || !new_attrs) << 5243 goto out_free; << 5244 3999 5245 /* 4000 /* 5246 * If something goes wrong during CPU 4001 * If something goes wrong during CPU up/down, we'll fall back to 5247 * the default pwq covering whole @at 4002 * the default pwq covering whole @attrs->cpumask. Always create 5248 * it even if we don't use it immedia 4003 * it even if we don't use it immediately. 5249 */ 4004 */ 5250 copy_workqueue_attrs(new_attrs, attrs !! 4005 dfl_pwq = alloc_unbound_pwq(wq, new_attrs); 5251 wqattrs_actualize_cpumask(new_attrs, !! 4006 if (!dfl_pwq) 5252 cpumask_copy(new_attrs->__pod_cpumask !! 4007 goto enomem_pwq; 5253 ctx->dfl_pwq = alloc_unbound_pwq(wq, << 5254 if (!ctx->dfl_pwq) << 5255 goto out_free; << 5256 4008 5257 for_each_possible_cpu(cpu) { !! 4009 for_each_node(node) { 5258 if (new_attrs->ordered) { !! 4010 if (wq_calc_node_cpumask(attrs, node, -1, tmp_attrs->cpumask)) { 5259 ctx->dfl_pwq->refcnt+ !! 4011 pwq_tbl[node] = alloc_unbound_pwq(wq, tmp_attrs); 5260 ctx->pwq_tbl[cpu] = c !! 4012 if (!pwq_tbl[node]) >> 4013 goto enomem_pwq; 5261 } else { 4014 } else { 5262 wq_calc_pod_cpumask(n !! 4015 dfl_pwq->refcnt++; 5263 ctx->pwq_tbl[cpu] = a !! 4016 pwq_tbl[node] = dfl_pwq; 5264 if (!ctx->pwq_tbl[cpu << 5265 goto out_free << 5266 } 4017 } 5267 } 4018 } 5268 4019 5269 /* save the user configured attrs and !! 4020 mutex_unlock(&wq_pool_mutex); 5270 copy_workqueue_attrs(new_attrs, attrs << 5271 cpumask_and(new_attrs->cpumask, new_a << 5272 cpumask_copy(new_attrs->__pod_cpumask << 5273 ctx->attrs = new_attrs; << 5274 << 5275 /* << 5276 * For initialized ordered workqueues << 5277 * (dfl_pwq). Set the plugged flag of << 5278 * of newly queued work items until e << 5279 * the old pwq's have completed. << 5280 */ << 5281 if ((wq->flags & __WQ_ORDERED) && !li << 5282 ctx->dfl_pwq->plugged = true; << 5283 << 5284 ctx->wq = wq; << 5285 return ctx; << 5286 << 5287 out_free: << 5288 free_workqueue_attrs(new_attrs); << 5289 apply_wqattrs_cleanup(ctx); << 5290 return ERR_PTR(-ENOMEM); << 5291 } << 5292 << 5293 /* set attrs and install prepared pwqs, @ctx << 5294 static void apply_wqattrs_commit(struct apply << 5295 { << 5296 int cpu; << 5297 4021 5298 /* all pwqs have been created success 4022 /* all pwqs have been created successfully, let's install'em */ 5299 mutex_lock(&ctx->wq->mutex); !! 4023 mutex_lock(&wq->mutex); 5300 << 5301 copy_workqueue_attrs(ctx->wq->unbound << 5302 << 5303 /* save the previous pwqs and install << 5304 for_each_possible_cpu(cpu) << 5305 ctx->pwq_tbl[cpu] = install_u << 5306 << 5307 ctx->dfl_pwq = install_unbound_pwq(ct << 5308 << 5309 /* update node_nr_active->max */ << 5310 wq_update_node_max_active(ctx->wq, -1 << 5311 4024 5312 /* rescuer needs to respect wq cpumas !! 4025 copy_workqueue_attrs(wq->unbound_attrs, new_attrs); 5313 if (ctx->wq->rescuer) << 5314 set_cpus_allowed_ptr(ctx->wq- << 5315 unbound_ << 5316 4026 5317 mutex_unlock(&ctx->wq->mutex); !! 4027 /* save the previous pwq and install the new one */ 5318 } !! 
4028 for_each_node(node) >> 4029 pwq_tbl[node] = numa_pwq_tbl_install(wq, node, pwq_tbl[node]); >> 4030 >> 4031 /* @dfl_pwq might not have been used, ensure it's linked */ >> 4032 link_pwq(dfl_pwq); >> 4033 swap(wq->dfl_pwq, dfl_pwq); 5319 4034 5320 static int apply_workqueue_attrs_locked(struc !! 4035 mutex_unlock(&wq->mutex); 5321 const << 5322 { << 5323 struct apply_wqattrs_ctx *ctx; << 5324 << 5325 /* only unbound workqueues can change << 5326 if (WARN_ON(!(wq->flags & WQ_UNBOUND) << 5327 return -EINVAL; << 5328 << 5329 ctx = apply_wqattrs_prepare(wq, attrs << 5330 if (IS_ERR(ctx)) << 5331 return PTR_ERR(ctx); << 5332 << 5333 /* the ctx has been prepared successf << 5334 apply_wqattrs_commit(ctx); << 5335 apply_wqattrs_cleanup(ctx); << 5336 << 5337 return 0; << 5338 } << 5339 4036 5340 /** !! 4037 /* put the old pwqs */ 5341 * apply_workqueue_attrs - apply new workqueu !! 4038 for_each_node(node) 5342 * @wq: the target workqueue !! 4039 put_pwq_unlocked(pwq_tbl[node]); 5343 * @attrs: the workqueue_attrs to apply, allo !! 4040 put_pwq_unlocked(dfl_pwq); 5344 * !! 4041 5345 * Apply @attrs to an unbound workqueue @wq. !! 4042 put_online_cpus(); 5346 * a separate pwq to each CPU pod with possib !! 4043 ret = 0; 5347 * work items are affine to the pod it was is !! 4044 /* fall through */ 5348 * in-flight work items finish. Note that a w !! 4045 out_free: 5349 * itself back-to-back will stay on its curre !! 4046 free_workqueue_attrs(tmp_attrs); 5350 * !! 4047 free_workqueue_attrs(new_attrs); 5351 * Performs GFP_KERNEL allocations. !! 4048 kfree(pwq_tbl); 5352 * !! 4049 return ret; 5353 * Return: 0 on success and -errno on failure << 5354 */ << 5355 int apply_workqueue_attrs(struct workqueue_st << 5356 const struct workqu << 5357 { << 5358 int ret; << 5359 4050 5360 mutex_lock(&wq_pool_mutex); !! 4051 enomem_pwq: 5361 ret = apply_workqueue_attrs_locked(wq !! 4052 free_unbound_pwq(dfl_pwq); >> 4053 for_each_node(node) >> 4054 if (pwq_tbl && pwq_tbl[node] != dfl_pwq) >> 4055 free_unbound_pwq(pwq_tbl[node]); 5362 mutex_unlock(&wq_pool_mutex); 4056 mutex_unlock(&wq_pool_mutex); 5363 !! 4057 put_online_cpus(); 5364 return ret; !! 4058 enomem: >> 4059 ret = -ENOMEM; >> 4060 goto out_free; 5365 } 4061 } 5366 4062 5367 /** 4063 /** 5368 * unbound_wq_update_pwq - update a pwq slot !! 4064 * wq_update_unbound_numa - update NUMA affinity of a wq for CPU hot[un]plug 5369 * @wq: the target workqueue 4065 * @wq: the target workqueue 5370 * @cpu: the CPU to update the pwq slot for !! 4066 * @cpu: the CPU coming up or going down >> 4067 * @online: whether @cpu is coming up or going down 5371 * 4068 * 5372 * This function is to be called from %CPU_DO 4069 * This function is to be called from %CPU_DOWN_PREPARE, %CPU_ONLINE and 5373 * %CPU_DOWN_FAILED. @cpu is in the same pod !! 4070 * %CPU_DOWN_FAILED. @cpu is being hot[un]plugged, update NUMA affinity of 5374 * !! 4071 * @wq accordingly. 5375 * << 5376 * If pod affinity can't be adjusted due to m << 5377 * back to @wq->dfl_pwq which may not be opti << 5378 * 4072 * 5379 * Note that when the last allowed CPU of a p !! 4073 * If NUMA affinity can't be adjusted due to memory allocation failure, it 5380 * with a cpumask spanning multiple pods, the !! 4074 * falls back to @wq->dfl_pwq which may not be optimal but is always 5381 * executing the work items for the workqueue !! 4075 * correct. 5382 * may execute on any CPU. This is similar to !! 4076 * 5383 * CPU_DOWN. If a workqueue user wants strict !! 
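/*
 * Editor's sketch, not part of the source above: the shape of the
 * prepare/commit/cleanup transaction used by the apply path.  Every
 * allocation that can fail is front-loaded into the prepare step; the
 * commit step is nothing but pointer assignments and therefore cannot
 * fail halfway; cleanup drops whatever the commit displaced.  The
 * my_* types are hypothetical.
 */
struct my_cfg {
	int val;
};

struct my_obj {
	struct my_cfg *cfg;		/* current config */
};

static int my_apply_cfg(struct my_obj *obj, int new_val)
{
	struct my_cfg *nc, *old;

	/* prepare: may fail, @obj untouched on failure */
	nc = kzalloc(sizeof(*nc), GFP_KERNEL);
	if (!nc)
		return -ENOMEM;
	nc->val = new_val;

	/* commit: pointer swap only, no failure possible */
	old = obj->cfg;
	obj->cfg = nc;

	/* cleanup: release the displaced state */
	kfree(old);
	return 0;
}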
4077 * Note that when the last allowed CPU of a NUMA node goes offline for a 5384 * responsibility to flush the work item from !! 4078 * workqueue with a cpumask spanning multiple nodes, the workers which were >> 4079 * already executing the work items for the workqueue will lose their CPU >> 4080 * affinity and may execute on any CPU. This is similar to how per-cpu >> 4081 * workqueues behave on CPU_DOWN. If a workqueue user wants strict >> 4082 * affinity, it's the user's responsibility to flush the work item from >> 4083 * CPU_DOWN_PREPARE. 5385 */ 4084 */ 5386 static void unbound_wq_update_pwq(struct work !! 4085 static void wq_update_unbound_numa(struct workqueue_struct *wq, int cpu, >> 4086 bool online) 5387 { 4087 { >> 4088 int node = cpu_to_node(cpu); >> 4089 int cpu_off = online ? -1 : cpu; 5388 struct pool_workqueue *old_pwq = NULL 4090 struct pool_workqueue *old_pwq = NULL, *pwq; 5389 struct workqueue_attrs *target_attrs; 4091 struct workqueue_attrs *target_attrs; >> 4092 cpumask_t *cpumask; 5390 4093 5391 lockdep_assert_held(&wq_pool_mutex); 4094 lockdep_assert_held(&wq_pool_mutex); 5392 4095 5393 if (!(wq->flags & WQ_UNBOUND) || wq-> !! 4096 if (!wq_numa_enabled || !(wq->flags & WQ_UNBOUND)) 5394 return; 4097 return; 5395 4098 5396 /* 4099 /* 5397 * We don't wanna alloc/free wq_attrs 4100 * We don't wanna alloc/free wq_attrs for each wq for each CPU. 5398 * Let's use a preallocated one. The 4101 * Let's use a preallocated one. The following buf is protected by 5399 * CPU hotplug exclusion. 4102 * CPU hotplug exclusion. 5400 */ 4103 */ 5401 target_attrs = unbound_wq_update_pwq_ !! 4104 target_attrs = wq_update_unbound_numa_attrs_buf; >> 4105 cpumask = target_attrs->cpumask; >> 4106 >> 4107 mutex_lock(&wq->mutex); >> 4108 if (wq->unbound_attrs->no_numa) >> 4109 goto out_unlock; 5402 4110 5403 copy_workqueue_attrs(target_attrs, wq 4111 copy_workqueue_attrs(target_attrs, wq->unbound_attrs); 5404 wqattrs_actualize_cpumask(target_attr !! 4112 pwq = unbound_pwq_by_node(wq, node); 5405 4113 5406 /* nothing to do if the target cpumas !! 4114 /* 5407 wq_calc_pod_cpumask(target_attrs, cpu !! 4115 * Let's determine what needs to be done. If the target cpumask is 5408 if (wqattrs_equal(target_attrs, unbou !! 4116 * different from wq's, we need to compare it to @pwq's and create 5409 return; !! 4117 * a new one if they don't match. If the target cpumask equals >> 4118 * wq's, the default pwq should be used. If @pwq is already the >> 4119 * default one, nothing to do; otherwise, install the default one. >> 4120 */ >> 4121 if (wq_calc_node_cpumask(wq->unbound_attrs, node, cpu_off, cpumask)) { >> 4122 if (cpumask_equal(cpumask, pwq->pool->attrs->cpumask)) >> 4123 goto out_unlock; >> 4124 } else { >> 4125 if (pwq == wq->dfl_pwq) >> 4126 goto out_unlock; >> 4127 else >> 4128 goto use_dfl_pwq; >> 4129 } >> 4130 >> 4131 mutex_unlock(&wq->mutex); 5410 4132 5411 /* create a new pwq */ 4133 /* create a new pwq */ 5412 pwq = alloc_unbound_pwq(wq, target_at 4134 pwq = alloc_unbound_pwq(wq, target_attrs); 5413 if (!pwq) { 4135 if (!pwq) { 5414 pr_warn("workqueue: allocatio !! 4136 pr_warning("workqueue: allocation failed while updating NUMA affinity of \"%s\"\n", 5415 wq->name); !! 4137 wq->name); >> 4138 mutex_lock(&wq->mutex); 5416 goto use_dfl_pwq; 4139 goto use_dfl_pwq; 5417 } 4140 } 5418 4141 5419 /* Install the new pwq. */ !! 4142 /* >> 4143 * Install the new pwq. 
As this function is called only from CPU >> 4144 * hotplug callbacks and applying a new attrs is wrapped with >> 4145 * get/put_online_cpus(), @wq->unbound_attrs couldn't have changed >> 4146 * inbetween. >> 4147 */ 5420 mutex_lock(&wq->mutex); 4148 mutex_lock(&wq->mutex); 5421 old_pwq = install_unbound_pwq(wq, cpu !! 4149 old_pwq = numa_pwq_tbl_install(wq, node, pwq); 5422 goto out_unlock; 4150 goto out_unlock; 5423 4151 5424 use_dfl_pwq: 4152 use_dfl_pwq: 5425 mutex_lock(&wq->mutex); !! 4153 spin_lock_irq(&wq->dfl_pwq->pool->lock); 5426 pwq = unbound_pwq(wq, -1); !! 4154 get_pwq(wq->dfl_pwq); 5427 raw_spin_lock_irq(&pwq->pool->lock); !! 4155 spin_unlock_irq(&wq->dfl_pwq->pool->lock); 5428 get_pwq(pwq); !! 4156 old_pwq = numa_pwq_tbl_install(wq, node, wq->dfl_pwq); 5429 raw_spin_unlock_irq(&pwq->pool->lock) << 5430 old_pwq = install_unbound_pwq(wq, cpu << 5431 out_unlock: 4157 out_unlock: 5432 mutex_unlock(&wq->mutex); 4158 mutex_unlock(&wq->mutex); 5433 put_pwq_unlocked(old_pwq); 4159 put_pwq_unlocked(old_pwq); 5434 } 4160 } 5435 4161 5436 static int alloc_and_link_pwqs(struct workque 4162 static int alloc_and_link_pwqs(struct workqueue_struct *wq) 5437 { 4163 { 5438 bool highpri = wq->flags & WQ_HIGHPRI 4164 bool highpri = wq->flags & WQ_HIGHPRI; 5439 int cpu, ret; 4165 int cpu, ret; 5440 4166 5441 lockdep_assert_held(&wq_pool_mutex); << 5442 << 5443 wq->cpu_pwq = alloc_percpu(struct poo << 5444 if (!wq->cpu_pwq) << 5445 goto enomem; << 5446 << 5447 if (!(wq->flags & WQ_UNBOUND)) { 4167 if (!(wq->flags & WQ_UNBOUND)) { 5448 struct worker_pool __percpu * !! 4168 wq->cpu_pwqs = alloc_percpu(struct pool_workqueue); 5449 !! 4169 if (!wq->cpu_pwqs) 5450 if (wq->flags & WQ_BH) !! 4170 return -ENOMEM; 5451 pools = bh_worker_poo << 5452 else << 5453 pools = cpu_worker_po << 5454 4171 5455 for_each_possible_cpu(cpu) { 4172 for_each_possible_cpu(cpu) { 5456 struct pool_workqueue !! 4173 struct pool_workqueue *pwq = 5457 struct worker_pool *p !! 4174 per_cpu_ptr(wq->cpu_pwqs, cpu); >> 4175 struct worker_pool *cpu_pools = >> 4176 per_cpu(cpu_worker_pools, cpu); 5458 4177 5459 pool = &(per_cpu_ptr( !! 4178 init_pwq(pwq, wq, &cpu_pools[highpri]); 5460 pwq_p = per_cpu_ptr(w << 5461 << 5462 *pwq_p = kmem_cache_a << 5463 << 5464 if (!*pwq_p) << 5465 goto enomem; << 5466 << 5467 init_pwq(*pwq_p, wq, << 5468 4179 5469 mutex_lock(&wq->mutex 4180 mutex_lock(&wq->mutex); 5470 link_pwq(*pwq_p); !! 4181 link_pwq(pwq); 5471 mutex_unlock(&wq->mut 4182 mutex_unlock(&wq->mutex); 5472 } 4183 } 5473 return 0; 4184 return 0; 5474 } !! 4185 } else if (wq->flags & __WQ_ORDERED) { 5475 !! 4186 ret = apply_workqueue_attrs(wq, ordered_wq_attrs[highpri]); 5476 if (wq->flags & __WQ_ORDERED) { << 5477 struct pool_workqueue *dfl_pw << 5478 << 5479 ret = apply_workqueue_attrs_l << 5480 /* there should only be singl 4187 /* there should only be single pwq for ordering guarantee */ 5481 dfl_pwq = rcu_access_pointer( !! 4188 WARN(!ret && (wq->pwqs.next != &wq->dfl_pwq->pwqs_node || 5482 WARN(!ret && (wq->pwqs.next ! !! 4189 wq->pwqs.prev != &wq->dfl_pwq->pwqs_node), 5483 wq->pwqs.prev ! << 5484 "ordering guarantee brok 4190 "ordering guarantee broken for workqueue %s\n", wq->name); >> 4191 return ret; 5485 } else { 4192 } else { 5486 ret = apply_workqueue_attrs_l !! 
4193 return apply_workqueue_attrs(wq, unbound_std_wq_attrs[highpri]); 5487 } << 5488 << 5489 return ret; << 5490 << 5491 enomem: << 5492 if (wq->cpu_pwq) { << 5493 for_each_possible_cpu(cpu) { << 5494 struct pool_workqueue << 5495 << 5496 if (pwq) << 5497 kmem_cache_fr << 5498 } << 5499 free_percpu(wq->cpu_pwq); << 5500 wq->cpu_pwq = NULL; << 5501 } 4194 } 5502 return -ENOMEM; << 5503 } 4195 } 5504 4196 5505 static int wq_clamp_max_active(int max_active 4197 static int wq_clamp_max_active(int max_active, unsigned int flags, 5506 const char *na 4198 const char *name) 5507 { 4199 { 5508 if (max_active < 1 || max_active > WQ !! 4200 int lim = flags & WQ_UNBOUND ? WQ_UNBOUND_MAX_ACTIVE : WQ_MAX_ACTIVE; 5509 pr_warn("workqueue: max_activ << 5510 max_active, name, 1, << 5511 << 5512 return clamp_val(max_active, 1, WQ_MA << 5513 } << 5514 << 5515 /* << 5516 * Workqueues which may be used during memory << 5517 * to guarantee forward progress. << 5518 */ << 5519 static int init_rescuer(struct workqueue_stru << 5520 { << 5521 struct worker *rescuer; << 5522 char id_buf[WORKER_ID_LEN]; << 5523 int ret; << 5524 << 5525 lockdep_assert_held(&wq_pool_mutex); << 5526 << 5527 if (!(wq->flags & WQ_MEM_RECLAIM)) << 5528 return 0; << 5529 << 5530 rescuer = alloc_worker(NUMA_NO_NODE); << 5531 if (!rescuer) { << 5532 pr_err("workqueue: Failed to << 5533 wq->name); << 5534 return -ENOMEM; << 5535 } << 5536 << 5537 rescuer->rescue_wq = wq; << 5538 format_worker_id(id_buf, sizeof(id_bu << 5539 4201 5540 rescuer->task = kthread_create(rescue !! 4202 if (max_active < 1 || max_active > lim) 5541 if (IS_ERR(rescuer->task)) { !! 4203 pr_warn("workqueue: max_active %d requested for %s is out of range, clamping between %d and %d\n", 5542 ret = PTR_ERR(rescuer->task); !! 4204 max_active, name, 1, lim); 5543 pr_err("workqueue: Failed to << 5544 wq->name, ERR_PTR(ret) << 5545 kfree(rescuer); << 5546 return ret; << 5547 } << 5548 << 5549 wq->rescuer = rescuer; << 5550 if (wq->flags & WQ_UNBOUND) << 5551 kthread_bind_mask(rescuer->ta << 5552 else << 5553 kthread_bind_mask(rescuer->ta << 5554 wake_up_process(rescuer->task); << 5555 << 5556 return 0; << 5557 } << 5558 << 5559 /** << 5560 * wq_adjust_max_active - update a wq's max_a << 5561 * @wq: target workqueue << 5562 * << 5563 * If @wq isn't freezing, set @wq->max_active << 5564 * activate inactive work items accordingly. << 5565 * @wq->max_active to zero. << 5566 */ << 5567 static void wq_adjust_max_active(struct workq << 5568 { << 5569 bool activated; << 5570 int new_max, new_min; << 5571 << 5572 lockdep_assert_held(&wq->mutex); << 5573 << 5574 if ((wq->flags & WQ_FREEZABLE) && wor << 5575 new_max = 0; << 5576 new_min = 0; << 5577 } else { << 5578 new_max = wq->saved_max_activ << 5579 new_min = wq->saved_min_activ << 5580 } << 5581 << 5582 if (wq->max_active == new_max && wq-> << 5583 return; << 5584 << 5585 /* << 5586 * Update @wq->max/min_active and the << 5587 * active work items are allowed. Thi << 5588 * because new work items are always << 5589 * work items if there are any. << 5590 */ << 5591 WRITE_ONCE(wq->max_active, new_max); << 5592 WRITE_ONCE(wq->min_active, new_min); << 5593 << 5594 if (wq->flags & WQ_UNBOUND) << 5595 wq_update_node_max_active(wq, << 5596 << 5597 if (new_max == 0) << 5598 return; << 5599 << 5600 /* << 5601 * Round-robin through pwq's activati << 5602 * until max_active is filled. 
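/*
 * Editor's sketch, not part of the source above: what the rescuer
 * guarantee above means for users.  A workqueue on the memory-reclaim
 * path (writeback, swap, block I/O completion) must pass WQ_MEM_RECLAIM
 * so that a pre-created rescuer thread can run its work even when
 * forking a new worker would itself need memory.  Hypothetical driver
 * example:
 */
static struct workqueue_struct *my_io_wq;

static int my_io_init(void)
{
	/* max_active of 1 is plenty here; the rescuer is the point */
	my_io_wq = alloc_workqueue("my_io", WQ_MEM_RECLAIM, 1);
	return my_io_wq ? 0 : -ENOMEM;
}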
<< 5603 */ << 5604 do { << 5605 struct pool_workqueue *pwq; << 5606 << 5607 activated = false; << 5608 for_each_pwq(pwq, wq) { << 5609 unsigned long irq_fla << 5610 4205 5611 /* can be called duri !! 4206 return clamp_val(max_active, 1, lim); 5612 raw_spin_lock_irqsave << 5613 if (pwq_activate_firs << 5614 activated = t << 5615 kick_pool(pwq << 5616 } << 5617 raw_spin_unlock_irqre << 5618 } << 5619 } while (activated); << 5620 } 4207 } 5621 4208 5622 __printf(1, 4) !! 4209 struct workqueue_struct *__alloc_workqueue_key(const char *fmt, 5623 struct workqueue_struct *alloc_workqueue(cons !! 4210 unsigned int flags, 5624 unsi !! 4211 int max_active, 5625 int !! 4212 struct lock_class_key *key, >> 4213 const char *lock_name, ...) 5626 { 4214 { >> 4215 size_t tbl_size = 0; 5627 va_list args; 4216 va_list args; 5628 struct workqueue_struct *wq; 4217 struct workqueue_struct *wq; 5629 size_t wq_size; !! 4218 struct pool_workqueue *pwq; 5630 int name_len; << 5631 << 5632 if (flags & WQ_BH) { << 5633 if (WARN_ON_ONCE(flags & ~__W << 5634 return NULL; << 5635 if (WARN_ON_ONCE(max_active)) << 5636 return NULL; << 5637 } << 5638 4219 5639 /* see the comment above the definiti !! 4220 /* 5640 if ((flags & WQ_POWER_EFFICIENT) && w !! 4221 * Unbound && max_active == 1 used to imply ordered, which is no 5641 flags |= WQ_UNBOUND; !! 4222 * longer the case on NUMA machines due to per-node pools. While >> 4223 * alloc_ordered_workqueue() is the right way to create an ordered >> 4224 * workqueue, keep the previous behavior to avoid subtle breakages >> 4225 * on NUMA. >> 4226 */ >> 4227 if ((flags & WQ_UNBOUND) && max_active == 1) >> 4228 flags |= __WQ_ORDERED; 5642 4229 5643 /* allocate wq and format name */ 4230 /* allocate wq and format name */ 5644 if (flags & WQ_UNBOUND) 4231 if (flags & WQ_UNBOUND) 5645 wq_size = struct_size(wq, nod !! 4232 tbl_size = wq_numa_tbl_len * sizeof(wq->numa_pwq_tbl[0]); 5646 else << 5647 wq_size = sizeof(*wq); << 5648 4233 5649 wq = kzalloc(wq_size, GFP_KERNEL); !! 4234 wq = kzalloc(sizeof(*wq) + tbl_size, GFP_KERNEL); 5650 if (!wq) 4235 if (!wq) 5651 return NULL; 4236 return NULL; 5652 4237 5653 if (flags & WQ_UNBOUND) { 4238 if (flags & WQ_UNBOUND) { 5654 wq->unbound_attrs = alloc_wor !! 4239 wq->unbound_attrs = alloc_workqueue_attrs(GFP_KERNEL); 5655 if (!wq->unbound_attrs) 4240 if (!wq->unbound_attrs) 5656 goto err_free_wq; 4241 goto err_free_wq; 5657 } 4242 } 5658 4243 5659 va_start(args, max_active); !! 4244 va_start(args, lock_name); 5660 name_len = vsnprintf(wq->name, sizeof !! 4245 vsnprintf(wq->name, sizeof(wq->name), fmt, args); 5661 va_end(args); 4246 va_end(args); 5662 4247 5663 if (name_len >= WQ_NAME_LEN) !! 4248 max_active = max_active ?: WQ_DFL_ACTIVE; 5664 pr_warn_once("workqueue: name !! 4249 max_active = wq_clamp_max_active(max_active, flags, wq->name); 5665 wq->name); << 5666 << 5667 if (flags & WQ_BH) { << 5668 /* << 5669 * BH workqueues always share << 5670 * and don't impose any max_a << 5671 */ << 5672 max_active = INT_MAX; << 5673 } else { << 5674 max_active = max_active ?: WQ << 5675 max_active = wq_clamp_max_act << 5676 } << 5677 4250 5678 /* init wq */ 4251 /* init wq */ 5679 wq->flags = flags; 4252 wq->flags = flags; 5680 wq->max_active = max_active; !! 
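/*
 * Illustrative sketch, not from the kernel tree: per the defaulting and
 * clamping above, max_active == 0 selects WQ_DFL_ACTIVE and out-of-range
 * requests are clamped with a warning instead of failing the allocation.
 */
static void example_max_active(void)
{
	struct workqueue_struct *a, *b;

	a = alloc_workqueue("dflt", 0, 0);	/* 0 -> WQ_DFL_ACTIVE */
	b = alloc_workqueue("big", 0, 1 << 20);	/* clamped to WQ_MAX_ACTIVE, warns */
	if (a)
		destroy_workqueue(a);
	if (b)
		destroy_workqueue(b);
}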
4253 wq->saved_max_active = max_active; 5681 wq->min_active = min(max_active, WQ_D << 5682 wq->saved_max_active = wq->max_active << 5683 wq->saved_min_active = wq->min_active << 5684 mutex_init(&wq->mutex); 4254 mutex_init(&wq->mutex); 5685 atomic_set(&wq->nr_pwqs_to_flush, 0); 4255 atomic_set(&wq->nr_pwqs_to_flush, 0); 5686 INIT_LIST_HEAD(&wq->pwqs); 4256 INIT_LIST_HEAD(&wq->pwqs); 5687 INIT_LIST_HEAD(&wq->flusher_queue); 4257 INIT_LIST_HEAD(&wq->flusher_queue); 5688 INIT_LIST_HEAD(&wq->flusher_overflow) 4258 INIT_LIST_HEAD(&wq->flusher_overflow); 5689 INIT_LIST_HEAD(&wq->maydays); 4259 INIT_LIST_HEAD(&wq->maydays); 5690 4260 5691 wq_init_lockdep(wq); !! 4261 lockdep_init_map(&wq->lockdep_map, lock_name, key, 0); 5692 INIT_LIST_HEAD(&wq->list); 4262 INIT_LIST_HEAD(&wq->list); 5693 4263 5694 if (flags & WQ_UNBOUND) { !! 4264 if (alloc_and_link_pwqs(wq) < 0) 5695 if (alloc_node_nr_active(wq-> !! 4265 goto err_free_wq; 5696 goto err_unreg_lockde << 5697 } << 5698 4266 5699 /* 4267 /* 5700 * wq_pool_mutex protects the workque !! 4268 * Workqueues which may be used during memory reclaim should 5701 * and the global freeze state. !! 4269 * have a rescuer to guarantee forward progress. 5702 */ 4270 */ 5703 apply_wqattrs_lock(); !! 4271 if (flags & WQ_MEM_RECLAIM) { 5704 !! 4272 struct worker *rescuer; 5705 if (alloc_and_link_pwqs(wq) < 0) << 5706 goto err_unlock_free_node_nr_ << 5707 << 5708 mutex_lock(&wq->mutex); << 5709 wq_adjust_max_active(wq); << 5710 mutex_unlock(&wq->mutex); << 5711 4273 5712 list_add_tail_rcu(&wq->list, &workque !! 4274 rescuer = alloc_worker(); >> 4275 if (!rescuer) >> 4276 goto err_destroy; 5713 4277 5714 if (wq_online && init_rescuer(wq) < 0 !! 4278 rescuer->rescue_wq = wq; 5715 goto err_unlock_destroy; !! 4279 rescuer->task = kthread_create(rescuer_thread, rescuer, "%s", >> 4280 wq->name); >> 4281 if (IS_ERR(rescuer->task)) { >> 4282 kfree(rescuer); >> 4283 goto err_destroy; >> 4284 } 5716 4285 5717 apply_wqattrs_unlock(); !! 4286 wq->rescuer = rescuer; >> 4287 rescuer->task->flags |= PF_NO_SETAFFINITY; >> 4288 wake_up_process(rescuer->task); >> 4289 } 5718 4290 5719 if ((wq->flags & WQ_SYSFS) && workque 4291 if ((wq->flags & WQ_SYSFS) && workqueue_sysfs_register(wq)) 5720 goto err_destroy; 4292 goto err_destroy; 5721 4293 5722 return wq; << 5723 << 5724 err_unlock_free_node_nr_active: << 5725 apply_wqattrs_unlock(); << 5726 /* 4294 /* 5727 * Failed alloc_and_link_pwqs() may l !! 4295 * wq_pool_mutex protects global freeze state and workqueues list. 5728 * flushing the pwq_release_worker en !! 4296 * Grab it, adjust max_active and add the new @wq to workqueues 5729 * completes before calling kfree(wq) !! 4297 * list. 5730 */ 4298 */ 5731 if (wq->flags & WQ_UNBOUND) { !! 4299 mutex_lock(&wq_pool_mutex); 5732 kthread_flush_worker(pwq_rele !! 4300 5733 free_node_nr_active(wq->node_ !! 4301 mutex_lock(&wq->mutex); 5734 } !! 4302 for_each_pwq(pwq, wq) 5735 err_unreg_lockdep: !! 4303 pwq_adjust_max_active(pwq); 5736 wq_unregister_lockdep(wq); !! 4304 mutex_unlock(&wq->mutex); 5737 wq_free_lockdep(wq); !! 
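/*
 * Illustrative sketch, not from the kernel tree: alloc_workqueue() unwinds
 * its own partial state on failure, so callers only ever pair a NULL check
 * with a later destroy_workqueue(). Hypothetical module-lifetime pattern:
 */
static struct workqueue_struct *my_wq;

static int __init my_example_init(void)
{
	my_wq = alloc_workqueue("my_wq", WQ_UNBOUND, 0);
	if (!my_wq)
		return -ENOMEM;
	return 0;
}

static void __exit my_example_exit(void)
{
	destroy_workqueue(my_wq);	/* pending work is flushed first */
}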
4305 >> 4306 list_add(&wq->list, &workqueues); >> 4307 >> 4308 mutex_unlock(&wq_pool_mutex); >> 4309 >> 4310 return wq; >> 4311 5738 err_free_wq: 4312 err_free_wq: 5739 free_workqueue_attrs(wq->unbound_attr 4313 free_workqueue_attrs(wq->unbound_attrs); 5740 kfree(wq); 4314 kfree(wq); 5741 return NULL; 4315 return NULL; 5742 err_unlock_destroy: << 5743 apply_wqattrs_unlock(); << 5744 err_destroy: 4316 err_destroy: 5745 destroy_workqueue(wq); 4317 destroy_workqueue(wq); 5746 return NULL; 4318 return NULL; 5747 } 4319 } 5748 EXPORT_SYMBOL_GPL(alloc_workqueue); !! 4320 EXPORT_SYMBOL_GPL(__alloc_workqueue_key); 5749 << 5750 static bool pwq_busy(struct pool_workqueue *p << 5751 { << 5752 int i; << 5753 << 5754 for (i = 0; i < WORK_NR_COLORS; i++) << 5755 if (pwq->nr_in_flight[i]) << 5756 return true; << 5757 << 5758 if ((pwq != rcu_access_pointer(pwq->w << 5759 return true; << 5760 if (!pwq_is_empty(pwq)) << 5761 return true; << 5762 << 5763 return false; << 5764 } << 5765 4321 5766 /** 4322 /** 5767 * destroy_workqueue - safely terminate a wor 4323 * destroy_workqueue - safely terminate a workqueue 5768 * @wq: target workqueue 4324 * @wq: target workqueue 5769 * 4325 * 5770 * Safely destroy a workqueue. All work curre 4326 * Safely destroy a workqueue. All work currently pending will be done first. 5771 */ 4327 */ 5772 void destroy_workqueue(struct workqueue_struc 4328 void destroy_workqueue(struct workqueue_struct *wq) 5773 { 4329 { 5774 struct pool_workqueue *pwq; 4330 struct pool_workqueue *pwq; 5775 int cpu; !! 4331 int node; 5776 << 5777 /* << 5778 * Remove it from sysfs first so that << 5779 * lead to sysfs name conflicts. << 5780 */ << 5781 workqueue_sysfs_unregister(wq); << 5782 << 5783 /* mark the workqueue destruction is << 5784 mutex_lock(&wq->mutex); << 5785 wq->flags |= __WQ_DESTROYING; << 5786 mutex_unlock(&wq->mutex); << 5787 4332 5788 /* drain it before proceeding with de 4333 /* drain it before proceeding with destruction */ 5789 drain_workqueue(wq); 4334 drain_workqueue(wq); 5790 4335 5791 /* kill rescuer, if sanity checks fai !! 4336 /* sanity checks */ 5792 if (wq->rescuer) { << 5793 struct worker *rescuer = wq-> << 5794 << 5795 /* this prevents new queueing << 5796 raw_spin_lock_irq(&wq_mayday_ << 5797 wq->rescuer = NULL; << 5798 raw_spin_unlock_irq(&wq_mayda << 5799 << 5800 /* rescuer will empty maydays << 5801 kthread_stop(rescuer->task); << 5802 kfree(rescuer); << 5803 } << 5804 << 5805 /* << 5806 * Sanity checks - grab all the locks << 5807 * in-flight operations which may do << 5808 */ << 5809 mutex_lock(&wq_pool_mutex); << 5810 mutex_lock(&wq->mutex); 4337 mutex_lock(&wq->mutex); 5811 for_each_pwq(pwq, wq) { 4338 for_each_pwq(pwq, wq) { 5812 raw_spin_lock_irq(&pwq->pool- !! 4339 int i; 5813 if (WARN_ON(pwq_busy(pwq))) { !! 4340 5814 pr_warn("%s: %s has t !! 4341 for (i = 0; i < WORK_NR_COLORS; i++) { 5815 __func__, wq- !! 4342 if (WARN_ON(pwq->nr_in_flight[i])) { 5816 show_pwq(pwq); !! 4343 mutex_unlock(&wq->mutex); 5817 raw_spin_unlock_irq(& !! 
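/*
 * Illustrative sketch, not from the kernel tree: destroy_workqueue()
 * drains pending work itself; the caller's job is to stop anything that
 * could queue *new* work afterwards. A still-armed delayed_work whose
 * timer fires mid-destruction is the classic way to trip the busy-pwq
 * warning above. my_dwork is hypothetical and assumed initialized
 * elsewhere.
 */
static struct delayed_work my_dwork;

static void example_teardown(struct workqueue_struct *wq)
{
	cancel_delayed_work_sync(&my_dwork);	/* disarm timer and wait */
	destroy_workqueue(wq);			/* safe: nothing can requeue */
}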
4344 return; >> 4345 } >> 4346 } >> 4347 >> 4348 if (WARN_ON((pwq != wq->dfl_pwq) && (pwq->refcnt > 1)) || >> 4349 WARN_ON(pwq->nr_active) || >> 4350 WARN_ON(!list_empty(&pwq->delayed_works))) { 5818 mutex_unlock(&wq->mut 4351 mutex_unlock(&wq->mutex); 5819 mutex_unlock(&wq_pool << 5820 show_one_workqueue(wq << 5821 return; 4352 return; 5822 } 4353 } 5823 raw_spin_unlock_irq(&pwq->poo << 5824 } 4354 } 5825 mutex_unlock(&wq->mutex); 4355 mutex_unlock(&wq->mutex); 5826 4356 5827 /* 4357 /* 5828 * wq list is used to freeze wq, remo 4358 * wq list is used to freeze wq, remove from list after 5829 * flushing is complete in case freez 4359 * flushing is complete in case freeze races us. 5830 */ 4360 */ 5831 list_del_rcu(&wq->list); !! 4361 mutex_lock(&wq_pool_mutex); >> 4362 list_del_init(&wq->list); 5832 mutex_unlock(&wq_pool_mutex); 4363 mutex_unlock(&wq_pool_mutex); 5833 4364 5834 /* !! 4365 workqueue_sysfs_unregister(wq); 5835 * We're the sole accessor of @wq. Di << 5836 * to put the base refs. @wq will be << 5837 * pwq_put. RCU read lock prevents @w << 5838 */ << 5839 rcu_read_lock(); << 5840 4366 5841 for_each_possible_cpu(cpu) { !! 4367 if (wq->rescuer) { 5842 put_pwq_unlocked(unbound_pwq( !! 4368 kthread_stop(wq->rescuer->task); 5843 RCU_INIT_POINTER(*unbound_pwq !! 4369 kfree(wq->rescuer); >> 4370 wq->rescuer = NULL; 5844 } 4371 } 5845 4372 5846 put_pwq_unlocked(unbound_pwq(wq, -1)) !! 4373 if (!(wq->flags & WQ_UNBOUND)) { 5847 RCU_INIT_POINTER(*unbound_pwq_slot(wq !! 4374 /* >> 4375 * The base ref is never dropped on per-cpu pwqs. Directly >> 4376 * free the pwqs and wq. >> 4377 */ >> 4378 free_percpu(wq->cpu_pwqs); >> 4379 kfree(wq); >> 4380 } else { >> 4381 /* >> 4382 * We're the sole accessor of @wq at this point. Directly >> 4383 * access numa_pwq_tbl[] and dfl_pwq to put the base refs. >> 4384 * @wq will be freed when the last pwq is released. >> 4385 */ >> 4386 for_each_node(node) { >> 4387 pwq = rcu_access_pointer(wq->numa_pwq_tbl[node]); >> 4388 RCU_INIT_POINTER(wq->numa_pwq_tbl[node], NULL); >> 4389 put_pwq_unlocked(pwq); >> 4390 } 5848 4391 5849 rcu_read_unlock(); !! 4392 /* >> 4393 * Put dfl_pwq. @wq may be freed any time after dfl_pwq is >> 4394 * put. Don't access it afterwards. >> 4395 */ >> 4396 pwq = wq->dfl_pwq; >> 4397 wq->dfl_pwq = NULL; >> 4398 put_pwq_unlocked(pwq); >> 4399 } 5850 } 4400 } 5851 EXPORT_SYMBOL_GPL(destroy_workqueue); 4401 EXPORT_SYMBOL_GPL(destroy_workqueue); 5852 4402 5853 /** 4403 /** 5854 * workqueue_set_max_active - adjust max_acti 4404 * workqueue_set_max_active - adjust max_active of a workqueue 5855 * @wq: target workqueue 4405 * @wq: target workqueue 5856 * @max_active: new max_active value. 4406 * @max_active: new max_active value. 5857 * 4407 * 5858 * Set max_active of @wq to @max_active. See !! 4408 * Set max_active of @wq to @max_active. 5859 * comment. << 5860 * 4409 * 5861 * CONTEXT: 4410 * CONTEXT: 5862 * Don't call from IRQ context. 4411 * Don't call from IRQ context. 5863 */ 4412 */ 5864 void workqueue_set_max_active(struct workqueu 4413 void workqueue_set_max_active(struct workqueue_struct *wq, int max_active) 5865 { 4414 { 5866 /* max_active doesn't mean anything f !! 4415 struct pool_workqueue *pwq; 5867 if (WARN_ON(wq->flags & WQ_BH)) !! 4416 5868 return; << 5869 /* disallow meddling with max_active 4417 /* disallow meddling with max_active for ordered workqueues */ 5870 if (WARN_ON(wq->flags & __WQ_ORDERED) !! 
4418 if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT)) 5871 return; 4419 return; 5872 4420 5873 max_active = wq_clamp_max_active(max_ 4421 max_active = wq_clamp_max_active(max_active, wq->flags, wq->name); 5874 4422 5875 mutex_lock(&wq->mutex); 4423 mutex_lock(&wq->mutex); 5876 4424 >> 4425 wq->flags &= ~__WQ_ORDERED; 5877 wq->saved_max_active = max_active; 4426 wq->saved_max_active = max_active; 5878 if (wq->flags & WQ_UNBOUND) << 5879 wq->saved_min_active = min(wq << 5880 4427 5881 wq_adjust_max_active(wq); !! 4428 for_each_pwq(pwq, wq) >> 4429 pwq_adjust_max_active(pwq); 5882 4430 5883 mutex_unlock(&wq->mutex); 4431 mutex_unlock(&wq->mutex); 5884 } 4432 } 5885 EXPORT_SYMBOL_GPL(workqueue_set_max_active); 4433 EXPORT_SYMBOL_GPL(workqueue_set_max_active); 5886 4434 5887 /** 4435 /** 5888 * workqueue_set_min_active - adjust min_acti << 5889 * @wq: target unbound workqueue << 5890 * @min_active: new min_active value << 5891 * << 5892 * Set min_active of an unbound workqueue. Un << 5893 * unbound workqueue is not guaranteed to be << 5894 * interdependent work items. Instead, an unb << 5895 * able to process min_active number of inter << 5896 * %WQ_DFL_MIN_ACTIVE by default. << 5897 * << 5898 * Use this function to adjust the min_active << 5899 * max_active. << 5900 */ << 5901 void workqueue_set_min_active(struct workqueu << 5902 { << 5903 /* min_active is only meaningful for << 5904 if (WARN_ON((wq->flags & (WQ_BH | WQ_ << 5905 WQ_UNBOUND)) << 5906 return; << 5907 << 5908 mutex_lock(&wq->mutex); << 5909 wq->saved_min_active = clamp(min_acti << 5910 wq_adjust_max_active(wq); << 5911 mutex_unlock(&wq->mutex); << 5912 } << 5913 << 5914 /** << 5915 * current_work - retrieve %current task's wo << 5916 * << 5917 * Determine if %current task is a workqueue << 5918 * Useful to find out the context that the %c << 5919 * << 5920 * Return: work struct if %current task is a << 5921 */ << 5922 struct work_struct *current_work(void) << 5923 { << 5924 struct worker *worker = current_wq_wo << 5925 << 5926 return worker ? worker->current_work << 5927 } << 5928 EXPORT_SYMBOL(current_work); << 5929 << 5930 /** << 5931 * current_is_workqueue_rescuer - is %current 4436 * current_is_workqueue_rescuer - is %current workqueue rescuer? 5932 * 4437 * 5933 * Determine whether %current is a workqueue 4438 * Determine whether %current is a workqueue rescuer. Can be used from 5934 * work functions to determine whether it's b 4439 * work functions to determine whether it's being run off the rescuer task. 5935 * << 5936 * Return: %true if %current is a workqueue r << 5937 */ 4440 */ 5938 bool current_is_workqueue_rescuer(void) 4441 bool current_is_workqueue_rescuer(void) 5939 { 4442 { 5940 struct worker *worker = current_wq_wo 4443 struct worker *worker = current_wq_worker(); 5941 4444 5942 return worker && worker->rescue_wq; 4445 return worker && worker->rescue_wq; 5943 } 4446 } 5944 4447 5945 /** 4448 /** 5946 * workqueue_congested - test whether a workq 4449 * workqueue_congested - test whether a workqueue is congested 5947 * @cpu: CPU in question 4450 * @cpu: CPU in question 5948 * @wq: target workqueue 4451 * @wq: target workqueue 5949 * 4452 * 5950 * Test whether @wq's cpu workqueue for @cpu 4453 * Test whether @wq's cpu workqueue for @cpu is congested. There is 5951 * no synchronization around this function an 4454 * no synchronization around this function and the test result is 5952 * unreliable and only useful as advisory hin 4455 * unreliable and only useful as advisory hints or for debugging. 
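/*
 * Illustrative sketch, not from the kernel tree: max_active can be retuned
 * at runtime, but per the WARN()s above not for BH or ordered workqueues.
 * workqueue_set_min_active() above is newer and, unlike set_max_active,
 * carries no EXPORT in this file, so treat it as built-in-only.
 * Hypothetical helper:
 */
static void example_tune(struct workqueue_struct *unbound_wq)
{
	/* cap concurrent work items across the whole unbound workqueue */
	workqueue_set_max_active(unbound_wq, 16);
}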
5953 * 4456 * 5954 * If @cpu is WORK_CPU_UNBOUND, the test is p 4457 * If @cpu is WORK_CPU_UNBOUND, the test is performed on the local CPU. >> 4458 * Note that both per-cpu and unbound workqueues may be associated with >> 4459 * multiple pool_workqueues which have separate congested states. A >> 4460 * workqueue being congested on one CPU doesn't mean the workqueue is also >> 4461 * contested on other CPUs / NUMA nodes. 5955 * 4462 * 5956 * With the exception of ordered workqueues, !! 4463 * RETURNS: 5957 * pool_workqueues, each with its own congest << 5958 * congested on one CPU doesn't mean that the << 5959 * other CPUs. << 5960 * << 5961 * Return: << 5962 * %true if congested, %false otherwise. 4464 * %true if congested, %false otherwise. 5963 */ 4465 */ 5964 bool workqueue_congested(int cpu, struct work 4466 bool workqueue_congested(int cpu, struct workqueue_struct *wq) 5965 { 4467 { 5966 struct pool_workqueue *pwq; 4468 struct pool_workqueue *pwq; 5967 bool ret; 4469 bool ret; 5968 4470 5969 rcu_read_lock(); !! 4471 rcu_read_lock_sched(); 5970 preempt_disable(); << 5971 4472 5972 if (cpu == WORK_CPU_UNBOUND) 4473 if (cpu == WORK_CPU_UNBOUND) 5973 cpu = smp_processor_id(); 4474 cpu = smp_processor_id(); 5974 4475 5975 pwq = *per_cpu_ptr(wq->cpu_pwq, cpu); !! 4476 if (!(wq->flags & WQ_UNBOUND)) 5976 ret = !list_empty(&pwq->inactive_work !! 4477 pwq = per_cpu_ptr(wq->cpu_pwqs, cpu); >> 4478 else >> 4479 pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu)); 5977 4480 5978 preempt_enable(); !! 4481 ret = !list_empty(&pwq->delayed_works); 5979 rcu_read_unlock(); !! 4482 rcu_read_unlock_sched(); 5980 4483 5981 return ret; 4484 return ret; 5982 } 4485 } 5983 EXPORT_SYMBOL_GPL(workqueue_congested); 4486 EXPORT_SYMBOL_GPL(workqueue_congested); 5984 4487 5985 /** 4488 /** 5986 * work_busy - test whether a work is current 4489 * work_busy - test whether a work is currently pending or running 5987 * @work: the work to be tested 4490 * @work: the work to be tested 5988 * 4491 * 5989 * Test whether @work is currently pending or 4492 * Test whether @work is currently pending or running. There is no 5990 * synchronization around this function and t 4493 * synchronization around this function and the test result is 5991 * unreliable and only useful as advisory hin 4494 * unreliable and only useful as advisory hints or for debugging. 5992 * 4495 * 5993 * Return: !! 4496 * RETURNS: 5994 * OR'd bitmask of WORK_BUSY_* bits. 4497 * OR'd bitmask of WORK_BUSY_* bits. 5995 */ 4498 */ 5996 unsigned int work_busy(struct work_struct *wo 4499 unsigned int work_busy(struct work_struct *work) 5997 { 4500 { 5998 struct worker_pool *pool; 4501 struct worker_pool *pool; 5999 unsigned long irq_flags; !! 4502 unsigned long flags; 6000 unsigned int ret = 0; 4503 unsigned int ret = 0; 6001 4504 6002 if (work_pending(work)) 4505 if (work_pending(work)) 6003 ret |= WORK_BUSY_PENDING; 4506 ret |= WORK_BUSY_PENDING; 6004 4507 6005 rcu_read_lock(); !! 4508 local_irq_save(flags); 6006 pool = get_work_pool(work); 4509 pool = get_work_pool(work); 6007 if (pool) { 4510 if (pool) { 6008 raw_spin_lock_irqsave(&pool-> !! 4511 spin_lock(&pool->lock); 6009 if (find_worker_executing_wor 4512 if (find_worker_executing_work(pool, work)) 6010 ret |= WORK_BUSY_RUNN 4513 ret |= WORK_BUSY_RUNNING; 6011 raw_spin_unlock_irqrestore(&p !! 4514 spin_unlock(&pool->lock); 6012 } 4515 } 6013 rcu_read_unlock(); !! 
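/*
 * Illustrative sketch, not from the kernel tree: as the comment above
 * says, workqueue_congested() takes no locks, so its result is stale the
 * moment it returns -- use it only to shed load, never for correctness.
 */
static bool example_should_throttle(struct workqueue_struct *wq)
{
	/* WORK_CPU_UNBOUND: test the pwq the local CPU would queue to */
	return workqueue_congested(WORK_CPU_UNBOUND, wq);
}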
4516 local_irq_restore(flags); 6014 4517 6015 return ret; 4518 return ret; 6016 } 4519 } 6017 EXPORT_SYMBOL_GPL(work_busy); 4520 EXPORT_SYMBOL_GPL(work_busy); 6018 4521 6019 /** 4522 /** 6020 * set_worker_desc - set description for the 4523 * set_worker_desc - set description for the current work item 6021 * @fmt: printf-style format string 4524 * @fmt: printf-style format string 6022 * @...: arguments for the format string 4525 * @...: arguments for the format string 6023 * 4526 * 6024 * This function can be called by a running w 4527 * This function can be called by a running work function to describe what 6025 * the work item is about. If the worker tas 4528 * the work item is about. If the worker task gets dumped, this 6026 * information will be printed out together t 4529 * information will be printed out together to help debugging. The 6027 * description can be at most WORKER_DESC_LEN 4530 * description can be at most WORKER_DESC_LEN including the trailing '\0'. 6028 */ 4531 */ 6029 void set_worker_desc(const char *fmt, ...) 4532 void set_worker_desc(const char *fmt, ...) 6030 { 4533 { 6031 struct worker *worker = current_wq_wo 4534 struct worker *worker = current_wq_worker(); 6032 va_list args; 4535 va_list args; 6033 4536 6034 if (worker) { 4537 if (worker) { 6035 va_start(args, fmt); 4538 va_start(args, fmt); 6036 vsnprintf(worker->desc, sizeo 4539 vsnprintf(worker->desc, sizeof(worker->desc), fmt, args); 6037 va_end(args); 4540 va_end(args); >> 4541 worker->desc_valid = true; 6038 } 4542 } 6039 } 4543 } 6040 EXPORT_SYMBOL_GPL(set_worker_desc); << 6041 4544 6042 /** 4545 /** 6043 * print_worker_info - print out worker infor 4546 * print_worker_info - print out worker information and description 6044 * @log_lvl: the log level to use when printi 4547 * @log_lvl: the log level to use when printing 6045 * @task: target task 4548 * @task: target task 6046 * 4549 * 6047 * If @task is a worker and currently executi 4550 * If @task is a worker and currently executing a work item, print out the 6048 * name of the workqueue being serviced and w 4551 * name of the workqueue being serviced and worker description set with 6049 * set_worker_desc() by the currently executi 4552 * set_worker_desc() by the currently executing work item. 6050 * 4553 * 6051 * This function can be safely called on any 4554 * This function can be safely called on any task as long as the 6052 * task_struct itself is accessible. While s 4555 * task_struct itself is accessible. While safe, this function isn't 6053 * synchronized and may print out mixups or g 4556 * synchronized and may print out mixups or garbages of limited length. 6054 */ 4557 */ 6055 void print_worker_info(const char *log_lvl, s 4558 void print_worker_info(const char *log_lvl, struct task_struct *task) 6056 { 4559 { 6057 work_func_t *fn = NULL; 4560 work_func_t *fn = NULL; 6058 char name[WQ_NAME_LEN] = { }; 4561 char name[WQ_NAME_LEN] = { }; 6059 char desc[WORKER_DESC_LEN] = { }; 4562 char desc[WORKER_DESC_LEN] = { }; 6060 struct pool_workqueue *pwq = NULL; 4563 struct pool_workqueue *pwq = NULL; 6061 struct workqueue_struct *wq = NULL; 4564 struct workqueue_struct *wq = NULL; >> 4565 bool desc_valid = false; 6062 struct worker *worker; 4566 struct worker *worker; 6063 4567 6064 if (!(task->flags & PF_WQ_WORKER)) 4568 if (!(task->flags & PF_WQ_WORKER)) 6065 return; 4569 return; 6066 4570 6067 /* 4571 /* 6068 * This function is called without an 4572 * This function is called without any synchronization and @task 6069 * could be in any state. 
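/*
 * Illustrative sketch, not from the kernel tree: set_worker_desc() is
 * called from inside a running work item; the string then shows up via
 * print_worker_info() when the worker task is dumped. Hypothetical
 * work function:
 */
static void my_flush_fn(struct work_struct *work)
{
	/* a dump would read: Workqueue: <wq name> my_flush_fn (flush dev sda) */
	set_worker_desc("flush dev %s", "sda");
	/* ... actual flushing ... */
}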
Be careful 4573 * could be in any state. Be careful with dereferences. 6070 */ 4574 */ 6071 worker = kthread_probe_data(task); !! 4575 worker = probe_kthread_data(task); 6072 4576 6073 /* 4577 /* 6074 * Carefully copy the associated work !! 4578 * Carefully copy the associated workqueue's workfn and name. Keep 6075 * Keep the original last '\0' in cas !! 4579 * the original last '\0' in case the original contains garbage. 6076 */ 4580 */ 6077 copy_from_kernel_nofault(&fn, &worker !! 4581 probe_kernel_read(&fn, &worker->current_func, sizeof(fn)); 6078 copy_from_kernel_nofault(&pwq, &worke !! 4582 probe_kernel_read(&pwq, &worker->current_pwq, sizeof(pwq)); 6079 copy_from_kernel_nofault(&wq, &pwq->w !! 4583 probe_kernel_read(&wq, &pwq->wq, sizeof(wq)); 6080 copy_from_kernel_nofault(name, wq->na !! 4584 probe_kernel_read(name, wq->name, sizeof(name) - 1); 6081 copy_from_kernel_nofault(desc, worker !! 4585 >> 4586 /* copy worker description */ >> 4587 probe_kernel_read(&desc_valid, &worker->desc_valid, sizeof(desc_valid)); >> 4588 if (desc_valid) >> 4589 probe_kernel_read(desc, worker->desc, sizeof(desc) - 1); 6082 4590 6083 if (fn || name[0] || desc[0]) { 4591 if (fn || name[0] || desc[0]) { 6084 printk("%sWorkqueue: %s %ps", !! 4592 printk("%sWorkqueue: %s %pf", log_lvl, name, fn); 6085 if (strcmp(name, desc)) !! 4593 if (desc[0]) 6086 pr_cont(" (%s)", desc 4594 pr_cont(" (%s)", desc); 6087 pr_cont("\n"); 4595 pr_cont("\n"); 6088 } 4596 } 6089 } 4597 } 6090 4598 6091 static void pr_cont_pool_info(struct worker_p << 6092 { << 6093 pr_cont(" cpus=%*pbl", nr_cpumask_bit << 6094 if (pool->node != NUMA_NO_NODE) << 6095 pr_cont(" node=%d", pool->nod << 6096 pr_cont(" flags=0x%x", pool->flags); << 6097 if (pool->flags & POOL_BH) << 6098 pr_cont(" bh%s", << 6099 pool->attrs->nice == << 6100 else << 6101 pr_cont(" nice=%d", pool->att << 6102 } << 6103 << 6104 static void pr_cont_worker_id(struct worker * << 6105 { << 6106 struct worker_pool *pool = worker->po << 6107 << 6108 if (pool->flags & WQ_BH) << 6109 pr_cont("bh%s", << 6110 pool->attrs->nice == << 6111 else << 6112 pr_cont("%d%s", task_pid_nr(w << 6113 worker->rescue_wq ? " << 6114 } << 6115 << 6116 struct pr_cont_work_struct { << 6117 bool comma; << 6118 work_func_t func; << 6119 long ctr; << 6120 }; << 6121 << 6122 static void pr_cont_work_flush(bool comma, wo << 6123 { << 6124 if (!pcwsp->ctr) << 6125 goto out_record; << 6126 if (func == pcwsp->func) { << 6127 pcwsp->ctr++; << 6128 return; << 6129 } << 6130 if (pcwsp->ctr == 1) << 6131 pr_cont("%s %ps", pcwsp->comm << 6132 else << 6133 pr_cont("%s %ld*%ps", pcwsp-> << 6134 pcwsp->ctr = 0; << 6135 out_record: << 6136 if ((long)func == -1L) << 6137 return; << 6138 pcwsp->comma = comma; << 6139 pcwsp->func = func; << 6140 pcwsp->ctr = 1; << 6141 } << 6142 << 6143 static void pr_cont_work(bool comma, struct w << 6144 { << 6145 if (work->func == wq_barrier_func) { << 6146 struct wq_barrier *barr; << 6147 << 6148 barr = container_of(work, str << 6149 << 6150 pr_cont_work_flush(comma, (wo << 6151 pr_cont("%s BAR(%d)", comma ? << 6152 task_pid_nr(barr->tas << 6153 } else { << 6154 if (!comma) << 6155 pr_cont_work_flush(co << 6156 pr_cont_work_flush(comma, wor << 6157 } << 6158 } << 6159 << 6160 static void show_pwq(struct pool_workqueue *p << 6161 { << 6162 struct pr_cont_work_struct pcws = { . 
<< 6163 struct worker_pool *pool = pwq->pool; << 6164 struct work_struct *work; << 6165 struct worker *worker; << 6166 bool has_in_flight = false, has_pendi << 6167 int bkt; << 6168 << 6169 pr_info(" pwq %d:", pool->id); << 6170 pr_cont_pool_info(pool); << 6171 << 6172 pr_cont(" active=%d refcnt=%d%s\n", << 6173 pwq->nr_active, pwq->refcnt, << 6174 !list_empty(&pwq->mayday_node << 6175 << 6176 hash_for_each(pool->busy_hash, bkt, w << 6177 if (worker->current_pwq == pw << 6178 has_in_flight = true; << 6179 break; << 6180 } << 6181 } << 6182 if (has_in_flight) { << 6183 bool comma = false; << 6184 << 6185 pr_info(" in-flight:"); << 6186 hash_for_each(pool->busy_hash << 6187 if (worker->current_p << 6188 continue; << 6189 << 6190 pr_cont(" %s", comma << 6191 pr_cont_worker_id(wor << 6192 pr_cont(":%ps", worke << 6193 list_for_each_entry(w << 6194 pr_cont_work( << 6195 pr_cont_work_flush(co << 6196 comma = true; << 6197 } << 6198 pr_cont("\n"); << 6199 } << 6200 << 6201 list_for_each_entry(work, &pool->work << 6202 if (get_work_pwq(work) == pwq << 6203 has_pending = true; << 6204 break; << 6205 } << 6206 } << 6207 if (has_pending) { << 6208 bool comma = false; << 6209 << 6210 pr_info(" pending:"); << 6211 list_for_each_entry(work, &po << 6212 if (get_work_pwq(work << 6213 continue; << 6214 << 6215 pr_cont_work(comma, w << 6216 comma = !(*work_data_ << 6217 } << 6218 pr_cont_work_flush(comma, (wo << 6219 pr_cont("\n"); << 6220 } << 6221 << 6222 if (!list_empty(&pwq->inactive_works) << 6223 bool comma = false; << 6224 << 6225 pr_info(" inactive:"); << 6226 list_for_each_entry(work, &pw << 6227 pr_cont_work(comma, w << 6228 comma = !(*work_data_ << 6229 } << 6230 pr_cont_work_flush(comma, (wo << 6231 pr_cont("\n"); << 6232 } << 6233 } << 6234 << 6235 /** << 6236 * show_one_workqueue - dump state of specifi << 6237 * @wq: workqueue whose state will be printed << 6238 */ << 6239 void show_one_workqueue(struct workqueue_stru << 6240 { << 6241 struct pool_workqueue *pwq; << 6242 bool idle = true; << 6243 unsigned long irq_flags; << 6244 << 6245 for_each_pwq(pwq, wq) { << 6246 if (!pwq_is_empty(pwq)) { << 6247 idle = false; << 6248 break; << 6249 } << 6250 } << 6251 if (idle) /* Nothing to print for idl << 6252 return; << 6253 << 6254 pr_info("workqueue %s: flags=0x%x\n", << 6255 << 6256 for_each_pwq(pwq, wq) { << 6257 raw_spin_lock_irqsave(&pwq->p << 6258 if (!pwq_is_empty(pwq)) { << 6259 /* << 6260 * Defer printing to << 6261 * drivers that queue << 6262 * also taken in thei << 6263 */ << 6264 printk_deferred_enter << 6265 show_pwq(pwq); << 6266 printk_deferred_exit( << 6267 } << 6268 raw_spin_unlock_irqrestore(&p << 6269 /* << 6270 * We could be printing a lot << 6271 * sysrq-t -> show_all_workqu << 6272 * hard lockup. << 6273 */ << 6274 touch_nmi_watchdog(); << 6275 } << 6276 << 6277 } << 6278 << 6279 /** << 6280 * show_one_worker_pool - dump state of speci << 6281 * @pool: worker pool whose state will be pri << 6282 */ << 6283 static void show_one_worker_pool(struct worke << 6284 { << 6285 struct worker *worker; << 6286 bool first = true; << 6287 unsigned long irq_flags; << 6288 unsigned long hung = 0; << 6289 << 6290 raw_spin_lock_irqsave(&pool->lock, ir << 6291 if (pool->nr_workers == pool->nr_idle << 6292 goto next_pool; << 6293 << 6294 /* How long the first pending work is << 6295 if (!list_empty(&pool->worklist)) << 6296 hung = jiffies_to_msecs(jiffi << 6297 << 6298 /* << 6299 * Defer printing to avoid deadlocks << 6300 * queue work while holding locks als << 6301 * paths. 
<< 6302 */ << 6303 printk_deferred_enter(); << 6304 pr_info("pool %d:", pool->id); << 6305 pr_cont_pool_info(pool); << 6306 pr_cont(" hung=%lus workers=%d", hung << 6307 if (pool->manager) << 6308 pr_cont(" manager: %d", << 6309 task_pid_nr(pool->man << 6310 list_for_each_entry(worker, &pool->id << 6311 pr_cont(" %s", first ? "idle: << 6312 pr_cont_worker_id(worker); << 6313 first = false; << 6314 } << 6315 pr_cont("\n"); << 6316 printk_deferred_exit(); << 6317 next_pool: << 6318 raw_spin_unlock_irqrestore(&pool->loc << 6319 /* << 6320 * We could be printing a lot from at << 6321 * sysrq-t -> show_all_workqueues(). << 6322 * hard lockup. << 6323 */ << 6324 touch_nmi_watchdog(); << 6325 << 6326 } << 6327 << 6328 /** << 6329 * show_all_workqueues - dump workqueue state << 6330 * << 6331 * Called from a sysrq handler and prints out << 6332 */ << 6333 void show_all_workqueues(void) << 6334 { << 6335 struct workqueue_struct *wq; << 6336 struct worker_pool *pool; << 6337 int pi; << 6338 << 6339 rcu_read_lock(); << 6340 << 6341 pr_info("Showing busy workqueues and << 6342 << 6343 list_for_each_entry_rcu(wq, &workqueu << 6344 show_one_workqueue(wq); << 6345 << 6346 for_each_pool(pool, pi) << 6347 show_one_worker_pool(pool); << 6348 << 6349 rcu_read_unlock(); << 6350 } << 6351 << 6352 /** << 6353 * show_freezable_workqueues - dump freezable << 6354 * << 6355 * Called from try_to_freeze_tasks() and prin << 6356 * still busy. << 6357 */ << 6358 void show_freezable_workqueues(void) << 6359 { << 6360 struct workqueue_struct *wq; << 6361 << 6362 rcu_read_lock(); << 6363 << 6364 pr_info("Showing freezable workqueues << 6365 << 6366 list_for_each_entry_rcu(wq, &workqueu << 6367 if (!(wq->flags & WQ_FREEZABL << 6368 continue; << 6369 show_one_workqueue(wq); << 6370 } << 6371 << 6372 rcu_read_unlock(); << 6373 } << 6374 << 6375 /* used to show worker information through /p << 6376 void wq_worker_comm(char *buf, size_t size, s << 6377 { << 6378 /* stabilize PF_WQ_WORKER and worker << 6379 mutex_lock(&wq_pool_attach_mutex); << 6380 << 6381 if (task->flags & PF_WQ_WORKER) { << 6382 struct worker *worker = kthre << 6383 struct worker_pool *pool = wo << 6384 int off; << 6385 << 6386 off = format_worker_id(buf, s << 6387 << 6388 if (pool) { << 6389 raw_spin_lock_irq(&po << 6390 /* << 6391 * ->desc tracks info << 6392 * set_worker_desc()) << 6393 * current, prepend ' << 6394 */ << 6395 if (worker->desc[0] ! << 6396 if (worker->c << 6397 scnpr << 6398 << 6399 else << 6400 scnpr << 6401 << 6402 } << 6403 raw_spin_unlock_irq(& << 6404 } << 6405 } else { << 6406 strscpy(buf, task->comm, size << 6407 } << 6408 << 6409 mutex_unlock(&wq_pool_attach_mutex); << 6410 } << 6411 << 6412 #ifdef CONFIG_SMP << 6413 << 6414 /* 4599 /* 6415 * CPU hotplug. 4600 * CPU hotplug. 6416 * 4601 * 6417 * There are two challenges in supporting CPU 4602 * There are two challenges in supporting CPU hotplug. Firstly, there 6418 * are a lot of assumptions on strong associa 4603 * are a lot of assumptions on strong associations among work, pwq and 6419 * pool which make migrating pending and sche 4604 * pool which make migrating pending and scheduled works very 6420 * difficult to implement without impacting h 4605 * difficult to implement without impacting hot paths. Secondly, 6421 * worker pools serve mix of short, long and 4606 * worker pools serve mix of short, long and very long running works making 6422 * blocked draining impractical. 4607 * blocked draining impractical. 
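/*
 * Usage note, not from the kernel tree: none of the dump helpers above
 * are exported, but they are reachable from userspace (assuming a kernel
 * with SysRq enabled and these hooks wired up, as in this file):
 *
 *	# echo t > /proc/sysrq-trigger		# SysRq-t includes show_all_workqueues()
 *	# cat /proc/<kworker pid>/comm		# wq_worker_comm(), e.g. "kworker/u8:3+ext4-rsv-conversion"
 *
 * The '+' vs '-' before the description distinguishes whether it refers
 * to the work item the kworker is executing right now.
 */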
6423 * 4608 * 6424 * This is solved by allowing the pools to be 4609 * This is solved by allowing the pools to be disassociated from the CPU 6425 * running as an unbound one and allowing it 4610 * running as an unbound one and allowing it to be reattached later if the 6426 * cpu comes back online. 4611 * cpu comes back online. 6427 */ 4612 */ 6428 4613 6429 static void unbind_workers(int cpu) !! 4614 static void wq_unbind_fn(struct work_struct *work) 6430 { 4615 { >> 4616 int cpu = smp_processor_id(); 6431 struct worker_pool *pool; 4617 struct worker_pool *pool; 6432 struct worker *worker; 4618 struct worker *worker; >> 4619 int wi; 6433 4620 6434 for_each_cpu_worker_pool(pool, cpu) { 4621 for_each_cpu_worker_pool(pool, cpu) { 6435 mutex_lock(&wq_pool_attach_mu !! 4622 WARN_ON_ONCE(cpu != smp_processor_id()); 6436 raw_spin_lock_irq(&pool->lock !! 4623 >> 4624 mutex_lock(&pool->manager_mutex); >> 4625 spin_lock_irq(&pool->lock); 6437 4626 6438 /* 4627 /* 6439 * We've blocked all attach/d !! 4628 * We've blocked all manager operations. Make all workers 6440 * unbound and set DISASSOCIA 4629 * unbound and set DISASSOCIATED. Before this, all workers 6441 * must be on the cpu. After !! 4630 * except for the ones which are still executing works from 6442 * And the preemption disable !! 4631 * before the last CPU down must be on the cpu. After 6443 * are guaranteed to see WORK !! 4632 * this, they may become diasporas. 6444 * is on the same cpu. << 6445 */ 4633 */ 6446 for_each_pool_worker(worker, !! 4634 for_each_pool_worker(worker, wi, pool) 6447 worker->flags |= WORK 4635 worker->flags |= WORKER_UNBOUND; 6448 4636 6449 pool->flags |= POOL_DISASSOCI 4637 pool->flags |= POOL_DISASSOCIATED; 6450 4638 >> 4639 spin_unlock_irq(&pool->lock); >> 4640 mutex_unlock(&pool->manager_mutex); >> 4641 6451 /* 4642 /* 6452 * The handling of nr_running !! 4643 * Call schedule() so that we cross rq->lock and thus can 6453 * now. Zap nr_running. Aft !! 4644 * guarantee sched callbacks see the %WORKER_UNBOUND flag. 6454 * need_more_worker() and kee !! 4645 * This is necessary as scheduler callbacks may be invoked 6455 * long as the worklist is no !! 4646 * from other cpus. 6456 * an unbound (in terms of co !! 4647 */ >> 4648 schedule(); >> 4649 >> 4650 /* >> 4651 * Sched callbacks are disabled now. Zap nr_running. >> 4652 * After this, nr_running stays zero and need_more_worker() >> 4653 * and keep_working() are always true as long as the >> 4654 * worklist is not empty. This pool now behaves as an >> 4655 * unbound (in terms of concurrency management) pool which 6457 * are served by workers tied 4656 * are served by workers tied to the pool. 6458 */ 4657 */ 6459 pool->nr_running = 0; !! 4658 atomic_set(&pool->nr_running, 0); 6460 4659 6461 /* 4660 /* 6462 * With concurrency managemen 4661 * With concurrency management just turned off, a busy 6463 * worker blocking could lead 4662 * worker blocking could lead to lengthy stalls. Kick off 6464 * unbound chain execution of 4663 * unbound chain execution of currently pending work items. 6465 */ 4664 */ 6466 kick_pool(pool); !! 4665 spin_lock_irq(&pool->lock); 6467 !! 4666 wake_up_worker(pool); 6468 raw_spin_unlock_irq(&pool->lo !! 
4667 spin_unlock_irq(&pool->lock); 6469 << 6470 for_each_pool_worker(worker, << 6471 unbind_worker(worker) << 6472 << 6473 mutex_unlock(&wq_pool_attach_ << 6474 } 4668 } 6475 } 4669 } 6476 4670 6477 /** 4671 /** 6478 * rebind_workers - rebind all workers of a p 4672 * rebind_workers - rebind all workers of a pool to the associated CPU 6479 * @pool: pool of interest 4673 * @pool: pool of interest 6480 * 4674 * 6481 * @pool->cpu is coming online. Rebind all w 4675 * @pool->cpu is coming online. Rebind all workers to the CPU. 6482 */ 4676 */ 6483 static void rebind_workers(struct worker_pool 4677 static void rebind_workers(struct worker_pool *pool) 6484 { 4678 { 6485 struct worker *worker; 4679 struct worker *worker; >> 4680 int wi; 6486 4681 6487 lockdep_assert_held(&wq_pool_attach_m !! 4682 lockdep_assert_held(&pool->manager_mutex); 6488 4683 6489 /* 4684 /* 6490 * Restore CPU affinity of all worker 4685 * Restore CPU affinity of all workers. As all idle workers should 6491 * be on the run-queue of the associa 4686 * be on the run-queue of the associated CPU before any local 6492 * wake-ups for concurrency managemen !! 4687 * wake-ups for concurrency management happen, restore CPU affinty 6493 * of all workers first and then clea 4688 * of all workers first and then clear UNBOUND. As we're called 6494 * from CPU_ONLINE, the following sho 4689 * from CPU_ONLINE, the following shouldn't fail. 6495 */ 4690 */ 6496 for_each_pool_worker(worker, pool) { !! 4691 for_each_pool_worker(worker, wi, pool) 6497 kthread_set_per_cpu(worker->t << 6498 WARN_ON_ONCE(set_cpus_allowed 4692 WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, 6499 !! 4693 pool->attrs->cpumask) < 0); 6500 } << 6501 4694 6502 raw_spin_lock_irq(&pool->lock); !! 4695 spin_lock_irq(&pool->lock); 6503 4696 6504 pool->flags &= ~POOL_DISASSOCIATED; !! 4697 for_each_pool_worker(worker, wi, pool) { 6505 << 6506 for_each_pool_worker(worker, pool) { << 6507 unsigned int worker_flags = w 4698 unsigned int worker_flags = worker->flags; 6508 4699 6509 /* 4700 /* >> 4701 * A bound idle worker should actually be on the runqueue >> 4702 * of the associated CPU for local wake-ups targeting it to >> 4703 * work. Kick all idle workers so that they migrate to the >> 4704 * associated CPU. Doing this in the same loop as >> 4705 * replacing UNBOUND with REBOUND is safe as no worker will >> 4706 * be bound before @pool->lock is released. >> 4707 */ >> 4708 if (worker_flags & WORKER_IDLE) >> 4709 wake_up_process(worker->task); >> 4710 >> 4711 /* 6510 * We want to clear UNBOUND b 4712 * We want to clear UNBOUND but can't directly call 6511 * worker_clr_flags() or adju 4713 * worker_clr_flags() or adjust nr_running. Atomically 6512 * replace UNBOUND with anoth 4714 * replace UNBOUND with another NOT_RUNNING flag REBOUND. 6513 * @worker will clear REBOUND 4715 * @worker will clear REBOUND using worker_clr_flags() when 6514 * it initiates the next exec 4716 * it initiates the next execution cycle thus restoring 6515 * concurrency management. N 4717 * concurrency management. Note that when or whether 6516 * @worker clears REBOUND doe 4718 * @worker clears REBOUND doesn't affect correctness. 6517 * 4719 * 6518 * WRITE_ONCE() is necessary !! 4720 * ACCESS_ONCE() is necessary because @worker->flags may be 6519 * tested without holding any 4721 * tested without holding any lock in 6520 * wq_worker_running(). With !! 4722 * wq_worker_waking_up(). 
Without it, NOT_RUNNING test may 6521 * fail incorrectly leading t 4723 * fail incorrectly leading to premature concurrency 6522 * management operations. 4724 * management operations. 6523 */ 4725 */ 6524 WARN_ON_ONCE(!(worker_flags & 4726 WARN_ON_ONCE(!(worker_flags & WORKER_UNBOUND)); 6525 worker_flags |= WORKER_REBOUN 4727 worker_flags |= WORKER_REBOUND; 6526 worker_flags &= ~WORKER_UNBOU 4728 worker_flags &= ~WORKER_UNBOUND; 6527 WRITE_ONCE(worker->flags, wor !! 4729 ACCESS_ONCE(worker->flags) = worker_flags; 6528 } 4730 } 6529 4731 6530 raw_spin_unlock_irq(&pool->lock); !! 4732 spin_unlock_irq(&pool->lock); 6531 } 4733 } 6532 4734 6533 /** 4735 /** 6534 * restore_unbound_workers_cpumask - restore 4736 * restore_unbound_workers_cpumask - restore cpumask of unbound workers 6535 * @pool: unbound pool of interest 4737 * @pool: unbound pool of interest 6536 * @cpu: the CPU which is coming up 4738 * @cpu: the CPU which is coming up 6537 * 4739 * 6538 * An unbound pool may end up with a cpumask 4740 * An unbound pool may end up with a cpumask which doesn't have any online 6539 * CPUs. When a worker of such pool get sche 4741 * CPUs. When a worker of such pool get scheduled, the scheduler resets 6540 * its cpus_allowed. If @cpu is in @pool's c 4742 * its cpus_allowed. If @cpu is in @pool's cpumask which didn't have any 6541 * online CPU before, cpus_allowed of all its 4743 * online CPU before, cpus_allowed of all its workers should be restored. 6542 */ 4744 */ 6543 static void restore_unbound_workers_cpumask(s 4745 static void restore_unbound_workers_cpumask(struct worker_pool *pool, int cpu) 6544 { 4746 { 6545 static cpumask_t cpumask; 4747 static cpumask_t cpumask; 6546 struct worker *worker; 4748 struct worker *worker; >> 4749 int wi; 6547 4750 6548 lockdep_assert_held(&wq_pool_attach_m !! 4751 lockdep_assert_held(&pool->manager_mutex); 6549 4752 6550 /* is @cpu allowed for @pool? */ 4753 /* is @cpu allowed for @pool? */ 6551 if (!cpumask_test_cpu(cpu, pool->attr 4754 if (!cpumask_test_cpu(cpu, pool->attrs->cpumask)) 6552 return; 4755 return; 6553 4756 >> 4757 /* is @cpu the only online CPU? */ 6554 cpumask_and(&cpumask, pool->attrs->cp 4758 cpumask_and(&cpumask, pool->attrs->cpumask, cpu_online_mask); >> 4759 if (cpumask_weight(&cpumask) != 1) >> 4760 return; 6555 4761 6556 /* as we're called from CPU_ONLINE, t 4762 /* as we're called from CPU_ONLINE, the following shouldn't fail */ 6557 for_each_pool_worker(worker, pool) !! 4763 for_each_pool_worker(worker, wi, pool) 6558 WARN_ON_ONCE(set_cpus_allowed !! 4764 WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, 6559 } !! 4765 pool->attrs->cpumask) < 0); 6560 << 6561 int workqueue_prepare_cpu(unsigned int cpu) << 6562 { << 6563 struct worker_pool *pool; << 6564 << 6565 for_each_cpu_worker_pool(pool, cpu) { << 6566 if (pool->nr_workers) << 6567 continue; << 6568 if (!create_worker(pool)) << 6569 return -ENOMEM; << 6570 } << 6571 return 0; << 6572 } 4766 } 6573 4767 6574 int workqueue_online_cpu(unsigned int cpu) !! 4768 /* >> 4769 * Workqueues should be brought up before normal priority CPU notifiers. >> 4770 * This will be registered high priority CPU notifier. 
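/*
 * Illustrative sketch, not from the kernel tree: the unbind/rebind logic
 * above keeps per-cpu pools coherent across hotplug, but a caller of
 * queue_work_on() must still pin the target CPU itself. Hypothetical
 * helper:
 */
#include <linux/cpu.h>

static int example_queue_on(int cpu, struct work_struct *work)
{
	int ret = -ENODEV;

	cpus_read_lock();			/* keep @cpu from going down */
	if (cpu_online(cpu))
		ret = queue_work_on(cpu, system_wq, work) ? 0 : -EBUSY;
	cpus_read_unlock();
	return ret;
}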
>> 4771 */ >> 4772 static int __cpuinit workqueue_cpu_up_callback(struct notifier_block *nfb, >> 4773 unsigned long action, >> 4774 void *hcpu) 6575 { 4775 { >> 4776 int cpu = (unsigned long)hcpu; 6576 struct worker_pool *pool; 4777 struct worker_pool *pool; 6577 struct workqueue_struct *wq; 4778 struct workqueue_struct *wq; 6578 int pi; 4779 int pi; 6579 4780 6580 mutex_lock(&wq_pool_mutex); !! 4781 switch (action & ~CPU_TASKS_FROZEN) { 6581 !! 4782 case CPU_UP_PREPARE: 6582 cpumask_set_cpu(cpu, wq_online_cpumas !! 4783 for_each_cpu_worker_pool(pool, cpu) { >> 4784 if (pool->nr_workers) >> 4785 continue; >> 4786 if (create_and_start_worker(pool) < 0) >> 4787 return NOTIFY_BAD; >> 4788 } >> 4789 break; 6583 4790 6584 for_each_pool(pool, pi) { !! 4791 case CPU_DOWN_FAILED: 6585 /* BH pools aren't affected b !! 4792 case CPU_ONLINE: 6586 if (pool->flags & POOL_BH) !! 4793 mutex_lock(&wq_pool_mutex); 6587 continue; << 6588 4794 6589 mutex_lock(&wq_pool_attach_mu !! 4795 for_each_pool(pool, pi) { 6590 if (pool->cpu == cpu) !! 4796 mutex_lock(&pool->manager_mutex); 6591 rebind_workers(pool); << 6592 else if (pool->cpu < 0) << 6593 restore_unbound_worke << 6594 mutex_unlock(&wq_pool_attach_ << 6595 } << 6596 4797 6597 /* update pod affinity of unbound wor !! 4798 if (pool->cpu == cpu) { 6598 list_for_each_entry(wq, &workqueues, !! 4799 spin_lock_irq(&pool->lock); 6599 struct workqueue_attrs *attrs !! 4800 pool->flags &= ~POOL_DISASSOCIATED; >> 4801 spin_unlock_irq(&pool->lock); >> 4802 >> 4803 rebind_workers(pool); >> 4804 } else if (pool->cpu < 0) { >> 4805 restore_unbound_workers_cpumask(pool, cpu); >> 4806 } 6600 4807 6601 if (attrs) { !! 4808 mutex_unlock(&pool->manager_mutex); 6602 const struct wq_pod_t !! 4809 } 6603 int tcpu; << 6604 4810 6605 for_each_cpu(tcpu, pt !! 4811 /* update NUMA affinity of unbound workqueues */ 6606 unbound_wq_up !! 4812 list_for_each_entry(wq, &workqueues, list) >> 4813 wq_update_unbound_numa(wq, cpu, true); 6607 4814 6608 mutex_lock(&wq->mutex !! 4815 mutex_unlock(&wq_pool_mutex); 6609 wq_update_node_max_ac !! 4816 break; 6610 mutex_unlock(&wq->mut << 6611 } << 6612 } 4817 } 6613 !! 4818 return NOTIFY_OK; 6614 mutex_unlock(&wq_pool_mutex); << 6615 return 0; << 6616 } 4819 } 6617 4820 6618 int workqueue_offline_cpu(unsigned int cpu) !! 4821 /* >> 4822 * Workqueues should be brought down after normal priority CPU notifiers. >> 4823 * This will be registered as low priority CPU notifier. >> 4824 */ >> 4825 static int __cpuinit workqueue_cpu_down_callback(struct notifier_block *nfb, >> 4826 unsigned long action, >> 4827 void *hcpu) 6619 { 4828 { >> 4829 int cpu = (unsigned long)hcpu; >> 4830 struct work_struct unbind_work; 6620 struct workqueue_struct *wq; 4831 struct workqueue_struct *wq; 6621 4832 6622 /* unbinding per-cpu workers should h !! 4833 switch (action & ~CPU_TASKS_FROZEN) { 6623 if (WARN_ON(cpu != smp_processor_id() !! 4834 case CPU_DOWN_PREPARE: 6624 return -1; !! 4835 /* unbinding per-cpu workers should happen on the local CPU */ 6625 !! 4836 INIT_WORK_ONSTACK(&unbind_work, wq_unbind_fn); 6626 unbind_workers(cpu); !! 4837 queue_work_on(cpu, system_highpri_wq, &unbind_work); 6627 << 6628 /* update pod affinity of unbound wor << 6629 mutex_lock(&wq_pool_mutex); << 6630 << 6631 cpumask_clear_cpu(cpu, wq_online_cpum << 6632 4838 6633 list_for_each_entry(wq, &workqueues, !! 4839 /* update NUMA affinity of unbound workqueues */ 6634 struct workqueue_attrs *attrs !! 4840 mutex_lock(&wq_pool_mutex); 6635 !! 
4841 list_for_each_entry(wq, &workqueues, list) 6636 if (attrs) { !! 4842 wq_update_unbound_numa(wq, cpu, false); 6637 const struct wq_pod_t !! 4843 mutex_unlock(&wq_pool_mutex); 6638 int tcpu; << 6639 << 6640 for_each_cpu(tcpu, pt << 6641 unbound_wq_up << 6642 4844 6643 mutex_lock(&wq->mutex !! 4845 /* wait for per-cpu unbinding to finish */ 6644 wq_update_node_max_ac !! 4846 flush_work(&unbind_work); 6645 mutex_unlock(&wq->mut !! 4847 break; 6646 } << 6647 } 4848 } 6648 mutex_unlock(&wq_pool_mutex); !! 4849 return NOTIFY_OK; 6649 << 6650 return 0; << 6651 } 4850 } 6652 4851 >> 4852 #ifdef CONFIG_SMP >> 4853 6653 struct work_for_cpu { 4854 struct work_for_cpu { 6654 struct work_struct work; 4855 struct work_struct work; 6655 long (*fn)(void *); 4856 long (*fn)(void *); 6656 void *arg; 4857 void *arg; 6657 long ret; 4858 long ret; 6658 }; 4859 }; 6659 4860 6660 static void work_for_cpu_fn(struct work_struc 4861 static void work_for_cpu_fn(struct work_struct *work) 6661 { 4862 { 6662 struct work_for_cpu *wfc = container_ 4863 struct work_for_cpu *wfc = container_of(work, struct work_for_cpu, work); 6663 4864 6664 wfc->ret = wfc->fn(wfc->arg); 4865 wfc->ret = wfc->fn(wfc->arg); 6665 } 4866 } 6666 4867 6667 /** 4868 /** 6668 * work_on_cpu_key - run a function in thread !! 4869 * work_on_cpu - run a function in user context on a particular cpu 6669 * @cpu: the cpu to run on 4870 * @cpu: the cpu to run on 6670 * @fn: the function to run 4871 * @fn: the function to run 6671 * @arg: the function arg 4872 * @arg: the function arg 6672 * @key: The lock class key for lock debuggin << 6673 * 4873 * >> 4874 * This will return the value @fn returns. 6674 * It is up to the caller to ensure that the 4875 * It is up to the caller to ensure that the cpu doesn't go offline. 6675 * The caller must not hold any locks which w 4876 * The caller must not hold any locks which would prevent @fn from completing. 6676 * << 6677 * Return: The value @fn returns. << 6678 */ 4877 */ 6679 long work_on_cpu_key(int cpu, long (*fn)(void !! 4878 long work_on_cpu(int cpu, long (*fn)(void *), void *arg) 6680 void *arg, struct lock_c << 6681 { 4879 { 6682 struct work_for_cpu wfc = { .fn = fn, 4880 struct work_for_cpu wfc = { .fn = fn, .arg = arg }; 6683 4881 6684 INIT_WORK_ONSTACK_KEY(&wfc.work, work !! 4882 INIT_WORK_ONSTACK(&wfc.work, work_for_cpu_fn); 6685 schedule_work_on(cpu, &wfc.work); 4883 schedule_work_on(cpu, &wfc.work); 6686 flush_work(&wfc.work); 4884 flush_work(&wfc.work); 6687 destroy_work_on_stack(&wfc.work); << 6688 return wfc.ret; 4885 return wfc.ret; 6689 } 4886 } 6690 EXPORT_SYMBOL_GPL(work_on_cpu_key); !! 4887 EXPORT_SYMBOL_GPL(work_on_cpu); 6691 << 6692 /** << 6693 * work_on_cpu_safe_key - run a function in t << 6694 * @cpu: the cpu to run on << 6695 * @fn: the function to run << 6696 * @arg: the function argument << 6697 * @key: The lock class key for lock debuggin << 6698 * << 6699 * Disables CPU hotplug and calls work_on_cpu << 6700 * any locks which would prevent @fn from com << 6701 * << 6702 * Return: The value @fn returns. 
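/*
 * Illustrative sketch, not from the kernel tree: work_on_cpu() is fully
 * synchronous -- it queues on system_wq bound to @cpu and flushes. As the
 * comment above notes, the caller must keep @cpu online (or use the
 * work_on_cpu_safe() variant, which wraps the call in cpus_read_lock()).
 * read_local_state is hypothetical.
 */
static long read_local_state(void *arg)
{
	/* process context, guaranteed to run on the requested CPU */
	return raw_smp_processor_id();
}

static long example_work_on_cpu(int cpu)
{
	return work_on_cpu(cpu, read_local_state, NULL);
}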
<< 6703 */ << 6704 long work_on_cpu_safe_key(int cpu, long (*fn) << 6705 void *arg, struct l << 6706 { << 6707 long ret = -ENODEV; << 6708 << 6709 cpus_read_lock(); << 6710 if (cpu_online(cpu)) << 6711 ret = work_on_cpu_key(cpu, fn << 6712 cpus_read_unlock(); << 6713 return ret; << 6714 } << 6715 EXPORT_SYMBOL_GPL(work_on_cpu_safe_key); << 6716 #endif /* CONFIG_SMP */ 4888 #endif /* CONFIG_SMP */ 6717 4889 6718 #ifdef CONFIG_FREEZER 4890 #ifdef CONFIG_FREEZER 6719 4891 6720 /** 4892 /** 6721 * freeze_workqueues_begin - begin freezing w 4893 * freeze_workqueues_begin - begin freezing workqueues 6722 * 4894 * 6723 * Start freezing workqueues. After this fun 4895 * Start freezing workqueues. After this function returns, all freezable 6724 * workqueues will queue new works to their i !! 4896 * workqueues will queue new works to their delayed_works list instead of 6725 * pool->worklist. 4897 * pool->worklist. 6726 * 4898 * 6727 * CONTEXT: 4899 * CONTEXT: 6728 * Grabs and releases wq_pool_mutex, wq->mute 4900 * Grabs and releases wq_pool_mutex, wq->mutex and pool->lock's. 6729 */ 4901 */ 6730 void freeze_workqueues_begin(void) 4902 void freeze_workqueues_begin(void) 6731 { 4903 { >> 4904 struct worker_pool *pool; 6732 struct workqueue_struct *wq; 4905 struct workqueue_struct *wq; >> 4906 struct pool_workqueue *pwq; >> 4907 int pi; 6733 4908 6734 mutex_lock(&wq_pool_mutex); 4909 mutex_lock(&wq_pool_mutex); 6735 4910 6736 WARN_ON_ONCE(workqueue_freezing); 4911 WARN_ON_ONCE(workqueue_freezing); 6737 workqueue_freezing = true; 4912 workqueue_freezing = true; 6738 4913 >> 4914 /* set FREEZING */ >> 4915 for_each_pool(pool, pi) { >> 4916 spin_lock_irq(&pool->lock); >> 4917 WARN_ON_ONCE(pool->flags & POOL_FREEZING); >> 4918 pool->flags |= POOL_FREEZING; >> 4919 spin_unlock_irq(&pool->lock); >> 4920 } >> 4921 6739 list_for_each_entry(wq, &workqueues, 4922 list_for_each_entry(wq, &workqueues, list) { 6740 mutex_lock(&wq->mutex); 4923 mutex_lock(&wq->mutex); 6741 wq_adjust_max_active(wq); !! 4924 for_each_pwq(pwq, wq) >> 4925 pwq_adjust_max_active(pwq); 6742 mutex_unlock(&wq->mutex); 4926 mutex_unlock(&wq->mutex); 6743 } 4927 } 6744 4928 6745 mutex_unlock(&wq_pool_mutex); 4929 mutex_unlock(&wq_pool_mutex); 6746 } 4930 } 6747 4931 6748 /** 4932 /** 6749 * freeze_workqueues_busy - are freezable wor 4933 * freeze_workqueues_busy - are freezable workqueues still busy? 6750 * 4934 * 6751 * Check whether freezing is complete. This 4935 * Check whether freezing is complete. This function must be called 6752 * between freeze_workqueues_begin() and thaw 4936 * between freeze_workqueues_begin() and thaw_workqueues(). 6753 * 4937 * 6754 * CONTEXT: 4938 * CONTEXT: 6755 * Grabs and releases wq_pool_mutex. 4939 * Grabs and releases wq_pool_mutex. 6756 * 4940 * 6757 * Return: !! 4941 * RETURNS: 6758 * %true if some freezable workqueues are sti 4942 * %true if some freezable workqueues are still busy. %false if freezing 6759 * is complete. 4943 * is complete. 
6760 */ 4944 */ 6761 bool freeze_workqueues_busy(void) 4945 bool freeze_workqueues_busy(void) 6762 { 4946 { 6763 bool busy = false; 4947 bool busy = false; 6764 struct workqueue_struct *wq; 4948 struct workqueue_struct *wq; 6765 struct pool_workqueue *pwq; 4949 struct pool_workqueue *pwq; 6766 4950 6767 mutex_lock(&wq_pool_mutex); 4951 mutex_lock(&wq_pool_mutex); 6768 4952 6769 WARN_ON_ONCE(!workqueue_freezing); 4953 WARN_ON_ONCE(!workqueue_freezing); 6770 4954 6771 list_for_each_entry(wq, &workqueues, 4955 list_for_each_entry(wq, &workqueues, list) { 6772 if (!(wq->flags & WQ_FREEZABL 4956 if (!(wq->flags & WQ_FREEZABLE)) 6773 continue; 4957 continue; 6774 /* 4958 /* 6775 * nr_active is monotonically 4959 * nr_active is monotonically decreasing. It's safe 6776 * to peek without lock. 4960 * to peek without lock. 6777 */ 4961 */ 6778 rcu_read_lock(); !! 4962 rcu_read_lock_sched(); 6779 for_each_pwq(pwq, wq) { 4963 for_each_pwq(pwq, wq) { 6780 WARN_ON_ONCE(pwq->nr_ 4964 WARN_ON_ONCE(pwq->nr_active < 0); 6781 if (pwq->nr_active) { 4965 if (pwq->nr_active) { 6782 busy = true; 4966 busy = true; 6783 rcu_read_unlo !! 4967 rcu_read_unlock_sched(); 6784 goto out_unlo 4968 goto out_unlock; 6785 } 4969 } 6786 } 4970 } 6787 rcu_read_unlock(); !! 4971 rcu_read_unlock_sched(); 6788 } 4972 } 6789 out_unlock: 4973 out_unlock: 6790 mutex_unlock(&wq_pool_mutex); 4974 mutex_unlock(&wq_pool_mutex); 6791 return busy; 4975 return busy; 6792 } 4976 } 6793 4977 6794 /** 4978 /** 6795 * thaw_workqueues - thaw workqueues 4979 * thaw_workqueues - thaw workqueues 6796 * 4980 * 6797 * Thaw workqueues. Normal queueing is resto 4981 * Thaw workqueues. Normal queueing is restored and all collected 6798 * frozen works are transferred to their resp 4982 * frozen works are transferred to their respective pool worklists. 6799 * 4983 * 6800 * CONTEXT: 4984 * CONTEXT: 6801 * Grabs and releases wq_pool_mutex, wq->mute 4985 * Grabs and releases wq_pool_mutex, wq->mutex and pool->lock's. 6802 */ 4986 */ 6803 void thaw_workqueues(void) 4987 void thaw_workqueues(void) 6804 { 4988 { 6805 struct workqueue_struct *wq; 4989 struct workqueue_struct *wq; >> 4990 struct pool_workqueue *pwq; >> 4991 struct worker_pool *pool; >> 4992 int pi; 6806 4993 6807 mutex_lock(&wq_pool_mutex); 4994 mutex_lock(&wq_pool_mutex); 6808 4995 6809 if (!workqueue_freezing) 4996 if (!workqueue_freezing) 6810 goto out_unlock; 4997 goto out_unlock; 6811 4998 6812 workqueue_freezing = false; !! 4999 /* clear FREEZING */ >> 5000 for_each_pool(pool, pi) { >> 5001 spin_lock_irq(&pool->lock); >> 5002 WARN_ON_ONCE(!(pool->flags & POOL_FREEZING)); >> 5003 pool->flags &= ~POOL_FREEZING; >> 5004 spin_unlock_irq(&pool->lock); >> 5005 } 6813 5006 6814 /* restore max_active and repopulate 5007 /* restore max_active and repopulate worklist */ 6815 list_for_each_entry(wq, &workqueues, 5008 list_for_each_entry(wq, &workqueues, list) { 6816 mutex_lock(&wq->mutex); 5009 mutex_lock(&wq->mutex); 6817 wq_adjust_max_active(wq); !! 5010 for_each_pwq(pwq, wq) >> 5011 pwq_adjust_max_active(pwq); 6818 mutex_unlock(&wq->mutex); 5012 mutex_unlock(&wq->mutex); 6819 } 5013 } 6820 5014 >> 5015 workqueue_freezing = false; 6821 out_unlock: 5016 out_unlock: 6822 mutex_unlock(&wq_pool_mutex); 5017 mutex_unlock(&wq_pool_mutex); 6823 } 5018 } 6824 #endif /* CONFIG_FREEZER */ 5019 #endif /* CONFIG_FREEZER */ 6825 5020 6826 static int workqueue_apply_unbound_cpumask(co !! 5021 static void __init wq_numa_init(void) 6827 { 5022 { 6828 LIST_HEAD(ctxs); !! 
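/*
 * Illustrative sketch, not from the kernel tree: only WQ_FREEZABLE
 * workqueues take part in the freeze/thaw cycle above. Work that issues
 * I/O across suspend typically wants the flag so nothing hits storage
 * after the devices are frozen. Names are hypothetical.
 */
static struct workqueue_struct *my_fs_wq;

static int example_freezable(void)
{
	my_fs_wq = alloc_workqueue("my_fs_io",
				   WQ_FREEZABLE | WQ_MEM_RECLAIM, 0);
	return my_fs_wq ? 0 : -ENOMEM;
}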
5023 cpumask_var_t *tbl; 6829 int ret = 0; !! 5024 int node, cpu; 6830 struct workqueue_struct *wq; << 6831 struct apply_wqattrs_ctx *ctx, *n; << 6832 << 6833 lockdep_assert_held(&wq_pool_mutex); << 6834 << 6835 list_for_each_entry(wq, &workqueues, << 6836 if (!(wq->flags & WQ_UNBOUND) << 6837 continue; << 6838 5025 6839 ctx = apply_wqattrs_prepare(w !! 5026 /* determine NUMA pwq table len - highest node id + 1 */ 6840 if (IS_ERR(ctx)) { !! 5027 for_each_node(node) 6841 ret = PTR_ERR(ctx); !! 5028 wq_numa_tbl_len = max(wq_numa_tbl_len, node + 1); 6842 break; << 6843 } << 6844 5029 6845 list_add_tail(&ctx->list, &ct !! 5030 if (num_possible_nodes() <= 1) 6846 } !! 5031 return; 6847 << 6848 list_for_each_entry_safe(ctx, n, &ctx << 6849 if (!ret) << 6850 apply_wqattrs_commit( << 6851 apply_wqattrs_cleanup(ctx); << 6852 } << 6853 5032 6854 if (!ret) { !! 5033 if (wq_disable_numa) { 6855 mutex_lock(&wq_pool_attach_mu !! 5034 pr_info("workqueue: NUMA affinity support disabled\n"); 6856 cpumask_copy(wq_unbound_cpuma !! 5035 return; 6857 mutex_unlock(&wq_pool_attach_ << 6858 } 5036 } 6859 return ret; << 6860 } << 6861 5037 6862 /** !! 5038 wq_update_unbound_numa_attrs_buf = alloc_workqueue_attrs(GFP_KERNEL); 6863 * workqueue_unbound_exclude_cpumask - Exclud !! 5039 BUG_ON(!wq_update_unbound_numa_attrs_buf); 6864 * @exclude_cpumask: the cpumask to be exclud << 6865 * << 6866 * This function can be called from cpuset co << 6867 * CPUs that should be excluded from wq_unbou << 6868 */ << 6869 int workqueue_unbound_exclude_cpumask(cpumask << 6870 { << 6871 cpumask_var_t cpumask; << 6872 int ret = 0; << 6873 << 6874 if (!zalloc_cpumask_var(&cpumask, GFP << 6875 return -ENOMEM; << 6876 << 6877 mutex_lock(&wq_pool_mutex); << 6878 5040 6879 /* 5041 /* 6880 * If the operation fails, it will fa !! 5042 * We want masks of possible CPUs of each node which isn't readily 6881 * wq_requested_unbound_cpumask which !! 5043 * available. Build one from cpu_to_node() which should have been 6882 * (HK_TYPE_WQ ∩ HK_TYPE_DOMAIN) ho !! 5044 * fully initialized by now. 
6883 * by any subsequent write to workque << 6884 */ << 6885 if (!cpumask_andnot(cpumask, wq_reque << 6886 cpumask_copy(cpumask, wq_requ << 6887 if (!cpumask_equal(cpumask, wq_unboun << 6888 ret = workqueue_apply_unbound << 6889 << 6890 /* Save the current isolated cpumask << 6891 if (!ret) << 6892 cpumask_copy(wq_isolated_cpum << 6893 << 6894 mutex_unlock(&wq_pool_mutex); << 6895 free_cpumask_var(cpumask); << 6896 return ret; << 6897 } << 6898 << 6899 static int parse_affn_scope(const char *val) << 6900 { << 6901 int i; << 6902 << 6903 for (i = 0; i < ARRAY_SIZE(wq_affn_na << 6904 if (!strncasecmp(val, wq_affn << 6905 return i; << 6906 } << 6907 return -EINVAL; << 6908 } << 6909 << 6910 static int wq_affn_dfl_set(const char *val, c << 6911 { << 6912 struct workqueue_struct *wq; << 6913 int affn, cpu; << 6914 << 6915 affn = parse_affn_scope(val); << 6916 if (affn < 0) << 6917 return affn; << 6918 if (affn == WQ_AFFN_DFL) << 6919 return -EINVAL; << 6920 << 6921 cpus_read_lock(); << 6922 mutex_lock(&wq_pool_mutex); << 6923 << 6924 wq_affn_dfl = affn; << 6925 << 6926 list_for_each_entry(wq, &workqueues, << 6927 for_each_online_cpu(cpu) << 6928 unbound_wq_update_pwq << 6929 } << 6930 << 6931 mutex_unlock(&wq_pool_mutex); << 6932 cpus_read_unlock(); << 6933 << 6934 return 0; << 6935 } << 6936 << 6937 static int wq_affn_dfl_get(char *buffer, cons << 6938 { << 6939 return scnprintf(buffer, PAGE_SIZE, " << 6940 } << 6941 << 6942 static const struct kernel_param_ops wq_affn_ << 6943 .set = wq_affn_dfl_set, << 6944 .get = wq_affn_dfl_get, << 6945 }; << 6946 << 6947 module_param_cb(default_affinity_scope, &wq_a << 6948 << 6949 #ifdef CONFIG_SYSFS << 6950 /* << 6951 * Workqueues with WQ_SYSFS flag set is visib << 6952 * /sys/bus/workqueue/devices/WQ_NAME. All v << 6953 * following attributes. 
<< 6954 * << 6955 * per_cpu RO bool : whether the << 6956 * max_active RW int : maximum num << 6957 * << 6958 * Unbound workqueues have the following extr << 6959 * << 6960 * nice RW int : nice value << 6961 * cpumask RW mask : bitmask of << 6962 * affinity_scope RW str : worker CPU << 6963 * affinity_strict RW bool : worker CPU << 6964 */ << 6965 struct wq_device { << 6966 struct workqueue_struct *wq; << 6967 struct device dev; << 6968 }; << 6969 << 6970 static struct workqueue_struct *dev_to_wq(str << 6971 { << 6972 struct wq_device *wq_dev = container_ << 6973 << 6974 return wq_dev->wq; << 6975 } << 6976 << 6977 static ssize_t per_cpu_show(struct device *de << 6978 char *buf) << 6979 { << 6980 struct workqueue_struct *wq = dev_to_ << 6981 << 6982 return scnprintf(buf, PAGE_SIZE, "%d\ << 6983 } << 6984 static DEVICE_ATTR_RO(per_cpu); << 6985 << 6986 static ssize_t max_active_show(struct device << 6987 struct device_ << 6988 { << 6989 struct workqueue_struct *wq = dev_to_ << 6990 << 6991 return scnprintf(buf, PAGE_SIZE, "%d\ << 6992 } << 6993 << 6994 static ssize_t max_active_store(struct device << 6995 struct device << 6996 size_t count) << 6997 { << 6998 struct workqueue_struct *wq = dev_to_ << 6999 int val; << 7000 << 7001 if (sscanf(buf, "%d", &val) != 1 || v << 7002 return -EINVAL; << 7003 << 7004 workqueue_set_max_active(wq, val); << 7005 return count; << 7006 } << 7007 static DEVICE_ATTR_RW(max_active); << 7008 << 7009 static struct attribute *wq_sysfs_attrs[] = { << 7010 &dev_attr_per_cpu.attr, << 7011 &dev_attr_max_active.attr, << 7012 NULL, << 7013 }; << 7014 ATTRIBUTE_GROUPS(wq_sysfs); << 7015 << 7016 static ssize_t wq_nice_show(struct device *de << 7017 char *buf) << 7018 { << 7019 struct workqueue_struct *wq = dev_to_ << 7020 int written; << 7021 << 7022 mutex_lock(&wq->mutex); << 7023 written = scnprintf(buf, PAGE_SIZE, " << 7024 mutex_unlock(&wq->mutex); << 7025 << 7026 return written; << 7027 } << 7028 << 7029 /* prepare workqueue_attrs for sysfs store op << 7030 static struct workqueue_attrs *wq_sysfs_prep_ << 7031 { << 7032 struct workqueue_attrs *attrs; << 7033 << 7034 lockdep_assert_held(&wq_pool_mutex); << 7035 << 7036 attrs = alloc_workqueue_attrs(); << 7037 if (!attrs) << 7038 return NULL; << 7039 << 7040 copy_workqueue_attrs(attrs, wq->unbou << 7041 return attrs; << 7042 } << 7043 << 7044 static ssize_t wq_nice_store(struct device *d << 7045 const char *buf, << 7046 { << 7047 struct workqueue_struct *wq = dev_to_ << 7048 struct workqueue_attrs *attrs; << 7049 int ret = -ENOMEM; << 7050 << 7051 apply_wqattrs_lock(); << 7052 << 7053 attrs = wq_sysfs_prep_attrs(wq); << 7054 if (!attrs) << 7055 goto out_unlock; << 7056 << 7057 if (sscanf(buf, "%d", &attrs->nice) = << 7058 attrs->nice >= MIN_NICE && attrs- << 7059 ret = apply_workqueue_attrs_l << 7060 else << 7061 ret = -EINVAL; << 7062 << 7063 out_unlock: << 7064 apply_wqattrs_unlock(); << 7065 free_workqueue_attrs(attrs); << 7066 return ret ?: count; << 7067 } << 7068 << 7069 static ssize_t wq_cpumask_show(struct device << 7070 struct device_ << 7071 { << 7072 struct workqueue_struct *wq = dev_to_ << 7073 int written; << 7074 << 7075 mutex_lock(&wq->mutex); << 7076 written = scnprintf(buf, PAGE_SIZE, " << 7077 cpumask_pr_args(w << 7078 mutex_unlock(&wq->mutex); << 7079 return written; << 7080 } << 7081 << 7082 static ssize_t wq_cpumask_store(struct device << 7083 struct device << 7084 const char *b << 7085 { << 7086 struct workqueue_struct *wq = dev_to_ << 7087 struct workqueue_attrs *attrs; << 7088 int ret = -ENOMEM; 
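/*
 * Illustrative sketch (not part of this file): the show/store
 * callbacks in this block back the attribute files under
 * /sys/bus/workqueue/devices/<name>/. A workqueue opts in by passing
 * WQ_SYSFS at allocation time ("ex_wq" is a hypothetical name):
 *
 *	wq = alloc_workqueue("ex_wq", WQ_SYSFS | WQ_UNBOUND, 0);
 *
 * after which a userspace write such as "-5" to the "nice" file is
 * parsed by wq_nice_store() and applied through the apply_wqattrs_*()
 * helpers seen above.
 */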
<< 7089 << 7090 apply_wqattrs_lock(); << 7091 << 7092 attrs = wq_sysfs_prep_attrs(wq); << 7093 if (!attrs) << 7094 goto out_unlock; << 7095 << 7096 ret = cpumask_parse(buf, attrs->cpuma << 7097 if (!ret) << 7098 ret = apply_workqueue_attrs_l << 7099 << 7100 out_unlock: << 7101 apply_wqattrs_unlock(); << 7102 free_workqueue_attrs(attrs); << 7103 return ret ?: count; << 7104 } << 7105 << 7106 static ssize_t wq_affn_scope_show(struct devi << 7107 struct devi << 7108 { << 7109 struct workqueue_struct *wq = dev_to_ << 7110 int written; << 7111 << 7112 mutex_lock(&wq->mutex); << 7113 if (wq->unbound_attrs->affn_scope == << 7114 written = scnprintf(buf, PAGE << 7115 wq_affn_n << 7116 wq_affn_n << 7117 else << 7118 written = scnprintf(buf, PAGE << 7119 wq_affn_n << 7120 mutex_unlock(&wq->mutex); << 7121 << 7122 return written; << 7123 } << 7124 << 7125 static ssize_t wq_affn_scope_store(struct dev << 7126 struct dev << 7127 const char << 7128 { << 7129 struct workqueue_struct *wq = dev_to_ << 7130 struct workqueue_attrs *attrs; << 7131 int affn, ret = -ENOMEM; << 7132 << 7133 affn = parse_affn_scope(buf); << 7134 if (affn < 0) << 7135 return affn; << 7136 << 7137 apply_wqattrs_lock(); << 7138 attrs = wq_sysfs_prep_attrs(wq); << 7139 if (attrs) { << 7140 attrs->affn_scope = affn; << 7141 ret = apply_workqueue_attrs_l << 7142 } << 7143 apply_wqattrs_unlock(); << 7144 free_workqueue_attrs(attrs); << 7145 return ret ?: count; << 7146 } << 7147 << 7148 static ssize_t wq_affinity_strict_show(struct << 7149 struct << 7150 { << 7151 struct workqueue_struct *wq = dev_to_ << 7152 << 7153 return scnprintf(buf, PAGE_SIZE, "%d\ << 7154 wq->unbound_attrs->a << 7155 } << 7156 << 7157 static ssize_t wq_affinity_strict_store(struc << 7158 struc << 7159 const << 7160 { << 7161 struct workqueue_struct *wq = dev_to_ << 7162 struct workqueue_attrs *attrs; << 7163 int v, ret = -ENOMEM; << 7164 << 7165 if (sscanf(buf, "%d", &v) != 1) << 7166 return -EINVAL; << 7167 << 7168 apply_wqattrs_lock(); << 7169 attrs = wq_sysfs_prep_attrs(wq); << 7170 if (attrs) { << 7171 attrs->affn_strict = (bool)v; << 7172 ret = apply_workqueue_attrs_l << 7173 } << 7174 apply_wqattrs_unlock(); << 7175 free_workqueue_attrs(attrs); << 7176 return ret ?: count; << 7177 } << 7178 << 7179 static struct device_attribute wq_sysfs_unbou << 7180 __ATTR(nice, 0644, wq_nice_show, wq_n << 7181 __ATTR(cpumask, 0644, wq_cpumask_show << 7182 __ATTR(affinity_scope, 0644, wq_affn_ << 7183 __ATTR(affinity_strict, 0644, wq_affi << 7184 __ATTR_NULL, << 7185 }; << 7186 << 7187 static const struct bus_type wq_subsys = { << 7188 .name = "wo << 7189 .dev_groups = wq_ << 7190 }; << 7191 << 7192 /** << 7193 * workqueue_set_unbound_cpumask - Set the l << 7194 * @cpumask: the cpumask to set << 7195 * << 7196 * The low-level workqueues cpumask is a glo << 7197 * the affinity of all unbound workqueues. 
<< 7198 * and apply it to all unbound workqueues an << 7199 * << 7200 * Return: 0 - Success << 7201 * -EINVAL - Invalid @cpumask << 7202 * -ENOMEM - Failed to allocate << 7203 */ << 7204 static int workqueue_set_unbound_cpumask(cpum << 7205 { << 7206 int ret = -EINVAL; << 7207 << 7208 /* << 7209 * Not excluding isolated cpus on pur << 7210 * If the user wishes to include them << 7211 */ << 7212 cpumask_and(cpumask, cpumask, cpu_pos << 7213 if (!cpumask_empty(cpumask)) { << 7214 ret = 0; << 7215 apply_wqattrs_lock(); << 7216 if (!cpumask_equal(cpumask, w << 7217 ret = workqueue_apply << 7218 if (!ret) << 7219 cpumask_copy(wq_reque << 7220 apply_wqattrs_unlock(); << 7221 } << 7222 << 7223 return ret; << 7224 } << 7225 << 7226 static ssize_t __wq_cpumask_show(struct devic << 7227 struct device_attribute *attr << 7228 { << 7229 int written; << 7230 << 7231 mutex_lock(&wq_pool_mutex); << 7232 written = scnprintf(buf, PAGE_SIZE, " << 7233 mutex_unlock(&wq_pool_mutex); << 7234 << 7235 return written; << 7236 } << 7237 << 7238 static ssize_t cpumask_requested_show(struct << 7239 struct device_attribute *attr << 7240 { << 7241 return __wq_cpumask_show(dev, attr, b << 7242 } << 7243 static DEVICE_ATTR_RO(cpumask_requested); << 7244 << 7245 static ssize_t cpumask_isolated_show(struct d << 7246 struct device_attribute *attr << 7247 { << 7248 return __wq_cpumask_show(dev, attr, b << 7249 } << 7250 static DEVICE_ATTR_RO(cpumask_isolated); << 7251 << 7252 static ssize_t cpumask_show(struct device *de << 7253 struct device_attribute *attr << 7254 { << 7255 return __wq_cpumask_show(dev, attr, b << 7256 } << 7257 << 7258 static ssize_t cpumask_store(struct device *d << 7259 struct device_attribute *attr << 7260 { << 7261 cpumask_var_t cpumask; << 7262 int ret; << 7263 << 7264 if (!zalloc_cpumask_var(&cpumask, GFP << 7265 return -ENOMEM; << 7266 << 7267 ret = cpumask_parse(buf, cpumask); << 7268 if (!ret) << 7269 ret = workqueue_set_unbound_c << 7270 << 7271 free_cpumask_var(cpumask); << 7272 return ret ? ret : count; << 7273 } << 7274 static DEVICE_ATTR_RW(cpumask); << 7275 << 7276 static struct attribute *wq_sysfs_cpumask_att << 7277 &dev_attr_cpumask.attr, << 7278 &dev_attr_cpumask_requested.attr, << 7279 &dev_attr_cpumask_isolated.attr, << 7280 NULL, << 7281 }; << 7282 ATTRIBUTE_GROUPS(wq_sysfs_cpumask); << 7283 << 7284 static int __init wq_sysfs_init(void) << 7285 { << 7286 return subsys_virtual_register(&wq_su << 7287 } << 7288 core_initcall(wq_sysfs_init); << 7289 << 7290 static void wq_device_release(struct device * << 7291 { << 7292 struct wq_device *wq_dev = container_ << 7293 << 7294 kfree(wq_dev); << 7295 } << 7296 << 7297 /** << 7298 * workqueue_sysfs_register - make a workqueu << 7299 * @wq: the workqueue to register << 7300 * << 7301 * Expose @wq in sysfs under /sys/bus/workque << 7302 * alloc_workqueue*() automatically calls thi << 7303 * which is the preferred method. << 7304 * << 7305 * Workqueue user should use this function di << 7306 * workqueue_attrs before making the workqueu << 7307 * apply_workqueue_attrs() may race against u << 7308 * attributes. << 7309 * << 7310 * Return: 0 on success, -errno on failure. << 7311 */ << 7312 int workqueue_sysfs_register(struct workqueue << 7313 { << 7314 struct wq_device *wq_dev; << 7315 int ret; << 7316 << 7317 /* << 7318 * Adjusting max_active breaks orderi << 7319 * ordered workqueues. 
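 *
 * For example (illustrative), a queue created with
 * alloc_ordered_workqueue("ex_ordered", WQ_SYSFS) would reach this
 * point and trip the WARN_ON() below: a later userspace write to its
 * max_active file could otherwise break the one-in-flight guarantee.
 * ("ex_ordered" is a hypothetical name.)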
<< 7320 */ << 7321 if (WARN_ON(wq->flags & __WQ_ORDERED) << 7322 return -EINVAL; << 7323 << 7324 wq->wq_dev = wq_dev = kzalloc(sizeof( << 7325 if (!wq_dev) << 7326 return -ENOMEM; << 7327 << 7328 wq_dev->wq = wq; << 7329 wq_dev->dev.bus = &wq_subsys; << 7330 wq_dev->dev.release = wq_device_relea << 7331 dev_set_name(&wq_dev->dev, "%s", wq-> << 7332 << 7333 /* << 7334 * unbound_attrs are created separate << 7335 * everything is ready. << 7336 */ 5045 */ 7337 dev_set_uevent_suppress(&wq_dev->dev, !! 5046 tbl = kzalloc(wq_numa_tbl_len * sizeof(tbl[0]), GFP_KERNEL); 7338 !! 5047 BUG_ON(!tbl); 7339 ret = device_register(&wq_dev->dev); << 7340 if (ret) { << 7341 put_device(&wq_dev->dev); << 7342 wq->wq_dev = NULL; << 7343 return ret; << 7344 } << 7345 << 7346 if (wq->flags & WQ_UNBOUND) { << 7347 struct device_attribute *attr << 7348 << 7349 for (attr = wq_sysfs_unbound_ << 7350 ret = device_create_f << 7351 if (ret) { << 7352 device_unregi << 7353 wq->wq_dev = << 7354 return ret; << 7355 } << 7356 } << 7357 } << 7358 5048 7359 dev_set_uevent_suppress(&wq_dev->dev, !! 5049 for_each_node(node) 7360 kobject_uevent(&wq_dev->dev.kobj, KOB !! 5050 BUG_ON(!zalloc_cpumask_var_node(&tbl[node], GFP_KERNEL, 7361 return 0; !! 5051 node_online(node) ? node : NUMA_NO_NODE)); 7362 } << 7363 5052 7364 /** !! 5053 for_each_possible_cpu(cpu) { 7365 * workqueue_sysfs_unregister - undo workqueu !! 5054 node = cpu_to_node(cpu); 7366 * @wq: the workqueue to unregister !! 5055 if (WARN_ON(node == NUMA_NO_NODE)) { 7367 * !! 5056 pr_warn("workqueue: NUMA node mapping not available for cpu%d, disabling NUMA support\n", cpu); 7368 * If @wq is registered to sysfs by workqueue !! 5057 /* happens iff arch is bonkers, let's just proceed */ 7369 */ !! 5058 return; 7370 static void workqueue_sysfs_unregister(struct << 7371 { << 7372 struct wq_device *wq_dev = wq->wq_dev << 7373 << 7374 if (!wq->wq_dev) << 7375 return; << 7376 << 7377 wq->wq_dev = NULL; << 7378 device_unregister(&wq_dev->dev); << 7379 } << 7380 #else /* CONFIG_SYSFS */ << 7381 static void workqueue_sysfs_unregister(struct << 7382 #endif /* CONFIG_SYSFS */ << 7383 << 7384 /* << 7385 * Workqueue watchdog. << 7386 * << 7387 * Stall may be caused by various bugs - miss << 7388 * flush dependency, a concurrency managed wo << 7389 * indefinitely. Workqueue stalls can be ver << 7390 * usual warning mechanisms don't trigger and << 7391 * largely opaque. << 7392 * << 7393 * Workqueue watchdog monitors all worker poo << 7394 * state if some pools failed to make forward << 7395 * forward progress is defined as the first i << 7396 * << 7397 * This mechanism is controlled through the k << 7398 * "workqueue.watchdog_thresh" which can be u << 7399 * corresponding sysfs parameter file. << 7400 */ << 7401 #ifdef CONFIG_WQ_WATCHDOG << 7402 << 7403 static unsigned long wq_watchdog_thresh = 30; << 7404 static struct timer_list wq_watchdog_timer; << 7405 << 7406 static unsigned long wq_watchdog_touched = IN << 7407 static DEFINE_PER_CPU(unsigned long, wq_watch << 7408 << 7409 /* << 7410 * Show workers that might prevent the proces << 7411 * The only candidates are CPU-bound workers << 7412 * Pending work items should be handled by an << 7413 * in all other situations. 
<< 7414 */ << 7415 static void show_cpu_pool_hog(struct worker_p << 7416 { << 7417 struct worker *worker; << 7418 unsigned long irq_flags; << 7419 int bkt; << 7420 << 7421 raw_spin_lock_irqsave(&pool->lock, ir << 7422 << 7423 hash_for_each(pool->busy_hash, bkt, w << 7424 if (task_is_running(worker->t << 7425 /* << 7426 * Defer printing to << 7427 * drivers that queue << 7428 * also taken in thei << 7429 */ << 7430 printk_deferred_enter << 7431 << 7432 pr_info("pool %d:\n", << 7433 sched_show_task(worke << 7434 << 7435 printk_deferred_exit( << 7436 } << 7437 } << 7438 << 7439 raw_spin_unlock_irqrestore(&pool->loc << 7440 } << 7441 << 7442 static void show_cpu_pools_hogs(void) << 7443 { << 7444 struct worker_pool *pool; << 7445 int pi; << 7446 << 7447 pr_info("Showing backtraces of runnin << 7448 << 7449 rcu_read_lock(); << 7450 << 7451 for_each_pool(pool, pi) { << 7452 if (pool->cpu_stall) << 7453 show_cpu_pool_hog(poo << 7454 << 7455 } << 7456 << 7457 rcu_read_unlock(); << 7458 } << 7459 << 7460 static void wq_watchdog_reset_touched(void) << 7461 { << 7462 int cpu; << 7463 << 7464 wq_watchdog_touched = jiffies; << 7465 for_each_possible_cpu(cpu) << 7466 per_cpu(wq_watchdog_touched_c << 7467 } << 7468 << 7469 static void wq_watchdog_timer_fn(struct timer << 7470 { << 7471 unsigned long thresh = READ_ONCE(wq_w << 7472 bool lockup_detected = false; << 7473 bool cpu_pool_stall = false; << 7474 unsigned long now = jiffies; << 7475 struct worker_pool *pool; << 7476 int pi; << 7477 << 7478 if (!thresh) << 7479 return; << 7480 << 7481 rcu_read_lock(); << 7482 << 7483 for_each_pool(pool, pi) { << 7484 unsigned long pool_ts, touche << 7485 << 7486 pool->cpu_stall = false; << 7487 if (list_empty(&pool->worklis << 7488 continue; << 7489 << 7490 /* << 7491 * If a virtual machine is st << 7492 * the watchdog like a stall. << 7493 */ << 7494 kvm_check_and_clear_guest_pau << 7495 << 7496 /* get the latest of pool and << 7497 if (pool->cpu >= 0) << 7498 touched = READ_ONCE(p << 7499 else << 7500 touched = READ_ONCE(w << 7501 pool_ts = READ_ONCE(pool->wat << 7502 << 7503 if (time_after(pool_ts, touch << 7504 ts = pool_ts; << 7505 else << 7506 ts = touched; << 7507 << 7508 /* did we stall? */ << 7509 if (time_after(now, ts + thre << 7510 lockup_detected = tru << 7511 if (pool->cpu >= 0 && << 7512 pool->cpu_sta << 7513 cpu_pool_stal << 7514 } << 7515 pr_emerg("BUG: workqu << 7516 pr_cont_pool_info(poo << 7517 pr_cont(" stuck for % << 7518 jiffies_to_ms << 7519 } 5059 } 7520 !! 5060 cpumask_set_cpu(cpu, tbl[node]); 7521 << 7522 } 5061 } 7523 5062 7524 rcu_read_unlock(); !! 5063 wq_numa_possible_cpumask = tbl; 7525 !! 5064 wq_numa_enabled = true; 7526 if (lockup_detected) << 7527 show_all_workqueues(); << 7528 << 7529 if (cpu_pool_stall) << 7530 show_cpu_pools_hogs(); << 7531 << 7532 wq_watchdog_reset_touched(); << 7533 mod_timer(&wq_watchdog_timer, jiffies << 7534 } 5065 } 7535 5066 7536 notrace void wq_watchdog_touch(int cpu) !! 
5067 static int __init init_workqueues(void) 7537 { 5068 { 7538 unsigned long thresh = READ_ONCE(wq_w << 7539 unsigned long touch_ts = READ_ONCE(wq << 7540 unsigned long now = jiffies; << 7541 << 7542 if (cpu >= 0) << 7543 per_cpu(wq_watchdog_touched_c << 7544 else << 7545 WARN_ONCE(1, "%s should be ca << 7546 << 7547 /* Don't unnecessarily store to globa << 7548 if (time_after(now, touch_ts + thresh << 7549 WRITE_ONCE(wq_watchdog_touche << 7550 } << 7551 << 7552 static void wq_watchdog_set_thresh(unsigned l << 7553 { << 7554 wq_watchdog_thresh = 0; << 7555 del_timer_sync(&wq_watchdog_timer); << 7556 << 7557 if (thresh) { << 7558 wq_watchdog_thresh = thresh; << 7559 wq_watchdog_reset_touched(); << 7560 mod_timer(&wq_watchdog_timer, << 7561 } << 7562 } << 7563 << 7564 static int wq_watchdog_param_set_thresh(const << 7565 const << 7566 { << 7567 unsigned long thresh; << 7568 int ret; << 7569 << 7570 ret = kstrtoul(val, 0, &thresh); << 7571 if (ret) << 7572 return ret; << 7573 << 7574 if (system_wq) << 7575 wq_watchdog_set_thresh(thresh << 7576 else << 7577 wq_watchdog_thresh = thresh; << 7578 << 7579 return 0; << 7580 } << 7581 << 7582 static const struct kernel_param_ops wq_watch << 7583 .set = wq_watchdog_param_set_thres << 7584 .get = param_get_ulong, << 7585 }; << 7586 << 7587 module_param_cb(watchdog_thresh, &wq_watchdog << 7588 0644); << 7589 << 7590 static void wq_watchdog_init(void) << 7591 { << 7592 timer_setup(&wq_watchdog_timer, wq_wa << 7593 wq_watchdog_set_thresh(wq_watchdog_th << 7594 } << 7595 << 7596 #else /* CONFIG_WQ_WATCHDOG */ << 7597 << 7598 static inline void wq_watchdog_init(void) { } << 7599 << 7600 #endif /* CONFIG_WQ_WATCHDOG */ << 7601 << 7602 static void bh_pool_kick_normal(struct irq_wo << 7603 { << 7604 raise_softirq_irqoff(TASKLET_SOFTIRQ) << 7605 } << 7606 << 7607 static void bh_pool_kick_highpri(struct irq_w << 7608 { << 7609 raise_softirq_irqoff(HI_SOFTIRQ); << 7610 } << 7611 << 7612 static void __init restrict_unbound_cpumask(c << 7613 { << 7614 if (!cpumask_intersects(wq_unbound_cp << 7615 pr_warn("workqueue: Restricti << 7616 cpumask_pr_args(wq_un << 7617 return; << 7618 } << 7619 << 7620 cpumask_and(wq_unbound_cpumask, wq_un << 7621 } << 7622 << 7623 static void __init init_cpu_worker_pool(struc << 7624 { << 7625 BUG_ON(init_worker_pool(pool)); << 7626 pool->cpu = cpu; << 7627 cpumask_copy(pool->attrs->cpumask, cp << 7628 cpumask_copy(pool->attrs->__pod_cpuma << 7629 pool->attrs->nice = nice; << 7630 pool->attrs->affn_strict = true; << 7631 pool->node = cpu_to_node(cpu); << 7632 << 7633 /* alloc pool ID */ << 7634 mutex_lock(&wq_pool_mutex); << 7635 BUG_ON(worker_pool_assign_id(pool)); << 7636 mutex_unlock(&wq_pool_mutex); << 7637 } << 7638 << 7639 /** << 7640 * workqueue_init_early - early init for work << 7641 * << 7642 * This is the first step of three-staged wor << 7643 * invoked as soon as the bare basics - memor << 7644 * up. It sets up all the data structures and << 7645 * boot code to create workqueues and queue/c << 7646 * execution starts only after kthreads can b << 7647 * before early initcalls. << 7648 */ << 7649 void __init workqueue_init_early(void) << 7650 { << 7651 struct wq_pod_type *pt = &wq_pod_type << 7652 int std_nice[NR_STD_WORKER_POOLS] = { 5069 int std_nice[NR_STD_WORKER_POOLS] = { 0, HIGHPRI_NICE_LEVEL }; 7653 void (*irq_work_fns[2])(struct irq_wo << 7654 << 7655 int i, cpu; 5070 int i, cpu; 7656 5071 7657 BUILD_BUG_ON(__alignof__(struct pool_ !! 
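/*
 * Illustrative sketch (not part of this file): kernel code that
 * legitimately monopolizes a CPU for a long stretch can keep the
 * stall detector above quiet the way the softlockup watchdog does,
 * by touching it periodically ("ex_slow_step()" is hypothetical):
 *
 *	while (ex_slow_step())
 *		wq_watchdog_touch(raw_smp_processor_id());
 *
 * The threshold itself is runtime-tunable through the
 * "workqueue.watchdog_thresh" module parameter wired up above.
 */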
5072 /* make sure we have enough bits for OFFQ pool ID */ >> 5073 BUILD_BUG_ON((1LU << (BITS_PER_LONG - WORK_OFFQ_POOL_SHIFT)) < >> 5074 WORK_CPU_END * NR_STD_WORKER_POOLS); 7658 5075 7659 BUG_ON(!alloc_cpumask_var(&wq_online_ !! 5076 WARN_ON(__alignof__(struct pool_workqueue) < __alignof__(long long)); 7660 BUG_ON(!alloc_cpumask_var(&wq_unbound << 7661 BUG_ON(!alloc_cpumask_var(&wq_request << 7662 BUG_ON(!zalloc_cpumask_var(&wq_isolat << 7663 << 7664 cpumask_copy(wq_online_cpumask, cpu_o << 7665 cpumask_copy(wq_unbound_cpumask, cpu_ << 7666 restrict_unbound_cpumask("HK_TYPE_WQ" << 7667 restrict_unbound_cpumask("HK_TYPE_DOM << 7668 if (!cpumask_empty(&wq_cmdline_cpumas << 7669 restrict_unbound_cpumask("wor << 7670 << 7671 cpumask_copy(wq_requested_unbound_cpu << 7672 5077 7673 pwq_cache = KMEM_CACHE(pool_workqueue 5078 pwq_cache = KMEM_CACHE(pool_workqueue, SLAB_PANIC); 7674 5079 7675 unbound_wq_update_pwq_attrs_buf = all !! 5080 cpu_notifier(workqueue_cpu_up_callback, CPU_PRI_WORKQUEUE_UP); 7676 BUG_ON(!unbound_wq_update_pwq_attrs_b !! 5081 hotcpu_notifier(workqueue_cpu_down_callback, CPU_PRI_WORKQUEUE_DOWN); 7677 5082 7678 /* !! 5083 wq_numa_init(); 7679 * If nohz_full is enabled, set power << 7680 * This allows workqueue items to be << 7681 */ << 7682 if (housekeeping_enabled(HK_TYPE_TICK << 7683 wq_power_efficient = true; << 7684 5084 7685 /* initialize WQ_AFFN_SYSTEM pods */ !! 5085 /* initialize CPU pools */ 7686 pt->pod_cpus = kcalloc(1, sizeof(pt-> << 7687 pt->pod_node = kcalloc(1, sizeof(pt-> << 7688 pt->cpu_pod = kcalloc(nr_cpu_ids, siz << 7689 BUG_ON(!pt->pod_cpus || !pt->pod_node << 7690 << 7691 BUG_ON(!zalloc_cpumask_var_node(&pt-> << 7692 << 7693 pt->nr_pods = 1; << 7694 cpumask_copy(pt->pod_cpus[0], cpu_pos << 7695 pt->pod_node[0] = NUMA_NO_NODE; << 7696 pt->cpu_pod[0] = 0; << 7697 << 7698 /* initialize BH and CPU pools */ << 7699 for_each_possible_cpu(cpu) { 5086 for_each_possible_cpu(cpu) { 7700 struct worker_pool *pool; 5087 struct worker_pool *pool; 7701 5088 7702 i = 0; 5089 i = 0; 7703 for_each_bh_worker_pool(pool, !! 5090 for_each_cpu_worker_pool(pool, cpu) { 7704 init_cpu_worker_pool( !! 5091 BUG_ON(init_worker_pool(pool)); 7705 pool->flags |= POOL_B !! 5092 pool->cpu = cpu; 7706 init_irq_work(bh_pool !! 5093 cpumask_copy(pool->attrs->cpumask, cpumask_of(cpu)); 7707 i++; !! 5094 pool->attrs->nice = std_nice[i++]; >> 5095 pool->node = cpu_to_node(cpu); >> 5096 >> 5097 /* alloc pool ID */ >> 5098 mutex_lock(&wq_pool_mutex); >> 5099 BUG_ON(worker_pool_assign_id(pool)); >> 5100 mutex_unlock(&wq_pool_mutex); 7708 } 5101 } >> 5102 } 7709 5103 7710 i = 0; !! 5104 /* create the initial worker */ 7711 for_each_cpu_worker_pool(pool !! 5105 for_each_online_cpu(cpu) { 7712 init_cpu_worker_pool( !! 5106 struct worker_pool *pool; >> 5107 >> 5108 for_each_cpu_worker_pool(pool, cpu) { >> 5109 pool->flags &= ~POOL_DISASSOCIATED; >> 5110 BUG_ON(create_and_start_worker(pool) < 0); >> 5111 } 7713 } 5112 } 7714 5113 7715 /* create default unbound and ordered 5114 /* create default unbound and ordered wq attrs */ 7716 for (i = 0; i < NR_STD_WORKER_POOLS; 5115 for (i = 0; i < NR_STD_WORKER_POOLS; i++) { 7717 struct workqueue_attrs *attrs 5116 struct workqueue_attrs *attrs; 7718 5117 7719 BUG_ON(!(attrs = alloc_workqu !! 
5118 BUG_ON(!(attrs = alloc_workqueue_attrs(GFP_KERNEL))); 7720 attrs->nice = std_nice[i]; 5119 attrs->nice = std_nice[i]; 7721 unbound_std_wq_attrs[i] = att 5120 unbound_std_wq_attrs[i] = attrs; 7722 5121 7723 /* 5122 /* 7724 * An ordered wq should have 5123 * An ordered wq should have only one pwq as ordering is 7725 * guaranteed by max_active w 5124 * guaranteed by max_active which is enforced by pwqs. >> 5125 * Turn off NUMA so that dfl_pwq is used for all nodes. 7726 */ 5126 */ 7727 BUG_ON(!(attrs = alloc_workqu !! 5127 BUG_ON(!(attrs = alloc_workqueue_attrs(GFP_KERNEL))); 7728 attrs->nice = std_nice[i]; 5128 attrs->nice = std_nice[i]; 7729 attrs->ordered = true; !! 5129 attrs->no_numa = true; 7730 ordered_wq_attrs[i] = attrs; 5130 ordered_wq_attrs[i] = attrs; 7731 } 5131 } 7732 5132 7733 system_wq = alloc_workqueue("events", 5133 system_wq = alloc_workqueue("events", 0, 0); 7734 system_highpri_wq = alloc_workqueue(" 5134 system_highpri_wq = alloc_workqueue("events_highpri", WQ_HIGHPRI, 0); 7735 system_long_wq = alloc_workqueue("eve 5135 system_long_wq = alloc_workqueue("events_long", 0, 0); 7736 system_unbound_wq = alloc_workqueue(" 5136 system_unbound_wq = alloc_workqueue("events_unbound", WQ_UNBOUND, 7737 W !! 5137 WQ_UNBOUND_MAX_ACTIVE); 7738 system_freezable_wq = alloc_workqueue 5138 system_freezable_wq = alloc_workqueue("events_freezable", 7739 5139 WQ_FREEZABLE, 0); 7740 system_power_efficient_wq = alloc_wor << 7741 << 7742 system_freezable_power_efficient_wq = << 7743 << 7744 << 7745 system_bh_wq = alloc_workqueue("event << 7746 system_bh_highpri_wq = alloc_workqueu << 7747 << 7748 BUG_ON(!system_wq || !system_highpri_ 5140 BUG_ON(!system_wq || !system_highpri_wq || !system_long_wq || 7749 !system_unbound_wq || !system_ !! 5141 !system_unbound_wq || !system_freezable_wq); 7750 !system_power_efficient_wq || !! 5142 return 0; 7751 !system_freezable_power_effici << 7752 !system_bh_wq || !system_bh_hi << 7753 } << 7754 << 7755 static void __init wq_cpu_intensive_thresh_in << 7756 { << 7757 unsigned long thresh; << 7758 unsigned long bogo; << 7759 << 7760 pwq_release_worker = kthread_create_w << 7761 BUG_ON(IS_ERR(pwq_release_worker)); << 7762 << 7763 /* if the user set it to a specific v << 7764 if (wq_cpu_intensive_thresh_us != ULO << 7765 return; << 7766 << 7767 /* << 7768 * The default of 10ms is derived fro << 7769 * 2023) processors can do a lot in 1 << 7770 * most consider human-perceivable. H << 7771 * lot slower CPUs including microcon << 7772 * too low. << 7773 * << 7774 * Let's scale up the threshold upto << 7775 * This is by no means accurate but i << 7776 * is still useful even when the thre << 7777 * the reports would usually be appli << 7778 * operating on longer thresholds won << 7779 * usefulness. << 7780 */ << 7781 thresh = 10 * USEC_PER_MSEC; << 7782 << 7783 /* see init/calibrate.c for lpj -> Bo << 7784 bogo = max_t(unsigned long, loops_per << 7785 if (bogo < 4000) << 7786 thresh = min_t(unsigned long, << 7787 << 7788 pr_debug("wq_cpu_intensive_thresh: lp << 7789 loops_per_jiffy, bogo, thres << 7790 << 7791 wq_cpu_intensive_thresh_us = thresh; << 7792 } << 7793 << 7794 /** << 7795 * workqueue_init - bring workqueue subsystem << 7796 * << 7797 * This is the second step of three-staged wo << 7798 * and invoked as soon as kthreads can be cre << 7799 * been created and work items queued on them << 7800 * executing the work items yet. 
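 *
 * For example (illustrative), early boot code may already have done
 *
 *	queue_work(system_wq, &ex_work);	(ex_work is hypothetical)
 *
 * after workqueue_init_early(); the item simply waits on the pool's
 * worklist until the workers created in this stage pick it up.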
Populate the << 7801 * workers and enable future kworker creation << 7802 */ << 7803 void __init workqueue_init(void) << 7804 { << 7805 struct workqueue_struct *wq; << 7806 struct worker_pool *pool; << 7807 int cpu, bkt; << 7808 << 7809 wq_cpu_intensive_thresh_init(); << 7810 << 7811 mutex_lock(&wq_pool_mutex); << 7812 << 7813 /* << 7814 * Per-cpu pools created earlier coul << 7815 * up. Also, create a rescuer for wor << 7816 */ << 7817 for_each_possible_cpu(cpu) { << 7818 for_each_bh_worker_pool(pool, << 7819 pool->node = cpu_to_n << 7820 for_each_cpu_worker_pool(pool << 7821 pool->node = cpu_to_n << 7822 } << 7823 << 7824 list_for_each_entry(wq, &workqueues, << 7825 WARN(init_rescuer(wq), << 7826 "workqueue: failed to cr << 7827 wq->name); << 7828 } << 7829 << 7830 mutex_unlock(&wq_pool_mutex); << 7831 << 7832 /* << 7833 * Create the initial workers. A BH p << 7834 * represents the shared BH execution << 7835 * affected by hotplug events. Create << 7836 * possible CPUs here. << 7837 */ << 7838 for_each_possible_cpu(cpu) << 7839 for_each_bh_worker_pool(pool, << 7840 BUG_ON(!create_worker << 7841 << 7842 for_each_online_cpu(cpu) { << 7843 for_each_cpu_worker_pool(pool << 7844 pool->flags &= ~POOL_ << 7845 BUG_ON(!create_worker << 7846 } << 7847 } << 7848 << 7849 hash_for_each(unbound_pool_hash, bkt, << 7850 BUG_ON(!create_worker(pool)); << 7851 << 7852 wq_online = true; << 7853 wq_watchdog_init(); << 7854 } << 7855 << 7856 /* << 7857 * Initialize @pt by first initializing @pt-> << 7858 * @cpu_shares_pod(). Each subset of CPUs tha << 7859 * and consecutive pod ID. The rest of @pt is << 7860 */ << 7861 static void __init init_pod_type(struct wq_po << 7862 bool (*cpus_ << 7863 { << 7864 int cur, pre, cpu, pod; << 7865 << 7866 pt->nr_pods = 0; << 7867 << 7868 /* init @pt->cpu_pod[] according to @ << 7869 pt->cpu_pod = kcalloc(nr_cpu_ids, siz << 7870 BUG_ON(!pt->cpu_pod); << 7871 << 7872 for_each_possible_cpu(cur) { << 7873 for_each_possible_cpu(pre) { << 7874 if (pre >= cur) { << 7875 pt->cpu_pod[c << 7876 break; << 7877 } << 7878 if (cpus_share_pod(cu << 7879 pt->cpu_pod[c << 7880 break; << 7881 } << 7882 } << 7883 } << 7884 << 7885 /* init the rest to match @pt->cpu_po << 7886 pt->pod_cpus = kcalloc(pt->nr_pods, s << 7887 pt->pod_node = kcalloc(pt->nr_pods, s << 7888 BUG_ON(!pt->pod_cpus || !pt->pod_node << 7889 << 7890 for (pod = 0; pod < pt->nr_pods; pod+ << 7891 BUG_ON(!zalloc_cpumask_var(&p << 7892 << 7893 for_each_possible_cpu(cpu) { << 7894 cpumask_set_cpu(cpu, pt->pod_ << 7895 pt->pod_node[pt->cpu_pod[cpu] << 7896 } << 7897 } << 7898 << 7899 static bool __init cpus_dont_share(int cpu0, << 7900 { << 7901 return false; << 7902 } << 7903 << 7904 static bool __init cpus_share_smt(int cpu0, i << 7905 { << 7906 #ifdef CONFIG_SCHED_SMT << 7907 return cpumask_test_cpu(cpu0, cpu_smt << 7908 #else << 7909 return false; << 7910 #endif << 7911 } << 7912 << 7913 static bool __init cpus_share_numa(int cpu0, << 7914 { << 7915 return cpu_to_node(cpu0) == cpu_to_no << 7916 } << 7917 << 7918 /** << 7919 * workqueue_init_topology - initialize CPU p << 7920 * << 7921 * This is the third step of three-staged wor << 7922 * invoked after SMP and topology information << 7923 * initializes the unbound CPU pods according << 7924 */ << 7925 void __init workqueue_init_topology(void) << 7926 { << 7927 struct workqueue_struct *wq; << 7928 int cpu; << 7929 << 7930 init_pod_type(&wq_pod_types[WQ_AFFN_C << 7931 init_pod_type(&wq_pod_types[WQ_AFFN_S << 7932 init_pod_type(&wq_pod_types[WQ_AFFN_C << 7933 
init_pod_type(&wq_pod_types[WQ_AFFN_N << 7934 << 7935 wq_topo_initialized = true; << 7936 << 7937 mutex_lock(&wq_pool_mutex); << 7938 << 7939 /* << 7940 * Workqueues allocated earlier would << 7941 * worker pool. Explicitly call unbou << 7942 * and CPU combinations to apply per- << 7943 */ << 7944 list_for_each_entry(wq, &workqueues, << 7945 for_each_online_cpu(cpu) << 7946 unbound_wq_update_pwq << 7947 if (wq->flags & WQ_UNBOUND) { << 7948 mutex_lock(&wq->mutex << 7949 wq_update_node_max_ac << 7950 mutex_unlock(&wq->mut << 7951 } << 7952 } << 7953 << 7954 mutex_unlock(&wq_pool_mutex); << 7955 } << 7956 << 7957 void __warn_flushing_systemwide_wq(void) << 7958 { << 7959 pr_warn("WARNING: Flushing system-wid << 7960 dump_stack(); << 7961 } << 7962 EXPORT_SYMBOL(__warn_flushing_systemwide_wq); << 7963 << 7964 static int __init workqueue_unbound_cpus_setu << 7965 { << 7966 if (cpulist_parse(str, &wq_cmdline_cp << 7967 cpumask_clear(&wq_cmdline_cpu << 7968 pr_warn("workqueue.unbound_cp << 7969 } << 7970 << 7971 return 1; << 7972 } 5143 } 7973 __setup("workqueue.unbound_cpus=", workqueue_ !! 5144 early_initcall(init_workqueues); 7974 5145
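/*
 * Illustrative sketch (not part of workqueue.c): a minimal user of the
 * subsystem initialized above; all "ex_*" names are hypothetical.
 * Unbound placement honors wq_unbound_cpumask, which can be seeded at
 * boot via e.g. "workqueue.unbound_cpus=0-3", parsed by
 * workqueue_unbound_cpus_setup() above.
 */
static void ex_work_fn(struct work_struct *work)
{
	pr_info("example work ran on cpu%d\n", raw_smp_processor_id());
}
static DECLARE_WORK(ex_work, ex_work_fn);

static int __init ex_init(void)
{
	/* system_unbound_wq was created in workqueue_init_early() */
	queue_work(system_unbound_wq, &ex_work);
	return 0;
}
late_initcall(ex_init);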