TOMOYO Linux Cross Reference
Linux/io_uring/io-wq.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Basic worker thread pool for io_uring
 *
 * Copyright (C) 2019 Jens Axboe
 *
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/sched/signal.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/rculist_nulls.h>
#include <linux/cpu.h>
#include <linux/task_work.h>
#include <linux/audit.h>
#include <linux/mmu_context.h>
#include <uapi/linux/io_uring.h>

#include "io-wq.h"
#include "slist.h"
#include "io_uring.h"

#define WORKER_IDLE_TIMEOUT     (5 * HZ)
#define WORKER_INIT_LIMIT       3

enum {
        IO_WORKER_F_UP          = 0,    /* up and active */
        IO_WORKER_F_RUNNING     = 1,    /* account as running */
        IO_WORKER_F_FREE        = 2,    /* worker on free list */
        IO_WORKER_F_BOUND       = 3,    /* is doing bounded work */
};

enum {
        IO_WQ_BIT_EXIT          = 0,    /* wq exiting */
};

enum {
        IO_ACCT_STALLED_BIT     = 0,    /* stalled on hash */
};

/*
 * One for each thread in a wq pool
 */
struct io_worker {
        refcount_t ref;
        int create_index;
        unsigned long flags;
        struct hlist_nulls_node nulls_node;
        struct list_head all_list;
        struct task_struct *task;
        struct io_wq *wq;

        struct io_wq_work *cur_work;
        raw_spinlock_t lock;

        struct completion ref_done;

        unsigned long create_state;
        struct callback_head create_work;
        int init_retries;

        union {
                struct rcu_head rcu;
                struct work_struct work;
        };
};

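/*
 * Hashed work is serialized via bits in wq->hash->map (a single unsigned
 * long bitmap), so the hash order is bounded by BITS_PER_LONG.
 */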
#if BITS_PER_LONG == 64
#define IO_WQ_HASH_ORDER        6
#else
#define IO_WQ_HASH_ORDER        5
#endif

#define IO_WQ_NR_HASH_BUCKETS   (1u << IO_WQ_HASH_ORDER)

struct io_wq_acct {
        unsigned nr_workers;
        unsigned max_workers;
        int index;
        atomic_t nr_running;
        raw_spinlock_t lock;
        struct io_wq_work_list work_list;
        unsigned long flags;
};

enum {
        IO_WQ_ACCT_BOUND,
        IO_WQ_ACCT_UNBOUND,
        IO_WQ_ACCT_NR,
};

/*
 * Per io_wq state
 */
struct io_wq {
        unsigned long state;

        free_work_fn *free_work;
        io_wq_work_fn *do_work;

        struct io_wq_hash *hash;

        atomic_t worker_refs;
        struct completion worker_done;

        struct hlist_node cpuhp_node;

        struct task_struct *task;

        struct io_wq_acct acct[IO_WQ_ACCT_NR];

        /* lock protects access to elements below */
        raw_spinlock_t lock;

        struct hlist_nulls_head free_list;
        struct list_head all_list;

        struct wait_queue_entry wait;

        struct io_wq_work *hash_tail[IO_WQ_NR_HASH_BUCKETS];

        cpumask_var_t cpu_mask;
};

static enum cpuhp_state io_wq_online;

struct io_cb_cancel_data {
        work_cancel_fn *fn;
        void *data;
        int nr_running;
        int nr_pending;
        bool cancel_all;
};

static bool create_io_worker(struct io_wq *wq, int index);
static void io_wq_dec_running(struct io_worker *worker);
static bool io_acct_cancel_pending_work(struct io_wq *wq,
                                        struct io_wq_acct *acct,
                                        struct io_cb_cancel_data *match);
static void create_worker_cb(struct callback_head *cb);
static void io_wq_cancel_tw_create(struct io_wq *wq);

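/*
 * Worker reference counting: dropping the last reference completes
 * ->ref_done, which io_worker_exit() waits on before freeing the worker.
 */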
static bool io_worker_get(struct io_worker *worker)
{
        return refcount_inc_not_zero(&worker->ref);
}

static void io_worker_release(struct io_worker *worker)
{
        if (refcount_dec_and_test(&worker->ref))
                complete(&worker->ref_done);
}

static inline struct io_wq_acct *io_get_acct(struct io_wq *wq, bool bound)
{
        return &wq->acct[bound ? IO_WQ_ACCT_BOUND : IO_WQ_ACCT_UNBOUND];
}

static inline struct io_wq_acct *io_work_get_acct(struct io_wq *wq,
                                                  struct io_wq_work *work)
{
        return io_get_acct(wq, !(atomic_read(&work->flags) & IO_WQ_WORK_UNBOUND));
}

static inline struct io_wq_acct *io_wq_get_acct(struct io_worker *worker)
{
        return io_get_acct(worker->wq, test_bit(IO_WORKER_F_BOUND, &worker->flags));
}

static void io_worker_ref_put(struct io_wq *wq)
{
        if (atomic_dec_and_test(&wq->worker_refs))
                complete(&wq->worker_done);
}

bool io_wq_worker_stopped(void)
{
        struct io_worker *worker = current->worker_private;

        if (WARN_ON_ONCE(!io_wq_current_is_worker()))
                return true;

        return test_bit(IO_WQ_BIT_EXIT, &worker->wq->state);
}

static void io_worker_cancel_cb(struct io_worker *worker)
{
        struct io_wq_acct *acct = io_wq_get_acct(worker);
        struct io_wq *wq = worker->wq;

        atomic_dec(&acct->nr_running);
        raw_spin_lock(&wq->lock);
        acct->nr_workers--;
        raw_spin_unlock(&wq->lock);
        io_worker_ref_put(wq);
        clear_bit_unlock(0, &worker->create_state);
        io_worker_release(worker);
}

static bool io_task_worker_match(struct callback_head *cb, void *data)
{
        struct io_worker *worker;

        if (cb->func != create_worker_cb)
                return false;
        worker = container_of(cb, struct io_worker, create_work);
        return worker == data;
}

static void io_worker_exit(struct io_worker *worker)
{
        struct io_wq *wq = worker->wq;

        while (1) {
                struct callback_head *cb = task_work_cancel_match(wq->task,
                                                io_task_worker_match, worker);

                if (!cb)
                        break;
                io_worker_cancel_cb(worker);
        }

        io_worker_release(worker);
        wait_for_completion(&worker->ref_done);

        raw_spin_lock(&wq->lock);
        if (test_bit(IO_WORKER_F_FREE, &worker->flags))
                hlist_nulls_del_rcu(&worker->nulls_node);
        list_del_rcu(&worker->all_list);
        raw_spin_unlock(&wq->lock);
        io_wq_dec_running(worker);
        /*
         * this worker is a goner, clear ->worker_private to avoid any
         * inc/dec running calls that could happen as part of exit from
         * touching 'worker'.
         */
        current->worker_private = NULL;

        kfree_rcu(worker, rcu);
        io_worker_ref_put(wq);
        do_exit(0);
}

static inline bool __io_acct_run_queue(struct io_wq_acct *acct)
{
        return !test_bit(IO_ACCT_STALLED_BIT, &acct->flags) &&
                !wq_list_empty(&acct->work_list);
}

/*
 * If there's work to do, returns true with acct->lock acquired. If not,
 * returns false with no lock held.
 */
static inline bool io_acct_run_queue(struct io_wq_acct *acct)
        __acquires(&acct->lock)
{
        raw_spin_lock(&acct->lock);
        if (__io_acct_run_queue(acct))
                return true;

        raw_spin_unlock(&acct->lock);
        return false;
}

/*
 * Check head of free list for an available worker. If one isn't available,
 * caller must create one.
 */
static bool io_wq_activate_free_worker(struct io_wq *wq,
                                        struct io_wq_acct *acct)
        __must_hold(RCU)
{
        struct hlist_nulls_node *n;
        struct io_worker *worker;

        /*
         * Iterate free_list and see if we can find an idle worker to
         * activate. If a given worker is on the free_list but in the process
         * of exiting, keep trying.
         */
        hlist_nulls_for_each_entry_rcu(worker, n, &wq->free_list, nulls_node) {
                if (!io_worker_get(worker))
                        continue;
                if (io_wq_get_acct(worker) != acct) {
                        io_worker_release(worker);
                        continue;
                }
                /*
                 * If the worker is already running, it's either already
                 * starting work or finishing work. In either case, if it does
                 * go to sleep, we'll kick off a new task for this work anyway.
                 */
                wake_up_process(worker->task);
                io_worker_release(worker);
                return true;
        }

        return false;
}

/*
 * We need a worker. If we find a free one, we're good. If not, and we're
 * below the max number of workers, create one.
 */
static bool io_wq_create_worker(struct io_wq *wq, struct io_wq_acct *acct)
{
        /*
         * Most likely an attempt to queue unbounded work on an io_wq that
         * wasn't set up with any unbounded workers.
         */
        if (unlikely(!acct->max_workers))
                pr_warn_once("io-wq is not configured for unbound workers");

        raw_spin_lock(&wq->lock);
        if (acct->nr_workers >= acct->max_workers) {
                raw_spin_unlock(&wq->lock);
                return true;
        }
        acct->nr_workers++;
        raw_spin_unlock(&wq->lock);
        atomic_inc(&acct->nr_running);
        atomic_inc(&wq->worker_refs);
        return create_io_worker(wq, acct->index);
}

static void io_wq_inc_running(struct io_worker *worker)
{
        struct io_wq_acct *acct = io_wq_get_acct(worker);

        atomic_inc(&acct->nr_running);
}

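/*
 * Task_work callback, run in the context of the io_wq owner task: re-check
 * the worker limit under wq->lock and either create the new worker, or
 * undo the accounting done when the creation was queued.
 */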
static void create_worker_cb(struct callback_head *cb)
{
        struct io_worker *worker;
        struct io_wq *wq;

        struct io_wq_acct *acct;
        bool do_create = false;

        worker = container_of(cb, struct io_worker, create_work);
        wq = worker->wq;
        acct = &wq->acct[worker->create_index];
        raw_spin_lock(&wq->lock);

        if (acct->nr_workers < acct->max_workers) {
                acct->nr_workers++;
                do_create = true;
        }
        raw_spin_unlock(&wq->lock);
        if (do_create) {
                create_io_worker(wq, worker->create_index);
        } else {
                atomic_dec(&acct->nr_running);
                io_worker_ref_put(wq);
        }
        clear_bit_unlock(0, &worker->create_state);
        io_worker_release(worker);
}

static bool io_queue_worker_create(struct io_worker *worker,
                                   struct io_wq_acct *acct,
                                   task_work_func_t func)
{
        struct io_wq *wq = worker->wq;

        /* raced with exit, just ignore create call */
        if (test_bit(IO_WQ_BIT_EXIT, &wq->state))
                goto fail;
        if (!io_worker_get(worker))
                goto fail;
        /*
         * create_state manages ownership of create_work/index. We should
         * only need one entry per worker, as the worker going to sleep
         * will trigger the condition, and waking will clear it once it
         * runs the task_work.
         */
        if (test_bit(0, &worker->create_state) ||
            test_and_set_bit_lock(0, &worker->create_state))
                goto fail_release;

        atomic_inc(&wq->worker_refs);
        init_task_work(&worker->create_work, func);
        worker->create_index = acct->index;
        if (!task_work_add(wq->task, &worker->create_work, TWA_SIGNAL)) {
                /*
                 * EXIT may have been set after checking it above, check after
                 * adding the task_work and remove any creation item if it is
                 * now set. wq exit does that too, but we can have added this
                 * work item after we canceled in io_wq_exit_workers().
                 */
                if (test_bit(IO_WQ_BIT_EXIT, &wq->state))
                        io_wq_cancel_tw_create(wq);
                io_worker_ref_put(wq);
                return true;
        }
        io_worker_ref_put(wq);
        clear_bit_unlock(0, &worker->create_state);
fail_release:
        io_worker_release(worker);
fail:
        atomic_dec(&acct->nr_running);
        io_worker_ref_put(wq);
        return false;
}

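/*
 * Called via io_wq_worker_sleeping() when a running worker blocks. If this
 * was the last running worker for the acct and work is still pending, queue
 * task_work to create a replacement so the queue keeps making progress.
 */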
static void io_wq_dec_running(struct io_worker *worker)
{
        struct io_wq_acct *acct = io_wq_get_acct(worker);
        struct io_wq *wq = worker->wq;

        if (!test_bit(IO_WORKER_F_UP, &worker->flags))
                return;

        if (!atomic_dec_and_test(&acct->nr_running))
                return;
        if (!io_acct_run_queue(acct))
                return;

        raw_spin_unlock(&acct->lock);
        atomic_inc(&acct->nr_running);
        atomic_inc(&wq->worker_refs);
        io_queue_worker_create(worker, acct, create_worker_cb);
}

/*
 * Worker will start processing some work. Take it off the free list, if
 * it's currently on it.
 */
static void __io_worker_busy(struct io_wq *wq, struct io_worker *worker)
{
        if (test_bit(IO_WORKER_F_FREE, &worker->flags)) {
                clear_bit(IO_WORKER_F_FREE, &worker->flags);
                raw_spin_lock(&wq->lock);
                hlist_nulls_del_init_rcu(&worker->nulls_node);
                raw_spin_unlock(&wq->lock);
        }
}

/*
 * No work, worker going to sleep. Move to freelist.
 */
static void __io_worker_idle(struct io_wq *wq, struct io_worker *worker)
        __must_hold(wq->lock)
{
        if (!test_bit(IO_WORKER_F_FREE, &worker->flags)) {
                set_bit(IO_WORKER_F_FREE, &worker->flags);
                hlist_nulls_add_head_rcu(&worker->nulls_node, &wq->free_list);
        }
}

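/*
 * The hash key lives in the upper bits of work->flags, above
 * IO_WQ_HASH_SHIFT; it is set by io_wq_hash_work() below.
 */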
static inline unsigned int io_get_work_hash(struct io_wq_work *work)
{
        return atomic_read(&work->flags) >> IO_WQ_HASH_SHIFT;
}

static bool io_wait_on_hash(struct io_wq *wq, unsigned int hash)
{
        bool ret = false;

        spin_lock_irq(&wq->hash->wait.lock);
        if (list_empty(&wq->wait.entry)) {
                __add_wait_queue(&wq->hash->wait, &wq->wait);
                if (!test_bit(hash, &wq->hash->map)) {
                        __set_current_state(TASK_RUNNING);
                        list_del_init(&wq->wait.entry);
                        ret = true;
                }
        }
        spin_unlock_irq(&wq->hash->wait.lock);
        return ret;
}

static struct io_wq_work *io_get_next_work(struct io_wq_acct *acct,
                                           struct io_worker *worker)
        __must_hold(acct->lock)
{
        struct io_wq_work_node *node, *prev;
        struct io_wq_work *work, *tail;
        unsigned int stall_hash = -1U;
        struct io_wq *wq = worker->wq;

        wq_list_for_each(node, prev, &acct->work_list) {
                unsigned int hash;

                work = container_of(node, struct io_wq_work, list);

                /* not hashed, can run anytime */
                if (!io_wq_is_hashed(work)) {
                        wq_list_del(&acct->work_list, node, prev);
                        return work;
                }

                hash = io_get_work_hash(work);
                /* all items with this hash lie in [work, tail] */
                tail = wq->hash_tail[hash];

                /* hashed, can run if not already running */
                if (!test_and_set_bit(hash, &wq->hash->map)) {
                        wq->hash_tail[hash] = NULL;
                        wq_list_cut(&acct->work_list, &tail->list, prev);
                        return work;
                }
                if (stall_hash == -1U)
                        stall_hash = hash;
                /* fast forward to the next hash, for-each will fix up @prev */
                node = &tail->list;
        }

        if (stall_hash != -1U) {
                bool unstalled;

                /*
                 * Set this before dropping the lock to avoid racing with new
                 * work being added and clearing the stalled bit.
                 */
                set_bit(IO_ACCT_STALLED_BIT, &acct->flags);
                raw_spin_unlock(&acct->lock);
                unstalled = io_wait_on_hash(wq, stall_hash);
                raw_spin_lock(&acct->lock);
                if (unstalled) {
                        clear_bit(IO_ACCT_STALLED_BIT, &acct->flags);
                        if (wq_has_sleeper(&wq->hash->wait))
                                wake_up(&wq->hash->wait);
                }
        }

        return NULL;
}

static void io_assign_current_work(struct io_worker *worker,
                                   struct io_wq_work *work)
{
        if (work) {
                io_run_task_work();
                cond_resched();
        }

        raw_spin_lock(&worker->lock);
        worker->cur_work = work;
        raw_spin_unlock(&worker->lock);
}

/*
 * Called with acct->lock held, drops it before returning
 */
static void io_worker_handle_work(struct io_wq_acct *acct,
                                  struct io_worker *worker)
        __releases(&acct->lock)
{
        struct io_wq *wq = worker->wq;
        bool do_kill = test_bit(IO_WQ_BIT_EXIT, &wq->state);

        do {
                struct io_wq_work *work;

                /*
                 * If we got some work, mark us as busy. If we didn't, but
                 * the list isn't empty, it means we stalled on hashed work.
                 * Mark us stalled so we don't keep looking for work when we
                 * can't make progress, any work completion or insertion will
                 * clear the stalled flag.
                 */
                work = io_get_next_work(acct, worker);
                if (work) {
                        /*
                         * Make sure cancelation can find this, even before
                         * it becomes the active work. That avoids a window
                         * where the work has been removed from our general
                         * work list, but isn't yet discoverable as the
                         * current work item for this worker.
                         */
                        raw_spin_lock(&worker->lock);
                        worker->cur_work = work;
                        raw_spin_unlock(&worker->lock);
                }

                raw_spin_unlock(&acct->lock);

                if (!work)
                        break;

                __io_worker_busy(wq, worker);

                io_assign_current_work(worker, work);
                __set_current_state(TASK_RUNNING);

                /* handle a whole dependent link */
                do {
                        struct io_wq_work *next_hashed, *linked;
                        unsigned int hash = io_get_work_hash(work);

                        next_hashed = wq_next_work(work);

                        if (do_kill &&
                            (atomic_read(&work->flags) & IO_WQ_WORK_UNBOUND))
                                atomic_or(IO_WQ_WORK_CANCEL, &work->flags);
                        wq->do_work(work);
                        io_assign_current_work(worker, NULL);

                        linked = wq->free_work(work);
                        work = next_hashed;
                        if (!work && linked && !io_wq_is_hashed(linked)) {
                                work = linked;
                                linked = NULL;
                        }
                        io_assign_current_work(worker, work);
                        if (linked)
                                io_wq_enqueue(wq, linked);

                        if (hash != -1U && !next_hashed) {
                                /* serialize hash clear with wake_up() */
                                spin_lock_irq(&wq->hash->wait.lock);
                                clear_bit(hash, &wq->hash->map);
                                clear_bit(IO_ACCT_STALLED_BIT, &acct->flags);
                                spin_unlock_irq(&wq->hash->wait.lock);
                                if (wq_has_sleeper(&wq->hash->wait))
                                        wake_up(&wq->hash->wait);
                        }
                } while (work);

                if (!__io_acct_run_queue(acct))
                        break;
                raw_spin_lock(&acct->lock);
        } while (1);
}

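/*
 * Thread function for each io-wq worker: handle queued work until the queue
 * is empty, then idle on the free list with a WORKER_IDLE_TIMEOUT. On
 * timeout, exit unless this is the last worker for the acct.
 */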
static int io_wq_worker(void *data)
{
        struct io_worker *worker = data;
        struct io_wq_acct *acct = io_wq_get_acct(worker);
        struct io_wq *wq = worker->wq;
        bool exit_mask = false, last_timeout = false;
        char buf[TASK_COMM_LEN];

        set_mask_bits(&worker->flags, 0,
                      BIT(IO_WORKER_F_UP) | BIT(IO_WORKER_F_RUNNING));

        snprintf(buf, sizeof(buf), "iou-wrk-%d", wq->task->pid);
        set_task_comm(current, buf);

        while (!test_bit(IO_WQ_BIT_EXIT, &wq->state)) {
                long ret;

                set_current_state(TASK_INTERRUPTIBLE);

                /*
                 * If we have work to do, io_acct_run_queue() returns with
                 * the acct->lock held. If not, it will drop it.
                 */
                while (io_acct_run_queue(acct))
                        io_worker_handle_work(acct, worker);

                raw_spin_lock(&wq->lock);
                /*
                 * Last sleep timed out. Exit if we're not the last worker,
                 * or if someone modified our affinity.
                 */
                if (last_timeout && (exit_mask || acct->nr_workers > 1)) {
                        acct->nr_workers--;
                        raw_spin_unlock(&wq->lock);
                        __set_current_state(TASK_RUNNING);
                        break;
                }
                last_timeout = false;
                __io_worker_idle(wq, worker);
                raw_spin_unlock(&wq->lock);
                if (io_run_task_work())
                        continue;
                ret = schedule_timeout(WORKER_IDLE_TIMEOUT);
                if (signal_pending(current)) {
                        struct ksignal ksig;

                        if (!get_signal(&ksig))
                                continue;
                        break;
                }
                if (!ret) {
                        last_timeout = true;
                        exit_mask = !cpumask_test_cpu(raw_smp_processor_id(),
                                                        wq->cpu_mask);
                }
        }

        if (test_bit(IO_WQ_BIT_EXIT, &wq->state) && io_acct_run_queue(acct))
                io_worker_handle_work(acct, worker);

        io_worker_exit(worker);
        return 0;
}

/*
 * Called when a worker is scheduled in. Mark us as currently running.
 */
void io_wq_worker_running(struct task_struct *tsk)
{
        struct io_worker *worker = tsk->worker_private;

        if (!worker)
                return;
        if (!test_bit(IO_WORKER_F_UP, &worker->flags))
                return;
        if (test_bit(IO_WORKER_F_RUNNING, &worker->flags))
                return;
        set_bit(IO_WORKER_F_RUNNING, &worker->flags);
        io_wq_inc_running(worker);
}

/*
 * Called when worker is going to sleep. If there are no workers currently
 * running and we have work pending, wake up a free one or create a new one.
 */
void io_wq_worker_sleeping(struct task_struct *tsk)
{
        struct io_worker *worker = tsk->worker_private;

        if (!worker)
                return;
        if (!test_bit(IO_WORKER_F_UP, &worker->flags))
                return;
        if (!test_bit(IO_WORKER_F_RUNNING, &worker->flags))
                return;

        clear_bit(IO_WORKER_F_RUNNING, &worker->flags);
        io_wq_dec_running(worker);
}

static void io_init_new_worker(struct io_wq *wq, struct io_worker *worker,
                               struct task_struct *tsk)
{
        tsk->worker_private = worker;
        worker->task = tsk;
        set_cpus_allowed_ptr(tsk, wq->cpu_mask);

        raw_spin_lock(&wq->lock);
        hlist_nulls_add_head_rcu(&worker->nulls_node, &wq->free_list);
        list_add_tail_rcu(&worker->all_list, &wq->all_list);
        set_bit(IO_WORKER_F_FREE, &worker->flags);
        raw_spin_unlock(&wq->lock);
        wake_up_new_task(tsk);
}

static bool io_wq_work_match_all(struct io_wq_work *work, void *data)
{
        return true;
}

static inline bool io_should_retry_thread(struct io_worker *worker, long err)
{
        /*
         * Prevent perpetual task_work retry, if the task (or its group) is
         * exiting.
         */
        if (fatal_signal_pending(current))
                return false;
        if (worker->init_retries++ >= WORKER_INIT_LIMIT)
                return false;

        switch (err) {
        case -EAGAIN:
        case -ERESTARTSYS:
        case -ERESTARTNOINTR:
        case -ERESTARTNOHAND:
                return true;
        default:
                return false;
        }
}

static void create_worker_cont(struct callback_head *cb)
{
        struct io_worker *worker;
        struct task_struct *tsk;
        struct io_wq *wq;

        worker = container_of(cb, struct io_worker, create_work);
        clear_bit_unlock(0, &worker->create_state);
        wq = worker->wq;
        tsk = create_io_thread(io_wq_worker, worker, NUMA_NO_NODE);
        if (!IS_ERR(tsk)) {
                io_init_new_worker(wq, worker, tsk);
                io_worker_release(worker);
                return;
        } else if (!io_should_retry_thread(worker, PTR_ERR(tsk))) {
                struct io_wq_acct *acct = io_wq_get_acct(worker);

                atomic_dec(&acct->nr_running);
                raw_spin_lock(&wq->lock);
                acct->nr_workers--;
                if (!acct->nr_workers) {
                        struct io_cb_cancel_data match = {
                                .fn             = io_wq_work_match_all,
                                .cancel_all     = true,
                        };

                        raw_spin_unlock(&wq->lock);
                        while (io_acct_cancel_pending_work(wq, acct, &match))
                                ;
                } else {
                        raw_spin_unlock(&wq->lock);
                }
                io_worker_ref_put(wq);
                kfree(worker);
                return;
        }

        /* re-create attempts grab a new worker ref, drop the existing one */
        io_worker_release(worker);
        schedule_work(&worker->work);
}

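/*
 * Workqueue fallback for worker creation: re-queue the create task_work
 * (with create_worker_cont as the callback) from workqueue context, and
 * free the worker if queueing fails.
 */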
static void io_workqueue_create(struct work_struct *work)
{
        struct io_worker *worker = container_of(work, struct io_worker, work);
        struct io_wq_acct *acct = io_wq_get_acct(worker);

        if (!io_queue_worker_create(worker, acct, create_worker_cont))
                kfree(worker);
}

static bool create_io_worker(struct io_wq *wq, int index)
{
        struct io_wq_acct *acct = &wq->acct[index];
        struct io_worker *worker;
        struct task_struct *tsk;

        __set_current_state(TASK_RUNNING);

        worker = kzalloc(sizeof(*worker), GFP_KERNEL);
        if (!worker) {
fail:
                atomic_dec(&acct->nr_running);
                raw_spin_lock(&wq->lock);
                acct->nr_workers--;
                raw_spin_unlock(&wq->lock);
                io_worker_ref_put(wq);
                return false;
        }

        refcount_set(&worker->ref, 1);
        worker->wq = wq;
        raw_spin_lock_init(&worker->lock);
        init_completion(&worker->ref_done);

        if (index == IO_WQ_ACCT_BOUND)
                set_bit(IO_WORKER_F_BOUND, &worker->flags);

        tsk = create_io_thread(io_wq_worker, worker, NUMA_NO_NODE);
        if (!IS_ERR(tsk)) {
                io_init_new_worker(wq, worker, tsk);
        } else if (!io_should_retry_thread(worker, PTR_ERR(tsk))) {
                kfree(worker);
                goto fail;
        } else {
                INIT_WORK(&worker->work, io_workqueue_create);
                schedule_work(&worker->work);
        }

        return true;
}

/*
 * Iterate the passed in list and call the specific function for each
 * worker that isn't exiting
 */
static bool io_wq_for_each_worker(struct io_wq *wq,
                                  bool (*func)(struct io_worker *, void *),
                                  void *data)
{
        struct io_worker *worker;
        bool ret = false;

        list_for_each_entry_rcu(worker, &wq->all_list, all_list) {
                if (io_worker_get(worker)) {
                        /* no task if node is/was offline */
                        if (worker->task)
                                ret = func(worker, data);
                        io_worker_release(worker);
                        if (ret)
                                break;
                }
        }

        return ret;
}

static bool io_wq_worker_wake(struct io_worker *worker, void *data)
{
        __set_notify_signal(worker->task);
        wake_up_process(worker->task);
        return false;
}

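/*
 * Mark the work (and its whole dependent link) cancelled and run it through
 * the normal handler, which is expected to see IO_WQ_WORK_CANCEL and fail
 * the request, so a completion is still posted.
 */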
static void io_run_cancel(struct io_wq_work *work, struct io_wq *wq)
{
        do {
                atomic_or(IO_WQ_WORK_CANCEL, &work->flags);
                wq->do_work(work);
                work = wq->free_work(work);
        } while (work);
}

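/*
 * Queue work on the acct list. Hashed work is chained behind the current
 * hash_tail, so all items with the same hash form one contiguous
 * [work, tail] span that io_get_next_work() can grab or skip as a unit.
 */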
static void io_wq_insert_work(struct io_wq *wq, struct io_wq_work *work)
{
        struct io_wq_acct *acct = io_work_get_acct(wq, work);
        unsigned int hash;
        struct io_wq_work *tail;

        if (!io_wq_is_hashed(work)) {
append:
                wq_list_add_tail(&work->list, &acct->work_list);
                return;
        }

        hash = io_get_work_hash(work);
        tail = wq->hash_tail[hash];
        wq->hash_tail[hash] = work;
        if (!tail)
                goto append;

        wq_list_add_after(&work->list, &tail->list, &acct->work_list);
}

static bool io_wq_work_match_item(struct io_wq_work *work, void *data)
{
        return work == data;
}

void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work)
{
        struct io_wq_acct *acct = io_work_get_acct(wq, work);
        unsigned int work_flags = atomic_read(&work->flags);
        struct io_cb_cancel_data match = {
                .fn             = io_wq_work_match_item,
                .data           = work,
                .cancel_all     = false,
        };
        bool do_create;

        /*
         * If io-wq is exiting for this task, or if the request has explicitly
         * been marked as one that should not get executed, cancel it here.
         */
        if (test_bit(IO_WQ_BIT_EXIT, &wq->state) ||
            (work_flags & IO_WQ_WORK_CANCEL)) {
                io_run_cancel(work, wq);
                return;
        }

        raw_spin_lock(&acct->lock);
        io_wq_insert_work(wq, work);
        clear_bit(IO_ACCT_STALLED_BIT, &acct->flags);
        raw_spin_unlock(&acct->lock);

        rcu_read_lock();
        do_create = !io_wq_activate_free_worker(wq, acct);
        rcu_read_unlock();

        if (do_create && ((work_flags & IO_WQ_WORK_CONCURRENT) ||
            !atomic_read(&acct->nr_running))) {
                bool did_create;

                did_create = io_wq_create_worker(wq, acct);
                if (likely(did_create))
                        return;

                raw_spin_lock(&wq->lock);
                if (acct->nr_workers) {
                        raw_spin_unlock(&wq->lock);
                        return;
                }
                raw_spin_unlock(&wq->lock);

                /* fatal condition, failed to create the first worker */
                io_acct_cancel_pending_work(wq, acct, &match);
        }
}

/*
 * Work items that hash to the same value will not be done in parallel.
 * Used to limit concurrent writes, generally hashed by inode.
 */
void io_wq_hash_work(struct io_wq_work *work, void *val)
{
        unsigned int bit;

        bit = hash_ptr(val, IO_WQ_HASH_ORDER);
        atomic_or(IO_WQ_WORK_HASHED | (bit << IO_WQ_HASH_SHIFT), &work->flags);
}

static bool __io_wq_worker_cancel(struct io_worker *worker,
                                  struct io_cb_cancel_data *match,
                                  struct io_wq_work *work)
{
        if (work && match->fn(work, match->data)) {
                atomic_or(IO_WQ_WORK_CANCEL, &work->flags);
                __set_notify_signal(worker->task);
                return true;
        }

        return false;
}

static bool io_wq_worker_cancel(struct io_worker *worker, void *data)
{
        struct io_cb_cancel_data *match = data;

        /*
         * Hold the lock to avoid ->cur_work going out of scope, caller
         * may dereference the passed in work.
         */
        raw_spin_lock(&worker->lock);
        if (__io_wq_worker_cancel(worker, match, worker->cur_work))
                match->nr_running++;
        raw_spin_unlock(&worker->lock);

        return match->nr_running && !match->cancel_all;
}

static inline void io_wq_remove_pending(struct io_wq *wq,
                                         struct io_wq_work *work,
                                         struct io_wq_work_node *prev)
{
        struct io_wq_acct *acct = io_work_get_acct(wq, work);
        unsigned int hash = io_get_work_hash(work);
        struct io_wq_work *prev_work = NULL;

        if (io_wq_is_hashed(work) && work == wq->hash_tail[hash]) {
                if (prev)
                        prev_work = container_of(prev, struct io_wq_work, list);
                if (prev_work && io_get_work_hash(prev_work) == hash)
                        wq->hash_tail[hash] = prev_work;
                else
                        wq->hash_tail[hash] = NULL;
        }
        wq_list_del(&acct->work_list, &work->list, prev);
}

static bool io_acct_cancel_pending_work(struct io_wq *wq,
                                        struct io_wq_acct *acct,
                                        struct io_cb_cancel_data *match)
{
        struct io_wq_work_node *node, *prev;
        struct io_wq_work *work;

        raw_spin_lock(&acct->lock);
        wq_list_for_each(node, prev, &acct->work_list) {
                work = container_of(node, struct io_wq_work, list);
                if (!match->fn(work, match->data))
                        continue;
                io_wq_remove_pending(wq, work, prev);
                raw_spin_unlock(&acct->lock);
                io_run_cancel(work, wq);
                match->nr_pending++;
                /* not safe to continue after unlock */
                return true;
        }
        raw_spin_unlock(&acct->lock);

        return false;
}

static void io_wq_cancel_pending_work(struct io_wq *wq,
                                      struct io_cb_cancel_data *match)
{
        int i;
retry:
        for (i = 0; i < IO_WQ_ACCT_NR; i++) {
                struct io_wq_acct *acct = io_get_acct(wq, i == 0);

                if (io_acct_cancel_pending_work(wq, acct, match)) {
                        if (match->cancel_all)
                                goto retry;
                        break;
                }
        }
}

static void io_wq_cancel_running_work(struct io_wq *wq,
                                       struct io_cb_cancel_data *match)
{
        rcu_read_lock();
        io_wq_for_each_worker(wq, io_wq_worker_cancel, match);
        rcu_read_unlock();
}

enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel,
                                  void *data, bool cancel_all)
{
        struct io_cb_cancel_data match = {
                .fn             = cancel,
                .data           = data,
                .cancel_all     = cancel_all,
        };

        /*
         * First check pending list, if we're lucky we can just remove it
         * from there. CANCEL_OK means that the work is returned as-new,
         * no completion will be posted for it.
         *
         * Then check if a free (going busy) or busy worker has the work
         * currently running. If we find it there, we'll return CANCEL_RUNNING
         * as an indication that we attempt to signal cancellation. The
         * completion will run normally in this case.
         *
         * Do both of these while holding the wq->lock, to ensure that
         * we'll find a work item regardless of state.
         */
        io_wq_cancel_pending_work(wq, &match);
        if (match.nr_pending && !match.cancel_all)
                return IO_WQ_CANCEL_OK;

        raw_spin_lock(&wq->lock);
        io_wq_cancel_running_work(wq, &match);
        raw_spin_unlock(&wq->lock);
        if (match.nr_running && !match.cancel_all)
                return IO_WQ_CANCEL_RUNNING;

        if (match.nr_running)
                return IO_WQ_CANCEL_RUNNING;
        if (match.nr_pending)
                return IO_WQ_CANCEL_OK;
        return IO_WQ_CANCEL_NOTFOUND;
}

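/*
 * Waitqueue callback attached to wq->hash->wait, invoked when a hash bit is
 * cleared: clear any stalled state and activate a free worker for each acct
 * so stalled hashed work gets picked up again.
 */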
static int io_wq_hash_wake(struct wait_queue_entry *wait, unsigned mode,
                            int sync, void *key)
{
        struct io_wq *wq = container_of(wait, struct io_wq, wait);
        int i;

        list_del_init(&wait->entry);

        rcu_read_lock();
        for (i = 0; i < IO_WQ_ACCT_NR; i++) {
                struct io_wq_acct *acct = &wq->acct[i];

                if (test_and_clear_bit(IO_ACCT_STALLED_BIT, &acct->flags))
                        io_wq_activate_free_worker(wq, acct);
        }
        rcu_read_unlock();
        return 1;
}

struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
{
        int ret, i;
        struct io_wq *wq;

        if (WARN_ON_ONCE(!data->free_work || !data->do_work))
                return ERR_PTR(-EINVAL);
        if (WARN_ON_ONCE(!bounded))
                return ERR_PTR(-EINVAL);

        wq = kzalloc(sizeof(struct io_wq), GFP_KERNEL);
        if (!wq)
                return ERR_PTR(-ENOMEM);

        refcount_inc(&data->hash->refs);
        wq->hash = data->hash;
        wq->free_work = data->free_work;
        wq->do_work = data->do_work;

        ret = -ENOMEM;

        if (!alloc_cpumask_var(&wq->cpu_mask, GFP_KERNEL))
                goto err;
        cpumask_copy(wq->cpu_mask, cpu_possible_mask);
        wq->acct[IO_WQ_ACCT_BOUND].max_workers = bounded;
        wq->acct[IO_WQ_ACCT_UNBOUND].max_workers =
                                task_rlimit(current, RLIMIT_NPROC);
        INIT_LIST_HEAD(&wq->wait.entry);
        wq->wait.func = io_wq_hash_wake;
        for (i = 0; i < IO_WQ_ACCT_NR; i++) {
                struct io_wq_acct *acct = &wq->acct[i];

                acct->index = i;
                atomic_set(&acct->nr_running, 0);
                INIT_WQ_LIST(&acct->work_list);
                raw_spin_lock_init(&acct->lock);
        }

        raw_spin_lock_init(&wq->lock);
        INIT_HLIST_NULLS_HEAD(&wq->free_list, 0);
        INIT_LIST_HEAD(&wq->all_list);

        wq->task = get_task_struct(data->task);
        atomic_set(&wq->worker_refs, 1);
        init_completion(&wq->worker_done);
        ret = cpuhp_state_add_instance_nocalls(io_wq_online, &wq->cpuhp_node);
        if (ret)
                goto err;

        return wq;
err:
        io_wq_put_hash(data->hash);
        free_cpumask_var(wq->cpu_mask);
        kfree(wq);
        return ERR_PTR(ret);
}

static bool io_task_work_match(struct callback_head *cb, void *data)
{
        struct io_worker *worker;

        if (cb->func != create_worker_cb && cb->func != create_worker_cont)
                return false;
        worker = container_of(cb, struct io_worker, create_work);
        return worker->wq == data;
}

void io_wq_exit_start(struct io_wq *wq)
{
        set_bit(IO_WQ_BIT_EXIT, &wq->state);
}

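/*
 * On teardown, cancel any worker-creation task_work still queued on the wq
 * owner task and undo its accounting.
 */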
static void io_wq_cancel_tw_create(struct io_wq *wq)
{
        struct callback_head *cb;

        while ((cb = task_work_cancel_match(wq->task, io_task_work_match, wq)) != NULL) {
                struct io_worker *worker;

                worker = container_of(cb, struct io_worker, create_work);
                io_worker_cancel_cb(worker);
                /*
                 * Only the worker continuation helper has worker allocated and
                 * hence needs freeing.
                 */
                if (cb->func == create_worker_cont)
                        kfree(worker);
        }
}

static void io_wq_exit_workers(struct io_wq *wq)
{
        if (!wq->task)
                return;

        io_wq_cancel_tw_create(wq);

        rcu_read_lock();
        io_wq_for_each_worker(wq, io_wq_worker_wake, NULL);
        rcu_read_unlock();
        io_worker_ref_put(wq);
        wait_for_completion(&wq->worker_done);

        spin_lock_irq(&wq->hash->wait.lock);
        list_del_init(&wq->wait.entry);
        spin_unlock_irq(&wq->hash->wait.lock);

        put_task_struct(wq->task);
        wq->task = NULL;
}

static void io_wq_destroy(struct io_wq *wq)
{
        struct io_cb_cancel_data match = {
                .fn             = io_wq_work_match_all,
                .cancel_all     = true,
        };

        cpuhp_state_remove_instance_nocalls(io_wq_online, &wq->cpuhp_node);
        io_wq_cancel_pending_work(wq, &match);
        free_cpumask_var(wq->cpu_mask);
        io_wq_put_hash(wq->hash);
        kfree(wq);
}

void io_wq_put_and_exit(struct io_wq *wq)
{
        WARN_ON_ONCE(!test_bit(IO_WQ_BIT_EXIT, &wq->state));

        io_wq_exit_workers(wq);
        io_wq_destroy(wq);
}

struct online_data {
        unsigned int cpu;
        bool online;
};

static bool io_wq_worker_affinity(struct io_worker *worker, void *data)
{
        struct online_data *od = data;

        if (od->online)
                cpumask_set_cpu(od->cpu, worker->wq->cpu_mask);
        else
                cpumask_clear_cpu(od->cpu, worker->wq->cpu_mask);
        return false;
}

static int __io_wq_cpu_online(struct io_wq *wq, unsigned int cpu, bool online)
{
        struct online_data od = {
                .cpu = cpu,
                .online = online
        };

        rcu_read_lock();
        io_wq_for_each_worker(wq, io_wq_worker_affinity, &od);
        rcu_read_unlock();
        return 0;
}

static int io_wq_cpu_online(unsigned int cpu, struct hlist_node *node)
{
        struct io_wq *wq = hlist_entry_safe(node, struct io_wq, cpuhp_node);

        return __io_wq_cpu_online(wq, cpu, true);
}

static int io_wq_cpu_offline(unsigned int cpu, struct hlist_node *node)
{
        struct io_wq *wq = hlist_entry_safe(node, struct io_wq, cpuhp_node);

        return __io_wq_cpu_online(wq, cpu, false);
}

int io_wq_cpu_affinity(struct io_uring_task *tctx, cpumask_var_t mask)
{
        if (!tctx || !tctx->io_wq)
                return -EINVAL;

        rcu_read_lock();
        if (mask)
                cpumask_copy(tctx->io_wq->cpu_mask, mask);
        else
                cpumask_copy(tctx->io_wq->cpu_mask, cpu_possible_mask);
        rcu_read_unlock();

        return 0;
}

/*
 * Set max number of unbounded workers, returns old value. If new_count is 0,
 * then just return the old value.
 */
int io_wq_max_workers(struct io_wq *wq, int *new_count)
{
        struct io_wq_acct *acct;
        int prev[IO_WQ_ACCT_NR];
        int i;

        BUILD_BUG_ON((int) IO_WQ_ACCT_BOUND   != (int) IO_WQ_BOUND);
        BUILD_BUG_ON((int) IO_WQ_ACCT_UNBOUND != (int) IO_WQ_UNBOUND);
        BUILD_BUG_ON((int) IO_WQ_ACCT_NR      != 2);

        for (i = 0; i < IO_WQ_ACCT_NR; i++) {
                if (new_count[i] > task_rlimit(current, RLIMIT_NPROC))
                        new_count[i] = task_rlimit(current, RLIMIT_NPROC);
        }

        for (i = 0; i < IO_WQ_ACCT_NR; i++)
                prev[i] = 0;

        rcu_read_lock();

        raw_spin_lock(&wq->lock);
        for (i = 0; i < IO_WQ_ACCT_NR; i++) {
                acct = &wq->acct[i];
                prev[i] = max_t(int, acct->max_workers, prev[i]);
                if (new_count[i])
                        acct->max_workers = new_count[i];
        }
        raw_spin_unlock(&wq->lock);
        rcu_read_unlock();

        for (i = 0; i < IO_WQ_ACCT_NR; i++)
                new_count[i] = prev[i];

        return 0;
}

static __init int io_wq_init(void)
{
        int ret;

        ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "io-wq/online",
                                        io_wq_cpu_online, io_wq_cpu_offline);
        if (ret < 0)
                return ret;
        io_wq_online = ret;
        return 0;
}
subsys_initcall(io_wq_init);
