TOMOYO Linux Cross Reference
Linux/block/blk-mq-tag.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Tag allocation using scalable bitmaps. Uses active queue tracking to support
 * fairer distribution of tags between multiple submitters when a shared tag map
 * is used.
 *
 * Copyright (C) 2013-2014 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/module.h>

#include <linux/delay.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-sched.h"

/*
 * Recalculate the wakeup batch when tags are shared across hctxs.
 */
static void blk_mq_update_wake_batch(struct blk_mq_tags *tags,
                unsigned int users)
{
        if (!users)
                return;

        sbitmap_queue_recalculate_wake_batch(&tags->bitmap_tags,
                        users);
        sbitmap_queue_recalculate_wake_batch(&tags->breserved_tags,
                        users);
}

/*
 * If a previously inactive queue goes active, bump the active user count.
 * We need to do this before trying to allocate a driver tag, so that even
 * if the first allocation attempt fails, the other shared-tag users can
 * reserve budget for it.
 */
void __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
{
        unsigned int users;
        unsigned long flags;
        struct blk_mq_tags *tags = hctx->tags;

        /*
         * Calling test_bit() prior to test_and_set_bit() is intentional;
         * it avoids dirtying the cacheline if the queue is already active.
         */
        if (blk_mq_is_shared_tags(hctx->flags)) {
                struct request_queue *q = hctx->queue;

                if (test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags) ||
                    test_and_set_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags))
                        return;
        } else {
                if (test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) ||
                    test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
                        return;
        }

        spin_lock_irqsave(&tags->lock, flags);
        users = tags->active_queues + 1;
        WRITE_ONCE(tags->active_queues, users);
        blk_mq_update_wake_batch(tags, users);
        spin_unlock_irqrestore(&tags->lock, flags);
}
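
/*
 * Illustrative sketch (not part of the original file): the check-then-set
 * pattern used above, shown on a standalone flag word. The plain test_bit()
 * read lets already-active callers bail out without the locked
 * read-modify-write of test_and_set_bit(), which would dirty the cacheline.
 * Returns true only for the caller that performs the 0 -> 1 transition.
 * The example_ name is hypothetical.
 */
static inline bool example_mark_active_once(unsigned long *word, int bit)
{
        if (test_bit(bit, word) || test_and_set_bit(bit, word))
                return false;   /* already active, cacheline left clean */
        return true;            /* this caller made the queue active */
}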

/*
 * Wake up all tasks potentially sleeping on tags.
 */
void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool include_reserve)
{
        sbitmap_queue_wake_all(&tags->bitmap_tags);
        if (include_reserve)
                sbitmap_queue_wake_all(&tags->breserved_tags);
}

/*
 * If a previously busy queue goes inactive, potential waiters could now
 * be allowed to queue. Wake them up and check.
 */
void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
{
        struct blk_mq_tags *tags = hctx->tags;
        unsigned int users;

        if (blk_mq_is_shared_tags(hctx->flags)) {
                struct request_queue *q = hctx->queue;

                if (!test_and_clear_bit(QUEUE_FLAG_HCTX_ACTIVE,
                                        &q->queue_flags))
                        return;
        } else {
                if (!test_and_clear_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
                        return;
        }

        spin_lock_irq(&tags->lock);
        users = tags->active_queues - 1;
        WRITE_ONCE(tags->active_queues, users);
        blk_mq_update_wake_batch(tags, users);
        spin_unlock_irq(&tags->lock);

        blk_mq_tag_wakeup_all(tags, false);
}

static int __blk_mq_get_tag(struct blk_mq_alloc_data *data,
                            struct sbitmap_queue *bt)
{
        if (!data->q->elevator && !(data->flags & BLK_MQ_REQ_RESERVED) &&
                        !hctx_may_queue(data->hctx, bt))
                return BLK_MQ_NO_TAG;

        if (data->shallow_depth)
                return sbitmap_queue_get_shallow(bt, data->shallow_depth);
        else
                return __sbitmap_queue_get(bt);
}

unsigned long blk_mq_get_tags(struct blk_mq_alloc_data *data, int nr_tags,
                              unsigned int *offset)
{
        struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
        struct sbitmap_queue *bt = &tags->bitmap_tags;
        unsigned long ret;

        if (data->shallow_depth || data->flags & BLK_MQ_REQ_RESERVED ||
            data->hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
                return 0;
        ret = __sbitmap_queue_get_batch(bt, nr_tags, offset);
        *offset += tags->nr_reserved_tags;
        return ret;
}

unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
{
        struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
        struct sbitmap_queue *bt;
        struct sbq_wait_state *ws;
        DEFINE_SBQ_WAIT(wait);
        unsigned int tag_offset;
        int tag;

        if (data->flags & BLK_MQ_REQ_RESERVED) {
                if (unlikely(!tags->nr_reserved_tags)) {
                        WARN_ON_ONCE(1);
                        return BLK_MQ_NO_TAG;
                }
                bt = &tags->breserved_tags;
                tag_offset = 0;
        } else {
                bt = &tags->bitmap_tags;
                tag_offset = tags->nr_reserved_tags;
        }

        tag = __blk_mq_get_tag(data, bt);
        if (tag != BLK_MQ_NO_TAG)
                goto found_tag;

        if (data->flags & BLK_MQ_REQ_NOWAIT)
                return BLK_MQ_NO_TAG;

        ws = bt_wait_ptr(bt, data->hctx);
        do {
                struct sbitmap_queue *bt_prev;

                /*
                 * We're out of tags on this hardware queue; kick any
                 * pending IO submissions before going to sleep waiting
                 * for some to complete.
                 */
                blk_mq_run_hw_queue(data->hctx, false);

                /*
                 * Retry tag allocation after running the hardware queue,
                 * as running the queue may also have found completions.
                 */
                tag = __blk_mq_get_tag(data, bt);
                if (tag != BLK_MQ_NO_TAG)
                        break;

                sbitmap_prepare_to_wait(bt, ws, &wait, TASK_UNINTERRUPTIBLE);

                tag = __blk_mq_get_tag(data, bt);
                if (tag != BLK_MQ_NO_TAG)
                        break;

                bt_prev = bt;
                io_schedule();

                sbitmap_finish_wait(bt, ws, &wait);

                data->ctx = blk_mq_get_ctx(data->q);
                data->hctx = blk_mq_map_queue(data->q, data->cmd_flags,
                                                data->ctx);
                tags = blk_mq_tags_from_data(data);
                if (data->flags & BLK_MQ_REQ_RESERVED)
                        bt = &tags->breserved_tags;
                else
                        bt = &tags->bitmap_tags;

                /*
                 * If the destination hw queue has changed, issue a fake
                 * wakeup on the previous queue to compensate for the missed
                 * wakeup, so other allocations on the previous queue won't
                 * be starved.
                 */
                if (bt != bt_prev)
                        sbitmap_queue_wake_up(bt_prev, 1);

                ws = bt_wait_ptr(bt, data->hctx);
        } while (1);

        sbitmap_finish_wait(bt, ws, &wait);

found_tag:
        /*
         * Give up this allocation if the hctx is inactive.  The caller will
         * retry on an active hctx.
         */
        if (unlikely(test_bit(BLK_MQ_S_INACTIVE, &data->hctx->state))) {
                blk_mq_put_tag(tags, data->ctx, tag + tag_offset);
                return BLK_MQ_NO_TAG;
        }
        return tag + tag_offset;
}
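
/*
 * Illustrative sketch (not part of the original file): pairing
 * blk_mq_get_tag() with blk_mq_put_tag(). Assumes @data has already been
 * populated by the block core (q, hctx, ctx, flags); the example_ name is
 * hypothetical.
 */
static int example_tag_roundtrip(struct blk_mq_alloc_data *data)
{
        unsigned int tag = blk_mq_get_tag(data);

        /* BLK_MQ_NO_TAG: BLK_MQ_REQ_NOWAIT with no tags free, or inactive hctx */
        if (tag == BLK_MQ_NO_TAG)
                return -EWOULDBLOCK;

        /* ... dispatch using the tag ... */

        blk_mq_put_tag(blk_mq_tags_from_data(data), data->ctx, tag);
        return 0;
}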

void blk_mq_put_tag(struct blk_mq_tags *tags, struct blk_mq_ctx *ctx,
                    unsigned int tag)
{
        if (!blk_mq_tag_is_reserved(tags, tag)) {
                const int real_tag = tag - tags->nr_reserved_tags;

                BUG_ON(real_tag >= tags->nr_tags);
                sbitmap_queue_clear(&tags->bitmap_tags, real_tag, ctx->cpu);
        } else {
                sbitmap_queue_clear(&tags->breserved_tags, tag, ctx->cpu);
        }
}

void blk_mq_put_tags(struct blk_mq_tags *tags, int *tag_array, int nr_tags)
{
        sbitmap_queue_clear_batch(&tags->bitmap_tags, tags->nr_reserved_tags,
                                        tag_array, nr_tags);
}
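
/*
 * Illustrative sketch (not part of the original file): consuming a batch
 * allocation. blk_mq_get_tags() returns a bitmask of allocated tags based
 * at *offset (0 if nothing could be allocated), with the reserved-tag
 * offset already folded into *offset; blk_mq_put_tags() takes an array of
 * such tag numbers. The example_ name and batch size are hypothetical.
 */
static void example_batch_roundtrip(struct blk_mq_alloc_data *data)
{
        struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
        int freed[BITS_PER_LONG], nr = 0;
        unsigned int offset, i;
        unsigned long mask;

        mask = blk_mq_get_tags(data, 4, &offset);
        for_each_set_bit(i, &mask, BITS_PER_LONG)
                freed[nr++] = offset + i;       /* tag numbers include the offset */

        /* ... dispatch ... */

        if (nr)
                blk_mq_put_tags(tags, freed, nr);
}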

struct bt_iter_data {
        struct blk_mq_hw_ctx *hctx;
        struct request_queue *q;
        busy_tag_iter_fn *fn;
        void *data;
        bool reserved;
};

static struct request *blk_mq_find_and_get_req(struct blk_mq_tags *tags,
                unsigned int bitnr)
{
        struct request *rq;
        unsigned long flags;

        spin_lock_irqsave(&tags->lock, flags);
        rq = tags->rqs[bitnr];
        if (!rq || rq->tag != bitnr || !req_ref_inc_not_zero(rq))
                rq = NULL;
        spin_unlock_irqrestore(&tags->lock, flags);
        return rq;
}

static bool bt_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
{
        struct bt_iter_data *iter_data = data;
        struct blk_mq_hw_ctx *hctx = iter_data->hctx;
        struct request_queue *q = iter_data->q;
        struct blk_mq_tag_set *set = q->tag_set;
        struct blk_mq_tags *tags;
        struct request *rq;
        bool ret = true;

        if (blk_mq_is_shared_tags(set->flags))
                tags = set->shared_tags;
        else
                tags = hctx->tags;

        if (!iter_data->reserved)
                bitnr += tags->nr_reserved_tags;
        /*
         * We can hit rq == NULL here, because the tagging functions
         * test and set the bit before assigning ->rqs[].
         */
        rq = blk_mq_find_and_get_req(tags, bitnr);
        if (!rq)
                return true;

        if (rq->q == q && (!hctx || rq->mq_hctx == hctx))
                ret = iter_data->fn(rq, iter_data->data);
        blk_mq_put_rq_ref(rq);
        return ret;
}

/**
 * bt_for_each - iterate over the requests associated with a hardware queue
 * @hctx:       Hardware queue to examine.
 * @q:          Request queue to examine.
 * @bt:         sbitmap to examine. This is either the breserved_tags member
 *              or the bitmap_tags member of struct blk_mq_tags.
 * @fn:         Pointer to the function that will be called for each request
 *              associated with @hctx that has been assigned a driver tag.
 *              @fn will be called as follows: @fn(rq, @data) where rq is a
 *              pointer to a request. Return true to continue iterating
 *              tags, false to stop.
 * @data:       Will be passed as second argument to @fn.
 * @reserved:   Indicates whether @bt is the breserved_tags member or the
 *              bitmap_tags member of struct blk_mq_tags.
 */
static void bt_for_each(struct blk_mq_hw_ctx *hctx, struct request_queue *q,
                        struct sbitmap_queue *bt, busy_tag_iter_fn *fn,
                        void *data, bool reserved)
{
        struct bt_iter_data iter_data = {
                .hctx = hctx,
                .fn = fn,
                .data = data,
                .reserved = reserved,
                .q = q,
        };

        sbitmap_for_each_set(&bt->sb, bt_iter, &iter_data);
}

struct bt_tags_iter_data {
        struct blk_mq_tags *tags;
        busy_tag_iter_fn *fn;
        void *data;
        unsigned int flags;
};

#define BT_TAG_ITER_RESERVED            (1 << 0)
#define BT_TAG_ITER_STARTED             (1 << 1)
#define BT_TAG_ITER_STATIC_RQS          (1 << 2)

static bool bt_tags_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
{
        struct bt_tags_iter_data *iter_data = data;
        struct blk_mq_tags *tags = iter_data->tags;
        struct request *rq;
        bool ret = true;
        bool iter_static_rqs = !!(iter_data->flags & BT_TAG_ITER_STATIC_RQS);

        if (!(iter_data->flags & BT_TAG_ITER_RESERVED))
                bitnr += tags->nr_reserved_tags;

        /*
         * We can hit rq == NULL here, because the tagging functions
         * test and set the bit before assigning ->rqs[].
         */
        if (iter_static_rqs)
                rq = tags->static_rqs[bitnr];
        else
                rq = blk_mq_find_and_get_req(tags, bitnr);
        if (!rq)
                return true;

        if (!(iter_data->flags & BT_TAG_ITER_STARTED) ||
            blk_mq_request_started(rq))
                ret = iter_data->fn(rq, iter_data->data);
        if (!iter_static_rqs)
                blk_mq_put_rq_ref(rq);
        return ret;
}

/**
 * bt_tags_for_each - iterate over the requests in a tag map
 * @tags:       Tag map to iterate over.
 * @bt:         sbitmap to examine. This is either the breserved_tags member
 *              or the bitmap_tags member of struct blk_mq_tags.
 * @fn:         Pointer to the function that will be called for each started
 *              request. @fn will be called as follows: @fn(rq, @data) where
 *              rq is a pointer to a request. Return true to continue
 *              iterating tags, false to stop.
 * @data:       Will be passed as second argument to @fn.
 * @flags:      BT_TAG_ITER_*
 */
static void bt_tags_for_each(struct blk_mq_tags *tags, struct sbitmap_queue *bt,
                             busy_tag_iter_fn *fn, void *data, unsigned int flags)
{
        struct bt_tags_iter_data iter_data = {
                .tags = tags,
                .fn = fn,
                .data = data,
                .flags = flags,
        };

        if (tags->rqs)
                sbitmap_for_each_set(&bt->sb, bt_tags_iter, &iter_data);
}

static void __blk_mq_all_tag_iter(struct blk_mq_tags *tags,
                busy_tag_iter_fn *fn, void *priv, unsigned int flags)
{
        WARN_ON_ONCE(flags & BT_TAG_ITER_RESERVED);

        if (tags->nr_reserved_tags)
                bt_tags_for_each(tags, &tags->breserved_tags, fn, priv,
                                 flags | BT_TAG_ITER_RESERVED);
        bt_tags_for_each(tags, &tags->bitmap_tags, fn, priv, flags);
}

/**
 * blk_mq_all_tag_iter - iterate over all requests in a tag map
 * @tags:       Tag map to iterate over.
 * @fn:         Pointer to the function that will be called for each
 *              request. @fn will be called as follows: @fn(rq, @priv) where
 *              rq is a pointer to a request. Return true to continue
 *              iterating tags, false to stop.
 * @priv:       Will be passed as second argument to @fn.
 *
 * The caller has to pass the tag map from which requests are allocated.
 */
void blk_mq_all_tag_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn,
                void *priv)
{
        __blk_mq_all_tag_iter(tags, fn, priv, BT_TAG_ITER_STATIC_RQS);
}

/**
 * blk_mq_tagset_busy_iter - iterate over all started requests in a tag set
 * @tagset:     Tag set to iterate over.
 * @fn:         Pointer to the function that will be called for each started
 *              request. @fn will be called as follows: @fn(rq, @priv) where
 *              rq is a pointer to a request. Return true to continue
 *              iterating tags, false to stop.
 * @priv:       Will be passed as second argument to @fn.
 *
 * We grab one request reference before calling @fn and release it after
 * @fn returns.
 */
void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
                busy_tag_iter_fn *fn, void *priv)
{
        unsigned int flags = tagset->flags;
        int i, nr_tags;

        nr_tags = blk_mq_is_shared_tags(flags) ? 1 : tagset->nr_hw_queues;

        for (i = 0; i < nr_tags; i++) {
                if (tagset->tags && tagset->tags[i])
                        __blk_mq_all_tag_iter(tagset->tags[i], fn, priv,
                                              BT_TAG_ITER_STARTED);
        }
}
EXPORT_SYMBOL(blk_mq_tagset_busy_iter);
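
/*
 * Illustrative sketch (not part of the original file): a driver-side
 * busy_tag_iter_fn that counts in-flight requests, in the style of what
 * NVMe-like drivers do during timeout handling. The callback matches the
 * fn(rq, priv) convention documented above; returning true keeps the
 * iteration going. The example_ name is hypothetical.
 */
static bool example_count_inflight(struct request *rq, void *priv)
{
        unsigned int *inflight = priv;

        (*inflight)++;
        return true;
}

/* usage: blk_mq_tagset_busy_iter(tagset, example_count_inflight, &count); */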

static bool blk_mq_tagset_count_completed_rqs(struct request *rq, void *data)
{
        unsigned *count = data;

        if (blk_mq_request_completed(rq))
                (*count)++;
        return true;
}

/**
 * blk_mq_tagset_wait_completed_request - Wait until all scheduled request
 * completions have finished.
 * @tagset:     Tag set to drain completed requests from.
 *
 * Note: This function has to be run after all IO queues are shut down.
 */
void blk_mq_tagset_wait_completed_request(struct blk_mq_tag_set *tagset)
{
        while (true) {
                unsigned count = 0;

                blk_mq_tagset_busy_iter(tagset,
                                blk_mq_tagset_count_completed_rqs, &count);
                if (!count)
                        break;
                msleep(5);
        }
}
EXPORT_SYMBOL(blk_mq_tagset_wait_completed_request);
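
/*
 * Illustrative usage sketch (not part of the original file): the expected
 * shutdown ordering in a driver. The driver-side helper name is
 * hypothetical; the point is that no new completions may be generated
 * before waiting.
 *
 *      example_driver_disable_io_queues(dev);  // stop all new completions
 *      blk_mq_tagset_wait_completed_request(&dev->tag_set);
 */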

/**
 * blk_mq_queue_tag_busy_iter - iterate over all requests with a driver tag
 * @q:          Request queue to examine.
 * @fn:         Pointer to the function that will be called for each request
 *              on @q. @fn will be called as follows: @fn(rq, @priv) where rq
 *              is a pointer to a request. Return true to continue iterating
 *              tags, false to stop.
 * @priv:       Will be passed as second argument to @fn.
 *
 * Note: if @q->tag_set is shared with other request queues then @fn will be
 * called for all requests on all queues that share that tag set and not only
 * for requests associated with @q.
 */
void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_tag_iter_fn *fn,
                void *priv)
{
        /*
         * __blk_mq_update_nr_hw_queues() updates nr_hw_queues and hctx_table
         * while the queue is frozen. So we can use q_usage_counter to avoid
         * racing with it.
         */
        if (!percpu_ref_tryget(&q->q_usage_counter))
                return;

        if (blk_mq_is_shared_tags(q->tag_set->flags)) {
                struct blk_mq_tags *tags = q->tag_set->shared_tags;
                struct sbitmap_queue *bresv = &tags->breserved_tags;
                struct sbitmap_queue *btags = &tags->bitmap_tags;

                if (tags->nr_reserved_tags)
                        bt_for_each(NULL, q, bresv, fn, priv, true);
                bt_for_each(NULL, q, btags, fn, priv, false);
        } else {
                struct blk_mq_hw_ctx *hctx;
                unsigned long i;

                queue_for_each_hw_ctx(q, hctx, i) {
                        struct blk_mq_tags *tags = hctx->tags;
                        struct sbitmap_queue *bresv = &tags->breserved_tags;
                        struct sbitmap_queue *btags = &tags->bitmap_tags;

                        /*
                         * If no software queues are currently mapped to this
                         * hardware queue, there's nothing to check.
                         */
                        if (!blk_mq_hw_queue_mapped(hctx))
                                continue;

                        if (tags->nr_reserved_tags)
                                bt_for_each(hctx, q, bresv, fn, priv, true);
                        bt_for_each(hctx, q, btags, fn, priv, false);
                }
        }
        blk_queue_exit(q);
}

static int bt_alloc(struct sbitmap_queue *bt, unsigned int depth,
                    bool round_robin, int node)
{
        return sbitmap_queue_init_node(bt, depth, -1, round_robin, GFP_KERNEL,
                                       node);
}

int blk_mq_init_bitmaps(struct sbitmap_queue *bitmap_tags,
                        struct sbitmap_queue *breserved_tags,
                        unsigned int queue_depth, unsigned int reserved,
                        int node, int alloc_policy)
{
        unsigned int depth = queue_depth - reserved;
        bool round_robin = alloc_policy == BLK_TAG_ALLOC_RR;

        if (bt_alloc(bitmap_tags, depth, round_robin, node))
                return -ENOMEM;
        if (bt_alloc(breserved_tags, reserved, round_robin, node))
                goto free_bitmap_tags;

        return 0;

free_bitmap_tags:
        sbitmap_queue_free(bitmap_tags);
        return -ENOMEM;
}
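
/*
 * Illustrative sketch (not part of the original file): initializing and
 * releasing a bitmap pair directly. The depths (128 total, 2 reserved) are
 * hypothetical, as is the example_ name.
 */
static int example_bitmaps_roundtrip(struct sbitmap_queue *bt,
                                     struct sbitmap_queue *resv)
{
        int ret = blk_mq_init_bitmaps(bt, resv, 128, 2, NUMA_NO_NODE,
                                      BLK_TAG_ALLOC_FIFO);

        if (ret)
                return ret;

        /* ... allocate and clear bits ... */

        sbitmap_queue_free(resv);
        sbitmap_queue_free(bt);
        return 0;
}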

struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
                                     unsigned int reserved_tags,
                                     int node, int alloc_policy)
{
        struct blk_mq_tags *tags;

        if (total_tags > BLK_MQ_TAG_MAX) {
                pr_err("blk-mq: tag depth too large\n");
                return NULL;
        }

        tags = kzalloc_node(sizeof(*tags), GFP_KERNEL, node);
        if (!tags)
                return NULL;

        tags->nr_tags = total_tags;
        tags->nr_reserved_tags = reserved_tags;
        spin_lock_init(&tags->lock);

        if (blk_mq_init_bitmaps(&tags->bitmap_tags, &tags->breserved_tags,
                                total_tags, reserved_tags, node,
                                alloc_policy) < 0) {
                kfree(tags);
                return NULL;
        }
        return tags;
}

void blk_mq_free_tags(struct blk_mq_tags *tags)
{
        sbitmap_queue_free(&tags->bitmap_tags);
        sbitmap_queue_free(&tags->breserved_tags);
        kfree(tags);
}
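
/*
 * Illustrative sketch (not part of the original file): creating a
 * standalone tag map. The depth (256) and reserved count (1) are
 * hypothetical values; pair with blk_mq_free_tags() once every tag has
 * been returned.
 */
static struct blk_mq_tags *example_create_tags(void)
{
        return blk_mq_init_tags(256, 1, NUMA_NO_NODE, BLK_TAG_ALLOC_FIFO);
}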

int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
                            struct blk_mq_tags **tagsptr, unsigned int tdepth,
                            bool can_grow)
{
        struct blk_mq_tags *tags = *tagsptr;

        if (tdepth <= tags->nr_reserved_tags)
                return -EINVAL;

        /*
         * If we are allowed to grow beyond the original size, allocate
         * a new set of tags before freeing the old one.
         */
        if (tdepth > tags->nr_tags) {
                struct blk_mq_tag_set *set = hctx->queue->tag_set;
                struct blk_mq_tags *new;

                if (!can_grow)
                        return -EINVAL;

                /*
                 * We need some sort of upper limit; set it high enough that
                 * no valid use case should require more.
                 */
                if (tdepth > MAX_SCHED_RQ)
                        return -EINVAL;

                /*
                 * Only the sbitmap needs resizing since we allocated the max
                 * initially.
                 */
                if (blk_mq_is_shared_tags(set->flags))
                        return 0;

                new = blk_mq_alloc_map_and_rqs(set, hctx->queue_num, tdepth);
                if (!new)
                        return -ENOMEM;

                blk_mq_free_map_and_rqs(set, *tagsptr, hctx->queue_num);
                *tagsptr = new;
        } else {
                /*
                 * We don't need to (and can't) update reserved tags here;
                 * they remain static and should never need resizing.
                 */
                sbitmap_queue_resize(&tags->bitmap_tags,
                                tdepth - tags->nr_reserved_tags);
        }

        return 0;
}

void blk_mq_tag_resize_shared_tags(struct blk_mq_tag_set *set, unsigned int size)
{
        struct blk_mq_tags *tags = set->shared_tags;

        sbitmap_queue_resize(&tags->bitmap_tags, size - set->reserved_tags);
}

void blk_mq_tag_update_sched_shared_tags(struct request_queue *q)
{
        sbitmap_queue_resize(&q->sched_shared_tags->bitmap_tags,
                             q->nr_requests - q->tag_set->reserved_tags);
}

/**
 * blk_mq_unique_tag() - return a tag that is unique queue-wide
 * @rq: request for which to compute a unique tag
 *
 * The tag field in struct request is unique per hardware queue but not over
 * all hardware queues. Hence this function, which returns a tag with the
 * hardware context index in the upper bits and the per-hardware-queue tag in
 * the lower bits.
 *
 * Note: When called for a request that is queued on a non-multiqueue request
 * queue, the hardware context index is set to zero.
 */
u32 blk_mq_unique_tag(struct request *rq)
{
        return (rq->mq_hctx->queue_num << BLK_MQ_UNIQUE_TAG_BITS) |
                (rq->tag & BLK_MQ_UNIQUE_TAG_MASK);
}
EXPORT_SYMBOL(blk_mq_unique_tag);

