TOMOYO Linux Cross Reference
Linux/block/blk-mq.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef INT_BLK_MQ_H
#define INT_BLK_MQ_H

#include <linux/blk-mq.h>
#include "blk-stat.h"

struct blk_mq_tag_set;

struct blk_mq_ctxs {
        struct kobject kobj;
        struct blk_mq_ctx __percpu      *queue_ctx;
};

/**
 * struct blk_mq_ctx - State for a software queue facing the submitting CPUs
 */
struct blk_mq_ctx {
        struct {
                spinlock_t              lock;
                struct list_head        rq_lists[HCTX_MAX_TYPES];
        } ____cacheline_aligned_in_smp;

        unsigned int            cpu;
        unsigned short          index_hw[HCTX_MAX_TYPES];
        struct blk_mq_hw_ctx    *hctxs[HCTX_MAX_TYPES];

        struct request_queue    *queue;
        struct blk_mq_ctxs      *ctxs;
        struct kobject          kobj;
} ____cacheline_aligned_in_smp;

enum {
        BLK_MQ_NO_TAG           = -1U,
        BLK_MQ_TAG_MIN          = 1,
        BLK_MQ_TAG_MAX          = BLK_MQ_NO_TAG - 1,
};

#define BLK_MQ_CPU_WORK_BATCH   (8)

typedef unsigned int __bitwise blk_insert_t;
#define BLK_MQ_INSERT_AT_HEAD           ((__force blk_insert_t)0x01)

void blk_mq_submit_bio(struct bio *bio);
int blk_mq_poll(struct request_queue *q, blk_qc_t cookie, struct io_comp_batch *iob,
                unsigned int flags);
void blk_mq_exit_queue(struct request_queue *q);
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
void blk_mq_wake_waiters(struct request_queue *q);
bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *,
                             unsigned int);
void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
                                        struct blk_mq_ctx *start);
void blk_mq_put_rq_ref(struct request *rq);

/*
 * Internal helpers for allocating/freeing the request map
 */
void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
                     unsigned int hctx_idx);
void blk_mq_free_rq_map(struct blk_mq_tags *tags);
struct blk_mq_tags *blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set,
                                unsigned int hctx_idx, unsigned int depth);
void blk_mq_free_map_and_rqs(struct blk_mq_tag_set *set,
                             struct blk_mq_tags *tags,
                             unsigned int hctx_idx);

/*
 * CPU -> queue mappings
 */
extern int blk_mq_hw_queue_to_node(struct blk_mq_queue_map *qmap, unsigned int);

/*
 * blk_mq_map_queue_type() - map (hctx_type,cpu) to hardware queue
 * @q: request queue
 * @type: the hctx type index
 * @cpu: CPU
 */
static inline struct blk_mq_hw_ctx *blk_mq_map_queue_type(struct request_queue *q,
                                                          enum hctx_type type,
                                                          unsigned int cpu)
{
        return xa_load(&q->hctx_table, q->tag_set->map[type].mq_map[cpu]);
}
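
/*
 * Editor's sketch, not part of the original header: a minimal use of
 * blk_mq_map_queue_type(), looking up the hardware queue a given CPU uses
 * for its default map.  The helper name below is hypothetical.
 */
static inline struct blk_mq_hw_ctx *
blk_mq_example_default_hctx(struct request_queue *q, unsigned int cpu)
{
        return blk_mq_map_queue_type(q, HCTX_TYPE_DEFAULT, cpu);
}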

static inline enum hctx_type blk_mq_get_hctx_type(blk_opf_t opf)
{
        enum hctx_type type = HCTX_TYPE_DEFAULT;

        /*
         * The caller ensures that if REQ_POLLED is set, polling is enabled
         * on the queue.
         */
        if (opf & REQ_POLLED)
                type = HCTX_TYPE_POLL;
        else if ((opf & REQ_OP_MASK) == REQ_OP_READ)
                type = HCTX_TYPE_READ;
        return type;
}
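
/*
 * Worked examples (editor's note, not part of the original header) of the
 * classification above; whether a distinct hardware queue actually exists
 * for the chosen type is decided later, when ctx->hctxs[] is set up:
 *
 *      blk_mq_get_hctx_type(REQ_OP_WRITE)             == HCTX_TYPE_DEFAULT
 *      blk_mq_get_hctx_type(REQ_OP_READ)              == HCTX_TYPE_READ
 *      blk_mq_get_hctx_type(REQ_OP_READ | REQ_POLLED) == HCTX_TYPE_POLL
 */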

/*
 * blk_mq_map_queue() - map (opf, ctx) to hardware queue
 * @q: request queue
 * @opf: operation type (REQ_OP_*) and flags (e.g. REQ_POLLED)
 * @ctx: software queue cpu ctx
 */
static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
                                                     blk_opf_t opf,
                                                     struct blk_mq_ctx *ctx)
{
        return ctx->hctxs[blk_mq_get_hctx_type(opf)];
}

/*
 * sysfs helpers
 */
extern void blk_mq_sysfs_init(struct request_queue *q);
extern void blk_mq_sysfs_deinit(struct request_queue *q);
int blk_mq_sysfs_register(struct gendisk *disk);
void blk_mq_sysfs_unregister(struct gendisk *disk);
int blk_mq_sysfs_register_hctxs(struct request_queue *q);
void blk_mq_sysfs_unregister_hctxs(struct request_queue *q);
extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);
void blk_mq_free_plug_rqs(struct blk_plug *plug);
void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);

void blk_mq_cancel_work_sync(struct request_queue *q);

void blk_mq_release(struct request_queue *q);

static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
                                           unsigned int cpu)
{
        return per_cpu_ptr(q->queue_ctx, cpu);
}

/*
 * This assumes per-cpu software queues; they could just as well be per-node,
 * for instance, but for now this is hardcoded as-is. Note that we don't
 * care about preemption, since we know the ctxs are persistent. This does
 * mean that we can't rely on the ctx always matching the currently running
 * CPU.
 */
static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
{
        return __blk_mq_get_ctx(q, raw_smp_processor_id());
}
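
/*
 * Editor's sketch, not part of the original header: the usual submission-side
 * pattern of grabbing the current CPU's software queue and mapping it to a
 * hardware queue for a given operation.  The function name is hypothetical.
 */
static inline struct blk_mq_hw_ctx *
blk_mq_example_current_hctx(struct request_queue *q, blk_opf_t opf)
{
        struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);

        return blk_mq_map_queue(q, opf, ctx);
}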

struct blk_mq_alloc_data {
        /* input parameter */
        struct request_queue *q;
        blk_mq_req_flags_t flags;
        unsigned int shallow_depth;
        blk_opf_t cmd_flags;
        req_flags_t rq_flags;

        /* allocate multiple requests/tags in one go */
        unsigned int nr_tags;
        struct request **cached_rq;

        /* input & output parameter */
        struct blk_mq_ctx *ctx;
        struct blk_mq_hw_ctx *hctx;
};
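
/*
 * Editor's sketch, not part of the original header: roughly how a caller
 * might fill in blk_mq_alloc_data before using the tag allocation helpers
 * below.  The function name is hypothetical; real allocation paths also set
 * ctx/hctx and may allocate several tags at once via nr_tags/cached_rq.
 */
static inline void blk_mq_example_init_alloc_data(struct blk_mq_alloc_data *data,
                                                  struct request_queue *q,
                                                  blk_opf_t opf,
                                                  blk_mq_req_flags_t flags)
{
        *data = (struct blk_mq_alloc_data) {
                .q         = q,
                .flags     = flags,
                .cmd_flags = opf,
                .nr_tags   = 1,
        };
}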

struct blk_mq_tags *blk_mq_init_tags(unsigned int nr_tags,
                unsigned int reserved_tags, int node, int alloc_policy);
void blk_mq_free_tags(struct blk_mq_tags *tags);
int blk_mq_init_bitmaps(struct sbitmap_queue *bitmap_tags,
                struct sbitmap_queue *breserved_tags, unsigned int queue_depth,
                unsigned int reserved, int node, int alloc_policy);

unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data);
unsigned long blk_mq_get_tags(struct blk_mq_alloc_data *data, int nr_tags,
                unsigned int *offset);
void blk_mq_put_tag(struct blk_mq_tags *tags, struct blk_mq_ctx *ctx,
                unsigned int tag);
void blk_mq_put_tags(struct blk_mq_tags *tags, int *tag_array, int nr_tags);
int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
                struct blk_mq_tags **tags, unsigned int depth, bool can_grow);
void blk_mq_tag_resize_shared_tags(struct blk_mq_tag_set *set,
                unsigned int size);
void blk_mq_tag_update_sched_shared_tags(struct request_queue *q);

void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool);
void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_tag_iter_fn *fn,
                void *priv);
void blk_mq_all_tag_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn,
                void *priv);

static inline struct sbq_wait_state *bt_wait_ptr(struct sbitmap_queue *bt,
                                                 struct blk_mq_hw_ctx *hctx)
{
        if (!hctx)
                return &bt->ws[0];
        return sbq_wait_ptr(bt, &hctx->wait_index);
}

void __blk_mq_tag_busy(struct blk_mq_hw_ctx *);
void __blk_mq_tag_idle(struct blk_mq_hw_ctx *);

static inline void blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
{
        if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
                __blk_mq_tag_busy(hctx);
}

static inline void blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
{
        if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
                __blk_mq_tag_idle(hctx);
}

static inline bool blk_mq_tag_is_reserved(struct blk_mq_tags *tags,
                                          unsigned int tag)
{
        return tag < tags->nr_reserved_tags;
}

static inline bool blk_mq_is_shared_tags(unsigned int flags)
{
        return flags & BLK_MQ_F_TAG_HCTX_SHARED;
}

static inline struct blk_mq_tags *blk_mq_tags_from_data(struct blk_mq_alloc_data *data)
{
        if (data->rq_flags & RQF_SCHED_TAGS)
                return data->hctx->sched_tags;
        return data->hctx->tags;
}

static inline bool blk_mq_hctx_stopped(struct blk_mq_hw_ctx *hctx)
{
        return test_bit(BLK_MQ_S_STOPPED, &hctx->state);
}

static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)
{
        return hctx->nr_ctx && hctx->tags;
}

unsigned int blk_mq_in_flight(struct request_queue *q,
                struct block_device *part);
void blk_mq_in_flight_rw(struct request_queue *q, struct block_device *part,
                unsigned int inflight[2]);

static inline void blk_mq_put_dispatch_budget(struct request_queue *q,
                                              int budget_token)
{
        if (q->mq_ops->put_budget)
                q->mq_ops->put_budget(q, budget_token);
}

static inline int blk_mq_get_dispatch_budget(struct request_queue *q)
{
        if (q->mq_ops->get_budget)
                return q->mq_ops->get_budget(q);
        return 0;
}

static inline void blk_mq_set_rq_budget_token(struct request *rq, int token)
{
        if (token < 0)
                return;

        if (rq->q->mq_ops->set_rq_budget_token)
                rq->q->mq_ops->set_rq_budget_token(rq, token);
}

static inline int blk_mq_get_rq_budget_token(struct request *rq)
{
        if (rq->q->mq_ops->get_rq_budget_token)
                return rq->q->mq_ops->get_rq_budget_token(rq);
        return -1;
}
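
/*
 * Editor's sketch, not part of the original header: the typical pairing of
 * the budget hooks above around a dispatch attempt.  A negative token means
 * no budget could be obtained; a token that ends up unused must be handed
 * back with blk_mq_put_dispatch_budget().  The function name is hypothetical.
 */
static inline int blk_mq_example_try_budget(struct request_queue *q)
{
        int budget_token = blk_mq_get_dispatch_budget(q);

        if (budget_token < 0)
                return -1;      /* back off and retry the dispatch later */

        /*
         * The caller would now attach the token to the request with
         * blk_mq_set_rq_budget_token() and issue it; if the request is not
         * issued after all, it should call blk_mq_put_dispatch_budget().
         */
        return budget_token;
}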

static inline void __blk_mq_add_active_requests(struct blk_mq_hw_ctx *hctx,
                                                int val)
{
        if (blk_mq_is_shared_tags(hctx->flags))
                atomic_add(val, &hctx->queue->nr_active_requests_shared_tags);
        else
                atomic_add(val, &hctx->nr_active);
}

static inline void __blk_mq_inc_active_requests(struct blk_mq_hw_ctx *hctx)
{
        __blk_mq_add_active_requests(hctx, 1);
}

static inline void __blk_mq_sub_active_requests(struct blk_mq_hw_ctx *hctx,
                int val)
{
        if (blk_mq_is_shared_tags(hctx->flags))
                atomic_sub(val, &hctx->queue->nr_active_requests_shared_tags);
        else
                atomic_sub(val, &hctx->nr_active);
}

static inline void __blk_mq_dec_active_requests(struct blk_mq_hw_ctx *hctx)
{
        __blk_mq_sub_active_requests(hctx, 1);
}

static inline void blk_mq_add_active_requests(struct blk_mq_hw_ctx *hctx,
                                              int val)
{
        if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
                __blk_mq_add_active_requests(hctx, val);
}

static inline void blk_mq_inc_active_requests(struct blk_mq_hw_ctx *hctx)
{
        if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
                __blk_mq_inc_active_requests(hctx);
}

static inline void blk_mq_sub_active_requests(struct blk_mq_hw_ctx *hctx,
                                              int val)
{
        if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
                __blk_mq_sub_active_requests(hctx, val);
}

static inline void blk_mq_dec_active_requests(struct blk_mq_hw_ctx *hctx)
{
        if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
                __blk_mq_dec_active_requests(hctx);
}

static inline int __blk_mq_active_requests(struct blk_mq_hw_ctx *hctx)
{
        if (blk_mq_is_shared_tags(hctx->flags))
                return atomic_read(&hctx->queue->nr_active_requests_shared_tags);
        return atomic_read(&hctx->nr_active);
}

static inline void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
                                           struct request *rq)
{
        blk_mq_dec_active_requests(hctx);
        blk_mq_put_tag(hctx->tags, rq->mq_ctx, rq->tag);
        rq->tag = BLK_MQ_NO_TAG;
}

static inline void blk_mq_put_driver_tag(struct request *rq)
{
        if (rq->tag == BLK_MQ_NO_TAG || rq->internal_tag == BLK_MQ_NO_TAG)
                return;

        __blk_mq_put_driver_tag(rq->mq_hctx, rq);
}

bool __blk_mq_alloc_driver_tag(struct request *rq);

static inline bool blk_mq_get_driver_tag(struct request *rq)
{
        if (rq->tag == BLK_MQ_NO_TAG && !__blk_mq_alloc_driver_tag(rq))
                return false;

        return true;
}
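
/*
 * Editor's sketch, not part of the original header: roughly how the driver
 * tag helpers above pair up around ->queue_rq().  A driver tag must be held
 * while the request is handed to the driver; if the driver cannot take it,
 * the tag is handed back before the request is requeued.  The function name
 * is hypothetical and real issue paths also deal with budget and requeueing.
 */
static inline blk_status_t blk_mq_example_issue_rq(struct blk_mq_hw_ctx *hctx,
                                                   struct request *rq)
{
        struct blk_mq_queue_data bd = { .rq = rq, .last = true };
        blk_status_t ret;

        if (!blk_mq_get_driver_tag(rq))
                return BLK_STS_RESOURCE;        /* no driver tag available */

        ret = hctx->queue->mq_ops->queue_rq(hctx, &bd);
        if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE)
                blk_mq_put_driver_tag(rq);      /* hand the tag back */
        return ret;
}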

static inline void blk_mq_clear_mq_map(struct blk_mq_queue_map *qmap)
{
        int cpu;

        for_each_possible_cpu(cpu)
                qmap->mq_map[cpu] = 0;
}

/* Free all requests on the list */
static inline void blk_mq_free_requests(struct list_head *list)
{
        while (!list_empty(list)) {
                struct request *rq = list_entry_rq(list->next);

                list_del_init(&rq->queuelist);
                blk_mq_free_request(rq);
        }
}

/*
 * For shared tag users, we track the number of currently active users
 * and attempt to provide a fair share of the tag depth for each of them.
 */
static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
                                  struct sbitmap_queue *bt)
{
        unsigned int depth, users;

        if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED))
                return true;

        /*
         * Don't try dividing an ant: a depth of 1 can't be shared out any
         * further.
         */
        if (bt->sb.depth == 1)
                return true;

        if (blk_mq_is_shared_tags(hctx->flags)) {
                struct request_queue *q = hctx->queue;

                if (!test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags))
                        return true;
        } else {
                if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
                        return true;
        }

        users = READ_ONCE(hctx->tags->active_queues);
        if (!users)
                return true;

        /*
         * Allow at least some tags
         */
        depth = max((bt->sb.depth + users - 1) / users, 4U);
        return __blk_mq_active_requests(hctx) < depth;
}
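
/*
 * Worked example (editor's note, not part of the original header): with a
 * bitmap depth of 256 and 3 active queues sharing the tags, the fair share
 * is max((256 + 3 - 1) / 3, 4U) = 86, so hctx_may_queue() starts returning
 * false once a queue has 86 or more requests in flight.
 */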

/* run the code block in @dispatch_ops with rcu/srcu read lock held */
#define __blk_mq_run_dispatch_ops(q, check_sleep, dispatch_ops) \
do {                                                            \
        if ((q)->tag_set->flags & BLK_MQ_F_BLOCKING) {          \
                struct blk_mq_tag_set *__tag_set = (q)->tag_set; \
                int srcu_idx;                                   \
                                                                \
                might_sleep_if(check_sleep);                    \
                srcu_idx = srcu_read_lock(__tag_set->srcu);     \
                (dispatch_ops);                                 \
                srcu_read_unlock(__tag_set->srcu, srcu_idx);    \
        } else {                                                \
                rcu_read_lock();                                \
                (dispatch_ops);                                 \
                rcu_read_unlock();                              \
        }                                                       \
} while (0)

#define blk_mq_run_dispatch_ops(q, dispatch_ops)                \
        __blk_mq_run_dispatch_ops(q, true, dispatch_ops)        \

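/*
 * Editor's sketch, not part of the original header: the macro above wraps a
 * dispatch expression so that it runs under rcu_read_lock() for regular tag
 * sets and under SRCU for BLK_MQ_F_BLOCKING ones; blk-mq.c uses it roughly
 * like this when running a hardware queue:
 *
 *      blk_mq_run_dispatch_ops(hctx->queue,
 *                              blk_mq_sched_dispatch_requests(hctx));
 */
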
#endif
