TOMOYO Linux Cross Reference
Linux/block/blk-settings.c (linux-6.11-rc3)

// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to setting various queue properties from drivers
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blk-integrity.h>
#include <linux/pagemap.h>
#include <linux/backing-dev-defs.h>
#include <linux/gcd.h>
#include <linux/lcm.h>
#include <linux/jiffies.h>
#include <linux/gfp.h>
#include <linux/dma-mapping.h>

#include "blk.h"
#include "blk-rq-qos.h"
#include "blk-wbt.h"

void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
{
        q->rq_timeout = timeout;
}
EXPORT_SYMBOL_GPL(blk_queue_rq_timeout);

/**
 * blk_set_stacking_limits - set default limits for stacking devices
 * @lim:  the queue_limits structure to reset
 *
 * Prepare queue limits for applying limits from underlying devices using
 * blk_stack_limits().
 */
void blk_set_stacking_limits(struct queue_limits *lim)
{
        memset(lim, 0, sizeof(*lim));
        lim->logical_block_size = SECTOR_SIZE;
        lim->physical_block_size = SECTOR_SIZE;
        lim->io_min = SECTOR_SIZE;
        lim->discard_granularity = SECTOR_SIZE;
        lim->dma_alignment = SECTOR_SIZE - 1;
        lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;

        /* Inherit limits from component devices */
        lim->max_segments = USHRT_MAX;
        lim->max_discard_segments = USHRT_MAX;
        lim->max_hw_sectors = UINT_MAX;
        lim->max_segment_size = UINT_MAX;
        lim->max_sectors = UINT_MAX;
        lim->max_dev_sectors = UINT_MAX;
        lim->max_write_zeroes_sectors = UINT_MAX;
        lim->max_zone_append_sectors = UINT_MAX;
        lim->max_user_discard_sectors = UINT_MAX;
}
EXPORT_SYMBOL(blk_set_stacking_limits);
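
As a usage illustration: a stacking driver typically resets its limits with blk_set_stacking_limits() and then folds in each component device via queue_limits_stack_bdev() (defined later in this file). A minimal sketch, assuming a hypothetical array of component devices; bdevs, nr_bdevs and the "stk" prefix are illustrative, not part of this file:

/* Sketch only: fold each component device's limits into @lim. */
struct queue_limits lim;
int i;

blk_set_stacking_limits(&lim);
for (i = 0; i < nr_bdevs; i++)                  /* hypothetical list */
        queue_limits_stack_bdev(&lim, bdevs[i], 0, "stk");
/* ... then apply @lim to the stacking device's queue, e.g. via
 * queue_limits_set() or at disk allocation time ... */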

void blk_apply_bdi_limits(struct backing_dev_info *bdi,
                struct queue_limits *lim)
{
        /*
         * For read-ahead of large files to be effective, we need to read ahead
         * at least twice the optimal I/O size.
         */
        bdi->ra_pages = max(lim->io_opt * 2 / PAGE_SIZE, VM_READAHEAD_PAGES);
        bdi->io_pages = lim->max_sectors >> PAGE_SECTORS_SHIFT;
}
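
Worked example of the two assignments above, assuming 4 KiB pages (so PAGE_SECTORS_SHIFT = 3) and VM_READAHEAD_PAGES = 32 (its usual 128 KiB default); the device values are illustrative:

/*
 * io_opt = 1 MiB:      ra_pages = max(2097152 / 4096, 32) = 512 pages (2 MiB)
 * max_sectors = 2560:  io_pages = 2560 >> 3               = 320 pages
 */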

static int blk_validate_zoned_limits(struct queue_limits *lim)
{
        if (!(lim->features & BLK_FEAT_ZONED)) {
                if (WARN_ON_ONCE(lim->max_open_zones) ||
                    WARN_ON_ONCE(lim->max_active_zones) ||
                    WARN_ON_ONCE(lim->zone_write_granularity) ||
                    WARN_ON_ONCE(lim->max_zone_append_sectors))
                        return -EINVAL;
                return 0;
        }

        if (WARN_ON_ONCE(!IS_ENABLED(CONFIG_BLK_DEV_ZONED)))
                return -EINVAL;

        /*
         * Given that active zones include open zones, the maximum number of
         * open zones cannot be larger than the maximum number of active zones.
         */
        if (lim->max_active_zones &&
            lim->max_open_zones > lim->max_active_zones)
                return -EINVAL;

        if (lim->zone_write_granularity < lim->logical_block_size)
                lim->zone_write_granularity = lim->logical_block_size;

        if (lim->max_zone_append_sectors) {
                /*
                 * The Zone Append size is limited by the maximum I/O size
                 * and the zone size given that it can't span zones.
                 */
                lim->max_zone_append_sectors =
                        min3(lim->max_hw_sectors,
                             lim->max_zone_append_sectors,
                             lim->chunk_sectors);
        }

        return 0;
}
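
A worked example of the min3() cap, with illustrative values:

/*
 * max_hw_sectors = 2048, max_zone_append_sectors = 65536,
 * chunk_sectors (zone size) = 524288:
 *
 *   max_zone_append_sectors = min3(2048, 65536, 524288) = 2048
 *
 * i.e. a Zone Append command is still bounded by the controller's
 * maximum I/O size, even on devices with very large zones.
 */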

static int blk_validate_integrity_limits(struct queue_limits *lim)
{
        struct blk_integrity *bi = &lim->integrity;

        if (!bi->tuple_size) {
                if (bi->csum_type != BLK_INTEGRITY_CSUM_NONE ||
                    bi->tag_size || ((bi->flags & BLK_INTEGRITY_REF_TAG))) {
                        pr_warn("invalid PI settings.\n");
                        return -EINVAL;
                }
                return 0;
        }

        if (!IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY)) {
                pr_warn("integrity support disabled.\n");
                return -EINVAL;
        }

        if (bi->csum_type == BLK_INTEGRITY_CSUM_NONE &&
            (bi->flags & BLK_INTEGRITY_REF_TAG)) {
                pr_warn("ref tag not supported without checksum.\n");
                return -EINVAL;
        }

        if (!bi->interval_exp)
                bi->interval_exp = ilog2(lim->logical_block_size);

        return 0;
}

/*
 * Returns the maximum number of bytes guaranteed to fit in a bio.
 *
 * We require that an atomic_write is submitted as an ITER_UBUF iov_iter
 * (so a single vector), so we can assume that at least PAGE_SIZE fits in
 * each segment, apart from the first and last segments.
 */
static unsigned int blk_queue_max_guaranteed_bio(struct queue_limits *lim)
{
        unsigned int max_segments = min(BIO_MAX_VECS, lim->max_segments);
        unsigned int length;

        length = min(max_segments, 2) * lim->logical_block_size;
        if (max_segments > 2)
                length += (max_segments - 2) * PAGE_SIZE;

        return length;
}
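
With concrete numbers (BIO_MAX_VECS is 256 in current kernels; the rest are illustrative):

/*
 * max_segments = min(256, 128) = 128, logical_block_size = 512,
 * PAGE_SIZE = 4096:
 *
 *   length = 2 * 512 + (128 - 2) * 4096 = 517120 bytes (505 KiB)
 */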

static void blk_atomic_writes_update_limits(struct queue_limits *lim)
{
        unsigned int unit_limit = min(lim->max_hw_sectors << SECTOR_SHIFT,
                                        blk_queue_max_guaranteed_bio(lim));

        unit_limit = rounddown_pow_of_two(unit_limit);

        lim->atomic_write_max_sectors =
                min(lim->atomic_write_hw_max >> SECTOR_SHIFT,
                        lim->max_hw_sectors);
        lim->atomic_write_unit_min =
                min(lim->atomic_write_hw_unit_min, unit_limit);
        lim->atomic_write_unit_max =
                min(lim->atomic_write_hw_unit_max, unit_limit);
        lim->atomic_write_boundary_sectors =
                lim->atomic_write_hw_boundary >> SECTOR_SHIFT;
}

static void blk_validate_atomic_write_limits(struct queue_limits *lim)
{
        unsigned int boundary_sectors;

        if (!lim->atomic_write_hw_max)
                goto unsupported;

        boundary_sectors = lim->atomic_write_hw_boundary >> SECTOR_SHIFT;

        if (boundary_sectors) {
                /*
                 * A feature of boundary support is that it disallows merging
                 * bios when the resulting request would cross either a chunk
                 * sector or atomic write HW boundary, even when chunk sectors
                 * are set purely for performance.
                 * For simplicity, disallow atomic writes for a chunk sector
                 * which is non-zero and smaller than the atomic write HW
                 * boundary.  Furthermore, chunk sectors must be a multiple of
                 * the atomic write HW boundary; otherwise boundary support
                 * becomes complicated.
                 * Devices which do not conform to these rules can be dealt
                 * with if and when they show up.
                 */
                if (WARN_ON_ONCE(lim->chunk_sectors % boundary_sectors))
                        goto unsupported;

                /*
                 * The boundary size just needs to be a multiple of unit_max
                 * (and not necessarily a power-of-2), so the following check
                 * could be relaxed in the future.
                 * Furthermore, if needed, unit_max could even be reduced so
                 * that it is compliant with a !power-of-2 boundary.
                 */
                if (!is_power_of_2(boundary_sectors))
                        goto unsupported;
        }

        blk_atomic_writes_update_limits(lim);
        return;

unsupported:
        lim->atomic_write_max_sectors = 0;
        lim->atomic_write_boundary_sectors = 0;
        lim->atomic_write_unit_min = 0;
        lim->atomic_write_unit_max = 0;
}
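
Tracing blk_atomic_writes_update_limits() with the numbers from the previous example (all values illustrative):

/*
 * max_hw_sectors = 2048 (1 MiB), blk_queue_max_guaranteed_bio() = 517120:
 *
 *   unit_limit = min(2048 << 9, 517120)  = 517120
 *   rounddown_pow_of_two(517120)         = 262144 (256 KiB)
 *
 * so atomic_write_unit_min/max are clamped to at most 256 KiB here.
 */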

/*
 * Check that the limits in lim are valid, initialize defaults for unset
 * values, and cap values based on others where needed.
 */
static int blk_validate_limits(struct queue_limits *lim)
{
        unsigned int max_hw_sectors;
        unsigned int logical_block_sectors;
        int err;

        /*
         * Unless otherwise specified, default to 512 byte logical blocks and a
         * physical block size equal to the logical block size.
         */
        if (!lim->logical_block_size)
                lim->logical_block_size = SECTOR_SIZE;
        else if (blk_validate_block_size(lim->logical_block_size)) {
                pr_warn("Invalid logical block size (%d)\n", lim->logical_block_size);
                return -EINVAL;
        }
        if (lim->physical_block_size < lim->logical_block_size)
                lim->physical_block_size = lim->logical_block_size;

        /*
         * The minimum I/O size defaults to the physical block size unless
         * explicitly overridden.
         */
        if (lim->io_min < lim->physical_block_size)
                lim->io_min = lim->physical_block_size;

        /*
         * max_hw_sectors has a somewhat weird default for historical reasons,
         * but drivers really should set their own instead of relying on this
         * value.
         *
         * The block layer relies on the fact that every driver can
         * handle at least a page worth of data per I/O, and needs the value
         * aligned to the logical block size.
         */
        if (!lim->max_hw_sectors)
                lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS;
        if (WARN_ON_ONCE(lim->max_hw_sectors < PAGE_SECTORS))
                return -EINVAL;
        logical_block_sectors = lim->logical_block_size >> SECTOR_SHIFT;
        if (WARN_ON_ONCE(logical_block_sectors > lim->max_hw_sectors))
                return -EINVAL;
        lim->max_hw_sectors = round_down(lim->max_hw_sectors,
                        logical_block_sectors);

        /*
         * The actual max_sectors value is a complex beast and also takes the
         * max_dev_sectors value (set by SCSI ULPs) and a user configurable
         * value into account.  The ->max_sectors value is always calculated
         * from these, so directly setting it won't have any effect.
         */
        max_hw_sectors = min_not_zero(lim->max_hw_sectors,
                                lim->max_dev_sectors);
        if (lim->max_user_sectors) {
                if (lim->max_user_sectors < PAGE_SIZE / SECTOR_SIZE)
                        return -EINVAL;
                lim->max_sectors = min(max_hw_sectors, lim->max_user_sectors);
        } else if (lim->io_opt > (BLK_DEF_MAX_SECTORS_CAP << SECTOR_SHIFT)) {
                lim->max_sectors =
                        min(max_hw_sectors, lim->io_opt >> SECTOR_SHIFT);
        } else if (lim->io_min > (BLK_DEF_MAX_SECTORS_CAP << SECTOR_SHIFT)) {
                lim->max_sectors =
                        min(max_hw_sectors, lim->io_min >> SECTOR_SHIFT);
        } else {
                lim->max_sectors = min(max_hw_sectors, BLK_DEF_MAX_SECTORS_CAP);
        }
        lim->max_sectors = round_down(lim->max_sectors,
                        logical_block_sectors);

        /*
         * Random default for the maximum number of segments.  Drivers should
         * not rely on this and should set their own.
         */
        if (!lim->max_segments)
                lim->max_segments = BLK_MAX_SEGMENTS;

        lim->max_discard_sectors =
                min(lim->max_hw_discard_sectors, lim->max_user_discard_sectors);

        if (!lim->max_discard_segments)
                lim->max_discard_segments = 1;

        if (lim->discard_granularity < lim->physical_block_size)
                lim->discard_granularity = lim->physical_block_size;

        /*
         * By default there is no limit on the segment boundary alignment,
         * but if there is one it can't be smaller than the page size as
         * that would break all the normal I/O patterns.
         */
        if (!lim->seg_boundary_mask)
                lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
        if (WARN_ON_ONCE(lim->seg_boundary_mask < PAGE_SIZE - 1))
                return -EINVAL;

        /*
         * Stacking devices may have both a virtual boundary and a max segment
         * size limit, so allow this setting now.  Long-term the two might need
         * to move out of the stacking limits, since we have immutable bvecs
         * and lower layer bio splitting is supposed to handle the two
         * correctly.
         */
        if (lim->virt_boundary_mask) {
                if (!lim->max_segment_size)
                        lim->max_segment_size = UINT_MAX;
        } else {
                /*
                 * The maximum segment size has an odd historic 64k default that
                 * drivers probably should override.  Just like the I/O size we
                 * require drivers to at least handle a full page per segment.
                 */
                if (!lim->max_segment_size)
                        lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
                if (WARN_ON_ONCE(lim->max_segment_size < PAGE_SIZE))
                        return -EINVAL;
        }

        /*
         * We require drivers to at least do logical block aligned I/O, but
         * historically could not check for that due to the separate calls
         * to set the limits.  Once the transition is finished the check
         * below should be narrowed down to check the logical block size.
         */
        if (!lim->dma_alignment)
                lim->dma_alignment = SECTOR_SIZE - 1;
        if (WARN_ON_ONCE(lim->dma_alignment > PAGE_SIZE))
                return -EINVAL;

        if (lim->alignment_offset) {
                lim->alignment_offset &= (lim->physical_block_size - 1);
                lim->flags &= ~BLK_FLAG_MISALIGNED;
        }

        if (!(lim->features & BLK_FEAT_WRITE_CACHE))
                lim->features &= ~BLK_FEAT_FUA;

        blk_validate_atomic_write_limits(lim);

        err = blk_validate_integrity_limits(lim);
        if (err)
                return err;
        return blk_validate_zoned_limits(lim);
}
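
A worked example of the max_sectors derivation above (BLK_DEF_MAX_SECTORS_CAP is 2560 sectors in current kernels; the other values are illustrative):

/*
 * logical_block_size = 4096 (8 sectors), max_hw_sectors = 65535,
 * no max_user_sectors, io_min/io_opt below the cap:
 *
 *   max_hw_sectors = round_down(65535, 8)  = 65528
 *   max_sectors    = min(65528, 2560)      = 2560   (1280 KiB)
 *   max_sectors    = round_down(2560, 8)   = 2560
 */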

/*
 * Set the default limits for a newly allocated queue.  @lim contains the
 * initial limits set by the driver, which could be no limit in which case
 * all fields are cleared to zero.
 */
int blk_set_default_limits(struct queue_limits *lim)
{
        /*
         * Most defaults are set by capping the bounds in blk_validate_limits,
         * but max_user_discard_sectors is special and needs an explicit
         * initialization to the max value here.
         */
        lim->max_user_discard_sectors = UINT_MAX;
        return blk_validate_limits(lim);
}

/**
 * queue_limits_commit_update - commit an atomic update of queue limits
 * @q:          queue to update
 * @lim:        limits to apply
 *
 * Apply the limits in @lim that were obtained from queue_limits_start_update()
 * and updated by the caller to @q.
 *
 * Returns 0 if successful, else a negative error code.
 */
int queue_limits_commit_update(struct request_queue *q,
                struct queue_limits *lim)
{
        int error;

        error = blk_validate_limits(lim);
        if (error)
                goto out_unlock;

#ifdef CONFIG_BLK_INLINE_ENCRYPTION
        if (q->crypto_profile && lim->integrity.tag_size) {
                pr_warn("blk-integrity: Integrity and hardware inline encryption are not supported together.\n");
                error = -EINVAL;
                goto out_unlock;
        }
#endif

        q->limits = *lim;
        if (q->disk)
                blk_apply_bdi_limits(q->disk->bdi, lim);
out_unlock:
        mutex_unlock(&q->limits_lock);
        return error;
}
EXPORT_SYMBOL_GPL(queue_limits_commit_update);
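
The intended calling pattern pairs this with queue_limits_start_update(), which takes q->limits_lock and returns a snapshot of the current limits. A minimal driver-side sketch (the new max_hw_sectors value is an arbitrary illustration):

/* Sketch: atomically shrink the maximum I/O size of a live queue. */
struct queue_limits lim;
int err;

lim = queue_limits_start_update(q);        /* locks q->limits_lock */
lim.max_hw_sectors = 1024;                 /* 512 KiB, illustrative */
err = queue_limits_commit_update(q, &lim); /* validates and unlocks */
if (err)
        pr_warn("limit update failed: %d\n", err);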

/**
 * queue_limits_set - apply queue limits to queue
 * @q:          queue to update
 * @lim:        limits to apply
 *
 * Apply the limits in @lim that were freshly initialized to @q.
 * To update existing limits use queue_limits_start_update() and
 * queue_limits_commit_update() instead.
 *
 * Returns 0 if successful, else a negative error code.
 */
int queue_limits_set(struct request_queue *q, struct queue_limits *lim)
{
        mutex_lock(&q->limits_lock);
        return queue_limits_commit_update(q, lim);
}
EXPORT_SYMBOL_GPL(queue_limits_set);

/**
 * blk_limits_io_min - set minimum request size for a device
 * @limits: the queue limits
 * @min:  smallest I/O size in bytes
 *
 * Description:
 *   Some devices have an internal block size bigger than the reported
 *   hardware sector size.  This function can be used to signal the
 *   smallest I/O the device can perform without incurring a performance
 *   penalty.
 */
void blk_limits_io_min(struct queue_limits *limits, unsigned int min)
{
        limits->io_min = min;

        if (limits->io_min < limits->logical_block_size)
                limits->io_min = limits->logical_block_size;

        if (limits->io_min < limits->physical_block_size)
                limits->io_min = limits->physical_block_size;
}
EXPORT_SYMBOL(blk_limits_io_min);

/**
 * blk_limits_io_opt - set optimal request size for a device
 * @limits: the queue limits
 * @opt:  optimal request size in bytes
 *
 * Description:
 *   Storage devices may report an optimal I/O size, which is the
 *   device's preferred unit for sustained I/O.  This is rarely reported
 *   for disk drives.  For RAID arrays it is usually the stripe width or
 *   the internal track size.  A properly aligned multiple of
 *   optimal_io_size is the preferred request size for workloads where
 *   sustained throughput is desired.
 */
void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt)
{
        limits->io_opt = opt;
}
EXPORT_SYMBOL(blk_limits_io_opt);
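
For a striped array the usual convention is io_min = chunk size and io_opt = full stripe width. A hedged sketch, assuming a 4-data-disk array with 64 KiB chunks and a queue_limits structure under preparation:

blk_limits_io_min(&lim, 64 * 1024);        /* one chunk */
blk_limits_io_opt(&lim, 4 * 64 * 1024);    /* full stripe: 256 KiB */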

static int queue_limit_alignment_offset(const struct queue_limits *lim,
                sector_t sector)
{
        unsigned int granularity = max(lim->physical_block_size, lim->io_min);
        unsigned int alignment = sector_div(sector, granularity >> SECTOR_SHIFT)
                << SECTOR_SHIFT;

        return (granularity + lim->alignment_offset - alignment) % granularity;
}
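
Worked example: a partition starting at LBA 63 on a disk with 4096-byte physical blocks (granularity = 4096, alignment_offset = 0):

/*
 *   alignment = (63 % 8) << 9            = 3584 bytes
 *   result    = (4096 + 0 - 3584) % 4096 = 512 bytes
 *
 * i.e. naturally aligned I/O inside this partition starts 512 bytes
 * past the partition start, the classic DOS-partition misalignment.
 */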

static unsigned int queue_limit_discard_alignment(
                const struct queue_limits *lim, sector_t sector)
{
        unsigned int alignment, granularity, offset;

        if (!lim->max_discard_sectors)
                return 0;

        /* Why are these in bytes, not sectors? */
        alignment = lim->discard_alignment >> SECTOR_SHIFT;
        granularity = lim->discard_granularity >> SECTOR_SHIFT;
        if (!granularity)
                return 0;

        /* Offset of the partition start in 'granularity' sectors */
        offset = sector_div(sector, granularity);

        /* And why do we do this modulus *again* in blkdev_issue_discard()? */
        offset = (granularity + alignment - offset) % granularity;

        /* Turn it back into bytes, gaah */
        return offset << SECTOR_SHIFT;
}

static unsigned int blk_round_down_sectors(unsigned int sectors, unsigned int lbs)
{
        sectors = round_down(sectors, lbs >> SECTOR_SHIFT);
        if (sectors < PAGE_SIZE >> SECTOR_SHIFT)
                sectors = PAGE_SIZE >> SECTOR_SHIFT;
        return sectors;
}

/**
 * blk_stack_limits - adjust queue_limits for stacked devices
 * @t:  the stacking driver limits (top device)
 * @b:  the underlying queue limits (bottom, component device)
 * @start:  first data sector within component device
 *
 * Description:
 *    This function is used by stacking drivers like MD and DM to ensure
 *    that all component devices have compatible block sizes and
 *    alignments.  The stacking driver must provide a queue_limits
 *    struct (top) and then iteratively call the stacking function for
 *    all component (bottom) devices.  The stacking function will
 *    attempt to combine the values and ensure proper alignment.
 *
 *    Returns 0 if the top and bottom queue_limits are compatible.  The
 *    top device's block sizes and alignment offsets may be adjusted to
 *    ensure alignment with the bottom device. If no compatible sizes
 *    and alignments exist, -1 is returned and the resulting top
 *    queue_limits will have the misaligned flag set to indicate that
 *    the alignment_offset is undefined.
 */
int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
                     sector_t start)
{
        unsigned int top, bottom, alignment, ret = 0;

        t->features |= (b->features & BLK_FEAT_INHERIT_MASK);

        /*
         * BLK_FEAT_NOWAIT and BLK_FEAT_POLL need to be supported both by the
         * stacking driver and all underlying devices.  The stacking driver sets
         * the flags before stacking the limits, and this will clear the flags
         * if any of the underlying devices does not support it.
         */
        if (!(b->features & BLK_FEAT_NOWAIT))
                t->features &= ~BLK_FEAT_NOWAIT;
        if (!(b->features & BLK_FEAT_POLL))
                t->features &= ~BLK_FEAT_POLL;

        t->flags |= (b->flags & BLK_FLAG_MISALIGNED);

        t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
        t->max_user_sectors = min_not_zero(t->max_user_sectors,
                        b->max_user_sectors);
        t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
        t->max_dev_sectors = min_not_zero(t->max_dev_sectors, b->max_dev_sectors);
        t->max_write_zeroes_sectors = min(t->max_write_zeroes_sectors,
                                        b->max_write_zeroes_sectors);
        t->max_zone_append_sectors = min(queue_limits_max_zone_append_sectors(t),
                                         queue_limits_max_zone_append_sectors(b));

        t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
                                            b->seg_boundary_mask);
        t->virt_boundary_mask = min_not_zero(t->virt_boundary_mask,
                                            b->virt_boundary_mask);

        t->max_segments = min_not_zero(t->max_segments, b->max_segments);
        t->max_discard_segments = min_not_zero(t->max_discard_segments,
                                               b->max_discard_segments);
        t->max_integrity_segments = min_not_zero(t->max_integrity_segments,
                                                 b->max_integrity_segments);

        t->max_segment_size = min_not_zero(t->max_segment_size,
                                           b->max_segment_size);

        alignment = queue_limit_alignment_offset(b, start);

        /* Bottom device has different alignment.  Check that it is
         * compatible with the current top alignment.
         */
        if (t->alignment_offset != alignment) {

                top = max(t->physical_block_size, t->io_min)
                        + t->alignment_offset;
                bottom = max(b->physical_block_size, b->io_min) + alignment;

                /* Verify that top and bottom intervals line up */
                if (max(top, bottom) % min(top, bottom)) {
                        t->flags |= BLK_FLAG_MISALIGNED;
                        ret = -1;
                }
        }

        t->logical_block_size = max(t->logical_block_size,
                                    b->logical_block_size);

        t->physical_block_size = max(t->physical_block_size,
                                     b->physical_block_size);

        t->io_min = max(t->io_min, b->io_min);
        t->io_opt = lcm_not_zero(t->io_opt, b->io_opt);
        t->dma_alignment = max(t->dma_alignment, b->dma_alignment);

        /* Set non-power-of-2 compatible chunk_sectors boundary */
        if (b->chunk_sectors)
                t->chunk_sectors = gcd(t->chunk_sectors, b->chunk_sectors);

        /* Physical block size a multiple of the logical block size? */
        if (t->physical_block_size & (t->logical_block_size - 1)) {
                t->physical_block_size = t->logical_block_size;
                t->flags |= BLK_FLAG_MISALIGNED;
                ret = -1;
        }

        /* Minimum I/O a multiple of the physical block size? */
        if (t->io_min & (t->physical_block_size - 1)) {
                t->io_min = t->physical_block_size;
                t->flags |= BLK_FLAG_MISALIGNED;
                ret = -1;
        }

        /* Optimal I/O a multiple of the physical block size? */
        if (t->io_opt & (t->physical_block_size - 1)) {
                t->io_opt = 0;
                t->flags |= BLK_FLAG_MISALIGNED;
                ret = -1;
        }

        /* chunk_sectors a multiple of the physical block size? */
        if ((t->chunk_sectors << 9) & (t->physical_block_size - 1)) {
                t->chunk_sectors = 0;
                t->flags |= BLK_FLAG_MISALIGNED;
                ret = -1;
        }

        /* Find lowest common alignment_offset */
        t->alignment_offset = lcm_not_zero(t->alignment_offset, alignment)
                % max(t->physical_block_size, t->io_min);

        /* Verify that new alignment_offset is on a logical block boundary */
        if (t->alignment_offset & (t->logical_block_size - 1)) {
                t->flags |= BLK_FLAG_MISALIGNED;
                ret = -1;
        }

        t->max_sectors = blk_round_down_sectors(t->max_sectors, t->logical_block_size);
        t->max_hw_sectors = blk_round_down_sectors(t->max_hw_sectors, t->logical_block_size);
        t->max_dev_sectors = blk_round_down_sectors(t->max_dev_sectors, t->logical_block_size);

        /* Discard alignment and granularity */
        if (b->discard_granularity) {
                alignment = queue_limit_discard_alignment(b, start);

                t->max_discard_sectors = min_not_zero(t->max_discard_sectors,
                                                      b->max_discard_sectors);
                t->max_hw_discard_sectors = min_not_zero(t->max_hw_discard_sectors,
                                                         b->max_hw_discard_sectors);
                t->discard_granularity = max(t->discard_granularity,
                                             b->discard_granularity);
                t->discard_alignment = lcm_not_zero(t->discard_alignment, alignment) %
                        t->discard_granularity;
        }
        t->max_secure_erase_sectors = min_not_zero(t->max_secure_erase_sectors,
                                                   b->max_secure_erase_sectors);
        t->zone_write_granularity = max(t->zone_write_granularity,
                                        b->zone_write_granularity);
        if (!(t->features & BLK_FEAT_ZONED)) {
                t->zone_write_granularity = 0;
                t->max_zone_append_sectors = 0;
        }
        return ret;
}
EXPORT_SYMBOL(blk_stack_limits);
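
Two of the combination rules deserve a worked example (illustrative values): io_opt is combined with lcm() so the top value is a multiple of every bottom value, while chunk_sectors is combined with gcd() so every bottom boundary is also a top boundary:

/*
 *   io_opt:        lcm(65536, 49152) = 196608 (192 KiB)
 *   chunk_sectors: gcd(1024, 768)    = 256 sectors
 */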

/**
 * queue_limits_stack_bdev - adjust queue_limits for stacked devices
 * @t:  the stacking driver limits (top device)
 * @bdev:  the underlying block device (bottom)
 * @offset:  offset to beginning of data within component device
 * @pfx: prefix to use for warnings logged
 *
 * Description:
 *    This function is used by stacking drivers like MD and DM to ensure
 *    that all component devices have compatible block sizes and
 *    alignments.  The stacking driver must provide a queue_limits
 *    struct (top) and then iteratively call the stacking function for
 *    all component (bottom) devices.  The stacking function will
 *    attempt to combine the values and ensure proper alignment.
 */
void queue_limits_stack_bdev(struct queue_limits *t, struct block_device *bdev,
                sector_t offset, const char *pfx)
{
        if (blk_stack_limits(t, &bdev_get_queue(bdev)->limits,
                        get_start_sect(bdev) + offset))
                pr_notice("%s: Warning: Device %pg is misaligned\n",
                        pfx, bdev);
}
EXPORT_SYMBOL_GPL(queue_limits_stack_bdev);

/**
 * queue_limits_stack_integrity - stack integrity profile
 * @t: target queue limits
 * @b: base queue limits
 *
 * Check if the integrity profile in @b can be stacked into the
 * target @t.  Stacking is possible if either:
 *
 *   a) @t does not have any integrity information stacked into it yet
 *   b) the integrity profile in @b is identical to the one in @t
 *
 * If @b can be stacked into @t, return %true.  Else return %false and clear the
 * integrity information in @t.
 */
bool queue_limits_stack_integrity(struct queue_limits *t,
                struct queue_limits *b)
{
        struct blk_integrity *ti = &t->integrity;
        struct blk_integrity *bi = &b->integrity;

        if (!IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY))
                return true;

        if (!ti->tuple_size) {
                /* inherit the settings from the first underlying device */
                if (!(ti->flags & BLK_INTEGRITY_STACKED)) {
                        ti->flags = BLK_INTEGRITY_DEVICE_CAPABLE |
                                (bi->flags & BLK_INTEGRITY_REF_TAG);
                        ti->csum_type = bi->csum_type;
                        ti->tuple_size = bi->tuple_size;
                        ti->pi_offset = bi->pi_offset;
                        ti->interval_exp = bi->interval_exp;
                        ti->tag_size = bi->tag_size;
                        goto done;
                }
                if (!bi->tuple_size)
                        goto done;
        }

        if (ti->tuple_size != bi->tuple_size)
                goto incompatible;
        if (ti->interval_exp != bi->interval_exp)
                goto incompatible;
        if (ti->tag_size != bi->tag_size)
                goto incompatible;
        if (ti->csum_type != bi->csum_type)
                goto incompatible;
        if ((ti->flags & BLK_INTEGRITY_REF_TAG) !=
            (bi->flags & BLK_INTEGRITY_REF_TAG))
                goto incompatible;

done:
        ti->flags |= BLK_INTEGRITY_STACKED;
        return true;

incompatible:
        memset(ti, 0, sizeof(*ti));
        return false;
}
EXPORT_SYMBOL_GPL(queue_limits_stack_integrity);
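
A stacking driver calls this once per component device; a minimal sketch, assuming hypothetical top limits t and component limits b1/b2:

if (!queue_limits_stack_integrity(&t, &b1) ||
    !queue_limits_stack_integrity(&t, &b2))
        pr_warn("integrity profiles differ, PI disabled on top device\n");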

/**
 * blk_set_queue_depth - tell the block layer about the device queue depth
 * @q:          the request queue for the device
 * @depth:      queue depth
 */
void blk_set_queue_depth(struct request_queue *q, unsigned int depth)
{
        q->queue_depth = depth;
        rq_qos_queue_depth_changed(q);
}
EXPORT_SYMBOL(blk_set_queue_depth);

int bdev_alignment_offset(struct block_device *bdev)
{
        struct request_queue *q = bdev_get_queue(bdev);

        if (q->limits.flags & BLK_FLAG_MISALIGNED)
                return -1;
        if (bdev_is_partition(bdev))
                return queue_limit_alignment_offset(&q->limits,
                                bdev->bd_start_sect);
        return q->limits.alignment_offset;
}
EXPORT_SYMBOL_GPL(bdev_alignment_offset);

unsigned int bdev_discard_alignment(struct block_device *bdev)
{
        struct request_queue *q = bdev_get_queue(bdev);

        if (bdev_is_partition(bdev))
                return queue_limit_discard_alignment(&q->limits,
                                bdev->bd_start_sect);
        return q->limits.discard_alignment;
}
EXPORT_SYMBOL_GPL(bdev_discard_alignment);
