TOMOYO Linux Cross Reference
Linux/block/blk-lib.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Generic block layer helper functions.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"

static sector_t bio_discard_limit(struct block_device *bdev, sector_t sector)
{
	unsigned int discard_granularity = bdev_discard_granularity(bdev);
	sector_t granularity_aligned_sector;

	if (bdev_is_partition(bdev))
		sector += bdev->bd_start_sect;

	granularity_aligned_sector =
		round_up(sector, discard_granularity >> SECTOR_SHIFT);

	/*
	 * Make sure subsequent bios start aligned to the discard granularity if
	 * it needs to be split.
	 */
	if (granularity_aligned_sector != sector)
		return granularity_aligned_sector - sector;

	/*
	 * Align the bio size to the discard granularity to make splitting the bio
	 * at discard granularity boundaries easier in the driver if needed.
	 */
	return round_down(UINT_MAX, discard_granularity) >> SECTOR_SHIFT;
}

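/*
 * Worked example (editorial addition, not part of blk-lib.c): assume a
 * 1 MiB discard granularity, i.e. 2048 sectors, and a starting sector of
 * 3000.  round_up(3000, 2048) == 4096, so the first bio is capped at
 * 4096 - 3000 == 1096 sectors; every subsequent bio for the range then
 * starts on a granularity boundary.
 */
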
struct bio *blk_alloc_discard_bio(struct block_device *bdev,
		sector_t *sector, sector_t *nr_sects, gfp_t gfp_mask)
{
	sector_t bio_sects = min(*nr_sects, bio_discard_limit(bdev, *sector));
	struct bio *bio;

	if (!bio_sects)
		return NULL;

	bio = bio_alloc(bdev, 0, REQ_OP_DISCARD, gfp_mask);
	if (!bio)
		return NULL;
	bio->bi_iter.bi_sector = *sector;
	bio->bi_iter.bi_size = bio_sects << SECTOR_SHIFT;
	*sector += bio_sects;
	*nr_sects -= bio_sects;
	/*
	 * We can loop for a long time in here if someone does full device
	 * discards (like mkfs).  Be nice and allow us to schedule out to
	 * avoid soft lockups if preemption is disabled.
	 */
	cond_resched();
	return bio;
}

int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct bio **biop)
{
	struct bio *bio;

	while ((bio = blk_alloc_discard_bio(bdev, &sector, &nr_sects,
			gfp_mask)))
		*biop = bio_chain_and_submit(*biop, bio);
	return 0;
}
EXPORT_SYMBOL(__blkdev_issue_discard);

/**
 * blkdev_issue_discard - queue a discard
 * @bdev:	blockdev to issue discard for
 * @sector:	start sector
 * @nr_sects:	number of sectors to discard
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 *
 * Description:
 *    Issue a discard request for the sectors in question.
 */
int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask)
{
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);
	ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, &bio);
	if (!ret && bio) {
		ret = submit_bio_wait(bio);
		if (ret == -EOPNOTSUPP)
			ret = 0;
		bio_put(bio);
	}
	blk_finish_plug(&plug);

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_discard);

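/*
 * Usage sketch (editorial addition, not part of blk-lib.c): a minimal
 * caller discarding a whole device, e.g. mkfs-style code.  Note that an
 * unsupported discard is folded into success above (-EOPNOTSUPP becomes
 * 0), so callers must not assume the data is gone afterwards.
 */
static int __maybe_unused example_discard_whole_bdev(struct block_device *bdev)
{
	/* Synchronous helper: plugs, chains and waits internally. */
	return blkdev_issue_discard(bdev, 0, bdev_nr_sectors(bdev),
				    GFP_KERNEL);
}
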
static sector_t bio_write_zeroes_limit(struct block_device *bdev)
{
	sector_t bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;

	return min(bdev_write_zeroes_sectors(bdev),
		(UINT_MAX >> SECTOR_SHIFT) & ~bs_mask);
}

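/*
 * Worked example (editorial addition, not part of blk-lib.c): with 4 KiB
 * logical blocks, bs_mask == 7, so the per-bio cap of
 * UINT_MAX >> SECTOR_SHIFT == 8388607 sectors is rounded down to 8388600
 * before being compared against the device's write-zeroes limit.
 */
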
/*
 * There is no reliable way for the SCSI subsystem to determine whether a
 * device supports a WRITE SAME operation without actually performing a write
 * to media. As a result, write_zeroes is enabled by default and will be
 * disabled if a zeroing operation subsequently fails. This means that this
 * queue limit is likely to change at runtime.
 */
static void __blkdev_issue_write_zeroes(struct block_device *bdev,
		sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
		struct bio **biop, unsigned flags, sector_t limit)
{
	while (nr_sects) {
		unsigned int len = min(nr_sects, limit);
		struct bio *bio;

		if ((flags & BLKDEV_ZERO_KILLABLE) &&
		    fatal_signal_pending(current))
			break;

		bio = bio_alloc(bdev, 0, REQ_OP_WRITE_ZEROES, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		if (flags & BLKDEV_ZERO_NOUNMAP)
			bio->bi_opf |= REQ_NOUNMAP;

		bio->bi_iter.bi_size = len << SECTOR_SHIFT;
		*biop = bio_chain_and_submit(*biop, bio);

		nr_sects -= len;
		sector += len;
		cond_resched();
	}
}

static int blkdev_issue_write_zeroes(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp, unsigned flags)
{
	sector_t limit = bio_write_zeroes_limit(bdev);
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret = 0;

	blk_start_plug(&plug);
	__blkdev_issue_write_zeroes(bdev, sector, nr_sects, gfp, &bio,
			flags, limit);
	if (bio) {
		if ((flags & BLKDEV_ZERO_KILLABLE) &&
		    fatal_signal_pending(current)) {
			bio_await_chain(bio);
			blk_finish_plug(&plug);
			return -EINTR;
		}
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}
	blk_finish_plug(&plug);

	/*
	 * For some devices there is no non-destructive way to verify whether
	 * WRITE ZEROES is actually supported.  These will clear the capability
	 * on an I/O error, in which case we'll turn any error into
	 * "not supported" here.
	 */
	if (ret && !bdev_write_zeroes_sectors(bdev))
		return -EOPNOTSUPP;
	return ret;
}

/*
 * Convert a number of 512B sectors to a number of pages.
 * The result is limited to a number of pages that can fit into a BIO.
 * Also make sure that the result is always at least 1 (page) for the cases
 * where nr_sects is lower than the number of sectors in a page.
 */
static unsigned int __blkdev_sectors_to_bio_pages(sector_t nr_sects)
{
	sector_t pages = DIV_ROUND_UP_SECTOR_T(nr_sects, PAGE_SIZE / 512);

	return min(pages, (sector_t)BIO_MAX_VECS);
}

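/*
 * Worked example (editorial addition, not part of blk-lib.c): with 4 KiB
 * pages there are 8 sectors per page, so nr_sects == 7 still yields one
 * page, while a multi-megabyte range is clamped to BIO_MAX_VECS (256)
 * pages per bio and handled over several loop iterations below.
 */
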
static void __blkdev_issue_zero_pages(struct block_device *bdev,
		sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
		struct bio **biop, unsigned int flags)
{
	while (nr_sects) {
		unsigned int nr_vecs = __blkdev_sectors_to_bio_pages(nr_sects);
		struct bio *bio;

		bio = bio_alloc(bdev, nr_vecs, REQ_OP_WRITE, gfp_mask);
		bio->bi_iter.bi_sector = sector;

		if ((flags & BLKDEV_ZERO_KILLABLE) &&
		    fatal_signal_pending(current))
			break;

		do {
			unsigned int len, added;

			len = min_t(sector_t,
				PAGE_SIZE, nr_sects << SECTOR_SHIFT);
			added = bio_add_page(bio, ZERO_PAGE(0), len, 0);
			if (added < len)
				break;
			nr_sects -= added >> SECTOR_SHIFT;
			sector += added >> SECTOR_SHIFT;
		} while (nr_sects);

		*biop = bio_chain_and_submit(*biop, bio);
		cond_resched();
	}
}

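/*
 * Note (editorial addition, not part of blk-lib.c): every bio_vec above
 * points at the shared kernel ZERO_PAGE(0), so zeroing an arbitrarily
 * large range allocates no data buffers; only bio and bio_vec metadata
 * is allocated per chunk.
 */
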
static int blkdev_issue_zero_pages(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp, unsigned flags)
{
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret = 0;

	if (flags & BLKDEV_ZERO_NOFALLBACK)
		return -EOPNOTSUPP;

	blk_start_plug(&plug);
	__blkdev_issue_zero_pages(bdev, sector, nr_sects, gfp, &bio, flags);
	if (bio) {
		if ((flags & BLKDEV_ZERO_KILLABLE) &&
		    fatal_signal_pending(current)) {
			bio_await_chain(bio);
			blk_finish_plug(&plug);
			return -EINTR;
		}
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}
	blk_finish_plug(&plug);

	return ret;
}

/**
 * __blkdev_issue_zeroout - generate a number of zero-filled write bios
 * @bdev:	blockdev to issue against
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @biop:	pointer to anchor bio
 * @flags:	controls detailed behavior
 *
 * Description:
 *  Zero-fill a block range, either using hardware offload or by explicitly
 *  writing zeroes to the device.
 *
 *  If a device is using logical block provisioning, the underlying space will
 *  not be released if %flags contains BLKDEV_ZERO_NOUNMAP.
 *
 *  If %flags contains BLKDEV_ZERO_NOFALLBACK, the function will return
 *  -EOPNOTSUPP if no explicit hardware offload for zeroing is provided.
 */
int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct bio **biop,
		unsigned flags)
{
	sector_t limit = bio_write_zeroes_limit(bdev);

	if (bdev_read_only(bdev))
		return -EPERM;

	if (limit) {
		__blkdev_issue_write_zeroes(bdev, sector, nr_sects,
				gfp_mask, biop, flags, limit);
	} else {
		if (flags & BLKDEV_ZERO_NOFALLBACK)
			return -EOPNOTSUPP;
		__blkdev_issue_zero_pages(bdev, sector, nr_sects, gfp_mask,
				biop, flags);
	}
	return 0;
}
EXPORT_SYMBOL(__blkdev_issue_zeroout);

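/*
 * Usage sketch (editorial addition, not part of blk-lib.c): the *biop
 * interface lets a caller batch several ranges into one bio chain under a
 * single plug and wait only once.  example_zeroout_two_ranges() below is
 * a hypothetical caller, not a kernel API.
 */
static int __maybe_unused example_zeroout_two_ranges(struct block_device *bdev,
		sector_t a, sector_t b, sector_t len)
{
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);
	ret = __blkdev_issue_zeroout(bdev, a, len, GFP_KERNEL, &bio, 0);
	if (!ret)
		ret = __blkdev_issue_zeroout(bdev, b, len, GFP_KERNEL, &bio, 0);
	if (bio) {
		/* Wait even for a partially built chain on error. */
		int err = submit_bio_wait(bio);

		if (!ret)
			ret = err;
		bio_put(bio);
	}
	blk_finish_plug(&plug);
	return ret;
}
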
/**
 * blkdev_issue_zeroout - zero-fill a block range
 * @bdev:	blockdev to write
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @flags:	controls detailed behavior
 *
 * Description:
 *  Zero-fill a block range, either using hardware offload or by explicitly
 *  writing zeroes to the device.  See __blkdev_issue_zeroout() for the
 *  valid values for %flags.
 */
int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned flags)
{
	int ret;

	if ((sector | nr_sects) & ((bdev_logical_block_size(bdev) >> 9) - 1))
		return -EINVAL;
	if (bdev_read_only(bdev))
		return -EPERM;

	if (bdev_write_zeroes_sectors(bdev)) {
		ret = blkdev_issue_write_zeroes(bdev, sector, nr_sects,
				gfp_mask, flags);
		if (ret != -EOPNOTSUPP)
			return ret;
	}

	return blkdev_issue_zero_pages(bdev, sector, nr_sects, gfp_mask, flags);
}
EXPORT_SYMBOL(blkdev_issue_zeroout);

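/*
 * Usage sketch (editorial addition, not part of blk-lib.c): a caller that
 * wants zeroing only when the device can offload it passes
 * BLKDEV_ZERO_NOFALLBACK; -EOPNOTSUPP then means only the page-writing
 * fallback was available.
 */
static int __maybe_unused example_zeroout_offload_only(struct block_device *bdev,
		sector_t sector, sector_t nr_sects)
{
	return blkdev_issue_zeroout(bdev, sector, nr_sects, GFP_KERNEL,
				    BLKDEV_ZERO_NOFALLBACK);
}
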
int blkdev_issue_secure_erase(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp)
{
	sector_t bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	unsigned int max_sectors = bdev_max_secure_erase_sectors(bdev);
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret = 0;

	/* make sure that "len << SECTOR_SHIFT" doesn't overflow */
	if (max_sectors > UINT_MAX >> SECTOR_SHIFT)
		max_sectors = UINT_MAX >> SECTOR_SHIFT;
	max_sectors &= ~bs_mask;

	if (max_sectors == 0)
		return -EOPNOTSUPP;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;
	if (bdev_read_only(bdev))
		return -EPERM;

	blk_start_plug(&plug);
	while (nr_sects) {
		unsigned int len = min_t(sector_t, nr_sects, max_sectors);

		bio = blk_next_bio(bio, bdev, 0, REQ_OP_SECURE_ERASE, gfp);
		bio->bi_iter.bi_sector = sector;
		bio->bi_iter.bi_size = len << SECTOR_SHIFT;

		sector += len;
		nr_sects -= len;
		cond_resched();
	}
	if (bio) {
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}
	blk_finish_plug(&plug);

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_secure_erase);

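/*
 * Usage sketch (editorial addition, not part of blk-lib.c): callers must
 * pass a range aligned to the logical block size and should expect
 * -EOPNOTSUPP from devices without secure-erase support.
 */
static int __maybe_unused example_secure_erase_whole_bdev(struct block_device *bdev)
{
	return blkdev_issue_secure_erase(bdev, 0, bdev_nr_sectors(bdev),
					 GFP_KERNEL);
}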
