// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to generic helpers functions
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"

static sector_t bio_discard_limit(struct block_device *bdev, sector_t sector)
{
	unsigned int discard_granularity = bdev_discard_granularity(bdev);
	sector_t granularity_aligned_sector;

	if (bdev_is_partition(bdev))
		sector += bdev->bd_start_sect;

	granularity_aligned_sector =
		round_up(sector, discard_granularity >> SECTOR_SHIFT);

	/*
	 * Make sure subsequent bios start aligned to the discard granularity if
	 * it needs to be split.
	 */
	if (granularity_aligned_sector != sector)
		return granularity_aligned_sector - sector;

	/*
	 * Align the bio size to the discard granularity to make splitting the bio
	 * at discard granularity boundaries easier in the driver if needed.
	 */
	return round_down(UINT_MAX, discard_granularity) >> SECTOR_SHIFT;
}

struct bio *blk_alloc_discard_bio(struct block_device *bdev,
		sector_t *sector, sector_t *nr_sects, gfp_t gfp_mask)
{
	sector_t bio_sects = min(*nr_sects, bio_discard_limit(bdev, *sector));
	struct bio *bio;

	if (!bio_sects)
		return NULL;

	bio = bio_alloc(bdev, 0, REQ_OP_DISCARD, gfp_mask);
	if (!bio)
		return NULL;
	bio->bi_iter.bi_sector = *sector;
	bio->bi_iter.bi_size = bio_sects << SECTOR_SHIFT;
	*sector += bio_sects;
	*nr_sects -= bio_sects;
	/*
	 * We can loop for a long time in here if someone does full device
	 * discards (like mkfs).  Be nice and allow us to schedule out to avoid
	 * softlocking if preempt is disabled.
	 */
	cond_resched();
	return bio;
}

int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct bio **biop)
{
	struct bio *bio;

	while ((bio = blk_alloc_discard_bio(bdev, &sector, &nr_sects,
			gfp_mask)))
		*biop = bio_chain_and_submit(*biop, bio);
	return 0;
}
EXPORT_SYMBOL(__blkdev_issue_discard);

/**
 * blkdev_issue_discard - queue a discard
 * @bdev:	blockdev to issue discard for
 * @sector:	start sector
 * @nr_sects:	number of sectors to discard
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 *
 * Description:
 *    Issue a discard request for the sectors in question.
 */
int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask)
{
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);
	ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, &bio);
	if (!ret && bio) {
		ret = submit_bio_wait(bio);
		if (ret == -EOPNOTSUPP)
			ret = 0;
		bio_put(bio);
	}
	blk_finish_plug(&plug);

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_discard);
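/*
 * Usage sketch (illustrative only, not part of this file): a typical caller
 * converts a byte range to 512B sectors and lets blkdev_issue_discard()
 * handle plugging, splitting against the device limits via
 * bio_discard_limit(), and waiting for the whole bio chain.  The function
 * name below is hypothetical.
 */
#if 0	/* example only */
static int example_discard_byte_range(struct block_device *bdev,
		loff_t start, loff_t len)
{
	/* Caller is responsible for 512B alignment of the byte range. */
	return blkdev_issue_discard(bdev, start >> SECTOR_SHIFT,
			len >> SECTOR_SHIFT, GFP_KERNEL);
}
#endif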
static sector_t bio_write_zeroes_limit(struct block_device *bdev)
{
	sector_t bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;

	return min(bdev_write_zeroes_sectors(bdev),
		(UINT_MAX >> SECTOR_SHIFT) & ~bs_mask);
}

/*
 * There is no reliable way for the SCSI subsystem to determine whether a
 * device supports a WRITE SAME operation without actually performing a write
 * to media. As a result, write_zeroes is enabled by default and will be
 * disabled if a zeroing operation subsequently fails. This means that this
 * queue limit is likely to change at runtime.
 */
static void __blkdev_issue_write_zeroes(struct block_device *bdev,
		sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
		struct bio **biop, unsigned flags, sector_t limit)
{

	while (nr_sects) {
		unsigned int len = min(nr_sects, limit);
		struct bio *bio;

		if ((flags & BLKDEV_ZERO_KILLABLE) &&
		    fatal_signal_pending(current))
			break;

		bio = bio_alloc(bdev, 0, REQ_OP_WRITE_ZEROES, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		if (flags & BLKDEV_ZERO_NOUNMAP)
			bio->bi_opf |= REQ_NOUNMAP;

		bio->bi_iter.bi_size = len << SECTOR_SHIFT;
		*biop = bio_chain_and_submit(*biop, bio);

		nr_sects -= len;
		sector += len;
		cond_resched();
	}
}

static int blkdev_issue_write_zeroes(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp, unsigned flags)
{
	sector_t limit = bio_write_zeroes_limit(bdev);
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret = 0;

	blk_start_plug(&plug);
	__blkdev_issue_write_zeroes(bdev, sector, nr_sects, gfp, &bio,
			flags, limit);
	if (bio) {
		if ((flags & BLKDEV_ZERO_KILLABLE) &&
		    fatal_signal_pending(current)) {
			bio_await_chain(bio);
			blk_finish_plug(&plug);
			return -EINTR;
		}
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}
	blk_finish_plug(&plug);

	/*
	 * For some devices there is no non-destructive way to verify whether
	 * WRITE ZEROES is actually supported.  These will clear the capability
	 * on an I/O error, in which case we'll turn any error into
	 * "not supported" here.
	 */
	if (ret && !bdev_write_zeroes_sectors(bdev))
		return -EOPNOTSUPP;
	return ret;
}

/*
 * Convert a number of 512B sectors to a number of pages.
 * The result is limited to a number of pages that can fit into a BIO.
 * Also make sure that the result is always at least 1 (page) for the cases
 * where nr_sects is lower than the number of sectors in a page.
 */
static unsigned int __blkdev_sectors_to_bio_pages(sector_t nr_sects)
{
	sector_t pages = DIV_ROUND_UP_SECTOR_T(nr_sects, PAGE_SIZE / 512);

	return min(pages, (sector_t)BIO_MAX_VECS);
}

static void __blkdev_issue_zero_pages(struct block_device *bdev,
		sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
		struct bio **biop, unsigned int flags)
{
	while (nr_sects) {
		unsigned int nr_vecs = __blkdev_sectors_to_bio_pages(nr_sects);
		struct bio *bio;

		bio = bio_alloc(bdev, nr_vecs, REQ_OP_WRITE, gfp_mask);
		bio->bi_iter.bi_sector = sector;

		if ((flags & BLKDEV_ZERO_KILLABLE) &&
		    fatal_signal_pending(current))
			break;

		do {
			unsigned int len, added;

			len = min_t(sector_t,
				PAGE_SIZE, nr_sects << SECTOR_SHIFT);
			added = bio_add_page(bio, ZERO_PAGE(0), len, 0);
			if (added < len)
				break;
			nr_sects -= added >> SECTOR_SHIFT;
			sector += added >> SECTOR_SHIFT;
		} while (nr_sects);

		*biop = bio_chain_and_submit(*biop, bio);
		cond_resched();
	}
}

static int blkdev_issue_zero_pages(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp, unsigned flags)
{
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret = 0;

	if (flags & BLKDEV_ZERO_NOFALLBACK)
		return -EOPNOTSUPP;

	blk_start_plug(&plug);
	__blkdev_issue_zero_pages(bdev, sector, nr_sects, gfp, &bio, flags);
	if (bio) {
		if ((flags & BLKDEV_ZERO_KILLABLE) &&
		    fatal_signal_pending(current)) {
			bio_await_chain(bio);
			blk_finish_plug(&plug);
			return -EINTR;
		}
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}
	blk_finish_plug(&plug);

	return ret;
}

/**
 * __blkdev_issue_zeroout - generate number of zero-filled write bios
 * @bdev:	blockdev to issue
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @biop:	pointer to anchor bio
 * @flags:	controls detailed behavior
 *
 * Description:
 *  Zero-fill a block range, either using hardware offload or by explicitly
 *  writing zeroes to the device.
 *
 *  If a device is using logical block provisioning, the underlying space will
 *  not be released if %flags contains BLKDEV_ZERO_NOUNMAP.
 *
 *  If %flags contains BLKDEV_ZERO_NOFALLBACK, the function will return
 *  -EOPNOTSUPP if no explicit hardware offload for zeroing is provided.
 */
int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct bio **biop,
		unsigned flags)
{
	sector_t limit = bio_write_zeroes_limit(bdev);

	if (bdev_read_only(bdev))
		return -EPERM;

	if (limit) {
		__blkdev_issue_write_zeroes(bdev, sector, nr_sects,
				gfp_mask, biop, flags, limit);
	} else {
		if (flags & BLKDEV_ZERO_NOFALLBACK)
			return -EOPNOTSUPP;
		__blkdev_issue_zero_pages(bdev, sector, nr_sects, gfp_mask,
				biop, flags);
	}
	return 0;
}
EXPORT_SYMBOL(__blkdev_issue_zeroout);
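/*
 * Usage sketch (illustrative only, not part of this file):
 * __blkdev_issue_zeroout() only allocates and submits bios behind *biop;
 * the caller owns the resulting chain and must wait on it, mirroring the
 * plug-and-wait pattern blkdev_issue_discard() uses above.  The wrapper
 * name below is hypothetical.
 */
#if 0	/* example only */
static int example_zeroout_sync(struct block_device *bdev, sector_t sector,
		sector_t nr_sects)
{
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);
	ret = __blkdev_issue_zeroout(bdev, sector, nr_sects, GFP_KERNEL,
			&bio, BLKDEV_ZERO_NOUNMAP);
	if (!ret && bio) {
		/* Waits for the whole chain anchored at the last bio. */
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}
	blk_finish_plug(&plug);
	return ret;
}
#endif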
/**
 * blkdev_issue_zeroout - zero-fill a block range
 * @bdev:	blockdev to write
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @flags:	controls detailed behavior
 *
 * Description:
 *  Zero-fill a block range, either using hardware offload or by explicitly
 *  writing zeroes to the device.  See __blkdev_issue_zeroout() for the
 *  valid values for %flags.
 */
int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned flags)
{
	int ret;

	if ((sector | nr_sects) & ((bdev_logical_block_size(bdev) >> 9) - 1))
		return -EINVAL;
	if (bdev_read_only(bdev))
		return -EPERM;

	if (bdev_write_zeroes_sectors(bdev)) {
		ret = blkdev_issue_write_zeroes(bdev, sector, nr_sects,
				gfp_mask, flags);
		if (ret != -EOPNOTSUPP)
			return ret;
	}

	return blkdev_issue_zero_pages(bdev, sector, nr_sects, gfp_mask, flags);
}
EXPORT_SYMBOL(blkdev_issue_zeroout);

int blkdev_issue_secure_erase(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp)
{
	sector_t bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	unsigned int max_sectors = bdev_max_secure_erase_sectors(bdev);
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret = 0;

	/* make sure that "len << SECTOR_SHIFT" doesn't overflow */
	if (max_sectors > UINT_MAX >> SECTOR_SHIFT)
		max_sectors = UINT_MAX >> SECTOR_SHIFT;
	max_sectors &= ~bs_mask;

	if (max_sectors == 0)
		return -EOPNOTSUPP;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;
	if (bdev_read_only(bdev))
		return -EPERM;

	blk_start_plug(&plug);
	while (nr_sects) {
		unsigned int len = min_t(sector_t, nr_sects, max_sectors);

		bio = blk_next_bio(bio, bdev, 0, REQ_OP_SECURE_ERASE, gfp);
		bio->bi_iter.bi_sector = sector;
		bio->bi_iter.bi_size = len << SECTOR_SHIFT;

		sector += len;
		nr_sects -= len;
		cond_resched();
	}
	if (bio) {
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}
	blk_finish_plug(&plug);

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_secure_erase);
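/*
 * Usage sketch (illustrative only, not part of this file): secure erase
 * requires the range to be aligned to the logical block size.  For a 4096B
 * logical block device, bs_mask above is (4096 >> 9) - 1 = 7, so both sector
 * and nr_sects must be multiples of 8.  The function name is hypothetical.
 */
#if 0	/* example only */
static int example_secure_erase_whole_device(struct block_device *bdev)
{
	/* Returns -EOPNOTSUPP if the device cannot secure erase. */
	return blkdev_issue_secure_erase(bdev, 0, bdev_nr_sectors(bdev),
			GFP_KERNEL);
}
#endif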