// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to generic helper functions
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"

static sector_t bio_discard_limit(struct block_device *bdev, sector_t sector)
{
	unsigned int discard_granularity = bdev_discard_granularity(bdev);
	sector_t granularity_aligned_sector;

	if (bdev_is_partition(bdev))
		sector += bdev->bd_start_sect;

	granularity_aligned_sector =
		round_up(sector, discard_granularity >> SECTOR_SHIFT);

	/*
	 * Make sure subsequent bios start aligned to the discard granularity if
	 * it needs to be split.
	 */
	if (granularity_aligned_sector != sector)
		return granularity_aligned_sector - sector;

	/*
	 * Align the bio size to the discard granularity to make splitting the bio
	 * at discard granularity boundaries easier in the driver if needed.
	 */
	return round_down(UINT_MAX, discard_granularity) >> SECTOR_SHIFT;
}

struct bio *blk_alloc_discard_bio(struct block_device *bdev,
		sector_t *sector, sector_t *nr_sects, gfp_t gfp_mask)
{
	sector_t bio_sects = min(*nr_sects, bio_discard_limit(bdev, *sector));
	struct bio *bio;

	if (!bio_sects)
		return NULL;

	bio = bio_alloc(bdev, 0, REQ_OP_DISCARD, gfp_mask);
	if (!bio)
		return NULL;
	bio->bi_iter.bi_sector = *sector;
	bio->bi_iter.bi_size = bio_sects << SECTOR_SHIFT;
	*sector += bio_sects;
	*nr_sects -= bio_sects;
	/*
	 * We can loop for a long time in here if someone does full device
	 * discards (like mkfs).  Be nice and allow us to schedule out to avoid
	 * softlocking if preempt is disabled.
	 */
	cond_resched();
	return bio;
}

int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct bio **biop)
{
	struct bio *bio;

	while ((bio = blk_alloc_discard_bio(bdev, &sector, &nr_sects,
			gfp_mask)))
		*biop = bio_chain_and_submit(*biop, bio);
	return 0;
}
EXPORT_SYMBOL(__blkdev_issue_discard);

/**
 * blkdev_issue_discard - queue a discard
 * @bdev:	blockdev to issue discard for
 * @sector:	start sector
 * @nr_sects:	number of sectors to discard
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 *
 * Description:
 *    Issue a discard request for the sectors in question.
 */
int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask)
{
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);
	ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, &bio);
	if (!ret && bio) {
		ret = submit_bio_wait(bio);
		if (ret == -EOPNOTSUPP)
			ret = 0;
		bio_put(bio);
	}
	blk_finish_plug(&plug);

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_discard);
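
/*
 * Example (editorial sketch, not part of blk-lib.c): a hypothetical caller
 * that discards an extent on its backing device, roughly as an in-kernel
 * filesystem or driver might.  The wrapper name and GFP choice are
 * assumptions made up for illustration; only blkdev_issue_discard() itself
 * is part of this file.
 */
#if 0	/* illustration only, not compiled */
static int example_discard_extent(struct block_device *bdev,
		sector_t start, sector_t nr_sects)
{
	/*
	 * GFP_NOFS is a typical choice from filesystem context; GFP_KERNEL
	 * is fine for ordinary process context.
	 */
	return blkdev_issue_discard(bdev, start, nr_sects, GFP_NOFS);
}
#endif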

static sector_t bio_write_zeroes_limit(struct block_device *bdev)
{
	sector_t bs_mask = (bdev_logical_block_size(bdev) >> SECTOR_SHIFT) - 1;

	return min(bdev_write_zeroes_sectors(bdev),
		(UINT_MAX >> SECTOR_SHIFT) & ~bs_mask);
}

/*
 * There is no reliable way for the SCSI subsystem to determine whether a
 * device supports a WRITE SAME operation without actually writing
 * to media. As a result, write_zeroes is enabled by default and will be
 * disabled if a zeroing operation subsequently fails, which means the
 * queue limit is likely to change at runtime.
 */
static void __blkdev_issue_write_zeroes(struct block_device *bdev,
		sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
		struct bio **biop, unsigned flags, sector_t limit)
{
	while (nr_sects) {
		unsigned int len = min(nr_sects, limit);
		struct bio *bio;

		if ((flags & BLKDEV_ZERO_KILLABLE) &&
		    fatal_signal_pending(current))
			break;

		bio = bio_alloc(bdev, 0, REQ_OP_WRITE_ZEROES, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		if (flags & BLKDEV_ZERO_NOUNMAP)
			bio->bi_opf |= REQ_NOUNMAP;

		bio->bi_iter.bi_size = len << SECTOR_SHIFT;
		*biop = bio_chain_and_submit(*biop, bio);

		nr_sects -= len;
		sector += len;
		cond_resched();
	}
}

static int blkdev_issue_write_zeroes(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp, unsigned flags)
{
	sector_t limit = bio_write_zeroes_limit(bdev);
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret = 0;

	blk_start_plug(&plug);
	__blkdev_issue_write_zeroes(bdev, sector, nr_sects, gfp, &bio,
			flags, limit);
	if (bio) {
		if ((flags & BLKDEV_ZERO_KILLABLE) &&
		    fatal_signal_pending(current)) {
			bio_await_chain(bio);
			blk_finish_plug(&plug);
			return -EINTR;
		}
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}
	blk_finish_plug(&plug);

	/*
	 * For some devices there is no non-destructive way to verify whether
	 * WRITE ZEROES is actually supported.  These will clear the capability
	 * on an I/O error, in which case we'll turn any error into
	 * "not supported" here.
	 */
	if (ret && !bdev_write_zeroes_sectors(bdev))
		return -EOPNOTSUPP;
	return ret;
}

/*
 * Convert a number of 512B sectors to a number of pages.
 * The result is limited to a number of pages that can fit into a BIO.
 * Also make sure that the result is always at least 1 (page) for the cases
 * where nr_sects is lower than the number of sectors in a page.
 */
static unsigned int __blkdev_sectors_to_bio_pages(sector_t nr_sects)
{
	sector_t pages = DIV_ROUND_UP_SECTOR_T(nr_sects, PAGE_SIZE / 512);

	return min(pages, (sector_t)BIO_MAX_VECS);
}
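
/*
 * Worked example (editorial note): with 4 KiB pages, PAGE_SIZE / 512 == 8,
 * so 1..8 sectors map to one page and 9 sectors map to two.  Anything above
 * BIO_MAX_VECS * 8 sectors (1 MiB with the usual BIO_MAX_VECS of 256) is
 * capped at BIO_MAX_VECS; the remainder is picked up by the next bio built
 * in __blkdev_issue_zero_pages() below.
 */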

static void __blkdev_issue_zero_pages(struct block_device *bdev,
		sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
		struct bio **biop, unsigned int flags)
{
	while (nr_sects) {
		unsigned int nr_vecs = __blkdev_sectors_to_bio_pages(nr_sects);
		struct bio *bio;

		bio = bio_alloc(bdev, nr_vecs, REQ_OP_WRITE, gfp_mask);
		bio->bi_iter.bi_sector = sector;

		if ((flags & BLKDEV_ZERO_KILLABLE) &&
		    fatal_signal_pending(current))
			break;

		do {
			unsigned int len, added;

			len = min_t(sector_t,
				PAGE_SIZE, nr_sects << SECTOR_SHIFT);
			added = bio_add_page(bio, ZERO_PAGE(0), len, 0);
			if (added < len)
				break;
			nr_sects -= added >> SECTOR_SHIFT;
			sector += added >> SECTOR_SHIFT;
		} while (nr_sects);

		*biop = bio_chain_and_submit(*biop, bio);
		cond_resched();
	}
}

static int blkdev_issue_zero_pages(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp, unsigned flags)
{
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret = 0;

	if (flags & BLKDEV_ZERO_NOFALLBACK)
		return -EOPNOTSUPP;

	blk_start_plug(&plug);
	__blkdev_issue_zero_pages(bdev, sector, nr_sects, gfp, &bio, flags);
	if (bio) {
		if ((flags & BLKDEV_ZERO_KILLABLE) &&
		    fatal_signal_pending(current)) {
			bio_await_chain(bio);
			blk_finish_plug(&plug);
			return -EINTR;
		}
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}
	blk_finish_plug(&plug);

	return ret;
}

/**
 * __blkdev_issue_zeroout - generate number of zero-filled write bios
 * @bdev:	blockdev to issue
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @biop:	pointer to anchor bio
 * @flags:	controls detailed behavior
 *
 * Description:
 *  Zero-fill a block range, either using hardware offload or by explicitly
 *  writing zeroes to the device.
 *
 *  If a device is using logical block provisioning, the underlying space will
 *  not be released if %flags contains BLKDEV_ZERO_NOUNMAP.
 *
 *  If %flags contains BLKDEV_ZERO_NOFALLBACK, the function will return
 *  -EOPNOTSUPP if no explicit hardware offload for zeroing is provided.
 */
int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct bio **biop,
		unsigned flags)
{
	sector_t limit = bio_write_zeroes_limit(bdev);

	if (bdev_read_only(bdev))
		return -EPERM;

	if (limit) {
		__blkdev_issue_write_zeroes(bdev, sector, nr_sects,
				gfp_mask, biop, flags, limit);
	} else {
		if (flags & BLKDEV_ZERO_NOFALLBACK)
			return -EOPNOTSUPP;
		__blkdev_issue_zero_pages(bdev, sector, nr_sects, gfp_mask,
				biop, flags);
	}
	return 0;
}
EXPORT_SYMBOL(__blkdev_issue_zeroout);

/**
 * blkdev_issue_zeroout - zero-fill a block range
 * @bdev:	blockdev to write
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @flags:	controls detailed behavior
 *
 * Description:
 *  Zero-fill a block range, either using hardware offload or by explicitly
 *  writing zeroes to the device.  See __blkdev_issue_zeroout() for the
 *  valid values for %flags.
 */
int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned flags)
{
	int ret;

	if ((sector | nr_sects) & ((bdev_logical_block_size(bdev) >> 9) - 1))
		return -EINVAL;
	if (bdev_read_only(bdev))
		return -EPERM;

	if (bdev_write_zeroes_sectors(bdev)) {
		ret = blkdev_issue_write_zeroes(bdev, sector, nr_sects,
				gfp_mask, flags);
		if (ret != -EOPNOTSUPP)
			return ret;
	}

	return blkdev_issue_zero_pages(bdev, sector, nr_sects, gfp_mask, flags);
}
EXPORT_SYMBOL(blkdev_issue_zeroout);
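
/*
 * Example (editorial sketch, not part of blk-lib.c): zeroing a range while
 * keeping it provisioned, as a filesystem might do for an unwritten extent.
 * The wrapper below is hypothetical; BLKDEV_ZERO_NOUNMAP asks the device not
 * to deallocate the blocks, and adding BLKDEV_ZERO_NOFALLBACK would instead
 * fail with -EOPNOTSUPP when no WRITE ZEROES offload is available.
 */
#if 0	/* illustration only, not compiled */
static int example_zero_range(struct block_device *bdev, sector_t start,
			      sector_t nr_sects)
{
	return blkdev_issue_zeroout(bdev, start, nr_sects, GFP_KERNEL,
				    BLKDEV_ZERO_NOUNMAP);
}
#endif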

int blkdev_issue_secure_erase(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp)
{
	sector_t bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	unsigned int max_sectors = bdev_max_secure_erase_sectors(bdev);
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret = 0;

	/* make sure that "len << SECTOR_SHIFT" doesn't overflow */
	if (max_sectors > UINT_MAX >> SECTOR_SHIFT)
		max_sectors = UINT_MAX >> SECTOR_SHIFT;
	max_sectors &= ~bs_mask;

	if (max_sectors == 0)
		return -EOPNOTSUPP;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;
	if (bdev_read_only(bdev))
		return -EPERM;

	blk_start_plug(&plug);
	while (nr_sects) {
		unsigned int len = min_t(sector_t, nr_sects, max_sectors);

		bio = blk_next_bio(bio, bdev, 0, REQ_OP_SECURE_ERASE, gfp);
		bio->bi_iter.bi_sector = sector;
		bio->bi_iter.bi_size = len << SECTOR_SHIFT;

		sector += len;
		nr_sects -= len;
		cond_resched();
	}
	if (bio) {
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}
	blk_finish_plug(&plug);

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_secure_erase);
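
/*
 * Example (editorial sketch, not part of blk-lib.c): securely erasing an
 * entire device, e.g. from an ioctl handler.  The wrapper is hypothetical;
 * note that the range must be aligned to the logical block size (otherwise
 * -EINVAL) and the device must support secure erase (otherwise -EOPNOTSUPP).
 */
#if 0	/* illustration only, not compiled */
static int example_secure_erase_all(struct block_device *bdev)
{
	return blkdev_issue_secure_erase(bdev, 0, bdev_nr_sectors(bdev),
					 GFP_KERNEL);
}
#endif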