
TOMOYO Linux Cross Reference
Linux/block/blk-lib.c


Diff markup

Differences between /block/blk-lib.c (Architecture m68k) and /block/blk-lib.c (Architecture i386): the file does not differ between the two architectures, so the source is shown once below.


// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to generic helper functions
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"

static sector_t bio_discard_limit(struct block_device *bdev, sector_t sector)
{
	unsigned int discard_granularity = bdev_discard_granularity(bdev);
	sector_t granularity_aligned_sector;

	if (bdev_is_partition(bdev))
		sector += bdev->bd_start_sect;

	granularity_aligned_sector =
		round_up(sector, discard_granularity >> SECTOR_SHIFT);

	/*
	 * Make sure subsequent bios start aligned to the discard granularity if
	 * it needs to be split.
	 */
	if (granularity_aligned_sector != sector)
		return granularity_aligned_sector - sector;

	/*
	 * Align the bio size to the discard granularity to make splitting the bio
	 * at discard granularity boundaries easier in the driver if needed.
	 */
	return round_down(UINT_MAX, discard_granularity) >> SECTOR_SHIFT;
}
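
/*
 * Editor's note (illustrative, not part of blk-lib.c): a worked example of
 * the arithmetic above.  Assume discard_granularity is 1 MiB, i.e. 2048
 * sectors after the ">> SECTOR_SHIFT".  For sector = 3000,
 * round_up(3000, 2048) = 4096 != 3000, so the limit returned is
 * 4096 - 3000 = 1096 sectors and the next bio starts on a granularity
 * boundary.  For an already aligned sector such as 4096, the limit is
 * round_down(UINT_MAX, 1 MiB) >> 9 sectors, the largest granularity-aligned
 * byte count that still fits in bio->bi_iter.bi_size.
 */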

struct bio *blk_alloc_discard_bio(struct block_device *bdev,
		sector_t *sector, sector_t *nr_sects, gfp_t gfp_mask)
{
	sector_t bio_sects = min(*nr_sects, bio_discard_limit(bdev, *sector));
	struct bio *bio;

	if (!bio_sects)
		return NULL;

	bio = bio_alloc(bdev, 0, REQ_OP_DISCARD, gfp_mask);
	if (!bio)
		return NULL;
	bio->bi_iter.bi_sector = *sector;
	bio->bi_iter.bi_size = bio_sects << SECTOR_SHIFT;
	*sector += bio_sects;
	*nr_sects -= bio_sects;
	/*
	 * We can loop for a long time in here if someone does full device
	 * discards (like mkfs).  Be nice and allow us to schedule out to avoid
	 * softlocking if preempt is disabled.
	 */
	cond_resched();
	return bio;
}

int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct bio **biop)
{
	struct bio *bio;

	while ((bio = blk_alloc_discard_bio(bdev, &sector, &nr_sects,
			gfp_mask)))
		*biop = bio_chain_and_submit(*biop, bio);
	return 0;
}
EXPORT_SYMBOL(__blkdev_issue_discard);

/**
 * blkdev_issue_discard - queue a discard
 * @bdev:	blockdev to issue discard for
 * @sector:	start sector
 * @nr_sects:	number of sectors to discard
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 *
 * Description:
 *    Issue a discard request for the sectors in question.
 */
int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask)
{
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);
	ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, &bio);
	if (!ret && bio) {
		ret = submit_bio_wait(bio);
		if (ret == -EOPNOTSUPP)
			ret = 0;
		bio_put(bio);
	}
	blk_finish_plug(&plug);

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_discard);
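
/*
 * Editor's addition (illustrative sketch, not part of blk-lib.c): a minimal
 * synchronous caller of blkdev_issue_discard().  The function name
 * example_discard_whole_bdev() is hypothetical; bdev_max_discard_sectors()
 * and bdev_nr_sectors() are the usual block layer helpers for checking
 * discard support and querying device size, and GFP_KERNEL is assumed to be
 * a valid allocation context for the caller.
 */
static int example_discard_whole_bdev(struct block_device *bdev)
{
	/* skip devices that do not advertise discard support */
	if (!bdev_max_discard_sectors(bdev))
		return -EOPNOTSUPP;

	return blkdev_issue_discard(bdev, 0, bdev_nr_sectors(bdev),
				    GFP_KERNEL);
}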

static sector_t bio_write_zeroes_limit(struct block_device *bdev)
{
	sector_t bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;

	return min(bdev_write_zeroes_sectors(bdev),
		(UINT_MAX >> SECTOR_SHIFT) & ~bs_mask);
}

/*
 * There is no reliable way for the SCSI subsystem to determine whether a
 * device supports a WRITE SAME operation without actually performing a write
 * to media. As a result, write_zeroes is enabled by default and will be
 * disabled if a zeroing operation subsequently fails. This means that this
 * queue limit is likely to change at runtime.
 */
static void __blkdev_issue_write_zeroes(struct block_device *bdev,
		sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
		struct bio **biop, unsigned flags, sector_t limit)
{

	while (nr_sects) {
		unsigned int len = min(nr_sects, limit);
		struct bio *bio;

		if ((flags & BLKDEV_ZERO_KILLABLE) &&
		    fatal_signal_pending(current))
			break;

		bio = bio_alloc(bdev, 0, REQ_OP_WRITE_ZEROES, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		if (flags & BLKDEV_ZERO_NOUNMAP)
			bio->bi_opf |= REQ_NOUNMAP;

		bio->bi_iter.bi_size = len << SECTOR_SHIFT;
		*biop = bio_chain_and_submit(*biop, bio);

		nr_sects -= len;
		sector += len;
		cond_resched();
	}
}

static int blkdev_issue_write_zeroes(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp, unsigned flags)
{
	sector_t limit = bio_write_zeroes_limit(bdev);
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret = 0;

	blk_start_plug(&plug);
	__blkdev_issue_write_zeroes(bdev, sector, nr_sects, gfp, &bio,
			flags, limit);
	if (bio) {
		if ((flags & BLKDEV_ZERO_KILLABLE) &&
		    fatal_signal_pending(current)) {
			bio_await_chain(bio);
			blk_finish_plug(&plug);
			return -EINTR;
		}
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}
	blk_finish_plug(&plug);

	/*
	 * For some devices there is no non-destructive way to verify whether
	 * WRITE ZEROES is actually supported.  These will clear the capability
	 * on an I/O error, in which case we'll turn any error into
	 * "not supported" here.
	 */
	if (ret && !bdev_write_zeroes_sectors(bdev))
		return -EOPNOTSUPP;
	return ret;
}

/*
 * Convert a number of 512B sectors to a number of pages.
 * The result is limited to a number of pages that can fit into a BIO.
 * Also make sure that the result is always at least 1 (page) for the cases
 * where nr_sects is lower than the number of sectors in a page.
 */
static unsigned int __blkdev_sectors_to_bio_pages(sector_t nr_sects)
{
	sector_t pages = DIV_ROUND_UP_SECTOR_T(nr_sects, PAGE_SIZE / 512);

	return min(pages, (sector_t)BIO_MAX_VECS);
}
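
/*
 * Editor's note (illustrative, not part of blk-lib.c): assuming 4 KiB pages
 * (8 sectors per page), __blkdev_sectors_to_bio_pages(7) is
 * DIV_ROUND_UP(7, 8) = 1, while a very large nr_sects is capped at
 * BIO_MAX_VECS, so each fallback bio carries at most BIO_MAX_VECS copies of
 * ZERO_PAGE(0).
 */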

static void __blkdev_issue_zero_pages(struct block_device *bdev,
		sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
		struct bio **biop, unsigned int flags)
{
	while (nr_sects) {
		unsigned int nr_vecs = __blkdev_sectors_to_bio_pages(nr_sects);
		struct bio *bio;

		bio = bio_alloc(bdev, nr_vecs, REQ_OP_WRITE, gfp_mask);
		bio->bi_iter.bi_sector = sector;

		if ((flags & BLKDEV_ZERO_KILLABLE) &&
		    fatal_signal_pending(current))
			break;

		do {
			unsigned int len, added;

			len = min_t(sector_t,
				PAGE_SIZE, nr_sects << SECTOR_SHIFT);
			added = bio_add_page(bio, ZERO_PAGE(0), len, 0);
			if (added < len)
				break;
			nr_sects -= added >> SECTOR_SHIFT;
			sector += added >> SECTOR_SHIFT;
		} while (nr_sects);

		*biop = bio_chain_and_submit(*biop, bio);
		cond_resched();
	}
}

static int blkdev_issue_zero_pages(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp, unsigned flags)
{
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret = 0;

	if (flags & BLKDEV_ZERO_NOFALLBACK)
		return -EOPNOTSUPP;

	blk_start_plug(&plug);
	__blkdev_issue_zero_pages(bdev, sector, nr_sects, gfp, &bio, flags);
	if (bio) {
		if ((flags & BLKDEV_ZERO_KILLABLE) &&
		    fatal_signal_pending(current)) {
			bio_await_chain(bio);
			blk_finish_plug(&plug);
			return -EINTR;
		}
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}
	blk_finish_plug(&plug);

	return ret;
}

/**
 * __blkdev_issue_zeroout - generate number of zero filled write bios
 * @bdev:	blockdev to issue
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @biop:	pointer to anchor bio
 * @flags:	controls detailed behavior
 *
 * Description:
 *  Zero-fill a block range, either using hardware offload or by explicitly
 *  writing zeroes to the device.
 *
 *  If a device is using logical block provisioning, the underlying space will
 *  not be released if %flags contains BLKDEV_ZERO_NOUNMAP.
 *
 *  If %flags contains BLKDEV_ZERO_NOFALLBACK, the function will return
 *  -EOPNOTSUPP if no explicit hardware offload for zeroing is provided.
 */
int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct bio **biop,
		unsigned flags)
{
	sector_t limit = bio_write_zeroes_limit(bdev);

	if (bdev_read_only(bdev))
		return -EPERM;

	if (limit) {
		__blkdev_issue_write_zeroes(bdev, sector, nr_sects,
				gfp_mask, biop, flags, limit);
	} else {
		if (flags & BLKDEV_ZERO_NOFALLBACK)
			return -EOPNOTSUPP;
		__blkdev_issue_zero_pages(bdev, sector, nr_sects, gfp_mask,
				biop, flags);
	}
	return 0;
}
EXPORT_SYMBOL(__blkdev_issue_zeroout);
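
/*
 * Editor's addition (illustrative sketch, not part of blk-lib.c): how a
 * caller typically drives the *biop interface of __blkdev_issue_zeroout().
 * The helper only builds and submits the chained bios; the caller owns the
 * anchor bio and must wait on it and drop the reference, mirroring the
 * pattern used by blkdev_issue_discard() above.  The function name
 * example_zeroout_sync() is hypothetical, and the GFP_KERNEL and
 * BLKDEV_ZERO_NOUNMAP choices are only examples.
 */
static int example_zeroout_sync(struct block_device *bdev, sector_t sector,
		sector_t nr_sects)
{
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);
	ret = __blkdev_issue_zeroout(bdev, sector, nr_sects, GFP_KERNEL,
				     &bio, BLKDEV_ZERO_NOUNMAP);
	if (!ret && bio) {
		ret = submit_bio_wait(bio);	/* wait for the whole chain */
		bio_put(bio);
	}
	blk_finish_plug(&plug);
	return ret;
}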

/**
 * blkdev_issue_zeroout - zero-fill a block range
 * @bdev:	blockdev to write
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @flags:	controls detailed behavior
 *
 * Description:
 *  Zero-fill a block range, either using hardware offload or by explicitly
 *  writing zeroes to the device.  See __blkdev_issue_zeroout() for the
 *  valid values for %flags.
 */
int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned flags)
{
	int ret;

	if ((sector | nr_sects) & ((bdev_logical_block_size(bdev) >> 9) - 1))
		return -EINVAL;
	if (bdev_read_only(bdev))
		return -EPERM;

	if (bdev_write_zeroes_sectors(bdev)) {
		ret = blkdev_issue_write_zeroes(bdev, sector, nr_sects,
				gfp_mask, flags);
		if (ret != -EOPNOTSUPP)
			return ret;
	}

	return blkdev_issue_zero_pages(bdev, sector, nr_sects, gfp_mask, flags);
}
EXPORT_SYMBOL(blkdev_issue_zeroout);
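
/*
 * Editor's addition (illustrative sketch, not part of blk-lib.c): a caller
 * that only wants hardware-offloaded zeroing.  With BLKDEV_ZERO_NOFALLBACK
 * the call returns -EOPNOTSUPP instead of falling back to writing zero
 * pages; without the flag, blkdev_issue_zeroout() falls back transparently.
 * The function name example_zeroout_offload_only() is hypothetical.
 */
static int example_zeroout_offload_only(struct block_device *bdev,
		sector_t sector, sector_t nr_sects)
{
	return blkdev_issue_zeroout(bdev, sector, nr_sects, GFP_KERNEL,
				    BLKDEV_ZERO_NOFALLBACK);
}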

int blkdev_issue_secure_erase(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp)
{
	sector_t bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	unsigned int max_sectors = bdev_max_secure_erase_sectors(bdev);
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret = 0;

	/* make sure that "len << SECTOR_SHIFT" doesn't overflow */
	if (max_sectors > UINT_MAX >> SECTOR_SHIFT)
		max_sectors = UINT_MAX >> SECTOR_SHIFT;
	max_sectors &= ~bs_mask;

	if (max_sectors == 0)
		return -EOPNOTSUPP;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;
	if (bdev_read_only(bdev))
		return -EPERM;

	blk_start_plug(&plug);
	while (nr_sects) {
		unsigned int len = min_t(sector_t, nr_sects, max_sectors);

		bio = blk_next_bio(bio, bdev, 0, REQ_OP_SECURE_ERASE, gfp);
		bio->bi_iter.bi_sector = sector;
		bio->bi_iter.bi_size = len << SECTOR_SHIFT;

		sector += len;
		nr_sects -= len;
		cond_resched();
	}
	if (bio) {
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}
	blk_finish_plug(&plug);

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_secure_erase);
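
/*
 * Editor's addition (illustrative sketch, not part of blk-lib.c): a caller
 * of blkdev_issue_secure_erase() that erases a whole device.  The start
 * sector and count must be aligned to the logical block size and the device
 * must report a non-zero bdev_max_secure_erase_sectors(), otherwise the
 * function returns -EINVAL or -EOPNOTSUPP as shown above.  The function
 * name example_secure_erase_all() is hypothetical.
 */
static int example_secure_erase_all(struct block_device *bdev)
{
	return blkdev_issue_secure_erase(bdev, 0, bdev_nr_sectors(bdev),
					 GFP_KERNEL);
}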

