/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2001 Jens Axboe <axboe@suse.de>
 */
#ifndef __LINUX_BIO_H
#define __LINUX_BIO_H

#include <linux/mempool.h>
/* struct bio, bio_vec and BIO_* flags are defined in blk_types.h */
#include <linux/blk_types.h>
#include <linux/uio.h>

#define BIO_MAX_VECS		256U

struct queue_limits;

static inline unsigned int bio_max_segs(unsigned int nr_segs)
{
	return min(nr_segs, BIO_MAX_VECS);
}

#define bio_prio(bio)			(bio)->bi_ioprio
#define bio_set_prio(bio, prio)		((bio)->bi_ioprio = prio)

#define bio_iter_iovec(bio, iter)				\
	bvec_iter_bvec((bio)->bi_io_vec, (iter))

#define bio_iter_page(bio, iter)				\
	bvec_iter_page((bio)->bi_io_vec, (iter))
#define bio_iter_len(bio, iter)					\
	bvec_iter_len((bio)->bi_io_vec, (iter))
#define bio_iter_offset(bio, iter)				\
	bvec_iter_offset((bio)->bi_io_vec, (iter))

#define bio_page(bio)		bio_iter_page((bio), (bio)->bi_iter)
#define bio_offset(bio)		bio_iter_offset((bio), (bio)->bi_iter)
#define bio_iovec(bio)		bio_iter_iovec((bio), (bio)->bi_iter)

#define bvec_iter_sectors(iter)	((iter).bi_size >> 9)
#define bvec_iter_end_sector(iter) ((iter).bi_sector + bvec_iter_sectors((iter)))

#define bio_sectors(bio)	bvec_iter_sectors((bio)->bi_iter)
#define bio_end_sector(bio)	bvec_iter_end_sector((bio)->bi_iter)

/*
 * Return the data direction, READ or WRITE.
 */
#define bio_data_dir(bio) \
	(op_is_write(bio_op(bio)) ? WRITE : READ)
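
/*
 * Illustrative sketch (not part of this header): these accessors are
 * typically used on the submission path, e.g. to bounds-check a bio before
 * accepting it.  "my_dev" is a hypothetical driver structure.
 *
 *	if (bio_end_sector(bio) > get_capacity(my_dev->disk))
 *		return -EIO;
 *	if (bio_data_dir(bio) == WRITE && my_dev->read_only)
 *		return -EROFS;
 */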

/*
 * Check whether this bio carries any data or not. A NULL bio is allowed.
 */
static inline bool bio_has_data(struct bio *bio)
{
	if (bio &&
	    bio->bi_iter.bi_size &&
	    bio_op(bio) != REQ_OP_DISCARD &&
	    bio_op(bio) != REQ_OP_SECURE_ERASE &&
	    bio_op(bio) != REQ_OP_WRITE_ZEROES)
		return true;

	return false;
}

static inline bool bio_no_advance_iter(const struct bio *bio)
{
	return bio_op(bio) == REQ_OP_DISCARD ||
	       bio_op(bio) == REQ_OP_SECURE_ERASE ||
	       bio_op(bio) == REQ_OP_WRITE_ZEROES;
}

static inline void *bio_data(struct bio *bio)
{
	if (bio_has_data(bio))
		return page_address(bio_page(bio)) + bio_offset(bio);

	return NULL;
}

static inline bool bio_next_segment(const struct bio *bio,
				    struct bvec_iter_all *iter)
{
	if (iter->idx >= bio->bi_vcnt)
		return false;

	bvec_advance(&bio->bi_io_vec[iter->idx], iter);
	return true;
}

/*
 * drivers should _never_ use the all version - the bio may have been split
 * before it got to the driver and the driver won't own all of it
 */
#define bio_for_each_segment_all(bvl, bio, iter) \
	for (bvl = bvec_init_iter_all(&iter); bio_next_segment((bio), &iter); )
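
/*
 * Illustrative sketch (not part of this header): an owner of a freshly
 * allocated (never split) bio walking each single-page segment, e.g. on
 * read completion:
 *
 *	struct bio_vec *bvec;
 *	struct bvec_iter_all iter_all;
 *
 *	bio_for_each_segment_all(bvec, bio, iter_all)
 *		flush_dcache_page(bvec->bv_page);
 */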

static inline void bio_advance_iter(const struct bio *bio,
				    struct bvec_iter *iter, unsigned int bytes)
{
	iter->bi_sector += bytes >> 9;

	if (bio_no_advance_iter(bio))
		iter->bi_size -= bytes;
	else
		bvec_iter_advance(bio->bi_io_vec, iter, bytes);
		/* TODO: It is reasonable to complete bio with error here. */
}

/* @bytes should be less than or equal to bvec[i->bi_idx].bv_len */
static inline void bio_advance_iter_single(const struct bio *bio,
					   struct bvec_iter *iter,
					   unsigned int bytes)
{
	iter->bi_sector += bytes >> 9;

	if (bio_no_advance_iter(bio))
		iter->bi_size -= bytes;
	else
		bvec_iter_advance_single(bio->bi_io_vec, iter, bytes);
}

void __bio_advance(struct bio *, unsigned bytes);

/**
 * bio_advance - increment/complete a bio by some number of bytes
 * @bio:	bio to advance
 * @nbytes:	number of bytes to complete
 *
 * This updates bi_sector, bi_size and bi_idx; if the number of bytes to
 * complete doesn't align with a bvec boundary, then bv_len and bv_offset will
 * be updated on the last bvec as well.
 *
 * @bio will then represent the remaining, uncompleted portion of the io.
 */
static inline void bio_advance(struct bio *bio, unsigned int nbytes)
{
	if (nbytes == bio->bi_iter.bi_size) {
		bio->bi_iter.bi_size = 0;
		return;
	}
	__bio_advance(bio, nbytes);
}

#define __bio_for_each_segment(bvl, bio, iter, start)			\
	for (iter = (start);						\
	     (iter).bi_size &&						\
		((bvl = bio_iter_iovec((bio), (iter))), 1);		\
	     bio_advance_iter_single((bio), &(iter), (bvl).bv_len))

#define bio_for_each_segment(bvl, bio, iter)				\
	__bio_for_each_segment(bvl, bio, iter, (bio)->bi_iter)

#define __bio_for_each_bvec(bvl, bio, iter, start)			\
	for (iter = (start);						\
	     (iter).bi_size &&						\
		((bvl = mp_bvec_iter_bvec((bio)->bi_io_vec, (iter))), 1); \
	     bio_advance_iter_single((bio), &(iter), (bvl).bv_len))

/* iterate over multi-page bvec */
#define bio_for_each_bvec(bvl, bio, iter)				\
	__bio_for_each_bvec(bvl, bio, iter, (bio)->bi_iter)
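
/*
 * Illustrative sketch (not part of this header): bio_for_each_bvec() yields
 * a struct bio_vec by value and may span multiple pages per step, e.g. when
 * counting hardware segments:
 *
 *	struct bio_vec bv;
 *	struct bvec_iter iter;
 *	unsigned int nr_bvecs = 0;
 *
 *	bio_for_each_bvec(bv, bio, iter)
 *		nr_bvecs++;
 */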

/*
 * Iterate over all multi-page bvecs. Drivers shouldn't use this version for
 * the same reasons as bio_for_each_segment_all().
 */
#define bio_for_each_bvec_all(bvl, bio, i)		\
	for (i = 0, bvl = bio_first_bvec_all(bio);	\
	     i < (bio)->bi_vcnt; i++, bvl++)

#define bio_iter_last(bvec, iter) ((iter).bi_size == (bvec).bv_len)

static inline unsigned bio_segments(struct bio *bio)
{
	unsigned segs = 0;
	struct bio_vec bv;
	struct bvec_iter iter;

	/*
	 * We special case discard/write same/write zeroes, because they
	 * interpret bi_size differently:
	 */

	switch (bio_op(bio)) {
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
	case REQ_OP_WRITE_ZEROES:
		return 0;
	default:
		break;
	}

	bio_for_each_segment(bv, bio, iter)
		segs++;

	return segs;
}

/*
 * get a reference to a bio, so it won't disappear. the intended use is
 * something like:
 *
 * bio_get(bio);
 * submit_bio(bio);
 * if (bio->bi_flags ...)
 *	do_something
 * bio_put(bio);
 *
 * without the bio_get(), the I/O could potentially complete before submit_bio
 * returns, and the bio would then already have been freed by the time the
 * if (bio->bi_flags ...) test runs.
 */
static inline void bio_get(struct bio *bio)
{
	bio->bi_flags |= (1 << BIO_REFFED);
	smp_mb__before_atomic();
	atomic_inc(&bio->__bi_cnt);
}

static inline void bio_cnt_set(struct bio *bio, unsigned int count)
{
	if (count != 1) {
		bio->bi_flags |= (1 << BIO_REFFED);
		smp_mb();
	}
	atomic_set(&bio->__bi_cnt, count);
}
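
/*
 * Illustrative sketch (not part of this header): the helpers below test and
 * update the per-bio flag bits declared in blk_types.h, e.g. to suppress
 * error logging on an expected failure:
 *
 *	if (!bio_flagged(bio, BIO_QUIET))
 *		pr_err("I/O error on %pg\n", bio->bi_bdev);
 */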

static inline bool bio_flagged(struct bio *bio, unsigned int bit)
{
	return (bio->bi_flags & (1U << bit)) != 0;
}

static inline void bio_set_flag(struct bio *bio, unsigned int bit)
{
	bio->bi_flags |= (1U << bit);
}

static inline void bio_clear_flag(struct bio *bio, unsigned int bit)
{
	bio->bi_flags &= ~(1U << bit);
}

static inline struct bio_vec *bio_first_bvec_all(struct bio *bio)
{
	WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
	return bio->bi_io_vec;
}

static inline struct page *bio_first_page_all(struct bio *bio)
{
	return bio_first_bvec_all(bio)->bv_page;
}

static inline struct bio_vec *bio_last_bvec_all(struct bio *bio)
{
	WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
	return &bio->bi_io_vec[bio->bi_vcnt - 1];
}
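
/*
 * Illustrative sketch (not part of this header, assumes both bios are fully
 * owned and not cloned): checking whether two bios are physically contiguous
 * by comparing their edge bvecs; "prev" and "next" are hypothetical:
 *
 *	struct bio_vec *last = bio_last_bvec_all(prev);
 *	struct bio_vec *first = bio_first_bvec_all(next);
 *	bool contig;
 *
 *	contig = page_to_phys(last->bv_page) + last->bv_offset + last->bv_len ==
 *		 page_to_phys(first->bv_page) + first->bv_offset;
 */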

/**
 * struct folio_iter - State for iterating all folios in a bio.
 * @folio: The current folio we're iterating.  NULL after the last folio.
 * @offset: The byte offset within the current folio.
 * @length: The number of bytes in this iteration (will not cross folio
 *	boundary).
 */
struct folio_iter {
	struct folio *folio;
	size_t offset;
	size_t length;
	/* private: for use by the iterator */
	struct folio *_next;
	size_t _seg_count;
	int _i;
};

static inline void bio_first_folio(struct folio_iter *fi, struct bio *bio,
				   int i)
{
	struct bio_vec *bvec = bio_first_bvec_all(bio) + i;

	fi->folio = page_folio(bvec->bv_page);
	fi->offset = bvec->bv_offset +
			PAGE_SIZE * (bvec->bv_page - &fi->folio->page);
	fi->_seg_count = bvec->bv_len;
	fi->length = min(folio_size(fi->folio) - fi->offset, fi->_seg_count);
	fi->_next = folio_next(fi->folio);
	fi->_i = i;
}

static inline void bio_next_folio(struct folio_iter *fi, struct bio *bio)
{
	fi->_seg_count -= fi->length;
	if (fi->_seg_count) {
		fi->folio = fi->_next;
		fi->offset = 0;
		fi->length = min(folio_size(fi->folio), fi->_seg_count);
		fi->_next = folio_next(fi->folio);
	} else if (fi->_i + 1 < bio->bi_vcnt) {
		bio_first_folio(fi, bio, fi->_i + 1);
	} else {
		fi->folio = NULL;
	}
}

/**
 * bio_for_each_folio_all - Iterate over each folio in a bio.
 * @fi: struct folio_iter which is updated for each folio.
 * @bio: struct bio to iterate over.
 */
#define bio_for_each_folio_all(fi, bio)				\
	for (bio_first_folio(&fi, bio, 0); fi.folio; bio_next_folio(&fi, bio))
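
/*
 * Illustrative sketch (not part of this header): a read completion marking
 * every folio the bio touched uptodate:
 *
 *	struct folio_iter fi;
 *
 *	bio_for_each_folio_all(fi, bio)
 *		folio_mark_uptodate(fi.folio);
 */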

enum bip_flags {
	BIP_BLOCK_INTEGRITY	= 1 << 0, /* block layer owns integrity data */
	BIP_MAPPED_INTEGRITY	= 1 << 1, /* ref tag has been remapped */
	BIP_CTRL_NOCHECK	= 1 << 2, /* disable HBA integrity checking */
	BIP_DISK_NOCHECK	= 1 << 3, /* disable disk integrity checking */
	BIP_IP_CHECKSUM		= 1 << 4, /* IP checksum */
};

/*
 * bio integrity payload
 */
struct bio_integrity_payload {
	struct bio		*bip_bio;	/* parent bio */

	struct bvec_iter	bip_iter;

	unsigned short		bip_vcnt;	/* # of integrity bio_vecs */
	unsigned short		bip_max_vcnt;	/* integrity bio_vec slots */
	unsigned short		bip_flags;	/* control flags */

	struct bvec_iter	bio_iter;	/* for rewinding parent bio */

	struct work_struct	bip_work;	/* I/O completion */

	struct bio_vec		*bip_vec;
	struct bio_vec		bip_inline_vecs[];/* embedded bvec array */
};

#if defined(CONFIG_BLK_DEV_INTEGRITY)

static inline struct bio_integrity_payload *bio_integrity(struct bio *bio)
{
	if (bio->bi_opf & REQ_INTEGRITY)
		return bio->bi_integrity;

	return NULL;
}

static inline bool bio_integrity_flagged(struct bio *bio, enum bip_flags flag)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);

	if (bip)
		return bip->bip_flags & flag;

	return false;
}

static inline sector_t bip_get_seed(struct bio_integrity_payload *bip)
{
	return bip->bip_iter.bi_sector;
}

static inline void bip_set_seed(struct bio_integrity_payload *bip,
				sector_t seed)
{
	bip->bip_iter.bi_sector = seed;
}

#endif /* CONFIG_BLK_DEV_INTEGRITY */
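
/*
 * Illustrative sketch (not part of this header): a driver checking whether
 * it must verify integrity data itself; my_verify() is a hypothetical
 * driver callback.
 *
 *	struct bio_integrity_payload *bip = bio_integrity(bio);
 *
 *	if (bip && !bio_integrity_flagged(bio, BIP_CTRL_NOCHECK))
 *		my_verify(bio, bip_get_seed(bip));
 */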

void bio_trim(struct bio *bio, sector_t offset, sector_t size);
extern struct bio *bio_split(struct bio *bio, int sectors,
			     gfp_t gfp, struct bio_set *bs);
struct bio *bio_split_rw(struct bio *bio, const struct queue_limits *lim,
		unsigned *segs, struct bio_set *bs, unsigned max_bytes);

/**
 * bio_next_split - get next @sectors from a bio, splitting if necessary
 * @bio:	bio to split
 * @sectors:	number of sectors to split from the front of @bio
 * @gfp:	gfp mask
 * @bs:		bio set to allocate from
 *
 * Return: a bio representing the next @sectors of @bio - if the bio is smaller
 * than @sectors, returns the original bio unchanged.
 */
static inline struct bio *bio_next_split(struct bio *bio, int sectors,
					 gfp_t gfp, struct bio_set *bs)
{
	if (sectors >= bio_sectors(bio))
		return bio;

	return bio_split(bio, sectors, gfp, bs);
}

enum {
	BIOSET_NEED_BVECS = BIT(0),
	BIOSET_NEED_RESCUER = BIT(1),
	BIOSET_PERCPU_CACHE = BIT(2),
};
extern int bioset_init(struct bio_set *, unsigned int, unsigned int, int flags);
extern void bioset_exit(struct bio_set *);
extern int biovec_init_pool(mempool_t *pool, int pool_entries);

struct bio *bio_alloc_bioset(struct block_device *bdev, unsigned short nr_vecs,
			     blk_opf_t opf, gfp_t gfp_mask,
			     struct bio_set *bs);
struct bio *bio_kmalloc(unsigned short nr_vecs, gfp_t gfp_mask);
extern void bio_put(struct bio *);

struct bio *bio_alloc_clone(struct block_device *bdev, struct bio *bio_src,
		gfp_t gfp, struct bio_set *bs);
int bio_init_clone(struct block_device *bdev, struct bio *bio,
		struct bio *bio_src, gfp_t gfp);

extern struct bio_set fs_bio_set;

static inline struct bio *bio_alloc(struct block_device *bdev,
		unsigned short nr_vecs, blk_opf_t opf, gfp_t gfp_mask)
{
	return bio_alloc_bioset(bdev, nr_vecs, opf, gfp_mask, &fs_bio_set);
}

void submit_bio(struct bio *bio);

extern void bio_endio(struct bio *);

static inline void bio_io_error(struct bio *bio)
{
	bio->bi_status = BLK_STS_IOERR;
	bio_endio(bio);
}

static inline void bio_wouldblock_error(struct bio *bio)
{
	bio_set_flag(bio, BIO_QUIET);
	bio->bi_status = BLK_STS_AGAIN;
	bio_endio(bio);
}
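
/*
 * Illustrative sketch (not part of this header): allocating, filling and
 * submitting a one-page read; "my_end_io" is a hypothetical completion
 * handler.
 *
 *	struct bio *bio;
 *
 *	bio = bio_alloc(bdev, 1, REQ_OP_READ, GFP_KERNEL);
 *	bio->bi_iter.bi_sector = sector;
 *	__bio_add_page(bio, page, PAGE_SIZE, 0);
 *	bio->bi_end_io = my_end_io;
 *	submit_bio(bio);
 */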

/*
 * Calculate number of bvec segments that should be allocated to fit data
 * pointed by @iter. If @iter is backed by bvec it's going to be reused
 * instead of allocating a new one.
 */
static inline int bio_iov_vecs_to_alloc(struct iov_iter *iter, int max_segs)
{
	if (iov_iter_is_bvec(iter))
		return 0;
	return iov_iter_npages(iter, max_segs);
}

struct request_queue;

extern int submit_bio_wait(struct bio *bio);
void bio_init(struct bio *bio, struct block_device *bdev, struct bio_vec *table,
	      unsigned short max_vecs, blk_opf_t opf);
extern void bio_uninit(struct bio *);
void bio_reset(struct bio *bio, struct block_device *bdev, blk_opf_t opf);
void bio_chain(struct bio *, struct bio *);

int bio_add_page(struct bio *, struct page *, unsigned len, unsigned off);
bool bio_add_folio(struct bio *, struct folio *, size_t len, size_t off);
extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *,
			   unsigned int, unsigned int);
int bio_add_zone_append_page(struct bio *bio, struct page *page,
			     unsigned int len, unsigned int offset);
void __bio_add_page(struct bio *bio, struct page *page,
		unsigned int len, unsigned int off);
int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter);
void bio_iov_bvec_set(struct bio *bio, struct iov_iter *iter);
void __bio_release_pages(struct bio *bio, bool mark_dirty);
extern void bio_set_pages_dirty(struct bio *bio);
extern void bio_check_pages_dirty(struct bio *bio);

extern void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter,
			       struct bio *src, struct bvec_iter *src_iter);
extern void bio_copy_data(struct bio *dst, struct bio *src);
extern void bio_free_pages(struct bio *bio);
void guard_bio_eod(struct bio *bio);
void zero_fill_bio(struct bio *bio);
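
/*
 * Illustrative sketch (not part of this header): bio_add_page() returns the
 * number of bytes actually added, so callers append until the bio fills up,
 * then submit and continue with a fresh bio (a real caller would also set
 * the successor's bi_iter.bi_sector, chain the bios and handle errors):
 *
 *	if (bio_add_page(bio, page, len, offset) != len) {
 *		submit_bio(bio);
 *		bio = bio_alloc(bdev, nr_vecs, opf, GFP_KERNEL);
 *		bio_add_page(bio, page, len, offset);
 *	}
 */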

static inline void bio_release_pages(struct bio *bio, bool mark_dirty)
{
	if (!bio_flagged(bio, BIO_NO_PAGE_REF))
		__bio_release_pages(bio, mark_dirty);
}

#define bio_dev(bio) \
	disk_devt((bio)->bi_bdev->bd_disk)

#ifdef CONFIG_BLK_CGROUP
void bio_associate_blkg(struct bio *bio);
void bio_associate_blkg_from_css(struct bio *bio,
				 struct cgroup_subsys_state *css);
void bio_clone_blkg_association(struct bio *dst, struct bio *src);
#else	/* CONFIG_BLK_CGROUP */
static inline void bio_associate_blkg(struct bio *bio) { }
static inline void bio_associate_blkg_from_css(struct bio *bio,
					       struct cgroup_subsys_state *css)
{ }
static inline void bio_clone_blkg_association(struct bio *dst,
					      struct bio *src) { }
#endif	/* CONFIG_BLK_CGROUP */

static inline void bio_set_dev(struct bio *bio, struct block_device *bdev)
{
	bio_clear_flag(bio, BIO_REMAPPED);
	if (bio->bi_bdev != bdev)
		bio_clear_flag(bio, BIO_BPS_THROTTLED);
	bio->bi_bdev = bdev;
	bio_associate_blkg(bio);
}
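
/*
 * Illustrative sketch (not part of this header): a remapping driver (DM/MD
 * style) redirecting a bio to an underlying device before resubmitting it;
 * "lower_bdev" and "my_offset" are hypothetical.
 *
 *	bio_set_dev(bio, lower_bdev);
 *	bio->bi_iter.bi_sector += my_offset;
 *	submit_bio_noacct(bio);
 */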

/*
 * BIO list management for use by remapping drivers (e.g. DM or MD) and loop.
 *
 * A bio_list anchors a singly-linked list of bios chained through the bi_next
 * member of the bio.  The bio_list also caches the last list member to allow
 * fast access to the tail.
 */
struct bio_list {
	struct bio *head;
	struct bio *tail;
};

static inline int bio_list_empty(const struct bio_list *bl)
{
	return bl->head == NULL;
}

static inline void bio_list_init(struct bio_list *bl)
{
	bl->head = bl->tail = NULL;
}

#define BIO_EMPTY_LIST	{ NULL, NULL }

#define bio_list_for_each(bio, bl) \
	for (bio = (bl)->head; bio; bio = bio->bi_next)

static inline unsigned bio_list_size(const struct bio_list *bl)
{
	unsigned sz = 0;
	struct bio *bio;

	bio_list_for_each(bio, bl)
		sz++;

	return sz;
}

static inline void bio_list_add(struct bio_list *bl, struct bio *bio)
{
	bio->bi_next = NULL;

	if (bl->tail)
		bl->tail->bi_next = bio;
	else
		bl->head = bio;

	bl->tail = bio;
}

static inline void bio_list_add_head(struct bio_list *bl, struct bio *bio)
{
	bio->bi_next = bl->head;

	bl->head = bio;

	if (!bl->tail)
		bl->tail = bio;
}

static inline void bio_list_merge(struct bio_list *bl, struct bio_list *bl2)
{
	if (!bl2->head)
		return;

	if (bl->tail)
		bl->tail->bi_next = bl2->head;
	else
		bl->head = bl2->head;

	bl->tail = bl2->tail;
}

static inline void bio_list_merge_head(struct bio_list *bl,
				       struct bio_list *bl2)
{
	if (!bl2->head)
		return;

	if (bl->head)
		bl2->tail->bi_next = bl->head;
	else
		bl->tail = bl2->tail;

	bl->head = bl2->head;
}
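
/*
 * Illustrative sketch (not part of this header): draining a driver-private
 * list of deferred bios with bio_list_pop() below; "my_deferred" is a
 * hypothetical bio_list owned by the driver.
 *
 *	struct bio_list list;
 *	struct bio *bio;
 *
 *	bio_list_init(&list);
 *	bio_list_merge(&list, &my_deferred);
 *	bio_list_init(&my_deferred);
 *
 *	while ((bio = bio_list_pop(&list)))
 *		submit_bio_noacct(bio);
 */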

static inline struct bio *bio_list_peek(struct bio_list *bl)
{
	return bl->head;
}

static inline struct bio *bio_list_pop(struct bio_list *bl)
{
	struct bio *bio = bl->head;

	if (bio) {
		bl->head = bl->head->bi_next;
		if (!bl->head)
			bl->tail = NULL;

		bio->bi_next = NULL;
	}

	return bio;
}

static inline struct bio *bio_list_get(struct bio_list *bl)
{
	struct bio *bio = bl->head;

	bl->head = bl->tail = NULL;

	return bio;
}

/*
 * Increment chain count for the bio. Make sure the CHAIN flag update
 * is visible before the raised count.
 */
static inline void bio_inc_remaining(struct bio *bio)
{
	bio_set_flag(bio, BIO_CHAIN);
	smp_mb__before_atomic();
	atomic_inc(&bio->__bi_remaining);
}

/*
 * bio_set is used to allow other portions of the IO system to
 * allocate their own private memory pools for bio and iovec structures.
 * These memory pools in turn all allocate from the bio_slab
 * and the bvec_slabs[].
 */
#define BIO_POOL_SIZE 2

struct bio_set {
	struct kmem_cache *bio_slab;
	unsigned int front_pad;

	/*
	 * per-cpu bio alloc cache
	 */
	struct bio_alloc_cache __percpu *cache;

	mempool_t bio_pool;
	mempool_t bvec_pool;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	mempool_t bio_integrity_pool;
	mempool_t bvec_integrity_pool;
#endif

	unsigned int back_pad;
	/*
	 * Deadlock avoidance for stacking block drivers: see comments in
	 * bio_alloc_bioset() for details
	 */
	spinlock_t		rescue_lock;
	struct bio_list		rescue_list;
	struct work_struct	rescue_work;
	struct workqueue_struct	*rescue_workqueue;

	/*
	 * Hot un-plug notifier for the per-cpu cache, if used
	 */
	struct hlist_node cpuhp_dead;
};

static inline bool bioset_initialized(struct bio_set *bs)
{
	return bs->bio_slab != NULL;
}
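
/*
 * Illustrative sketch (not part of this header): a stacking driver setting
 * up its own bio_set so clone allocations can always make forward progress;
 * "my_bioset" is a hypothetical instance.
 *
 *	static struct bio_set my_bioset;
 *
 *	if (bioset_init(&my_bioset, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS))
 *		return -ENOMEM;
 *	...
 *	bioset_exit(&my_bioset);
 */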

#if defined(CONFIG_BLK_DEV_INTEGRITY)

#define bip_for_each_vec(bvl, bip, iter)				\
	for_each_bvec(bvl, (bip)->bip_vec, iter, (bip)->bip_iter)

#define bio_for_each_integrity_vec(_bvl, _bio, _iter)			\
	for_each_bio(_bio)						\
		bip_for_each_vec(_bvl, _bio->bi_integrity, _iter)

extern struct bio_integrity_payload *bio_integrity_alloc(struct bio *, gfp_t, unsigned int);
extern int bio_integrity_add_page(struct bio *, struct page *, unsigned int, unsigned int);
extern bool bio_integrity_prep(struct bio *);
extern void bio_integrity_advance(struct bio *, unsigned int);
extern void bio_integrity_trim(struct bio *);
extern int bio_integrity_clone(struct bio *, struct bio *, gfp_t);
extern int bioset_integrity_create(struct bio_set *, int);
extern void bioset_integrity_free(struct bio_set *);
extern void bio_integrity_init(void);

#else /* CONFIG_BLK_DEV_INTEGRITY */

static inline void *bio_integrity(struct bio *bio)
{
	return NULL;
}

static inline int bioset_integrity_create(struct bio_set *bs, int pool_size)
{
	return 0;
}

static inline void bioset_integrity_free(struct bio_set *bs)
{
}

static inline bool bio_integrity_prep(struct bio *bio)
{
	return true;
}

static inline int bio_integrity_clone(struct bio *bio, struct bio *bio_src,
				      gfp_t gfp_mask)
{
	return 0;
}

static inline void bio_integrity_advance(struct bio *bio,
					 unsigned int bytes_done)
{
}

static inline void bio_integrity_trim(struct bio *bio)
{
}

static inline void bio_integrity_init(void)
{
}

static inline bool bio_integrity_flagged(struct bio *bio, enum bip_flags flag)
{
	return false;
}

static inline void *bio_integrity_alloc(struct bio *bio, gfp_t gfp,
					unsigned int nr)
{
	return ERR_PTR(-EINVAL);
}

static inline int bio_integrity_add_page(struct bio *bio, struct page *page,
					 unsigned int len, unsigned int offset)
{
	return 0;
}

#endif /* CONFIG_BLK_DEV_INTEGRITY */
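
/*
 * Illustrative sketch (not part of this header): summing the integrity
 * bytes attached to a bio with bip_for_each_vec():
 *
 *	struct bio_vec iv;
 *	struct bvec_iter iter;
 *	unsigned int bytes = 0;
 *
 *	bip_for_each_vec(iv, bip, iter)
 *		bytes += iv.bv_len;
 */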

/*
 * Mark a bio as polled. Note that for async polled IO, the caller must
 * expect -EWOULDBLOCK if we cannot allocate a request (or other resources).
 * We cannot block waiting for requests on polled IO, as those completions
 * must be found by the caller. This is different than IRQ driven IO, where
 * it's safe to wait for IO to complete.
 */
static inline void bio_set_polled(struct bio *bio, struct kiocb *kiocb)
{
	bio->bi_opf |= REQ_POLLED;
	if (!is_sync_kiocb(kiocb))
		bio->bi_opf |= REQ_NOWAIT;
}

static inline void bio_clear_polled(struct bio *bio)
{
	bio->bi_opf &= ~REQ_POLLED;
}

struct bio *blk_next_bio(struct bio *bio, struct block_device *bdev,
		unsigned int nr_pages, blk_opf_t opf, gfp_t gfp);

#endif /* __LINUX_BIO_H */