TOMOYO Linux Cross Reference
Linux/include/linux/bio.h

Diff markup

Differences between /include/linux/bio.h (Version linux-6.12-rc7) and /include/linux/bio.h (Version ccs-tools-1.8.12). The file does not exist in ccs-tools-1.8.12, so the listing below shows the linux-6.12-rc7 contents only.


/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2001 Jens Axboe <axboe@suse.de>
 */
#ifndef __LINUX_BIO_H
#define __LINUX_BIO_H

#include <linux/mempool.h>
/* struct bio, bio_vec and BIO_* flags are defined in blk_types.h */
#include <linux/blk_types.h>
#include <linux/uio.h>

#define BIO_MAX_VECS            256U

struct queue_limits;

static inline unsigned int bio_max_segs(unsigned int nr_segs)
{
        return min(nr_segs, BIO_MAX_VECS);
}

#define bio_prio(bio)                   (bio)->bi_ioprio
#define bio_set_prio(bio, prio)         ((bio)->bi_ioprio = prio)

#define bio_iter_iovec(bio, iter)                               \
        bvec_iter_bvec((bio)->bi_io_vec, (iter))

#define bio_iter_page(bio, iter)                                \
        bvec_iter_page((bio)->bi_io_vec, (iter))
#define bio_iter_len(bio, iter)                                 \
        bvec_iter_len((bio)->bi_io_vec, (iter))
#define bio_iter_offset(bio, iter)                              \
        bvec_iter_offset((bio)->bi_io_vec, (iter))

#define bio_page(bio)           bio_iter_page((bio), (bio)->bi_iter)
#define bio_offset(bio)         bio_iter_offset((bio), (bio)->bi_iter)
#define bio_iovec(bio)          bio_iter_iovec((bio), (bio)->bi_iter)

#define bvec_iter_sectors(iter) ((iter).bi_size >> 9)
#define bvec_iter_end_sector(iter) ((iter).bi_sector + bvec_iter_sectors((iter)))

#define bio_sectors(bio)        bvec_iter_sectors((bio)->bi_iter)
#define bio_end_sector(bio)     bvec_iter_end_sector((bio)->bi_iter)

/*
 * Return the data direction, READ or WRITE.
 */
#define bio_data_dir(bio) \
        (op_is_write(bio_op(bio)) ? WRITE : READ)

/*
 * Check whether this bio carries any data or not. A NULL bio is allowed.
 */
static inline bool bio_has_data(struct bio *bio)
{
        if (bio &&
            bio->bi_iter.bi_size &&
            bio_op(bio) != REQ_OP_DISCARD &&
            bio_op(bio) != REQ_OP_SECURE_ERASE &&
            bio_op(bio) != REQ_OP_WRITE_ZEROES)
                return true;

        return false;
}

static inline bool bio_no_advance_iter(const struct bio *bio)
{
        return bio_op(bio) == REQ_OP_DISCARD ||
               bio_op(bio) == REQ_OP_SECURE_ERASE ||
               bio_op(bio) == REQ_OP_WRITE_ZEROES;
}

static inline void *bio_data(struct bio *bio)
{
        if (bio_has_data(bio))
                return page_address(bio_page(bio)) + bio_offset(bio);

        return NULL;
}

static inline bool bio_next_segment(const struct bio *bio,
                                    struct bvec_iter_all *iter)
{
        if (iter->idx >= bio->bi_vcnt)
                return false;

        bvec_advance(&bio->bi_io_vec[iter->idx], iter);
        return true;
}

/*
 * drivers should _never_ use the all version - the bio may have been split
 * before it got to the driver and the driver won't own all of it
 */
#define bio_for_each_segment_all(bvl, bio, iter) \
        for (bvl = bvec_init_iter_all(&iter); bio_next_segment((bio), &iter); )

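As the comment above notes, bio_for_each_segment_all() is reserved for the bio's owner, which knows the bio was never split. A minimal sketch of an owner walking every mapped page at completion time; the helper name and the dirtying step are illustrative, not part of bio.h:

    static void example_owner_mark_dirty(struct bio *bio)
    {
            struct bio_vec *bvec;
            struct bvec_iter_all iter_all;

            /* Safe only for a bio this code allocated itself and never split. */
            bio_for_each_segment_all(bvec, bio, iter_all)
                    set_page_dirty_lock(bvec->bv_page);
    }
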
static inline void bio_advance_iter(const struct bio *bio,
                                    struct bvec_iter *iter, unsigned int bytes)
{
        iter->bi_sector += bytes >> 9;

        if (bio_no_advance_iter(bio))
                iter->bi_size -= bytes;
        else
                bvec_iter_advance(bio->bi_io_vec, iter, bytes);
                /* TODO: It is reasonable to complete bio with error here. */
}

/* @bytes should be less or equal to bvec[i->bi_idx].bv_len */
static inline void bio_advance_iter_single(const struct bio *bio,
                                           struct bvec_iter *iter,
                                           unsigned int bytes)
{
        iter->bi_sector += bytes >> 9;

        if (bio_no_advance_iter(bio))
                iter->bi_size -= bytes;
        else
                bvec_iter_advance_single(bio->bi_io_vec, iter, bytes);
}

void __bio_advance(struct bio *, unsigned bytes);

/**
 * bio_advance - increment/complete a bio by some number of bytes
 * @bio:        bio to advance
 * @nbytes:     number of bytes to complete
 *
 * This updates bi_sector, bi_size and bi_idx; if the number of bytes to
 * complete doesn't align with a bvec boundary, then bv_len and bv_offset will
 * be updated on the last bvec as well.
 *
 * @bio will then represent the remaining, uncompleted portion of the io.
 */
static inline void bio_advance(struct bio *bio, unsigned int nbytes)
{
        if (nbytes == bio->bi_iter.bi_size) {
                bio->bi_iter.bi_size = 0;
                return;
        }
        __bio_advance(bio, nbytes);
}

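bio_advance() is typically called as a transfer partially completes; afterwards the bio describes only the outstanding bytes. A hedged sketch (the helper name is invented for illustration):

    static void example_partial_completion(struct bio *bio, unsigned int done)
    {
            /* Drop the bytes already transferred; bio_sectors(bio) shrinks. */
            bio_advance(bio, done);
            if (!bio->bi_iter.bi_size)
                    bio_endio(bio);         /* nothing left outstanding */
    }
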
#define __bio_for_each_segment(bvl, bio, iter, start)                   \
        for (iter = (start);                                            \
             (iter).bi_size &&                                          \
                ((bvl = bio_iter_iovec((bio), (iter))), 1);             \
             bio_advance_iter_single((bio), &(iter), (bvl).bv_len))

#define bio_for_each_segment(bvl, bio, iter)                            \
        __bio_for_each_segment(bvl, bio, iter, (bio)->bi_iter)

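bio_for_each_segment() walks the not-yet-processed single-page segments starting from the bio's current bi_iter, without modifying the bio itself. A small sketch (the counting helper is illustrative only):

    static unsigned int example_bytes_remaining(struct bio *bio)
    {
            struct bio_vec bv;
            struct bvec_iter iter;
            unsigned int bytes = 0;

            bio_for_each_segment(bv, bio, iter)
                    bytes += bv.bv_len;
            return bytes;
    }
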
#define __bio_for_each_bvec(bvl, bio, iter, start)                      \
        for (iter = (start);                                            \
             (iter).bi_size &&                                          \
                ((bvl = mp_bvec_iter_bvec((bio)->bi_io_vec, (iter))), 1); \
             bio_advance_iter_single((bio), &(iter), (bvl).bv_len))

/* iterate over multi-page bvec */
#define bio_for_each_bvec(bvl, bio, iter)                       \
        __bio_for_each_bvec(bvl, bio, iter, (bio)->bi_iter)

/*
 * Iterate over all multi-page bvecs. Drivers shouldn't use this version for the
 * same reasons as bio_for_each_segment_all().
 */
#define bio_for_each_bvec_all(bvl, bio, i)              \
        for (i = 0, bvl = bio_first_bvec_all(bio);      \
             i < (bio)->bi_vcnt; i++, bvl++)

#define bio_iter_last(bvec, iter) ((iter).bi_size == (bvec).bv_len)

static inline unsigned bio_segments(struct bio *bio)
{
        unsigned segs = 0;
        struct bio_vec bv;
        struct bvec_iter iter;

        /*
         * We special case discard/write same/write zeroes, because they
         * interpret bi_size differently:
         */

        switch (bio_op(bio)) {
        case REQ_OP_DISCARD:
        case REQ_OP_SECURE_ERASE:
        case REQ_OP_WRITE_ZEROES:
                return 0;
        default:
                break;
        }

        bio_for_each_segment(bv, bio, iter)
                segs++;

        return segs;
}

/*
 * get a reference to a bio, so it won't disappear. the intended use is
 * something like:
 *
 * bio_get(bio);
 * submit_bio(rw, bio);
 * if (bio->bi_flags ...)
 *      do_something
 * bio_put(bio);
 *
 * without the bio_get(), it could potentially complete I/O before submit_bio
 * returns. and then bio would be freed memory when if (bio->bi_flags ...)
 * runs
 */
static inline void bio_get(struct bio *bio)
{
        bio->bi_flags |= (1 << BIO_REFFED);
        smp_mb__before_atomic();
        atomic_inc(&bio->__bi_cnt);
}

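A compilable version of the pattern sketched in the comment above: take a reference before submission if the submitter still needs to inspect the bio afterwards. Note that submit_bio() takes only the bio in current kernels; the flag check below is purely illustrative:

    static void example_submit_and_inspect(struct bio *bio)
    {
            bio_get(bio);
            submit_bio(bio);
            /* The bio cannot be freed under us even if it already completed. */
            if (bio_flagged(bio, BIO_QUIET))
                    pr_debug("bio completed quietly\n");
            bio_put(bio);
    }
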
static inline void bio_cnt_set(struct bio *bio, unsigned int count)
{
        if (count != 1) {
                bio->bi_flags |= (1 << BIO_REFFED);
                smp_mb();
        }
        atomic_set(&bio->__bi_cnt, count);
}

static inline bool bio_flagged(struct bio *bio, unsigned int bit)
{
        return bio->bi_flags & (1U << bit);
}

static inline void bio_set_flag(struct bio *bio, unsigned int bit)
{
        bio->bi_flags |= (1U << bit);
}

static inline void bio_clear_flag(struct bio *bio, unsigned int bit)
{
        bio->bi_flags &= ~(1U << bit);
}

static inline struct bio_vec *bio_first_bvec_all(struct bio *bio)
{
        WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
        return bio->bi_io_vec;
}

static inline struct page *bio_first_page_all(struct bio *bio)
{
        return bio_first_bvec_all(bio)->bv_page;
}

static inline struct folio *bio_first_folio_all(struct bio *bio)
{
        return page_folio(bio_first_page_all(bio));
}

static inline struct bio_vec *bio_last_bvec_all(struct bio *bio)
{
        WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
        return &bio->bi_io_vec[bio->bi_vcnt - 1];
}

/**
 * struct folio_iter - State for iterating all folios in a bio.
 * @folio: The current folio we're iterating.
 * @offset: The byte offset within the current folio.
 * @length: The number of bytes in this iteration (will not cross folio
 *      boundary).
 */
struct folio_iter {
        struct folio *folio;
        size_t offset;
        size_t length;
        /* private: for use by the iterator */
        struct folio *_next;
        size_t _seg_count;
        int _i;
};

static inline void bio_first_folio(struct folio_iter *fi, struct bio *bio,
                                   int i)
{
        struct bio_vec *bvec = bio_first_bvec_all(bio) + i;

        if (unlikely(i >= bio->bi_vcnt)) {
                fi->folio = NULL;
                return;
        }

        fi->folio = page_folio(bvec->bv_page);
        fi->offset = bvec->bv_offset +
                        PAGE_SIZE * (bvec->bv_page - &fi->folio->page);
        fi->_seg_count = bvec->bv_len;
        fi->length = min(folio_size(fi->folio) - fi->offset, fi->_seg_count);
        fi->_next = folio_next(fi->folio);
        fi->_i = i;
}

static inline void bio_next_folio(struct folio_iter *fi, struct bio *bio)
{
        fi->_seg_count -= fi->length;
        if (fi->_seg_count) {
                fi->folio = fi->_next;
                fi->offset = 0;
                fi->length = min(folio_size(fi->folio), fi->_seg_count);
                fi->_next = folio_next(fi->folio);
        } else {
                bio_first_folio(fi, bio, fi->_i + 1);
        }
}

/**
 * bio_for_each_folio_all - Iterate over each folio in a bio.
 * @fi: struct folio_iter which is updated for each folio.
 * @bio: struct bio to iterate over.
 */
#define bio_for_each_folio_all(fi, bio)                         \
        for (bio_first_folio(&fi, bio, 0); fi.folio; bio_next_folio(&fi, bio))

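bio_for_each_folio_all() is likewise owner-only; each step yields the current folio plus the offset and length within it. A sketch of a read-completion handler, assuming the owner locked the folios before submission (names are illustrative):

    static void example_end_read(struct bio *bio)
    {
            struct folio_iter fi;

            bio_for_each_folio_all(fi, bio) {
                    if (!bio->bi_status)
                            folio_mark_uptodate(fi.folio);
                    folio_unlock(fi.folio);
            }
    }
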
void bio_trim(struct bio *bio, sector_t offset, sector_t size);
extern struct bio *bio_split(struct bio *bio, int sectors,
                             gfp_t gfp, struct bio_set *bs);
int bio_split_rw_at(struct bio *bio, const struct queue_limits *lim,
                unsigned *segs, unsigned max_bytes);

/**
 * bio_next_split - get next @sectors from a bio, splitting if necessary
 * @bio:        bio to split
 * @sectors:    number of sectors to split from the front of @bio
 * @gfp:        gfp mask
 * @bs:         bio set to allocate from
 *
 * Return: a bio representing the next @sectors of @bio - if the bio is smaller
 * than @sectors, returns the original bio unchanged.
 */
static inline struct bio *bio_next_split(struct bio *bio, int sectors,
                                         gfp_t gfp, struct bio_set *bs)
{
        if (sectors >= bio_sectors(bio))
                return bio;

        return bio_split(bio, sectors, gfp, bs);
}

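A common use of bio_next_split() is peeling bounded chunks off a large bio, chaining each chunk to the remainder so the parent completes only when everything has. A sketch, assuming a caller-provided bio_set and an arbitrary 256-sector chunk limit:

    static void example_issue_in_chunks(struct bio *bio, struct bio_set *bs)
    {
            struct bio *split;

            while ((split = bio_next_split(bio, 256, GFP_NOIO, bs)) != bio) {
                    bio_chain(split, bio);
                    submit_bio(split);
            }
            submit_bio(bio);        /* the final (or only) piece */
    }
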
enum {
        BIOSET_NEED_BVECS = BIT(0),
        BIOSET_NEED_RESCUER = BIT(1),
        BIOSET_PERCPU_CACHE = BIT(2),
};
extern int bioset_init(struct bio_set *, unsigned int, unsigned int, int flags);
extern void bioset_exit(struct bio_set *);
extern int biovec_init_pool(mempool_t *pool, int pool_entries);

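Drivers that allocate bios on their hot path usually embed a private bio_set and initialize it once. A sketch of init/teardown; the pool size and flags shown are illustrative choices, not requirements:

    static struct bio_set example_bio_set;

    static int __init example_init(void)
    {
            /* args: bio_set, pool size, front_pad bytes, behaviour flags */
            return bioset_init(&example_bio_set, BIO_POOL_SIZE, 0,
                               BIOSET_NEED_BVECS);
    }

    static void __exit example_exit(void)
    {
            bioset_exit(&example_bio_set);
    }
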
struct bio *bio_alloc_bioset(struct block_device *bdev, unsigned short nr_vecs,
                             blk_opf_t opf, gfp_t gfp_mask,
                             struct bio_set *bs);
struct bio *bio_kmalloc(unsigned short nr_vecs, gfp_t gfp_mask);
extern void bio_put(struct bio *);

struct bio *bio_alloc_clone(struct block_device *bdev, struct bio *bio_src,
                gfp_t gfp, struct bio_set *bs);
int bio_init_clone(struct block_device *bdev, struct bio *bio,
                struct bio *bio_src, gfp_t gfp);

extern struct bio_set fs_bio_set;

static inline struct bio *bio_alloc(struct block_device *bdev,
                unsigned short nr_vecs, blk_opf_t opf, gfp_t gfp_mask)
{
        return bio_alloc_bioset(bdev, nr_vecs, opf, gfp_mask, &fs_bio_set);
}

void submit_bio(struct bio *bio);

extern void bio_endio(struct bio *);

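Putting the allocation and submission interfaces together: a minimal synchronous single-sector read, assuming the caller supplies the page and sector (the helper is a sketch, not a bio.h API):

    static int example_read_sector(struct block_device *bdev, sector_t sector,
                                   struct page *page)
    {
            struct bio *bio;
            int ret;

            bio = bio_alloc(bdev, 1, REQ_OP_READ, GFP_KERNEL);
            bio->bi_iter.bi_sector = sector;
            __bio_add_page(bio, page, SECTOR_SIZE, 0);

            ret = submit_bio_wait(bio);     /* blocks until bio_endio() */
            bio_put(bio);
            return ret;
    }
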
static inline void bio_io_error(struct bio *bio)
{
        bio->bi_status = BLK_STS_IOERR;
        bio_endio(bio);
}

static inline void bio_wouldblock_error(struct bio *bio)
{
        bio_set_flag(bio, BIO_QUIET);
        bio->bi_status = BLK_STS_AGAIN;
        bio_endio(bio);
}

/*
 * Calculate number of bvec segments that should be allocated to fit data
 * pointed by @iter. If @iter is backed by bvec it's going to be reused
 * instead of allocating a new one.
 */
static inline int bio_iov_vecs_to_alloc(struct iov_iter *iter, int max_segs)
{
        if (iov_iter_is_bvec(iter))
                return 0;
        return iov_iter_npages(iter, max_segs);
}

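Direct-I/O code uses bio_iov_vecs_to_alloc() to size the bio for an iov_iter: for a bvec-backed iter it returns 0 because the existing bio_vec array will be reused. A hedged sketch of that sizing step (helper name assumed):

    static struct bio *example_bio_for_iter(struct block_device *bdev,
                                            struct iov_iter *iter, blk_opf_t opf)
    {
            unsigned int nr_vecs;

            nr_vecs = bio_max_segs(bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS));
            /* nr_vecs may be 0 for a bvec-backed iter; its pages get reused. */
            return bio_alloc(bdev, nr_vecs, opf, GFP_KERNEL);
    }
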
struct request_queue;

extern int submit_bio_wait(struct bio *bio);
void bio_init(struct bio *bio, struct block_device *bdev, struct bio_vec *table,
              unsigned short max_vecs, blk_opf_t opf);
extern void bio_uninit(struct bio *);
void bio_reset(struct bio *bio, struct block_device *bdev, blk_opf_t opf);
void bio_chain(struct bio *, struct bio *);

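bio_init() prepares a caller-provided bio, which lets small synchronous requests use an on-stack bio with no allocation at all. A sketch of a cache-flush request in that style (mirroring, but not copied from, in-kernel users):

    static int example_flush(struct block_device *bdev)
    {
            struct bio bio;
            int ret;

            /* A flush carries no data, so no bvec table is needed. */
            bio_init(&bio, bdev, NULL, 0, REQ_OP_WRITE | REQ_PREFLUSH);
            ret = submit_bio_wait(&bio);
            bio_uninit(&bio);
            return ret;
    }
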
int __must_check bio_add_page(struct bio *bio, struct page *page, unsigned len,
                              unsigned off);
bool __must_check bio_add_folio(struct bio *bio, struct folio *folio,
                                size_t len, size_t off);
extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *,
                           unsigned int, unsigned int);
int bio_add_zone_append_page(struct bio *bio, struct page *page,
                             unsigned int len, unsigned int offset);
void __bio_add_page(struct bio *bio, struct page *page,
                unsigned int len, unsigned int off);
void bio_add_folio_nofail(struct bio *bio, struct folio *folio, size_t len,
                          size_t off);
int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter);
void bio_iov_bvec_set(struct bio *bio, struct iov_iter *iter);
void __bio_release_pages(struct bio *bio, bool mark_dirty);
extern void bio_set_pages_dirty(struct bio *bio);
extern void bio_check_pages_dirty(struct bio *bio);

extern void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter,
                               struct bio *src, struct bvec_iter *src_iter);
extern void bio_copy_data(struct bio *dst, struct bio *src);
extern void bio_free_pages(struct bio *bio);
void guard_bio_eod(struct bio *bio);
void zero_fill_bio_iter(struct bio *bio, struct bvec_iter iter);

static inline void zero_fill_bio(struct bio *bio)
{
        zero_fill_bio_iter(bio, bio->bi_iter);
}

static inline void bio_release_pages(struct bio *bio, bool mark_dirty)
{
        if (bio_flagged(bio, BIO_PAGE_PINNED))
                __bio_release_pages(bio, mark_dirty);
}

#define bio_dev(bio) \
        disk_devt((bio)->bi_bdev->bd_disk)

#ifdef CONFIG_BLK_CGROUP
void bio_associate_blkg(struct bio *bio);
void bio_associate_blkg_from_css(struct bio *bio,
                                 struct cgroup_subsys_state *css);
void bio_clone_blkg_association(struct bio *dst, struct bio *src);
void blkcg_punt_bio_submit(struct bio *bio);
#else   /* CONFIG_BLK_CGROUP */
static inline void bio_associate_blkg(struct bio *bio) { }
static inline void bio_associate_blkg_from_css(struct bio *bio,
                                               struct cgroup_subsys_state *css)
{ }
static inline void bio_clone_blkg_association(struct bio *dst,
                                              struct bio *src) { }
static inline void blkcg_punt_bio_submit(struct bio *bio)
{
        submit_bio(bio);
}
#endif  /* CONFIG_BLK_CGROUP */

static inline void bio_set_dev(struct bio *bio, struct block_device *bdev)
{
        bio_clear_flag(bio, BIO_REMAPPED);
        if (bio->bi_bdev != bdev)
                bio_clear_flag(bio, BIO_BPS_THROTTLED);
        bio->bi_bdev = bdev;
        bio_associate_blkg(bio);
}

/*
 * BIO list management for use by remapping drivers (e.g. DM or MD) and loop.
 *
 * A bio_list anchors a singly-linked list of bios chained through the bi_next
 * member of the bio.  The bio_list also caches the last list member for
 * fast access to the tail.
 */
struct bio_list {
        struct bio *head;
        struct bio *tail;
};

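A typical bio_list consumer queues bios for later resubmission and then drains the list; bio_list_pop() detaches each bio (clearing bi_next) before it is submitted again. A small sketch:

    static void example_drain(struct bio_list *list)
    {
            struct bio *bio;

            while ((bio = bio_list_pop(list)))
                    submit_bio(bio);
    }
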
static inline int bio_list_empty(const struct bio_list *bl)
{
        return bl->head == NULL;
}

static inline void bio_list_init(struct bio_list *bl)
{
        bl->head = bl->tail = NULL;
}

#define BIO_EMPTY_LIST  { NULL, NULL }

#define bio_list_for_each(bio, bl) \
        for (bio = (bl)->head; bio; bio = bio->bi_next)

static inline unsigned bio_list_size(const struct bio_list *bl)
{
        unsigned sz = 0;
        struct bio *bio;

        bio_list_for_each(bio, bl)
                sz++;

        return sz;
}

static inline void bio_list_add(struct bio_list *bl, struct bio *bio)
{
        bio->bi_next = NULL;

        if (bl->tail)
                bl->tail->bi_next = bio;
        else
                bl->head = bio;

        bl->tail = bio;
}

static inline void bio_list_add_head(struct bio_list *bl, struct bio *bio)
{
        bio->bi_next = bl->head;

        bl->head = bio;

        if (!bl->tail)
                bl->tail = bio;
}

static inline void bio_list_merge(struct bio_list *bl, struct bio_list *bl2)
{
        if (!bl2->head)
                return;

        if (bl->tail)
                bl->tail->bi_next = bl2->head;
        else
                bl->head = bl2->head;

        bl->tail = bl2->tail;
}

static inline void bio_list_merge_init(struct bio_list *bl,
                struct bio_list *bl2)
{
        bio_list_merge(bl, bl2);
        bio_list_init(bl2);
}

static inline void bio_list_merge_head(struct bio_list *bl,
                                       struct bio_list *bl2)
{
        if (!bl2->head)
                return;

        if (bl->head)
                bl2->tail->bi_next = bl->head;
        else
                bl->tail = bl2->tail;

        bl->head = bl2->head;
}

static inline struct bio *bio_list_peek(struct bio_list *bl)
{
        return bl->head;
}

static inline struct bio *bio_list_pop(struct bio_list *bl)
{
        struct bio *bio = bl->head;

        if (bio) {
                bl->head = bl->head->bi_next;
                if (!bl->head)
                        bl->tail = NULL;

                bio->bi_next = NULL;
        }

        return bio;
}

static inline struct bio *bio_list_get(struct bio_list *bl)
{
        struct bio *bio = bl->head;

        bl->head = bl->tail = NULL;

        return bio;
}

/*
 * Increment chain count for the bio. Make sure the CHAIN flag update
 * is visible before the raised count.
 */
static inline void bio_inc_remaining(struct bio *bio)
{
        bio_set_flag(bio, BIO_CHAIN);
        smp_mb__before_atomic();
        atomic_inc(&bio->__bi_remaining);
}

/*
 * bio_set is used to allow other portions of the IO system to
 * allocate their own private memory pools for bio and iovec structures.
 * These memory pools in turn all allocate from the bio_slab
 * and the bvec_slabs[].
 */
#define BIO_POOL_SIZE 2

struct bio_set {
        struct kmem_cache *bio_slab;
        unsigned int front_pad;

        /*
         * per-cpu bio alloc cache
         */
        struct bio_alloc_cache __percpu *cache;

        mempool_t bio_pool;
        mempool_t bvec_pool;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
        mempool_t bio_integrity_pool;
        mempool_t bvec_integrity_pool;
#endif

        unsigned int back_pad;
        /*
         * Deadlock avoidance for stacking block drivers: see comments in
         * bio_alloc_bioset() for details
         */
        spinlock_t              rescue_lock;
        struct bio_list         rescue_list;
        struct work_struct      rescue_work;
        struct workqueue_struct *rescue_workqueue;

        /*
         * Hot un-plug notifier for the per-cpu cache, if used
         */
        struct hlist_node cpuhp_dead;
};

static inline bool bioset_initialized(struct bio_set *bs)
{
        return bs->bio_slab != NULL;
}

/*
 * Mark a bio as polled. Note that for async polled IO, the caller must
 * expect -EWOULDBLOCK if we cannot allocate a request (or other resources).
 * We cannot block waiting for requests on polled IO, as those completions
 * must be found by the caller. This is different than IRQ driven IO, where
 * it's safe to wait for IO to complete.
 */
static inline void bio_set_polled(struct bio *bio, struct kiocb *kiocb)
{
        bio->bi_opf |= REQ_POLLED;
        if (kiocb->ki_flags & IOCB_NOWAIT)
                bio->bi_opf |= REQ_NOWAIT;
}

static inline void bio_clear_polled(struct bio *bio)
{
        bio->bi_opf &= ~REQ_POLLED;
}

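A direct-I/O submission path might mark the bio polled only when the kiocb asked for high-priority polling; completions are then reaped by the issuer instead of an interrupt. A hedged sketch (the IOCB_HIPRI test mirrors common usage but is an assumption here):

    static void example_submit_polled(struct bio *bio, struct kiocb *iocb)
    {
            if (iocb->ki_flags & IOCB_HIPRI)
                    bio_set_polled(bio, iocb);      /* also honours IOCB_NOWAIT */
            submit_bio(bio);
    }
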
struct bio *blk_next_bio(struct bio *bio, struct block_device *bdev,
                unsigned int nr_pages, blk_opf_t opf, gfp_t gfp);
struct bio *bio_chain_and_submit(struct bio *prev, struct bio *new);

struct bio *blk_alloc_discard_bio(struct block_device *bdev,
                sector_t *sector, sector_t *nr_sects, gfp_t gfp_mask);

#endif /* __LINUX_BIO_H */
