#ifndef IO_URING_TYPES_H
#define IO_URING_TYPES_H

#include <linux/blkdev.h>
#include <linux/hashtable.h>
#include <linux/task_work.h>
#include <linux/bitmap.h>
#include <linux/llist.h>
#include <uapi/linux/io_uring.h>

enum {
	/*
	 * A hint to not wake right away but delay until there are enough
	 * task_work items queued to match the number of CQEs the task is
	 * waiting for.
	 *
	 * Must not be used with requests generating more than one CQE.
	 * It's also ignored unless IORING_SETUP_DEFER_TASKRUN is set.
	 */
	IOU_F_TWQ_LAZY_WAKE = 1,
};

enum io_uring_cmd_flags {
	IO_URING_F_COMPLETE_DEFER = 1,
	IO_URING_F_UNLOCKED = 2,
	/* the request is executed from poll, so it should not be freed */
	IO_URING_F_MULTISHOT = 4,
	/* executed by io-wq */
	IO_URING_F_IOWQ = 8,
	/* int's last bit, sign checks are usually faster than a bit test; see the example below */
	IO_URING_F_NONBLOCK = INT_MIN,

	/* ctx state flags, for URING_CMD */
	IO_URING_F_SQE128 = (1 << 8),
	IO_URING_F_CQE32 = (1 << 9),
	IO_URING_F_IOPOLL = (1 << 10),

	/* set when uring wants to cancel a previously issued command */
	IO_URING_F_CANCEL = (1 << 11),
	IO_URING_F_COMPAT = (1 << 12),
};
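/*
 * Illustrative sketch, not part of the original header: IO_URING_F_NONBLOCK
 * occupies the sign bit of an int, so on the usual two's-complement targets a
 * handler can test it with a sign check instead of a bit test. The
 * "issue_flags" parameter name is assumed for the example.
 *
 *	if (issue_flags & IO_URING_F_NONBLOCK)	// bit test
 *	if ((int)issue_flags < 0)		// same predicate via sign check
 */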
struct io_wq_work_node {
	struct io_wq_work_node *next;
};

struct io_wq_work_list {
	struct io_wq_work_node *first;
	struct io_wq_work_node *last;
};

struct io_wq_work {
	struct io_wq_work_node list;
	atomic_t flags;
	/* place it here instead of io_kiocb as it fills padding and saves 4B */
	int cancel_seq;
};

struct io_fixed_file {
	/* file * with additional FFS_* flags */
	unsigned long file_ptr;
};

struct io_file_table {
	struct io_fixed_file *files;
	unsigned long *bitmap;
	unsigned int alloc_hint;
};

struct io_hash_bucket {
	spinlock_t lock;
	struct hlist_head list;
} ____cacheline_aligned_in_smp;

struct io_hash_table {
	struct io_hash_bucket *hbs;
	unsigned hash_bits;
};

/*
 * Arbitrary limit, can be raised if need be
 */
#define IO_RINGFD_REG_MAX 16

struct io_uring_task {
	/* submission side */
	int cached_refs;
	const struct io_ring_ctx *last;
	struct io_wq *io_wq;
	struct file *registered_rings[IO_RINGFD_REG_MAX];

	struct xarray xa;
	struct wait_queue_head wait;
	atomic_t in_cancel;
	atomic_t inflight_tracked;
	struct percpu_counter inflight;

	struct { /* task_work */
		struct llist_head task_list;
		struct callback_head task_work;
	} ____cacheline_aligned_in_smp;
};

struct io_uring {
	u32 head;
	u32 tail;
};

/*
 * This data is shared with the application through the mmap at offsets
 * IORING_OFF_SQ_RING and IORING_OFF_CQ_RING.
 *
 * The offsets to the member fields are published through struct
 * io_sqring_offsets when calling io_uring_setup.
 */
struct io_rings {
	/*
	 * Head and tail offsets into the ring; the offsets need to be
	 * masked to get valid indices (see the sketch following this struct).
	 *
	 * The kernel controls the head of the sq ring and the tail of the cq
	 * ring, and the application controls the tail of the sq ring and the
	 * head of the cq ring.
	 */
	struct io_uring sq, cq;
	/*
	 * Bitmasks to apply to head and tail offsets (constant, equals
	 * ring_entries - 1)
	 */
	u32 sq_ring_mask, cq_ring_mask;
	/* Ring sizes (constant, power of 2) */
	u32 sq_ring_entries, cq_ring_entries;
	/*
	 * Number of invalid entries dropped by the kernel due to an
	 * invalid index stored in the array.
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application (i.e. get number of "new events" by comparing to
	 * cached value).
	 *
	 * After the application has read a new SQ head value, this
	 * counter includes all submissions that were dropped while
	 * reaching the new SQ head (and possibly more).
	 */
	u32 sq_dropped;
	/*
	 * Runtime SQ flags
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application.
	 *
	 * The application needs a full memory barrier before checking
	 * for IORING_SQ_NEED_WAKEUP after updating the sq tail.
	 */
	atomic_t sq_flags;
	/*
	 * Runtime CQ flags
	 *
	 * Written by the application, shouldn't be modified by the
	 * kernel.
	 */
	u32 cq_flags;
	/*
	 * Number of completion events lost because the queue was full;
	 * this should be avoided by the application by making sure
	 * there are not more requests pending than there is space in
	 * the completion queue.
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application (i.e. get number of "new events" by comparing to
	 * cached value).
	 *
	 * As completion events come in out of order, this counter is not
	 * ordered with any other data.
	 */
	u32 cq_overflow;
	/*
	 * Ring buffer of completion events.
	 *
	 * The kernel writes completion events fresh every time they are
	 * produced, so the application is allowed to modify pending
	 * entries.
	 */
	struct io_uring_cqe cqes[] ____cacheline_aligned_in_smp;
};
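/*
 * Illustrative sketch, not part of the original header: how an application
 * might drain CQEs from the shared ring described above. The local variables
 * are assumed to have been derived from the IORING_OFF_CQ_RING mmap using the
 * offsets published at setup time; the acquire/release pair matches the
 * kernel's tail update and keeps reads of the CQE contents ordered.
 *
 *	unsigned head = *cq_head;			// application-owned index
 *	unsigned tail = smp_load_acquire(cq_tail);	// kernel-written tail
 *
 *	while (head != tail) {
 *		struct io_uring_cqe *cqe = &cqes[head & cq_ring_mask];
 *		// consume cqe->user_data, cqe->res, cqe->flags
 *		head++;
 *	}
 *	smp_store_release(cq_head, head);		// publish consumed entries
 */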
struct io_restriction {
	DECLARE_BITMAP(register_op, IORING_REGISTER_LAST);
	DECLARE_BITMAP(sqe_op, IORING_OP_LAST);
	u8 sqe_flags_allowed;
	u8 sqe_flags_required;
	bool registered;
};

struct io_submit_link {
	struct io_kiocb *head;
	struct io_kiocb *last;
};

struct io_submit_state {
	/* inline/task_work completion list, under ->uring_lock */
	struct io_wq_work_node free_list;
	/* batch completion logic */
	struct io_wq_work_list compl_reqs;
	struct io_submit_link link;

	bool plug_started;
	bool need_plug;
	bool cq_flush;
	unsigned short submit_nr;
	struct blk_plug plug;
};

struct io_alloc_cache {
	void **entries;
	unsigned int nr_cached;
	unsigned int max_cached;
	size_t elem_size;
};

struct io_ring_ctx {
	/* const or read-mostly hot data */
	struct {
		unsigned int flags;
		unsigned int drain_next: 1;
		unsigned int restricted: 1;
		unsigned int off_timeout_used: 1;
		unsigned int drain_active: 1;
		unsigned int has_evfd: 1;
		/* all CQEs should be posted only by the submitter task */
		unsigned int task_complete: 1;
		unsigned int lockless_cq: 1;
		unsigned int syscall_iopoll: 1;
		unsigned int poll_activated: 1;
		unsigned int drain_disabled: 1;
		unsigned int compat: 1;
		unsigned int iowq_limits_set: 1;

		struct task_struct *submitter_task;
		struct io_rings *rings;
		struct percpu_ref refs;

		enum task_work_notify_mode notify_method;
		unsigned sq_thread_idle;
	} ____cacheline_aligned_in_smp;

	/* submission data */
	struct {
		struct mutex uring_lock;

		/*
		 * Ring buffer of indices into the array of io_uring_sqe, which
		 * is mmapped by the application using the IORING_OFF_SQES
		 * offset.
		 *
		 * This indirection could e.g. be used to assign fixed
		 * io_uring_sqe entries to operations and only submit them to
		 * the queue when needed.
		 *
		 * The kernel modifies neither the indices array nor the entries
		 * array. (A lookup sketch follows this struct definition.)
		 */
		u32 *sq_array;
		struct io_uring_sqe *sq_sqes;
		unsigned cached_sq_head;
		unsigned sq_entries;

		/*
		 * Fixed resources fast path, should be accessed only under
		 * uring_lock, and updated through io_uring_register(2)
		 */
		struct io_rsrc_node *rsrc_node;
		atomic_t cancel_seq;

		/*
		 * ->iopoll_list is protected by the ctx->uring_lock for
		 * io_uring instances that don't use IORING_SETUP_SQPOLL.
		 * For SQPOLL, only the single threaded io_sq_thread() will
		 * manipulate the list, hence no extra locking is needed there.
		 */
		bool poll_multi_queue;
		struct io_wq_work_list iopoll_list;

		struct io_file_table file_table;
		struct io_mapped_ubuf **user_bufs;
		unsigned nr_user_files;
		unsigned nr_user_bufs;

		struct io_submit_state submit_state;

		struct xarray io_bl_xa;

		struct io_hash_table cancel_table_locked;
		struct io_alloc_cache apoll_cache;
		struct io_alloc_cache netmsg_cache;
		struct io_alloc_cache rw_cache;
		struct io_alloc_cache uring_cache;

		/*
		 * Any cancelable uring_cmd is added to this list in
		 * ->uring_cmd() by io_uring_cmd_insert_cancelable()
		 */
		struct hlist_head cancelable_uring_cmd;
	} ____cacheline_aligned_in_smp;

	struct {
		/*
		 * We cache a range of free CQEs we can use; once exhausted, it
		 * should go through a slower range setup, see __io_get_cqe()
		 */
		struct io_uring_cqe *cqe_cached;
		struct io_uring_cqe *cqe_sentinel;

		unsigned cached_cq_tail;
		unsigned cq_entries;
		struct io_ev_fd __rcu *io_ev_fd;
		unsigned cq_extra;
	} ____cacheline_aligned_in_smp;

	/*
	 * task_work and async notification delivery cacheline. Expected to
	 * regularly bounce between CPUs.
	 */
	struct {
		struct llist_head work_llist;
		unsigned long check_cq;
		atomic_t cq_wait_nr;
		atomic_t cq_timeouts;
		struct wait_queue_head cq_wait;
	} ____cacheline_aligned_in_smp;

	/* timeouts */
	struct {
		spinlock_t timeout_lock;
		struct list_head timeout_list;
		struct list_head ltimeout_list;
		unsigned cq_last_tm_flush;
	} ____cacheline_aligned_in_smp;

	spinlock_t completion_lock;

	struct list_head io_buffers_comp;
	struct list_head cq_overflow_list;
	struct io_hash_table cancel_table;

	struct hlist_head waitid_list;

#ifdef CONFIG_FUTEX
	struct hlist_head futex_list;
	struct io_alloc_cache futex_cache;
#endif

	const struct cred *sq_creds;	/* cred used for __io_sq_thread() */
	struct io_sq_data *sq_data;	/* if using sq thread polling */

	struct wait_queue_head sqo_sq_wait;
	struct list_head sqd_list;

	unsigned int file_alloc_start;
	unsigned int file_alloc_end;

	struct list_head io_buffers_cache;

	/* Keep this last, we don't need it for the fast path */
	struct wait_queue_head poll_wq;
	struct io_restriction restrictions;

	/* slow path rsrc auxiliary data, used by update/register */
	struct io_rsrc_data *file_data;
	struct io_rsrc_data *buf_data;

	/* protected by ->uring_lock */
	struct list_head rsrc_ref_list;
	struct io_alloc_cache rsrc_node_cache;
	struct wait_queue_head rsrc_quiesce_wq;
	unsigned rsrc_quiesce;

	u32 pers_next;
	struct xarray personalities;

	/* hashed buffered write serialization */
	struct io_wq_hash *hash_map;

	/* Only used for accounting purposes */
	struct user_struct *user;
	struct mm_struct *mm_account;

	/* ctx exit and cancellation */
	struct llist_head fallback_llist;
	struct delayed_work fallback_work;
	struct work_struct exit_work;
	struct list_head tctx_list;
	struct completion ref_comp;

	/* io-wq management, e.g. thread count */
	u32 iowq_limits[2];

	struct callback_head poll_wq_task_work;
	struct list_head defer_list;

	struct io_alloc_cache msg_cache;
	spinlock_t msg_lock;

#ifdef CONFIG_NET_RX_BUSY_POLL
	struct list_head napi_list;	/* track busy poll napi_id */
	spinlock_t napi_lock;		/* napi_list lock */

	/* napi busy poll default timeout */
	ktime_t napi_busy_poll_dt;
	bool napi_prefer_busy_poll;
	bool napi_enabled;

	DECLARE_HASHTABLE(napi_ht, 4);
#endif

	/* protected by ->completion_lock */
	unsigned evfd_last_cq_tail;

	/*
	 * If IORING_SETUP_NO_MMAP is used, then the below holds
	 * the gup'ed pages for the two rings, and the sqes.
	 */
	unsigned short n_ring_pages;
	unsigned short n_sqe_pages;
	struct page **ring_pages;
	struct page **sqe_pages;
};
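/*
 * Illustrative sketch, not part of the original header: roughly how the
 * submission path resolves the next SQE through the sq_array indirection
 * described above. Error paths and ring-setup variants are omitted; an
 * out-of-range index is accounted in rings->sq_dropped instead of being
 * submitted.
 *
 *	unsigned mask = ctx->sq_entries - 1;
 *	unsigned head = ctx->cached_sq_head++ & mask;
 *	unsigned idx = READ_ONCE(ctx->sq_array[head]);
 *
 *	if (idx < ctx->sq_entries)
 *		sqe = &ctx->sq_sqes[idx];
 */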
struct io_tw_state {
};

enum {
	REQ_F_FIXED_FILE_BIT = IOSQE_FIXED_FILE_BIT,
	REQ_F_IO_DRAIN_BIT = IOSQE_IO_DRAIN_BIT,
	REQ_F_LINK_BIT = IOSQE_IO_LINK_BIT,
	REQ_F_HARDLINK_BIT = IOSQE_IO_HARDLINK_BIT,
	REQ_F_FORCE_ASYNC_BIT = IOSQE_ASYNC_BIT,
	REQ_F_BUFFER_SELECT_BIT = IOSQE_BUFFER_SELECT_BIT,
	REQ_F_CQE_SKIP_BIT = IOSQE_CQE_SKIP_SUCCESS_BIT,

	/* first byte is taken by user flags, shift it to not overlap */
	REQ_F_FAIL_BIT = 8,
	REQ_F_INFLIGHT_BIT,
	REQ_F_CUR_POS_BIT,
	REQ_F_NOWAIT_BIT,
	REQ_F_LINK_TIMEOUT_BIT,
	REQ_F_NEED_CLEANUP_BIT,
	REQ_F_POLLED_BIT,
	REQ_F_BUFFER_SELECTED_BIT,
	REQ_F_BUFFER_RING_BIT,
	REQ_F_REISSUE_BIT,
	REQ_F_CREDS_BIT,
	REQ_F_REFCOUNT_BIT,
	REQ_F_ARM_LTIMEOUT_BIT,
	REQ_F_ASYNC_DATA_BIT,
	REQ_F_SKIP_LINK_CQES_BIT,
	REQ_F_SINGLE_POLL_BIT,
	REQ_F_DOUBLE_POLL_BIT,
	REQ_F_APOLL_MULTISHOT_BIT,
	REQ_F_CLEAR_POLLIN_BIT,
	REQ_F_HASH_LOCKED_BIT,
	/* keep async read/write and isreg together and in order */
	REQ_F_SUPPORT_NOWAIT_BIT,
	REQ_F_ISREG_BIT,
	REQ_F_POLL_NO_LAZY_BIT,
	REQ_F_CAN_POLL_BIT,
	REQ_F_BL_EMPTY_BIT,
	REQ_F_BL_NO_RECYCLE_BIT,
	REQ_F_BUFFERS_COMMIT_BIT,

	/* not a real bit, just to check we're not overflowing the space */
	__REQ_F_LAST_BIT,
};

typedef u64 __bitwise io_req_flags_t;
#define IO_REQ_FLAG(bitno)	((__force io_req_flags_t) BIT_ULL((bitno)))

enum {
	/* ctx owns file */
	REQ_F_FIXED_FILE = IO_REQ_FLAG(REQ_F_FIXED_FILE_BIT),
	/* drain existing IO first */
	REQ_F_IO_DRAIN = IO_REQ_FLAG(REQ_F_IO_DRAIN_BIT),
	/* linked sqes */
	REQ_F_LINK = IO_REQ_FLAG(REQ_F_LINK_BIT),
	/* doesn't sever on completion < 0 */
	REQ_F_HARDLINK = IO_REQ_FLAG(REQ_F_HARDLINK_BIT),
	/* IOSQE_ASYNC */
	REQ_F_FORCE_ASYNC = IO_REQ_FLAG(REQ_F_FORCE_ASYNC_BIT),
	/* IOSQE_BUFFER_SELECT */
	REQ_F_BUFFER_SELECT = IO_REQ_FLAG(REQ_F_BUFFER_SELECT_BIT),
	/* IOSQE_CQE_SKIP_SUCCESS */
	REQ_F_CQE_SKIP = IO_REQ_FLAG(REQ_F_CQE_SKIP_BIT),

	/* fail rest of links */
	REQ_F_FAIL = IO_REQ_FLAG(REQ_F_FAIL_BIT),
	/* on inflight list, should be cancelled and waited on exit reliably */
	REQ_F_INFLIGHT = IO_REQ_FLAG(REQ_F_INFLIGHT_BIT),
	/* read/write uses file position */
	REQ_F_CUR_POS = IO_REQ_FLAG(REQ_F_CUR_POS_BIT),
	/* must not punt to workers */
	REQ_F_NOWAIT = IO_REQ_FLAG(REQ_F_NOWAIT_BIT),
	/* has or had linked timeout */
	REQ_F_LINK_TIMEOUT = IO_REQ_FLAG(REQ_F_LINK_TIMEOUT_BIT),
	/* needs cleanup */
	REQ_F_NEED_CLEANUP = IO_REQ_FLAG(REQ_F_NEED_CLEANUP_BIT),
	/* already went through poll handler */
	REQ_F_POLLED = IO_REQ_FLAG(REQ_F_POLLED_BIT),
	/* buffer already selected */
	REQ_F_BUFFER_SELECTED = IO_REQ_FLAG(REQ_F_BUFFER_SELECTED_BIT),
	/* buffer selected from ring, needs commit */
	REQ_F_BUFFER_RING = IO_REQ_FLAG(REQ_F_BUFFER_RING_BIT),
	/* caller should reissue async */
	REQ_F_REISSUE = IO_REQ_FLAG(REQ_F_REISSUE_BIT),
	/* supports async reads/writes */
	REQ_F_SUPPORT_NOWAIT = IO_REQ_FLAG(REQ_F_SUPPORT_NOWAIT_BIT),
	/* regular file */
	REQ_F_ISREG = IO_REQ_FLAG(REQ_F_ISREG_BIT),
	/* has creds assigned */
	REQ_F_CREDS = IO_REQ_FLAG(REQ_F_CREDS_BIT),
	/* skip refcounting if not set */
	REQ_F_REFCOUNT = IO_REQ_FLAG(REQ_F_REFCOUNT_BIT),
	/* there is a linked timeout that has to be armed */
	REQ_F_ARM_LTIMEOUT = IO_REQ_FLAG(REQ_F_ARM_LTIMEOUT_BIT),
	/* ->async_data allocated */
	REQ_F_ASYNC_DATA = IO_REQ_FLAG(REQ_F_ASYNC_DATA_BIT),
	/* don't post CQEs while failing linked requests */
	REQ_F_SKIP_LINK_CQES = IO_REQ_FLAG(REQ_F_SKIP_LINK_CQES_BIT),
	/* single poll may be active */
	REQ_F_SINGLE_POLL = IO_REQ_FLAG(REQ_F_SINGLE_POLL_BIT),
	/* double poll may be active */
	REQ_F_DOUBLE_POLL = IO_REQ_FLAG(REQ_F_DOUBLE_POLL_BIT),
	/* fast poll multishot mode */
	REQ_F_APOLL_MULTISHOT = IO_REQ_FLAG(REQ_F_APOLL_MULTISHOT_BIT),
	/* recvmsg special flag, clear EPOLLIN */
	REQ_F_CLEAR_POLLIN = IO_REQ_FLAG(REQ_F_CLEAR_POLLIN_BIT),
	/* hashed into ->cancel_hash_locked, protected by ->uring_lock */
	REQ_F_HASH_LOCKED = IO_REQ_FLAG(REQ_F_HASH_LOCKED_BIT),
	/* don't use lazy poll wake for this request */
	REQ_F_POLL_NO_LAZY = IO_REQ_FLAG(REQ_F_POLL_NO_LAZY_BIT),
	/* file is pollable */
	REQ_F_CAN_POLL = IO_REQ_FLAG(REQ_F_CAN_POLL_BIT),
	/* buffer list was empty after selection of buffer */
	REQ_F_BL_EMPTY = IO_REQ_FLAG(REQ_F_BL_EMPTY_BIT),
	/* don't recycle provided buffers for this request */
	REQ_F_BL_NO_RECYCLE = IO_REQ_FLAG(REQ_F_BL_NO_RECYCLE_BIT),
	/* buffer ring head needs incrementing on put */
	REQ_F_BUFFERS_COMMIT = IO_REQ_FLAG(REQ_F_BUFFERS_COMMIT_BIT),
};
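/*
 * Illustrative sketch, not part of the original header: each REQ_F_* value is
 * a single bit in the 64-bit io_req_flags_t, so per-request state is set and
 * queried with plain bit operations on req->flags.
 *
 *	req->flags |= REQ_F_NEED_CLEANUP;
 *
 *	if (req->flags & (REQ_F_BUFFER_SELECTED | REQ_F_BUFFER_RING))
 *		...	// a buffer has been picked for this request
 */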
typedef void (*io_req_tw_func_t)(struct io_kiocb *req, struct io_tw_state *ts);

struct io_task_work {
	struct llist_node node;
	io_req_tw_func_t func;
};

struct io_cqe {
	__u64 user_data;
	__s32 res;
	/* fd initially, then cflags for completion */
	union {
		__u32 flags;
		int fd;
	};
};

/*
 * Each request type overlays its private data structure on top of this one.
 * They must not exceed this one in size.
 */
struct io_cmd_data {
	struct file *file;
	/* each command gets 56 bytes of data */
	__u8 data[56];
};

static inline void io_kiocb_cmd_sz_check(size_t cmd_sz)
{
	BUILD_BUG_ON(cmd_sz > sizeof(struct io_cmd_data));
}
#define io_kiocb_to_cmd(req, cmd_type) ( \
	io_kiocb_cmd_sz_check(sizeof(cmd_type)), \
	((cmd_type *)&(req)->cmd) \
)
#define cmd_to_io_kiocb(ptr)	((struct io_kiocb *) ptr)

struct io_kiocb {
	union {
		/*
		 * NOTE! Each of the io_kiocb union members has the file pointer
		 * as the first entry in their struct definition. So you can
		 * access the file pointer through any of the sub-structs,
		 * or directly as just 'file' in this struct.
		 */
		struct file *file;
		struct io_cmd_data cmd;
	};

	u8 opcode;
	/* polled IO has completed */
	u8 iopoll_completed;
	/*
	 * Can be either a fixed buffer index, or used with provided buffers.
	 * For the latter, before issue it points to the buffer group ID,
	 * and after selection it points to the buffer ID itself.
	 */
	u16 buf_index;

	unsigned nr_tw;

	/* REQ_F_* flags */
	io_req_flags_t flags;

	struct io_cqe cqe;

	struct io_ring_ctx *ctx;
	struct task_struct *task;

	union {
		/* store used ubuf, so we can prevent reloading */
		struct io_mapped_ubuf *imu;

		/* stores selected buf, valid IFF REQ_F_BUFFER_SELECTED is set */
		struct io_buffer *kbuf;

		/*
		 * stores buffer ID for ring provided buffers, valid IFF
		 * REQ_F_BUFFER_RING is set.
		 */
		struct io_buffer_list *buf_list;
	};

	union {
		/* used by request caches, completion batching and iopoll */
		struct io_wq_work_node comp_list;
		/* cache ->apoll->events */
		__poll_t apoll_events;
	};

	struct io_rsrc_node *rsrc_node;

	atomic_t refs;
	bool cancel_seq_set;
	struct io_task_work io_task_work;
	/* for polled requests, i.e. IORING_OP_POLL_ADD and async armed poll */
	struct hlist_node hash_node;
	/* internal polling, see IORING_FEAT_FAST_POLL */
	struct async_poll *apoll;
	/* opcode allocated if it needs to store data for async defer */
	void *async_data;
	/* linked requests, IFF REQ_F_HARDLINK or REQ_F_LINK are set */
	atomic_t poll_refs;
	struct io_kiocb *link;
	/* custom credentials, valid IFF REQ_F_CREDS is set */
	const struct cred *creds;
	struct io_wq_work work;

	struct {
		u64 extra1;
		u64 extra2;
	} big_cqe;
};
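/*
 * Illustrative sketch, not part of the original header: how a hypothetical
 * opcode could overlay its private data on io_cmd_data and fetch it through
 * io_kiocb_to_cmd(). The io_example_op/io_example_prep names are made up for
 * the example; the constraints (file pointer first, total size no larger than
 * struct io_cmd_data) follow from the definitions above.
 *
 *	struct io_example_op {
 *		struct file *file;	// must be first, matches the io_kiocb union
 *		u64 user_arg;
 *		u32 len;
 *	};
 *
 *	static int io_example_prep(struct io_kiocb *req)
 *	{
 *		struct io_example_op *ex = io_kiocb_to_cmd(req, struct io_example_op);
 *
 *		ex->user_arg = 0;
 *		ex->len = 0;
 *		return 0;
 *	}
 */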
struct io_overflow_cqe {
	struct list_head list;
	struct io_uring_cqe cqe;
};

#endif