TOMOYO Linux Cross Reference
Linux/io_uring/io_uring.h

#ifndef IOU_CORE_H
#define IOU_CORE_H

#include <linux/errno.h>
#include <linux/lockdep.h>
#include <linux/resume_user_mode.h>
#include <linux/kasan.h>
#include <linux/poll.h>
#include <linux/io_uring_types.h>
#include <uapi/linux/eventpoll.h>
#include "io-wq.h"
#include "slist.h"
#include "filetable.h"

#ifndef CREATE_TRACE_POINTS
#include <trace/events/io_uring.h>
#endif

enum {
        IOU_OK                  = 0,
        IOU_ISSUE_SKIP_COMPLETE = -EIOCBQUEUED,

        /*
         * Requeue the task_work to restart operations on this request. The
         * actual value isn't important; it should just not be an otherwise
         * valid error code, while staying within the -MAX_ERRNO..-1 range so
         * that it is still valid internally.
         */
        IOU_REQUEUE             = -3072,

        /*
         * Intended only when IO_URING_F_MULTISHOT is passed, to indicate to
         * the poll runner that the multishot request should be removed and
         * that the result has been set in req->cqe.res.
         */
        IOU_STOP_MULTISHOT      = -ECANCELED,
};
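
/*
 * Editor's note: the block below is an illustrative sketch, not part of the
 * upstream header. It shows how an opcode issue handler typically uses these
 * return codes; the handler and its do_the_operation() helper are
 * hypothetical, while io_req_set_res() is defined later in this file.
 */
#if 0
static int io_example_issue(struct io_kiocb *req, unsigned int issue_flags)
{
        int ret = do_the_operation(req);        /* hypothetical helper */

        if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
                return -EAGAIN;         /* let the core retry, e.g. from io-wq */

        io_req_set_res(req, ret, 0);
        return IOU_OK;                  /* the core posts req->cqe as the CQE */
}
#endif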

struct io_wait_queue {
        struct wait_queue_entry wq;
        struct io_ring_ctx *ctx;
        unsigned cq_tail;
        unsigned nr_timeouts;
        ktime_t timeout;

#ifdef CONFIG_NET_RX_BUSY_POLL
        ktime_t napi_busy_poll_dt;
        bool napi_prefer_busy_poll;
#endif
};

static inline bool io_should_wake(struct io_wait_queue *iowq)
{
        struct io_ring_ctx *ctx = iowq->ctx;
        int dist = READ_ONCE(ctx->rings->cq.tail) - (int) iowq->cq_tail;

        /*
         * Wake up if we have enough events, or if a timeout occurred since we
         * started waiting. For timeouts, we always want to return to userspace,
         * regardless of event count.
         */
        return dist >= 0 || atomic_read(&ctx->cq_timeouts) != iowq->nr_timeouts;
}
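
/*
 * Editor's note: illustrative sketch only, not part of the upstream header.
 * It shows the kind of wait loop io_should_wake() backs; the real logic
 * lives in io_cqring_wait() and additionally handles signals, timeouts and
 * task_work. The helper name and the exact cq_tail setup are assumptions.
 */
#if 0
static void example_wait_for_cqes(struct io_ring_ctx *ctx, unsigned min_events)
{
        struct io_wait_queue iowq = {
                .ctx            = ctx,
                /* wake once at least min_events completions are visible */
                .cq_tail        = READ_ONCE(ctx->rings->cq.head) + min_events,
                .nr_timeouts    = atomic_read(&ctx->cq_timeouts),
        };

        while (!io_should_wake(&iowq))
                schedule();     /* real code uses prepare_to_wait() and friends */
}
#endif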

bool io_cqe_cache_refill(struct io_ring_ctx *ctx, bool overflow);
int io_run_task_work_sig(struct io_ring_ctx *ctx);
void io_req_defer_failed(struct io_kiocb *req, s32 res);
bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags);
void io_add_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags);
bool io_req_post_cqe(struct io_kiocb *req, s32 res, u32 cflags);
void __io_commit_cqring_flush(struct io_ring_ctx *ctx);

struct file *io_file_get_normal(struct io_kiocb *req, int fd);
struct file *io_file_get_fixed(struct io_kiocb *req, int fd,
                               unsigned issue_flags);

void __io_req_task_work_add(struct io_kiocb *req, unsigned flags);
void io_req_task_work_add_remote(struct io_kiocb *req, struct io_ring_ctx *ctx,
                                 unsigned flags);
bool io_alloc_async_data(struct io_kiocb *req);
void io_req_task_queue(struct io_kiocb *req);
void io_req_task_complete(struct io_kiocb *req, struct io_tw_state *ts);
void io_req_task_queue_fail(struct io_kiocb *req, int ret);
void io_req_task_submit(struct io_kiocb *req, struct io_tw_state *ts);
struct llist_node *io_handle_tw_list(struct llist_node *node, unsigned int *count, unsigned int max_entries);
struct llist_node *tctx_task_work_run(struct io_uring_task *tctx, unsigned int max_entries, unsigned int *count);
void tctx_task_work(struct callback_head *cb);
__cold void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd);
int io_uring_alloc_task_context(struct task_struct *task,
                                struct io_ring_ctx *ctx);

int io_ring_add_registered_file(struct io_uring_task *tctx, struct file *file,
                                     int start, int end);

int io_poll_issue(struct io_kiocb *req, struct io_tw_state *ts);
int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr);
int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin);
void __io_submit_flush_completions(struct io_ring_ctx *ctx);

struct io_wq_work *io_wq_free_work(struct io_wq_work *work);
void io_wq_submit_work(struct io_wq_work *work);

void io_free_req(struct io_kiocb *req);
void io_queue_next(struct io_kiocb *req);
void io_task_refs_refill(struct io_uring_task *tctx);
bool __io_alloc_req_refill(struct io_ring_ctx *ctx);

bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task,
                        bool cancel_all);

void io_activate_pollwq(struct io_ring_ctx *ctx);

static inline void io_lockdep_assert_cq_locked(struct io_ring_ctx *ctx)
{
#if defined(CONFIG_PROVE_LOCKING)
        lockdep_assert(in_task());

        if (ctx->flags & IORING_SETUP_IOPOLL) {
                lockdep_assert_held(&ctx->uring_lock);
        } else if (!ctx->task_complete) {
                lockdep_assert_held(&ctx->completion_lock);
        } else if (ctx->submitter_task) {
                /*
                 * ->submitter_task may be NULL and we can still post a CQE,
                 * if the ring has been set up with IORING_SETUP_R_DISABLED.
                 * Not from an SQE, as those cannot be submitted, but via
                 * updating tagged resources.
                 */
                if (ctx->submitter_task->flags & PF_EXITING)
                        lockdep_assert(current_work());
                else
                        lockdep_assert(current == ctx->submitter_task);
        }
#endif
}

static inline void io_req_task_work_add(struct io_kiocb *req)
{
        __io_req_task_work_add(req, 0);
}

static inline void io_submit_flush_completions(struct io_ring_ctx *ctx)
{
        if (!wq_list_empty(&ctx->submit_state.compl_reqs) ||
            ctx->submit_state.cq_flush)
                __io_submit_flush_completions(ctx);
}

#define io_for_each_link(pos, head) \
        for (pos = (head); pos; pos = pos->link)
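
/*
 * Editor's note: illustrative sketch only, not part of the upstream header.
 * It walks a request link chain with io_for_each_link(); the helper is
 * hypothetical.
 */
#if 0
static unsigned int example_link_length(struct io_kiocb *head)
{
        struct io_kiocb *cur;
        unsigned int nr = 0;

        io_for_each_link(cur, head)
                nr++;
        return nr;
}
#endif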

static inline bool io_get_cqe_overflow(struct io_ring_ctx *ctx,
                                        struct io_uring_cqe **ret,
                                        bool overflow)
{
        io_lockdep_assert_cq_locked(ctx);

        if (unlikely(ctx->cqe_cached >= ctx->cqe_sentinel)) {
                if (unlikely(!io_cqe_cache_refill(ctx, overflow)))
                        return false;
        }
        *ret = ctx->cqe_cached;
        ctx->cached_cq_tail++;
        ctx->cqe_cached++;
        if (ctx->flags & IORING_SETUP_CQE32)
                ctx->cqe_cached++;
        return true;
}

static inline bool io_get_cqe(struct io_ring_ctx *ctx, struct io_uring_cqe **ret)
{
        return io_get_cqe_overflow(ctx, ret, false);
}

static __always_inline bool io_fill_cqe_req(struct io_ring_ctx *ctx,
                                            struct io_kiocb *req)
{
        struct io_uring_cqe *cqe;

        /*
         * If we can't get a cq entry, userspace overflowed the
         * submission (by quite a lot). Increment the overflow count in
         * the ring.
         */
        if (unlikely(!io_get_cqe(ctx, &cqe)))
                return false;

        if (trace_io_uring_complete_enabled())
                trace_io_uring_complete(req->ctx, req, req->cqe.user_data,
                                        req->cqe.res, req->cqe.flags,
                                        req->big_cqe.extra1, req->big_cqe.extra2);

        memcpy(cqe, &req->cqe, sizeof(*cqe));
        if (ctx->flags & IORING_SETUP_CQE32) {
                memcpy(cqe->big_cqe, &req->big_cqe, sizeof(*cqe));
                memset(&req->big_cqe, 0, sizeof(req->big_cqe));
        }
        return true;
}
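
/*
 * Editor's note: illustrative sketch only, not part of the upstream header.
 * It shows the basic sequence for posting a request's completion with the
 * helpers above. Real code goes through io_uring.c, which also takes the
 * appropriate CQ locking and queues an overflow entry when the ring is full;
 * this sketch simply assumes the caller already satisfies
 * io_lockdep_assert_cq_locked(). The helper name is hypothetical, and
 * io_req_set_res(), io_commit_cqring() and io_cqring_wake() are defined
 * later in this file.
 */
#if 0
static void example_post_completion(struct io_kiocb *req, s32 res, u32 cflags)
{
        struct io_ring_ctx *ctx = req->ctx;

        io_req_set_res(req, res, cflags);       /* stash the result in req->cqe */
        if (!io_fill_cqe_req(ctx, req)) {
                /* no CQE space left; real code records an overflow here */
        }
        io_commit_cqring(ctx);                  /* publish the new CQ tail */
        io_cqring_wake(ctx);                    /* wake any CQ waiters */
}
#endif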

static inline void req_set_fail(struct io_kiocb *req)
{
        req->flags |= REQ_F_FAIL;
        if (req->flags & REQ_F_CQE_SKIP) {
                req->flags &= ~REQ_F_CQE_SKIP;
                req->flags |= REQ_F_SKIP_LINK_CQES;
        }
}

static inline void io_req_set_res(struct io_kiocb *req, s32 res, u32 cflags)
{
        req->cqe.res = res;
        req->cqe.flags = cflags;
}

static inline bool req_has_async_data(struct io_kiocb *req)
{
        return req->flags & REQ_F_ASYNC_DATA;
}

static inline void io_put_file(struct io_kiocb *req)
{
        if (!(req->flags & REQ_F_FIXED_FILE) && req->file)
                fput(req->file);
}

static inline void io_ring_submit_unlock(struct io_ring_ctx *ctx,
                                         unsigned issue_flags)
{
        lockdep_assert_held(&ctx->uring_lock);
        if (unlikely(issue_flags & IO_URING_F_UNLOCKED))
                mutex_unlock(&ctx->uring_lock);
}

static inline void io_ring_submit_lock(struct io_ring_ctx *ctx,
                                       unsigned issue_flags)
{
        /*
         * "Normal" inline submissions always hold the uring_lock, since we
         * grab it from the system call. The same is true for the SQPOLL
         * offload. The only exception is when we've detached the request and
         * issue it from an async worker thread; grab the lock in that case.
         */
        if (unlikely(issue_flags & IO_URING_F_UNLOCKED))
                mutex_lock(&ctx->uring_lock);
        lockdep_assert_held(&ctx->uring_lock);
}
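
/*
 * Editor's note: illustrative sketch only, not part of the upstream header.
 * It shows the usual pairing of io_ring_submit_lock()/io_ring_submit_unlock()
 * around state protected by ->uring_lock; the helper and the body of the
 * critical section are hypothetical.
 */
#if 0
static void example_touch_ctx_state(struct io_kiocb *req, unsigned issue_flags)
{
        struct io_ring_ctx *ctx = req->ctx;

        io_ring_submit_lock(ctx, issue_flags);
        /* ... access state protected by ->uring_lock, e.g. registered files ... */
        io_ring_submit_unlock(ctx, issue_flags);
}
#endif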

static inline void io_commit_cqring(struct io_ring_ctx *ctx)
{
        /* order cqe stores with ring update */
        smp_store_release(&ctx->rings->cq.tail, ctx->cached_cq_tail);
}

static inline void io_poll_wq_wake(struct io_ring_ctx *ctx)
{
        if (wq_has_sleeper(&ctx->poll_wq))
                __wake_up(&ctx->poll_wq, TASK_NORMAL, 0,
                                poll_to_key(EPOLL_URING_WAKE | EPOLLIN));
}

static inline void io_cqring_wake(struct io_ring_ctx *ctx)
{
        /*
         * Trigger the waitqueue handler on all waiters on our waitqueue. This
         * won't necessarily wake up all the tasks; io_should_wake() will make
         * that decision.
         *
         * Pass in EPOLLIN|EPOLL_URING_WAKE as the poll wakeup key. The latter
         * is set in the mask so that if we recurse back into our own poll
         * waitqueue handlers, we know we have a dependency on eventfd or
         * epoll and should terminate multishot poll at that point.
         */
        if (wq_has_sleeper(&ctx->cq_wait))
                __wake_up(&ctx->cq_wait, TASK_NORMAL, 0,
                                poll_to_key(EPOLL_URING_WAKE | EPOLLIN));
}

static inline bool io_sqring_full(struct io_ring_ctx *ctx)
{
        struct io_rings *r = ctx->rings;

        /*
         * SQPOLL must use the actual sqring head, as using the cached_sq_head
         * is race prone if the SQPOLL thread has grabbed entries but not yet
         * committed them to the ring. For !SQPOLL, this doesn't matter, but
         * since this helper is just used for SQPOLL sqring waits (or POLLOUT),
         * just read the actual sqring head unconditionally.
         */
        return READ_ONCE(r->sq.tail) - READ_ONCE(r->sq.head) == ctx->sq_entries;
}

static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
{
        struct io_rings *rings = ctx->rings;
        unsigned int entries;

        /* make sure SQ entry isn't read before tail */
        entries = smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head;
        return min(entries, ctx->sq_entries);
}
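
/*
 * Editor's note: illustrative sketch only, not part of the upstream header.
 * It pairs io_sqring_entries() with io_submit_sqes(), roughly the way the
 * submission side consumes the SQ ring; the helper is hypothetical and
 * error handling is omitted.
 */
#if 0
static void example_drain_sqring(struct io_ring_ctx *ctx)
        __must_hold(&ctx->uring_lock)
{
        unsigned int to_submit = io_sqring_entries(ctx);

        if (to_submit)
                io_submit_sqes(ctx, to_submit);
}
#endif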

static inline int io_run_task_work(void)
{
        bool ret = false;

        /*
         * Always check-and-clear the task_work notification signal. With how
         * signaling works for task_work, we can find it set with nothing to
         * run. We need to clear it for that case, like get_signal() does.
         */
        if (test_thread_flag(TIF_NOTIFY_SIGNAL))
                clear_notify_signal();
        /*
         * PF_IO_WORKER never returns to userspace, so check here if we have
         * notify work that needs processing.
         */
        if (current->flags & PF_IO_WORKER) {
                if (test_thread_flag(TIF_NOTIFY_RESUME)) {
                        __set_current_state(TASK_RUNNING);
                        resume_user_mode_work(NULL);
                }
                if (current->io_uring) {
                        unsigned int count = 0;

                        __set_current_state(TASK_RUNNING);
                        tctx_task_work_run(current->io_uring, UINT_MAX, &count);
                        if (count)
                                ret = true;
                }
        }
        if (task_work_pending(current)) {
                __set_current_state(TASK_RUNNING);
                task_work_run();
                ret = true;
        }

        return ret;
}
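
/*
 * Editor's note: illustrative sketch only, not part of the upstream header.
 * Callers typically run pending task_work before deciding to sleep so that
 * completions queued as task_work are not missed; real waiters also honour
 * signals via io_run_task_work_sig(). The helper is hypothetical, and
 * io_task_work_pending() is defined just below.
 */
#if 0
static void example_flush_task_work(struct io_ring_ctx *ctx)
{
        do {
                io_run_task_work();
        } while (io_task_work_pending(ctx));
}
#endif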

static inline bool io_task_work_pending(struct io_ring_ctx *ctx)
{
        return task_work_pending(current) || !llist_empty(&ctx->work_llist);
}

static inline void io_tw_lock(struct io_ring_ctx *ctx, struct io_tw_state *ts)
{
        lockdep_assert_held(&ctx->uring_lock);
}

/*
 * Don't complete immediately but use deferred completion infrastructure.
 * Protected by ->uring_lock and can only be used either with
 * IO_URING_F_COMPLETE_DEFER or inside a tw handler holding the mutex.
 */
static inline void io_req_complete_defer(struct io_kiocb *req)
        __must_hold(&req->ctx->uring_lock)
{
        struct io_submit_state *state = &req->ctx->submit_state;

        lockdep_assert_held(&req->ctx->uring_lock);

        wq_list_add_tail(&req->comp_list, &state->compl_reqs);
}
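
/*
 * Editor's note: illustrative sketch only, not part of the upstream header.
 * Deferred completion batches requests on ->compl_reqs under ->uring_lock
 * and posts their CQEs in one go via io_submit_flush_completions(); the
 * helper below is hypothetical.
 */
#if 0
static void example_defer_and_flush(struct io_ring_ctx *ctx,
                                    struct io_kiocb *req)
        __must_hold(&ctx->uring_lock)
{
        io_req_set_res(req, 0, 0);
        io_req_complete_defer(req);             /* queue on ->compl_reqs */
        /* ... more requests may be deferred here ... */
        io_submit_flush_completions(ctx);       /* post the queued CQEs */
}
#endif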

static inline void io_commit_cqring_flush(struct io_ring_ctx *ctx)
{
        if (unlikely(ctx->off_timeout_used || ctx->drain_active ||
                     ctx->has_evfd || ctx->poll_activated))
                __io_commit_cqring_flush(ctx);
}

static inline void io_get_task_refs(int nr)
{
        struct io_uring_task *tctx = current->io_uring;

        tctx->cached_refs -= nr;
        if (unlikely(tctx->cached_refs < 0))
                io_task_refs_refill(tctx);
}

static inline bool io_req_cache_empty(struct io_ring_ctx *ctx)
{
        return !ctx->submit_state.free_list.next;
}

extern struct kmem_cache *req_cachep;
extern struct kmem_cache *io_buf_cachep;

static inline struct io_kiocb *io_extract_req(struct io_ring_ctx *ctx)
{
        struct io_kiocb *req;

        req = container_of(ctx->submit_state.free_list.next, struct io_kiocb, comp_list);
        wq_stack_extract(&ctx->submit_state.free_list);
        return req;
}

static inline bool io_alloc_req(struct io_ring_ctx *ctx, struct io_kiocb **req)
{
        if (unlikely(io_req_cache_empty(ctx))) {
                if (!__io_alloc_req_refill(ctx))
                        return false;
        }
        *req = io_extract_req(ctx);
        return true;
}
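
/*
 * Editor's note: illustrative sketch only, not part of the upstream header.
 * It shows typical use of the request cache together with the per-task
 * reference batch; the helper is hypothetical and skips the clean-up a real
 * caller would do when allocation fails.
 */
#if 0
static struct io_kiocb *example_get_req(struct io_ring_ctx *ctx)
{
        struct io_kiocb *req;

        io_get_task_refs(1);            /* account one request to the task */
        if (!io_alloc_req(ctx, &req))
                return NULL;            /* cache empty and refill failed */
        return req;
}
#endif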

static inline bool io_allowed_defer_tw_run(struct io_ring_ctx *ctx)
{
        return likely(ctx->submitter_task == current);
}

static inline bool io_allowed_run_tw(struct io_ring_ctx *ctx)
{
        return likely(!(ctx->flags & IORING_SETUP_DEFER_TASKRUN) ||
                      ctx->submitter_task == current);
}

static inline void io_req_queue_tw_complete(struct io_kiocb *req, s32 res)
{
        io_req_set_res(req, res, 0);
        req->io_task_work.func = io_req_task_complete;
        io_req_task_work_add(req);
}

/*
 * IORING_SETUP_SQE128 contexts allocate twice the normal SQE size for each
 * slot.
 */
static inline size_t uring_sqe_size(struct io_ring_ctx *ctx)
{
        if (ctx->flags & IORING_SETUP_SQE128)
                return 2 * sizeof(struct io_uring_sqe);
        return sizeof(struct io_uring_sqe);
}
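
/*
 * Editor's note: illustrative sketch only, not part of the upstream header.
 * It shows how a slot index is turned into an SQE pointer when the per-slot
 * size can be 64 or 128 bytes; it assumes ctx->sq_sqes points at the start
 * of the SQE array, and the helper itself is hypothetical.
 */
#if 0
static struct io_uring_sqe *example_sqe_at(struct io_ring_ctx *ctx,
                                           unsigned int idx)
{
        return (void *)ctx->sq_sqes + (size_t)idx * uring_sqe_size(ctx);
}
#endif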

static inline bool io_file_can_poll(struct io_kiocb *req)
{
        if (req->flags & REQ_F_CAN_POLL)
                return true;
        if (req->file && file_can_poll(req->file)) {
                req->flags |= REQ_F_CAN_POLL;
                return true;
        }
        return false;
}

enum {
        IO_CHECK_CQ_OVERFLOW_BIT,
        IO_CHECK_CQ_DROPPED_BIT,
};

static inline bool io_has_work(struct io_ring_ctx *ctx)
{
        return test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq) ||
               !llist_empty(&ctx->work_llist);
}
#endif
