
TOMOYO Linux Cross Reference
Linux/io_uring/eventfd.c

// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/eventfd.h>
#include <linux/eventpoll.h>
#include <linux/io_uring.h>
#include <linux/io_uring_types.h>

#include "io-wq.h"
#include "eventfd.h"

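/*
 * Lifetime: the registered io_ev_fd is published in ctx->io_ev_fd and
 * looked up by signalers under RCU. @refs starts at 1 for the
 * registration reference; each signaler takes a short-lived reference
 * via atomic_inc_not_zero(), and whoever drops the final reference frees
 * the struct after an RCU grace period. @ops carries deferred-operation
 * bits, currently only IO_EVENTFD_OP_SIGNAL_BIT.
 */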
struct io_ev_fd {
	struct eventfd_ctx	*cq_ev_fd;
	unsigned int		eventfd_async: 1;
	struct rcu_head		rcu;
	atomic_t		refs;
	atomic_t		ops;
};

enum {
	/* a deferred eventfd signal has been queued via call_rcu_hurry() */
	IO_EVENTFD_OP_SIGNAL_BIT,
};

static void io_eventfd_free(struct rcu_head *rcu)
{
	struct io_ev_fd *ev_fd = container_of(rcu, struct io_ev_fd, rcu);

	eventfd_ctx_put(ev_fd->cq_ev_fd);
	kfree(ev_fd);
}

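/*
 * Runs as an RCU callback, i.e. after a grace period has elapsed. If the
 * final reference is dropped here, io_eventfd_free() can be called
 * directly rather than queued through another call_rcu().
 */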
static void io_eventfd_do_signal(struct rcu_head *rcu)
{
	struct io_ev_fd *ev_fd = container_of(rcu, struct io_ev_fd, rcu);

	eventfd_signal_mask(ev_fd->cq_ev_fd, EPOLL_URING_WAKE);

	if (atomic_dec_and_test(&ev_fd->refs))
		io_eventfd_free(rcu);
}

void io_eventfd_signal(struct io_ring_ctx *ctx)
{
	struct io_ev_fd *ev_fd = NULL;

	if (READ_ONCE(ctx->rings->cq_flags) & IORING_CQ_EVENTFD_DISABLED)
		return;

	guard(rcu)();

	/*
	 * rcu_dereference ctx->io_ev_fd once and use it for both the check
	 * and eventfd_signal.
	 */
	ev_fd = rcu_dereference(ctx->io_ev_fd);

	/*
	 * Check again that ev_fd exists, in case an io_eventfd_unregister
	 * call completed between the caller's check and the start of this
	 * RCU read-side critical section.
	 */
	if (unlikely(!ev_fd))
		return;
	if (!atomic_inc_not_zero(&ev_fd->refs))
		return;
	if (ev_fd->eventfd_async && !io_wq_current_is_worker())
		goto out;

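	/*
	 * If it is not safe to call eventfd_signal() from this context
	 * (eventfd_signal_allowed() is false, e.g. to avoid recursing from
	 * an eventfd wakeup path), defer the signal to an RCU callback
	 * instead. The fetch_or ensures only one callback is queued at a
	 * time, and the queued callback inherits the reference taken above.
	 * EPOLL_URING_WAKE marks the wakeup as io_uring generated so that
	 * waiters can break wakeup recursion.
	 */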
	if (likely(eventfd_signal_allowed())) {
		eventfd_signal_mask(ev_fd->cq_ev_fd, EPOLL_URING_WAKE);
	} else {
		if (!atomic_fetch_or(BIT(IO_EVENTFD_OP_SIGNAL_BIT), &ev_fd->ops)) {
			call_rcu_hurry(&ev_fd->rcu, io_eventfd_do_signal);
			return;
		}
	}
out:
	if (atomic_dec_and_test(&ev_fd->refs))
		call_rcu(&ev_fd->rcu, io_eventfd_free);
}

void io_eventfd_flush_signal(struct io_ring_ctx *ctx)
{
	bool skip;

	spin_lock(&ctx->completion_lock);

	/*
	 * Eventfd should only get triggered when at least one event has been
	 * posted. Some applications rely on the eventfd notification count
	 * only changing IFF a new CQE has been added to the CQ ring. There's
	 * no dependency on a 1:1 relationship between how many times this
	 * function is called (and hence the eventfd count) and the number of
	 * CQEs posted to the CQ ring.
	 */
	skip = ctx->cached_cq_tail == ctx->evfd_last_cq_tail;
	ctx->evfd_last_cq_tail = ctx->cached_cq_tail;
	spin_unlock(&ctx->completion_lock);
	if (skip)
		return;

	io_eventfd_signal(ctx);
}
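
/*
 * Illustrative consumer side (not part of this file): once an eventfd is
 * registered, the application can block on it and reap CQEs when it
 * fires. A minimal userspace sketch, assuming "efd" is the registered
 * eventfd (needs <stdint.h> and <unistd.h>):
 *
 *	uint64_t cnt;
 *
 *	if (read(efd, &cnt, sizeof(cnt)) == sizeof(cnt)) {
 *		// cnt is the number of signals since the last read;
 *		// drain the CQ ring now
 *	}
 */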

int io_eventfd_register(struct io_ring_ctx *ctx, void __user *arg,
			unsigned int eventfd_async)
{
	struct io_ev_fd *ev_fd;
	__s32 __user *fds = arg;
	int fd;

	ev_fd = rcu_dereference_protected(ctx->io_ev_fd,
					lockdep_is_held(&ctx->uring_lock));
	if (ev_fd)
		return -EBUSY;

	if (copy_from_user(&fd, fds, sizeof(*fds)))
		return -EFAULT;

	ev_fd = kmalloc(sizeof(*ev_fd), GFP_KERNEL);
	if (!ev_fd)
		return -ENOMEM;

	ev_fd->cq_ev_fd = eventfd_ctx_fdget(fd);
	if (IS_ERR(ev_fd->cq_ev_fd)) {
		int ret = PTR_ERR(ev_fd->cq_ev_fd);

		kfree(ev_fd);
		return ret;
	}

	spin_lock(&ctx->completion_lock);
	ctx->evfd_last_cq_tail = ctx->cached_cq_tail;
	spin_unlock(&ctx->completion_lock);

	ev_fd->eventfd_async = eventfd_async;
	ctx->has_evfd = true;
	atomic_set(&ev_fd->refs, 1);
	atomic_set(&ev_fd->ops, 0);
	rcu_assign_pointer(ctx->io_ev_fd, ev_fd);
	return 0;
}
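
/*
 * Illustrative userspace counterpart (not part of this file): a minimal
 * sketch of registering an eventfd via the raw io_uring_register(2)
 * syscall, assuming "ring_fd" is an already set up ring (needs
 * <sys/eventfd.h>, <sys/syscall.h>, <unistd.h> and <linux/io_uring.h>).
 * liburing wraps this as io_uring_register_eventfd() and
 * io_uring_register_eventfd_async().
 *
 *	int efd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
 *
 *	// IORING_REGISTER_EVENTFD_ASYNC would set eventfd_async instead,
 *	// limiting signals to completions posted from io-wq context
 *	syscall(__NR_io_uring_register, ring_fd, IORING_REGISTER_EVENTFD,
 *		&efd, 1);
 */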

int io_eventfd_unregister(struct io_ring_ctx *ctx)
{
	struct io_ev_fd *ev_fd;

	ev_fd = rcu_dereference_protected(ctx->io_ev_fd,
					lockdep_is_held(&ctx->uring_lock));
	if (ev_fd) {
		ctx->has_evfd = false;
		rcu_assign_pointer(ctx->io_ev_fd, NULL);
		if (atomic_dec_and_test(&ev_fd->refs))
			call_rcu(&ev_fd->rcu, io_eventfd_free);
		return 0;
	}

	return -ENXIO;
}
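
/*
 * Illustrative userspace counterpart (not part of this file):
 * unregistering via the raw syscall; liburing wraps this as
 * io_uring_unregister_eventfd().
 *
 *	syscall(__NR_io_uring_register, ring_fd, IORING_UNREGISTER_EVENTFD,
 *		NULL, 0);
 */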
