TOMOYO Linux Cross Reference
Linux/io_uring/poll.c

  1 // SPDX-License-Identifier: GPL-2.0
  2 #include <linux/kernel.h>
  3 #include <linux/errno.h>
  4 #include <linux/fs.h>
  5 #include <linux/file.h>
  6 #include <linux/mm.h>
  7 #include <linux/slab.h>
  8 #include <linux/poll.h>
  9 #include <linux/hashtable.h>
 10 #include <linux/io_uring.h>
 11 
 12 #include <trace/events/io_uring.h>
 13 
 14 #include <uapi/linux/io_uring.h>
 15 
 16 #include "io_uring.h"
 17 #include "alloc_cache.h"
 18 #include "refs.h"
 19 #include "napi.h"
 20 #include "opdef.h"
 21 #include "kbuf.h"
 22 #include "poll.h"
 23 #include "cancel.h"
 24 
 25 struct io_poll_update {
 26         struct file                     *file;
 27         u64                             old_user_data;
 28         u64                             new_user_data;
 29         __poll_t                        events;
 30         bool                            update_events;
 31         bool                            update_user_data;
 32 };
 33 
 34 struct io_poll_table {
 35         struct poll_table_struct pt;
 36         struct io_kiocb *req;
 37         int nr_entries;
 38         int error;
 39         bool owning;
 40         /* output value, set only if arm poll returns >0 */
 41         __poll_t result_mask;
 42 };
 43 
 44 #define IO_POLL_CANCEL_FLAG     BIT(31)
 45 #define IO_POLL_RETRY_FLAG      BIT(30)
 46 #define IO_POLL_REF_MASK        GENMASK(29, 0)
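/*
 * Layout of ->poll_refs: bit 31 (IO_POLL_CANCEL_FLAG) marks cancellation,
 * bit 30 (IO_POLL_RETRY_FLAG) asks the current owner to re-check for events,
 * and bits 29-0 hold the reference/ownership count.
 */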
 47 
 48 /*
 49  * We usually have 1-2 refs taken; 128 is more than enough, and we want to
 50  * maximise the margin between this amount and the moment when it overflows.
 51  */
 52 #define IO_POLL_REF_BIAS        128
 53 
 54 #define IO_WQE_F_DOUBLE         1
 55 
 56 static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
 57                         void *key);
 58 
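/*
 * A wait_queue_entry's ->private field stores the owning io_kiocb, with
 * IO_WQE_F_DOUBLE or'ed into the low pointer bit when the entry belongs to
 * the second (double) poll entry. The helpers below unpack that encoding.
 */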
 59 static inline struct io_kiocb *wqe_to_req(struct wait_queue_entry *wqe)
 60 {
 61         unsigned long priv = (unsigned long)wqe->private;
 62 
 63         return (struct io_kiocb *)(priv & ~IO_WQE_F_DOUBLE);
 64 }
 65 
 66 static inline bool wqe_is_double(struct wait_queue_entry *wqe)
 67 {
 68         unsigned long priv = (unsigned long)wqe->private;
 69 
 70         return priv & IO_WQE_F_DOUBLE;
 71 }
 72 
 73 static bool io_poll_get_ownership_slowpath(struct io_kiocb *req)
 74 {
 75         int v;
 76 
 77         /*
 78          * poll_refs are already elevated and we don't have much hope for
 79          * grabbing the ownership. Instead of incrementing set a retry flag
 80          * to notify the loop that there might have been some change.
 81          */
 82         v = atomic_fetch_or(IO_POLL_RETRY_FLAG, &req->poll_refs);
 83         if (v & IO_POLL_REF_MASK)
 84                 return false;
 85         return !(atomic_fetch_inc(&req->poll_refs) & IO_POLL_REF_MASK);
 86 }
 87 
 88 /*
 89  * If the refs part of ->poll_refs (see IO_POLL_REF_MASK) is 0, it's free. We
 90  * can bump it and acquire ownership. It's disallowed to modify a request
 91  * while not owning it, which prevents races when enqueueing task_work and
 92  * between arming poll and wakeups.
 93  */
 94 static inline bool io_poll_get_ownership(struct io_kiocb *req)
 95 {
 96         if (unlikely(atomic_read(&req->poll_refs) >= IO_POLL_REF_BIAS))
 97                 return io_poll_get_ownership_slowpath(req);
 98         return !(atomic_fetch_inc(&req->poll_refs) & IO_POLL_REF_MASK);
 99 }
100 
101 static void io_poll_mark_cancelled(struct io_kiocb *req)
102 {
103         atomic_or(IO_POLL_CANCEL_FLAG, &req->poll_refs);
104 }
105 
106 static struct io_poll *io_poll_get_double(struct io_kiocb *req)
107 {
108         /* pure poll stashes this in ->async_data, poll driven retry elsewhere */
109         if (req->opcode == IORING_OP_POLL_ADD)
110                 return req->async_data;
111         return req->apoll->double_poll;
112 }
113 
114 static struct io_poll *io_poll_get_single(struct io_kiocb *req)
115 {
116         if (req->opcode == IORING_OP_POLL_ADD)
117                 return io_kiocb_to_cmd(req, struct io_poll);
118         return &req->apoll->poll;
119 }
120 
121 static void io_poll_req_insert(struct io_kiocb *req)
122 {
123         struct io_hash_table *table = &req->ctx->cancel_table;
124         u32 index = hash_long(req->cqe.user_data, table->hash_bits);
125         struct io_hash_bucket *hb = &table->hbs[index];
126 
127         spin_lock(&hb->lock);
128         hlist_add_head(&req->hash_node, &hb->list);
129         spin_unlock(&hb->lock);
130 }
131 
132 static void io_poll_req_delete(struct io_kiocb *req, struct io_ring_ctx *ctx)
133 {
134         struct io_hash_table *table = &req->ctx->cancel_table;
135         u32 index = hash_long(req->cqe.user_data, table->hash_bits);
136         spinlock_t *lock = &table->hbs[index].lock;
137 
138         spin_lock(lock);
139         hash_del(&req->hash_node);
140         spin_unlock(lock);
141 }
142 
143 static void io_poll_req_insert_locked(struct io_kiocb *req)
144 {
145         struct io_hash_table *table = &req->ctx->cancel_table_locked;
146         u32 index = hash_long(req->cqe.user_data, table->hash_bits);
147 
148         lockdep_assert_held(&req->ctx->uring_lock);
149 
150         hlist_add_head(&req->hash_node, &table->hbs[index].list);
151 }
152 
153 static void io_poll_tw_hash_eject(struct io_kiocb *req, struct io_tw_state *ts)
154 {
155         struct io_ring_ctx *ctx = req->ctx;
156 
157         if (req->flags & REQ_F_HASH_LOCKED) {
158                 /*
159                  * ->cancel_table_locked is protected by ->uring_lock in
160                  * contrast to per bucket spinlocks. Likely, tctx_task_work()
161                  * already grabbed the mutex for us, but there is a chance it
162                  * failed.
163                  */
164                 io_tw_lock(ctx, ts);
165                 hash_del(&req->hash_node);
166                 req->flags &= ~REQ_F_HASH_LOCKED;
167         } else {
168                 io_poll_req_delete(req, ctx);
169         }
170 }
171 
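/*
 * Initialise a poll entry: error/hangup style events are always of interest,
 * and io_poll_wake() is installed as the waitqueue wake-up callback.
 */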
172 static void io_init_poll_iocb(struct io_poll *poll, __poll_t events)
173 {
174         poll->head = NULL;
175 #define IO_POLL_UNMASK  (EPOLLERR|EPOLLHUP|EPOLLNVAL|EPOLLRDHUP)
176         /* mask in events that we always want/need */
177         poll->events = events | IO_POLL_UNMASK;
178         INIT_LIST_HEAD(&poll->wait.entry);
179         init_waitqueue_func_entry(&poll->wait, io_poll_wake);
180 }
181 
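/*
 * Detach one poll entry from its waitqueue under the waitqueue lock. The
 * smp_load_acquire() of ->head pairs with the smp_store_release() in
 * io_pollfree_wake().
 */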
182 static inline void io_poll_remove_entry(struct io_poll *poll)
183 {
184         struct wait_queue_head *head = smp_load_acquire(&poll->head);
185 
186         if (head) {
187                 spin_lock_irq(&head->lock);
188                 list_del_init(&poll->wait.entry);
189                 poll->head = NULL;
190                 spin_unlock_irq(&head->lock);
191         }
192 }
193 
194 static void io_poll_remove_entries(struct io_kiocb *req)
195 {
196         /*
197          * Nothing to do if neither of those flags are set. Avoid dipping
198          * into the poll/apoll/double cachelines if we can.
199          */
200         if (!(req->flags & (REQ_F_SINGLE_POLL | REQ_F_DOUBLE_POLL)))
201                 return;
202 
203         /*
204          * While we hold the waitqueue lock and the waitqueue is nonempty,
205          * wake_up_pollfree() will wait for us.  However, taking the waitqueue
206          * lock in the first place can race with the waitqueue being freed.
207          *
208          * We solve this as eventpoll does: by taking advantage of the fact that
209          * all users of wake_up_pollfree() will RCU-delay the actual free.  If
210          * we enter rcu_read_lock() and see that the pointer to the queue is
211          * non-NULL, we can then lock it without the memory being freed out from
212          * under us.
213          *
214          * Keep holding rcu_read_lock() as long as we hold the queue lock, in
215          * case the caller deletes the entry from the queue, leaving it empty.
216          * In that case, only RCU prevents the queue memory from being freed.
217          */
218         rcu_read_lock();
219         if (req->flags & REQ_F_SINGLE_POLL)
220                 io_poll_remove_entry(io_poll_get_single(req));
221         if (req->flags & REQ_F_DOUBLE_POLL)
222                 io_poll_remove_entry(io_poll_get_double(req));
223         rcu_read_unlock();
224 }
225 
226 enum {
227         IOU_POLL_DONE = 0,
228         IOU_POLL_NO_ACTION = 1,
229         IOU_POLL_REMOVE_POLL_USE_RES = 2,
230         IOU_POLL_REISSUE = 3,
231         IOU_POLL_REQUEUE = 4,
232 };
233 
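/*
 * Stash the wake-up mask in req->cqe.res and punt the rest of the processing
 * to task_work, using a lazy wake unless the request opted out of it via
 * REQ_F_POLL_NO_LAZY.
 */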
234 static void __io_poll_execute(struct io_kiocb *req, int mask)
235 {
236         unsigned flags = 0;
237 
238         io_req_set_res(req, mask, 0);
239         req->io_task_work.func = io_poll_task_func;
240 
241         trace_io_uring_task_add(req, mask);
242 
243         if (!(req->flags & REQ_F_POLL_NO_LAZY))
244                 flags = IOU_F_TWQ_LAZY_WAKE;
245         __io_req_task_work_add(req, flags);
246 }
247 
248 static inline void io_poll_execute(struct io_kiocb *req, int res)
249 {
250         if (io_poll_get_ownership(req))
251                 __io_poll_execute(req, res);
252 }
253 
254 /*
255  * All poll tw should go through this. Checks for poll events, manages
256  * references, does rewait, etc.
257  *
258  * Returns a negative error on failure. IOU_POLL_NO_ACTION when no action is
259  * required, i.e. it was a spurious wakeup or a multishot CQE has been served.
260  * IOU_POLL_DONE when it's done with the request, then the mask is stored in
261  * req->cqe.res. IOU_POLL_REMOVE_POLL_USE_RES indicates to remove multishot
262  * poll and that the result is stored in req->cqe.
263  */
264 static int io_poll_check_events(struct io_kiocb *req, struct io_tw_state *ts)
265 {
266         int v;
267 
268         /* req->task == current here, checking PF_EXITING is safe */
269         if (unlikely(req->task->flags & PF_EXITING))
270                 return -ECANCELED;
271 
272         do {
273                 v = atomic_read(&req->poll_refs);
274 
275                 if (unlikely(v != 1)) {
276                         /* tw should be the owner and so have some refs */
277                         if (WARN_ON_ONCE(!(v & IO_POLL_REF_MASK)))
278                                 return IOU_POLL_NO_ACTION;
279                         if (v & IO_POLL_CANCEL_FLAG)
280                                 return -ECANCELED;
281                         /*
282                          * cqe.res contains only events of the first wake up
283                          * and all others are to be lost. Redo vfs_poll() to get
284                          * up to date state.
285                          */
286                         if ((v & IO_POLL_REF_MASK) != 1)
287                                 req->cqe.res = 0;
288 
289                         if (v & IO_POLL_RETRY_FLAG) {
290                                 req->cqe.res = 0;
291                                 /*
292                                  * We won't find new events that came in between
293                                  * vfs_poll and the ref put unless we clear the
294                                  * flag in advance.
295                                  */
296                                 atomic_andnot(IO_POLL_RETRY_FLAG, &req->poll_refs);
297                                 v &= ~IO_POLL_RETRY_FLAG;
298                         }
299                 }
300 
301                 /* the mask was stashed in __io_poll_execute */
302                 if (!req->cqe.res) {
303                         struct poll_table_struct pt = { ._key = req->apoll_events };
304                         req->cqe.res = vfs_poll(req->file, &pt) & req->apoll_events;
305                         /*
306                          * We got woken with a mask, but someone else got to
307                          * it first. The above vfs_poll() doesn't add us back
308                          * to the waitqueue, so if we get nothing back, we
309                          * should be safe and attempt a reissue.
310                          */
311                         if (unlikely(!req->cqe.res)) {
312                                 /* Multishot armed need not reissue */
313                                 if (!(req->apoll_events & EPOLLONESHOT))
314                                         continue;
315                                 return IOU_POLL_REISSUE;
316                         }
317                 }
318                 if (req->apoll_events & EPOLLONESHOT)
319                         return IOU_POLL_DONE;
320 
321                 /* multishot, just fill a CQE and proceed */
322                 if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
323                         __poll_t mask = mangle_poll(req->cqe.res &
324                                                     req->apoll_events);
325 
326                         if (!io_req_post_cqe(req, mask, IORING_CQE_F_MORE)) {
327                                 io_req_set_res(req, mask, 0);
328                                 return IOU_POLL_REMOVE_POLL_USE_RES;
329                         }
330                 } else {
331                         int ret = io_poll_issue(req, ts);
332                         if (ret == IOU_STOP_MULTISHOT)
333                                 return IOU_POLL_REMOVE_POLL_USE_RES;
334                         else if (ret == IOU_REQUEUE)
335                                 return IOU_POLL_REQUEUE;
336                         if (ret < 0)
337                                 return ret;
338                 }
339 
340                 /* force the next iteration to vfs_poll() */
341                 req->cqe.res = 0;
342 
343                 /*
344                  * Release all references, retry if someone tried to restart
345                  * task_work while we were executing it.
346                  */
347                 v &= IO_POLL_REF_MASK;
348         } while (atomic_sub_return(v, &req->poll_refs) & IO_POLL_REF_MASK);
349 
350         io_napi_add(req);
351         return IOU_POLL_NO_ACTION;
352 }
353 
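/*
 * Task_work completion path: evaluate io_poll_check_events() and, depending
 * on the verdict, complete the request, resubmit the original operation,
 * re-queue the poll task_work, or do nothing for a spurious wakeup.
 */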
354 void io_poll_task_func(struct io_kiocb *req, struct io_tw_state *ts)
355 {
356         int ret;
357 
358         ret = io_poll_check_events(req, ts);
359         if (ret == IOU_POLL_NO_ACTION) {
360                 return;
361         } else if (ret == IOU_POLL_REQUEUE) {
362                 __io_poll_execute(req, 0);
363                 return;
364         }
365         io_poll_remove_entries(req);
366         io_poll_tw_hash_eject(req, ts);
367 
368         if (req->opcode == IORING_OP_POLL_ADD) {
369                 if (ret == IOU_POLL_DONE) {
370                         struct io_poll *poll;
371 
372                         poll = io_kiocb_to_cmd(req, struct io_poll);
373                         req->cqe.res = mangle_poll(req->cqe.res & poll->events);
374                 } else if (ret == IOU_POLL_REISSUE) {
375                         io_req_task_submit(req, ts);
376                         return;
377                 } else if (ret != IOU_POLL_REMOVE_POLL_USE_RES) {
378                         req->cqe.res = ret;
379                         req_set_fail(req);
380                 }
381 
382                 io_req_set_res(req, req->cqe.res, 0);
383                 io_req_task_complete(req, ts);
384         } else {
385                 io_tw_lock(req->ctx, ts);
386 
387                 if (ret == IOU_POLL_REMOVE_POLL_USE_RES)
388                         io_req_task_complete(req, ts);
389                 else if (ret == IOU_POLL_DONE || ret == IOU_POLL_REISSUE)
390                         io_req_task_submit(req, ts);
391                 else
392                         io_req_defer_failed(req, ret);
393         }
394 }
395 
396 static void io_poll_cancel_req(struct io_kiocb *req)
397 {
398         io_poll_mark_cancelled(req);
399         /* kick tw, which should complete the request */
400         io_poll_execute(req, 0);
401 }
402 
403 #define IO_ASYNC_POLL_COMMON    (EPOLLONESHOT | EPOLLPRI)
404 
405 static __cold int io_pollfree_wake(struct io_kiocb *req, struct io_poll *poll)
406 {
407         io_poll_mark_cancelled(req);
408         /* we have to kick tw in case it's not already */
409         io_poll_execute(req, 0);
410 
411         /*
412          * If the waitqueue is being freed early but someone already
413          * holds ownership over it, we have to tear down the request as
414          * best we can. That means immediately removing the request from
415          * its waitqueue and preventing all further accesses to the
416          * waitqueue via the request.
417          */
418         list_del_init(&poll->wait.entry);
419 
420         /*
421          * Careful: this *must* be the last step: as soon as
422          * poll->head is NULL'ed out, the request can be
423          * completed and freed, since the completion task_work
424          * no longer needs to take the waitqueue lock.
425          */
426         smp_store_release(&poll->head, NULL);
427         return 1;
428 }
429 
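/*
 * Waitqueue wake-up callback: filter out wakeups that don't match the
 * requested events, then try to take ownership and hand the rest of the
 * processing to task_work via __io_poll_execute(). POLLFREE teardown is
 * handled separately in io_pollfree_wake().
 */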
430 static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
431                         void *key)
432 {
433         struct io_kiocb *req = wqe_to_req(wait);
434         struct io_poll *poll = container_of(wait, struct io_poll, wait);
435         __poll_t mask = key_to_poll(key);
436 
437         if (unlikely(mask & POLLFREE))
438                 return io_pollfree_wake(req, poll);
439 
440         /* for instances that support it, check for an event match first */
441         if (mask && !(mask & (poll->events & ~IO_ASYNC_POLL_COMMON)))
442                 return 0;
443 
444         if (io_poll_get_ownership(req)) {
445                 /*
446                  * If we trigger a multishot poll off our own wakeup path,
447                  * disable multishot as there is a circular dependency between
448                  * CQ posting and triggering the event.
449                  */
450                 if (mask & EPOLL_URING_WAKE)
451                         poll->events |= EPOLLONESHOT;
452 
453                 /* optional, saves extra locking for removal in tw handler */
454                 if (mask && poll->events & EPOLLONESHOT) {
455                         list_del_init(&poll->wait.entry);
456                         poll->head = NULL;
457                         if (wqe_is_double(wait))
458                                 req->flags &= ~REQ_F_DOUBLE_POLL;
459                         else
460                                 req->flags &= ~REQ_F_SINGLE_POLL;
461                 }
462                 __io_poll_execute(req, mask);
463         }
464         return 1;
465 }
466 
467 /* fails only when polling is already being completed by the first entry */
468 static bool io_poll_double_prepare(struct io_kiocb *req)
469 {
470         struct wait_queue_head *head;
471         struct io_poll *poll = io_poll_get_single(req);
472 
473         /* head is RCU protected, see io_poll_remove_entries() comments */
474         rcu_read_lock();
475         head = smp_load_acquire(&poll->head);
476         /*
477          * poll arm might not hold ownership and so race for req->flags with
478          * io_poll_wake(). There is only one poll entry queued, serialise with
479          * it by taking its head lock. As we're still arming, the tw handler
480          * is not going to be run, so there are no races with it.
481          */
482         if (head) {
483                 spin_lock_irq(&head->lock);
484                 req->flags |= REQ_F_DOUBLE_POLL;
485                 if (req->opcode == IORING_OP_POLL_ADD)
486                         req->flags |= REQ_F_ASYNC_DATA;
487                 spin_unlock_irq(&head->lock);
488         }
489         rcu_read_unlock();
490         return !!head;
491 }
492 
493 static void __io_queue_proc(struct io_poll *poll, struct io_poll_table *pt,
494                             struct wait_queue_head *head,
495                             struct io_poll **poll_ptr)
496 {
497         struct io_kiocb *req = pt->req;
498         unsigned long wqe_private = (unsigned long) req;
499 
500         /*
501          * The file being polled uses multiple waitqueues for poll handling
502          * (e.g. one for read, one for write). Set up a separate io_poll
503          * if this happens.
504          */
505         if (unlikely(pt->nr_entries)) {
506                 struct io_poll *first = poll;
507 
508                 /* double add on the same waitqueue head, ignore */
509                 if (first->head == head)
510                         return;
511                 /* already have a 2nd entry, fail a third attempt */
512                 if (*poll_ptr) {
513                         if ((*poll_ptr)->head == head)
514                                 return;
515                         pt->error = -EINVAL;
516                         return;
517                 }
518 
519                 poll = kmalloc(sizeof(*poll), GFP_ATOMIC);
520                 if (!poll) {
521                         pt->error = -ENOMEM;
522                         return;
523                 }
524 
525                 /* mark as double wq entry */
526                 wqe_private |= IO_WQE_F_DOUBLE;
527                 io_init_poll_iocb(poll, first->events);
528                 if (!io_poll_double_prepare(req)) {
529                         /* the request is completing, just back off */
530                         kfree(poll);
531                         return;
532                 }
533                 *poll_ptr = poll;
534         } else {
535                 /* fine to modify, there is no poll queued to race with us */
536                 req->flags |= REQ_F_SINGLE_POLL;
537         }
538 
539         pt->nr_entries++;
540         poll->head = head;
541         poll->wait.private = (void *) wqe_private;
542 
543         if (poll->events & EPOLLEXCLUSIVE) {
544                 add_wait_queue_exclusive(head, &poll->wait);
545         } else {
546                 add_wait_queue(head, &poll->wait);
547         }
548 }
549 
550 static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head,
551                                struct poll_table_struct *p)
552 {
553         struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
554         struct io_poll *poll = io_kiocb_to_cmd(pt->req, struct io_poll);
555 
556         __io_queue_proc(poll, pt, head,
557                         (struct io_poll **) &pt->req->async_data);
558 }
559 
560 static bool io_poll_can_finish_inline(struct io_kiocb *req,
561                                       struct io_poll_table *pt)
562 {
563         return pt->owning || io_poll_get_ownership(req);
564 }
565 
566 static void io_poll_add_hash(struct io_kiocb *req)
567 {
568         if (req->flags & REQ_F_HASH_LOCKED)
569                 io_poll_req_insert_locked(req);
570         else
571                 io_poll_req_insert(req);
572 }
573 
574 /*
575  * Returns 0 when it's handed over for polling. The caller owns the request if
576  * it returns non-zero, but otherwise should not touch it. Negative values
577  * contain an error code. When the result is >0, the polling has completed
578  * inline and ipt.result_mask is set to the mask.
579  */
580 static int __io_arm_poll_handler(struct io_kiocb *req,
581                                  struct io_poll *poll,
582                                  struct io_poll_table *ipt, __poll_t mask,
583                                  unsigned issue_flags)
584 {
585         INIT_HLIST_NODE(&req->hash_node);
586         io_init_poll_iocb(poll, mask);
587         poll->file = req->file;
588         req->apoll_events = poll->events;
589 
590         ipt->pt._key = mask;
591         ipt->req = req;
592         ipt->error = 0;
593         ipt->nr_entries = 0;
594         /*
595          * Polling is either completed here or via task_work, so if we're in the
596          * task context we're naturally serialised with tw by virtue of running
597          * the same task. When it's io-wq, take the ownership to prevent tw
598          * from running. However, when we're in the task context, skip taking
599          * it as an optimisation.
600          *
601          * Note: even though the request won't be completed/freed, without
602          * ownership we still can race with io_poll_wake().
603          * io_poll_can_finish_inline() tries to deal with that.
604          */
605         ipt->owning = issue_flags & IO_URING_F_UNLOCKED;
606         atomic_set(&req->poll_refs, (int)ipt->owning);
607 
608         /* io-wq doesn't hold uring_lock */
609         if (issue_flags & IO_URING_F_UNLOCKED)
610                 req->flags &= ~REQ_F_HASH_LOCKED;
611 
612 
613         /*
614          * Exclusive waits may only wake a limited number of entries
615          * rather than all of them, which may interfere with lazy
616          * wake if someone does wait(events > 1). Ensure we don't do
617          * lazy wake for those, as we need to process each one as they
618          * come in.
619          */
620         if (poll->events & EPOLLEXCLUSIVE)
621                 req->flags |= REQ_F_POLL_NO_LAZY;
622 
623         mask = vfs_poll(req->file, &ipt->pt) & poll->events;
624 
625         if (unlikely(ipt->error || !ipt->nr_entries)) {
626                 io_poll_remove_entries(req);
627 
628                 if (!io_poll_can_finish_inline(req, ipt)) {
629                         io_poll_mark_cancelled(req);
630                         return 0;
631                 } else if (mask && (poll->events & EPOLLET)) {
632                         ipt->result_mask = mask;
633                         return 1;
634                 }
635                 return ipt->error ?: -EINVAL;
636         }
637 
638         if (mask &&
639            ((poll->events & (EPOLLET|EPOLLONESHOT)) == (EPOLLET|EPOLLONESHOT))) {
640                 if (!io_poll_can_finish_inline(req, ipt)) {
641                         io_poll_add_hash(req);
642                         return 0;
643                 }
644                 io_poll_remove_entries(req);
645                 ipt->result_mask = mask;
646                 /* no one else has access to the req, forget about the ref */
647                 return 1;
648         }
649 
650         io_poll_add_hash(req);
651 
652         if (mask && (poll->events & EPOLLET) &&
653             io_poll_can_finish_inline(req, ipt)) {
654                 __io_poll_execute(req, mask);
655                 return 0;
656         }
657         io_napi_add(req);
658 
659         if (ipt->owning) {
660                 /*
661                  * Try to release ownership. If we see a change of state, e.g.
662                  * poll was woken up, queue up a tw, it'll deal with it.
663                  */
664                 if (atomic_cmpxchg(&req->poll_refs, 1, 0) != 1)
665                         __io_poll_execute(req, 0);
666         }
667         return 0;
668 }
669 
670 static void io_async_queue_proc(struct file *file, struct wait_queue_head *head,
671                                struct poll_table_struct *p)
672 {
673         struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
674         struct async_poll *apoll = pt->req->apoll;
675 
676         __io_queue_proc(&apoll->poll, pt, head, &apoll->double_poll);
677 }
678 
679 /*
680  * We can't reliably detect loops where a poll trigger is repeatedly followed
681  * by a failing issue. But rather than fail these immediately, allow a
682  * certain number of retries before we give up. Given that this condition
683  * should _rarely_ trigger even once, we should be fine with a larger value.
684  */
685 #define APOLL_MAX_RETRY         128
686 
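/*
 * Get an async_poll entry: reuse the one from a previous arming attempt,
 * otherwise take one from the ctx cache or fall back to kmalloc(). Returns
 * NULL on allocation failure or once the retry budget is exhausted.
 */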
687 static struct async_poll *io_req_alloc_apoll(struct io_kiocb *req,
688                                              unsigned issue_flags)
689 {
690         struct io_ring_ctx *ctx = req->ctx;
691         struct async_poll *apoll;
692 
693         if (req->flags & REQ_F_POLLED) {
694                 apoll = req->apoll;
695                 kfree(apoll->double_poll);
696         } else if (!(issue_flags & IO_URING_F_UNLOCKED)) {
697                 apoll = io_alloc_cache_get(&ctx->apoll_cache);
698                 if (!apoll)
699                         goto alloc_apoll;
700                 apoll->poll.retries = APOLL_MAX_RETRY;
701         } else {
702 alloc_apoll:
703                 apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC);
704                 if (unlikely(!apoll))
705                         return NULL;
706                 apoll->poll.retries = APOLL_MAX_RETRY;
707         }
708         apoll->double_poll = NULL;
709         req->apoll = apoll;
710         if (unlikely(!--apoll->poll.retries))
711                 return NULL;
712         return apoll;
713 }
714 
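/*
 * Arm async poll (fast poll) for a request whose file wasn't ready at issue
 * time: build the event mask from the opcode definition and hand the request
 * to __io_arm_poll_handler(), so the issue is retried from task_work once the
 * file signals readiness.
 */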
715 int io_arm_poll_handler(struct io_kiocb *req, unsigned issue_flags)
716 {
717         const struct io_issue_def *def = &io_issue_defs[req->opcode];
718         struct async_poll *apoll;
719         struct io_poll_table ipt;
720         __poll_t mask = POLLPRI | POLLERR | EPOLLET;
721         int ret;
722 
723         /*
724          * apoll requests already grab the mutex to complete in the tw handler,
725          * so removal from the mutex-backed hash is free, use it by default.
726          */
727         req->flags |= REQ_F_HASH_LOCKED;
728 
729         if (!def->pollin && !def->pollout)
730                 return IO_APOLL_ABORTED;
731         if (!io_file_can_poll(req))
732                 return IO_APOLL_ABORTED;
733         if (!(req->flags & REQ_F_APOLL_MULTISHOT))
734                 mask |= EPOLLONESHOT;
735 
736         if (def->pollin) {
737                 mask |= EPOLLIN | EPOLLRDNORM;
738 
739                 /* If reading from MSG_ERRQUEUE using recvmsg, ignore POLLIN */
740                 if (req->flags & REQ_F_CLEAR_POLLIN)
741                         mask &= ~EPOLLIN;
742         } else {
743                 mask |= EPOLLOUT | EPOLLWRNORM;
744         }
745         if (def->poll_exclusive)
746                 mask |= EPOLLEXCLUSIVE;
747 
748         apoll = io_req_alloc_apoll(req, issue_flags);
749         if (!apoll)
750                 return IO_APOLL_ABORTED;
751         req->flags &= ~(REQ_F_SINGLE_POLL | REQ_F_DOUBLE_POLL);
752         req->flags |= REQ_F_POLLED;
753         ipt.pt._qproc = io_async_queue_proc;
754 
755         io_kbuf_recycle(req, issue_flags);
756 
757         ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask, issue_flags);
758         if (ret)
759                 return ret > 0 ? IO_APOLL_READY : IO_APOLL_ABORTED;
760         trace_io_uring_poll_arm(req, mask, apoll->poll.events);
761         return IO_APOLL_OK;
762 }
763 
764 static __cold bool io_poll_remove_all_table(struct task_struct *tsk,
765                                             struct io_hash_table *table,
766                                             bool cancel_all)
767 {
768         unsigned nr_buckets = 1U << table->hash_bits;
769         struct hlist_node *tmp;
770         struct io_kiocb *req;
771         bool found = false;
772         int i;
773 
774         for (i = 0; i < nr_buckets; i++) {
775                 struct io_hash_bucket *hb = &table->hbs[i];
776 
777                 spin_lock(&hb->lock);
778                 hlist_for_each_entry_safe(req, tmp, &hb->list, hash_node) {
779                         if (io_match_task_safe(req, tsk, cancel_all)) {
780                                 hlist_del_init(&req->hash_node);
781                                 io_poll_cancel_req(req);
782                                 found = true;
783                         }
784                 }
785                 spin_unlock(&hb->lock);
786         }
787         return found;
788 }
789 
790 /*
791  * Returns true if we found and killed one or more poll requests
792  */
793 __cold bool io_poll_remove_all(struct io_ring_ctx *ctx, struct task_struct *tsk,
794                                bool cancel_all)
795         __must_hold(&ctx->uring_lock)
796 {
797         bool ret;
798 
799         ret = io_poll_remove_all_table(tsk, &ctx->cancel_table, cancel_all);
800         ret |= io_poll_remove_all_table(tsk, &ctx->cancel_table_locked, cancel_all);
801         return ret;
802 }
803 
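/*
 * Look up a poll request by user_data in one cancellation hash table. On
 * success the matching bucket's lock is left held and returned via
 * out_bucket; the caller must unlock it.
 */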
804 static struct io_kiocb *io_poll_find(struct io_ring_ctx *ctx, bool poll_only,
805                                      struct io_cancel_data *cd,
806                                      struct io_hash_table *table,
807                                      struct io_hash_bucket **out_bucket)
808 {
809         struct io_kiocb *req;
810         u32 index = hash_long(cd->data, table->hash_bits);
811         struct io_hash_bucket *hb = &table->hbs[index];
812 
813         *out_bucket = NULL;
814 
815         spin_lock(&hb->lock);
816         hlist_for_each_entry(req, &hb->list, hash_node) {
817                 if (cd->data != req->cqe.user_data)
818                         continue;
819                 if (poll_only && req->opcode != IORING_OP_POLL_ADD)
820                         continue;
821                 if (cd->flags & IORING_ASYNC_CANCEL_ALL) {
822                         if (io_cancel_match_sequence(req, cd->seq))
823                                 continue;
824                 }
825                 *out_bucket = hb;
826                 return req;
827         }
828         spin_unlock(&hb->lock);
829         return NULL;
830 }
831 
832 static struct io_kiocb *io_poll_file_find(struct io_ring_ctx *ctx,
833                                           struct io_cancel_data *cd,
834                                           struct io_hash_table *table,
835                                           struct io_hash_bucket **out_bucket)
836 {
837         unsigned nr_buckets = 1U << table->hash_bits;
838         struct io_kiocb *req;
839         int i;
840 
841         *out_bucket = NULL;
842 
843         for (i = 0; i < nr_buckets; i++) {
844                 struct io_hash_bucket *hb = &table->hbs[i];
845 
846                 spin_lock(&hb->lock);
847                 hlist_for_each_entry(req, &hb->list, hash_node) {
848                         if (io_cancel_req_match(req, cd)) {
849                                 *out_bucket = hb;
850                                 return req;
851                         }
852                 }
853                 spin_unlock(&hb->lock);
854         }
855         return NULL;
856 }
857 
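/*
 * Take ownership of a found poll request and detach it from its waitqueues
 * and from the cancellation hash, so the caller can cancel or re-arm it.
 */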
858 static int io_poll_disarm(struct io_kiocb *req)
859 {
860         if (!req)
861                 return -ENOENT;
862         if (!io_poll_get_ownership(req))
863                 return -EALREADY;
864         io_poll_remove_entries(req);
865         hash_del(&req->hash_node);
866         return 0;
867 }
868 
869 static int __io_poll_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd,
870                             struct io_hash_table *table)
871 {
872         struct io_hash_bucket *bucket;
873         struct io_kiocb *req;
874 
875         if (cd->flags & (IORING_ASYNC_CANCEL_FD | IORING_ASYNC_CANCEL_OP |
876                          IORING_ASYNC_CANCEL_ANY))
877                 req = io_poll_file_find(ctx, cd, table, &bucket);
878         else
879                 req = io_poll_find(ctx, false, cd, table, &bucket);
880 
881         if (req)
882                 io_poll_cancel_req(req);
883         if (bucket)
884                 spin_unlock(&bucket->lock);
885         return req ? 0 : -ENOENT;
886 }
887 
888 int io_poll_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd,
889                    unsigned issue_flags)
890 {
891         int ret;
892 
893         ret = __io_poll_cancel(ctx, cd, &ctx->cancel_table);
894         if (ret != -ENOENT)
895                 return ret;
896 
897         io_ring_submit_lock(ctx, issue_flags);
898         ret = __io_poll_cancel(ctx, cd, &ctx->cancel_table_locked);
899         io_ring_submit_unlock(ctx, issue_flags);
900         return ret;
901 }
902 
903 static __poll_t io_poll_parse_events(const struct io_uring_sqe *sqe,
904                                      unsigned int flags)
905 {
906         u32 events;
907 
908         events = READ_ONCE(sqe->poll32_events);
909 #ifdef __BIG_ENDIAN
910         events = swahw32(events);
911 #endif
912         if (!(flags & IORING_POLL_ADD_MULTI))
913                 events |= EPOLLONESHOT;
914         if (!(flags & IORING_POLL_ADD_LEVEL))
915                 events |= EPOLLET;
916         return demangle_poll(events) |
917                 (events & (EPOLLEXCLUSIVE|EPOLLONESHOT|EPOLLET));
918 }
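
/*
 * Userspace view (illustrative sketch, not part of this file): with liburing,
 * a multishot poll request would be prepared roughly as
 *
 *      struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *      io_uring_prep_poll_multishot(sqe, fd, POLLIN);
 *      io_uring_submit(&ring);
 *
 * which puts IORING_POLL_ADD_MULTI into sqe->len and the poll mask into
 * sqe->poll32_events, the fields consumed by io_poll_parse_events() above and
 * io_poll_add_prep() below.
 */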
919 
920 int io_poll_remove_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
921 {
922         struct io_poll_update *upd = io_kiocb_to_cmd(req, struct io_poll_update);
923         u32 flags;
924 
925         if (sqe->buf_index || sqe->splice_fd_in)
926                 return -EINVAL;
927         flags = READ_ONCE(sqe->len);
928         if (flags & ~(IORING_POLL_UPDATE_EVENTS | IORING_POLL_UPDATE_USER_DATA |
929                       IORING_POLL_ADD_MULTI))
930                 return -EINVAL;
931         /* meaningless without update */
932         if (flags == IORING_POLL_ADD_MULTI)
933                 return -EINVAL;
934 
935         upd->old_user_data = READ_ONCE(sqe->addr);
936         upd->update_events = flags & IORING_POLL_UPDATE_EVENTS;
937         upd->update_user_data = flags & IORING_POLL_UPDATE_USER_DATA;
938 
939         upd->new_user_data = READ_ONCE(sqe->off);
940         if (!upd->update_user_data && upd->new_user_data)
941                 return -EINVAL;
942         if (upd->update_events)
943                 upd->events = io_poll_parse_events(sqe, flags);
944         else if (sqe->poll32_events)
945                 return -EINVAL;
946 
947         return 0;
948 }
949 
950 int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
951 {
952         struct io_poll *poll = io_kiocb_to_cmd(req, struct io_poll);
953         u32 flags;
954 
955         if (sqe->buf_index || sqe->off || sqe->addr)
956                 return -EINVAL;
957         flags = READ_ONCE(sqe->len);
958         if (flags & ~IORING_POLL_ADD_MULTI)
959                 return -EINVAL;
960         if ((flags & IORING_POLL_ADD_MULTI) && (req->flags & REQ_F_CQE_SKIP))
961                 return -EINVAL;
962 
963         poll->events = io_poll_parse_events(sqe, flags);
964         return 0;
965 }
966 
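/*
 * Issue IORING_OP_POLL_ADD: arm the poll handler. If the file is already
 * ready, the request completes inline with the resulting event mask;
 * otherwise completion happens later from task_work.
 */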
967 int io_poll_add(struct io_kiocb *req, unsigned int issue_flags)
968 {
969         struct io_poll *poll = io_kiocb_to_cmd(req, struct io_poll);
970         struct io_poll_table ipt;
971         int ret;
972 
973         ipt.pt._qproc = io_poll_queue_proc;
974 
975         /*
976          * If sqpoll or single issuer, there is no contention for ->uring_lock
977          * and we'll end up holding it in tw handlers anyway.
978          */
979         if (req->ctx->flags & (IORING_SETUP_SQPOLL|IORING_SETUP_SINGLE_ISSUER))
980                 req->flags |= REQ_F_HASH_LOCKED;
981 
982         ret = __io_arm_poll_handler(req, poll, &ipt, poll->events, issue_flags);
983         if (ret > 0) {
984                 io_req_set_res(req, ipt.result_mask, 0);
985                 return IOU_OK;
986         }
987         return ret ?: IOU_ISSUE_SKIP_COMPLETE;
988 }
989 
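/*
 * Issue IORING_OP_POLL_REMOVE: find the original poll request by user_data in
 * either cancellation table, disarm it, and then either re-arm it with the
 * updated events/user_data or complete it with -ECANCELED.
 */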
990 int io_poll_remove(struct io_kiocb *req, unsigned int issue_flags)
991 {
992         struct io_poll_update *poll_update = io_kiocb_to_cmd(req, struct io_poll_update);
993         struct io_ring_ctx *ctx = req->ctx;
994         struct io_cancel_data cd = { .ctx = ctx, .data = poll_update->old_user_data, };
995         struct io_hash_bucket *bucket;
996         struct io_kiocb *preq;
997         int ret2, ret = 0;
998 
999         io_ring_submit_lock(ctx, issue_flags);
1000         preq = io_poll_find(ctx, true, &cd, &ctx->cancel_table, &bucket);
1001         ret2 = io_poll_disarm(preq);
1002         if (bucket)
1003                 spin_unlock(&bucket->lock);
1004         if (!ret2)
1005                 goto found;
1006         if (ret2 != -ENOENT) {
1007                 ret = ret2;
1008                 goto out;
1009         }
1010 
1011         preq = io_poll_find(ctx, true, &cd, &ctx->cancel_table_locked, &bucket);
1012         ret2 = io_poll_disarm(preq);
1013         if (bucket)
1014                 spin_unlock(&bucket->lock);
1015         if (ret2) {
1016                 ret = ret2;
1017                 goto out;
1018         }
1019 
1020 found:
1021         if (WARN_ON_ONCE(preq->opcode != IORING_OP_POLL_ADD)) {
1022                 ret = -EFAULT;
1023                 goto out;
1024         }
1025 
1026         if (poll_update->update_events || poll_update->update_user_data) {
1027                 /* only replace the event mask bits, keep behavior flags */
1028                 if (poll_update->update_events) {
1029                         struct io_poll *poll = io_kiocb_to_cmd(preq, struct io_poll);
1030 
1031                         poll->events &= ~0xffff;
1032                         poll->events |= poll_update->events & 0xffff;
1033                         poll->events |= IO_POLL_UNMASK;
1034                 }
1035                 if (poll_update->update_user_data)
1036                         preq->cqe.user_data = poll_update->new_user_data;
1037 
1038                 ret2 = io_poll_add(preq, issue_flags & ~IO_URING_F_UNLOCKED);
1039                 /* successfully updated, don't complete poll request */
1040                 if (!ret2 || ret2 == -EIOCBQUEUED)
1041                         goto out;
1042         }
1043 
1044         req_set_fail(preq);
1045         io_req_set_res(preq, -ECANCELED, 0);
1046         preq->io_task_work.func = io_req_task_complete;
1047         io_req_task_work_add(preq);
1048 out:
1049         io_ring_submit_unlock(ctx, issue_flags);
1050         if (ret < 0) {
1051                 req_set_fail(req);
1052                 return ret;
1053         }
1054         /* complete update request, we're done with it */
1055         io_req_set_res(req, ret, 0);
1056         return IOU_OK;
1057 }
1058 
