
TOMOYO Linux Cross Reference
Linux/io_uring/cancel.c

Version: linux-6.11-rc3

// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/namei.h>
#include <linux/nospec.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "tctx.h"
#include "poll.h"
#include "timeout.h"
#include "waitid.h"
#include "futex.h"
#include "cancel.h"

struct io_cancel {
	struct file			*file;
	u64				addr;
	u32				flags;
	s32				fd;
	u8				opcode;
};

#define CANCEL_FLAGS	(IORING_ASYNC_CANCEL_ALL | IORING_ASYNC_CANCEL_FD | \
			 IORING_ASYNC_CANCEL_ANY | IORING_ASYNC_CANCEL_FD_FIXED | \
			 IORING_ASYNC_CANCEL_USERDATA | IORING_ASYNC_CANCEL_OP)
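/*
 * Editor's example (not part of cancel.c): a minimal userspace sketch of
 * how these IORING_ASYNC_CANCEL_* flags end up in sqe->cancel_flags.
 * Assumes liburing >= 2.2 for io_uring_prep_cancel64(); names and error
 * handling are illustrative only.
 */
#include <liburing.h>

static int cancel_all_matching(struct io_uring *ring, __u64 user_data)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
	struct io_uring_cqe *cqe;
	int ret;

	/* cancel every pending request whose completion user_data matches */
	io_uring_prep_cancel64(sqe, user_data, IORING_ASYNC_CANCEL_ALL);
	io_uring_submit(ring);

	ret = io_uring_wait_cqe(ring, &cqe);
	if (ret < 0)
		return ret;
	/* with _ALL, res is the number canceled; else 0, -ENOENT or -EALREADY */
	ret = cqe->res;
	io_uring_cqe_seen(ring, cqe);
	return ret;
}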
/*
 * Returns true if the request matches the criteria outlined by 'cd'.
 */
bool io_cancel_req_match(struct io_kiocb *req, struct io_cancel_data *cd)
{
	bool match_user_data = cd->flags & IORING_ASYNC_CANCEL_USERDATA;

	if (req->ctx != cd->ctx)
		return false;

	if (!(cd->flags & (IORING_ASYNC_CANCEL_FD | IORING_ASYNC_CANCEL_OP)))
		match_user_data = true;

	if (cd->flags & IORING_ASYNC_CANCEL_ANY)
		goto check_seq;
	if (cd->flags & IORING_ASYNC_CANCEL_FD) {
		if (req->file != cd->file)
			return false;
	}
	if (cd->flags & IORING_ASYNC_CANCEL_OP) {
		if (req->opcode != cd->opcode)
			return false;
	}
	if (match_user_data && req->cqe.user_data != cd->data)
		return false;
	if (cd->flags & IORING_ASYNC_CANCEL_ALL) {
check_seq:
		if (io_cancel_match_sequence(req, cd->seq))
			return false;
	}

	return true;
}

static bool io_cancel_cb(struct io_wq_work *work, void *data)
{
	struct io_kiocb *req = container_of(work, struct io_kiocb, work);
	struct io_cancel_data *cd = data;

	return io_cancel_req_match(req, cd);
}

static int io_async_cancel_one(struct io_uring_task *tctx,
			       struct io_cancel_data *cd)
{
	enum io_wq_cancel cancel_ret;
	int ret = 0;
	bool all;

	if (!tctx || !tctx->io_wq)
		return -ENOENT;

	all = cd->flags & (IORING_ASYNC_CANCEL_ALL|IORING_ASYNC_CANCEL_ANY);
	cancel_ret = io_wq_cancel_cb(tctx->io_wq, io_cancel_cb, cd, all);
	switch (cancel_ret) {
	case IO_WQ_CANCEL_OK:
		ret = 0;
		break;
	case IO_WQ_CANCEL_RUNNING:
		ret = -EALREADY;
		break;
	case IO_WQ_CANCEL_NOTFOUND:
		ret = -ENOENT;
		break;
	}

	return ret;
}

int io_try_cancel(struct io_uring_task *tctx, struct io_cancel_data *cd,
		  unsigned issue_flags)
{
	struct io_ring_ctx *ctx = cd->ctx;
	int ret;

	WARN_ON_ONCE(!io_wq_current_is_worker() && tctx != current->io_uring);

	ret = io_async_cancel_one(tctx, cd);
	/*
	 * Fall through even for -EALREADY, as we may have a poll handler
	 * armed that needs unarming.
	 */
	if (!ret)
		return 0;

	ret = io_poll_cancel(ctx, cd, issue_flags);
	if (ret != -ENOENT)
		return ret;

	ret = io_waitid_cancel(ctx, cd, issue_flags);
	if (ret != -ENOENT)
		return ret;

	ret = io_futex_cancel(ctx, cd, issue_flags);
	if (ret != -ENOENT)
		return ret;

	spin_lock(&ctx->completion_lock);
	if (!(cd->flags & IORING_ASYNC_CANCEL_FD))
		ret = io_timeout_cancel(ctx, cd);
	spin_unlock(&ctx->completion_lock);
	return ret;
}

int io_async_cancel_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_cancel *cancel = io_kiocb_to_cmd(req, struct io_cancel);

	if (unlikely(req->flags & REQ_F_BUFFER_SELECT))
		return -EINVAL;
	if (sqe->off || sqe->splice_fd_in)
		return -EINVAL;

	cancel->addr = READ_ONCE(sqe->addr);
	cancel->flags = READ_ONCE(sqe->cancel_flags);
	if (cancel->flags & ~CANCEL_FLAGS)
		return -EINVAL;
	if (cancel->flags & IORING_ASYNC_CANCEL_FD) {
		if (cancel->flags & IORING_ASYNC_CANCEL_ANY)
			return -EINVAL;
		cancel->fd = READ_ONCE(sqe->fd);
	}
	if (cancel->flags & IORING_ASYNC_CANCEL_OP) {
		if (cancel->flags & IORING_ASYNC_CANCEL_ANY)
			return -EINVAL;
		cancel->opcode = READ_ONCE(sqe->len);
	}

	return 0;
}
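/*
 * Editor's example (not part of cancel.c): matching the prep rules above
 * from userspace. IORING_ASYNC_CANCEL_OP reads the opcode to match from
 * sqe->len, so this hypothetical helper sets it directly on the sqe after
 * a plain cancel prep. Assumes liburing >= 2.2.
 */
#include <liburing.h>

static void prep_cancel_by_opcode(struct io_uring_sqe *sqe, __u8 opcode)
{
	/* user_data is only matched if IORING_ASYNC_CANCEL_USERDATA is set */
	io_uring_prep_cancel64(sqe, 0,
			       IORING_ASYNC_CANCEL_OP | IORING_ASYNC_CANCEL_ALL);
	sqe->len = opcode;	/* e.g. IORING_OP_TIMEOUT */
}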
static int __io_async_cancel(struct io_cancel_data *cd,
			     struct io_uring_task *tctx,
			     unsigned int issue_flags)
{
	bool all = cd->flags & (IORING_ASYNC_CANCEL_ALL|IORING_ASYNC_CANCEL_ANY);
	struct io_ring_ctx *ctx = cd->ctx;
	struct io_tctx_node *node;
	int ret, nr = 0;

	do {
		ret = io_try_cancel(tctx, cd, issue_flags);
		if (ret == -ENOENT)
			break;
		if (!all)
			return ret;
		nr++;
	} while (1);

	/* slow path, try all io-wq's */
	io_ring_submit_lock(ctx, issue_flags);
	ret = -ENOENT;
	list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
		ret = io_async_cancel_one(node->task->io_uring, cd);
		if (ret != -ENOENT) {
			if (!all)
				break;
			nr++;
		}
	}
	io_ring_submit_unlock(ctx, issue_flags);
	return all ? nr : ret;
}

int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_cancel *cancel = io_kiocb_to_cmd(req, struct io_cancel);
	struct io_cancel_data cd = {
		.ctx	= req->ctx,
		.data	= cancel->addr,
		.flags	= cancel->flags,
		.opcode	= cancel->opcode,
		.seq	= atomic_inc_return(&req->ctx->cancel_seq),
	};
	struct io_uring_task *tctx = req->task->io_uring;
	int ret;

	if (cd.flags & IORING_ASYNC_CANCEL_FD) {
		if (req->flags & REQ_F_FIXED_FILE ||
		    cd.flags & IORING_ASYNC_CANCEL_FD_FIXED) {
			req->flags |= REQ_F_FIXED_FILE;
			req->file = io_file_get_fixed(req, cancel->fd,
							issue_flags);
		} else {
			req->file = io_file_get_normal(req, cancel->fd);
		}
		if (!req->file) {
			ret = -EBADF;
			goto done;
		}
		cd.file = req->file;
	}

	ret = __io_async_cancel(&cd, tctx, issue_flags);
done:
	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}
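/*
 * Editor's example (not part of cancel.c): the userspace side of the
 * IORING_ASYNC_CANCEL_FD path handled above. Assumes liburing >= 2.2,
 * where io_uring_prep_cancel_fd() ORs IORING_ASYNC_CANCEL_FD into
 * sqe->cancel_flags itself.
 */
#include <liburing.h>

static void prep_cancel_by_fd(struct io_uring_sqe *sqe, int fd)
{
	/* cancel all pending requests that target this file descriptor */
	io_uring_prep_cancel_fd(sqe, fd, IORING_ASYNC_CANCEL_ALL);
}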
void init_hash_table(struct io_hash_table *table, unsigned size)
{
	unsigned int i;

	for (i = 0; i < size; i++) {
		spin_lock_init(&table->hbs[i].lock);
		INIT_HLIST_HEAD(&table->hbs[i].list);
	}
}

static int __io_sync_cancel(struct io_uring_task *tctx,
			    struct io_cancel_data *cd, int fd)
{
	struct io_ring_ctx *ctx = cd->ctx;

	/* fixed must be grabbed every time since we drop the uring_lock */
	if ((cd->flags & IORING_ASYNC_CANCEL_FD) &&
	    (cd->flags & IORING_ASYNC_CANCEL_FD_FIXED)) {
		if (unlikely(fd >= ctx->nr_user_files))
			return -EBADF;
		fd = array_index_nospec(fd, ctx->nr_user_files);
		cd->file = io_file_from_index(&ctx->file_table, fd);
		if (!cd->file)
			return -EBADF;
	}

	return __io_async_cancel(cd, tctx, 0);
}

int io_sync_cancel(struct io_ring_ctx *ctx, void __user *arg)
	__must_hold(&ctx->uring_lock)
{
	struct io_cancel_data cd = {
		.ctx	= ctx,
		.seq	= atomic_inc_return(&ctx->cancel_seq),
	};
	ktime_t timeout = KTIME_MAX;
	struct io_uring_sync_cancel_reg sc;
	struct file *file = NULL;
	DEFINE_WAIT(wait);
	int ret, i;

	if (copy_from_user(&sc, arg, sizeof(sc)))
		return -EFAULT;
	if (sc.flags & ~CANCEL_FLAGS)
		return -EINVAL;
	for (i = 0; i < ARRAY_SIZE(sc.pad); i++)
		if (sc.pad[i])
			return -EINVAL;
	for (i = 0; i < ARRAY_SIZE(sc.pad2); i++)
		if (sc.pad2[i])
			return -EINVAL;

	cd.data = sc.addr;
	cd.flags = sc.flags;
	cd.opcode = sc.opcode;

	/* we can grab a normal file descriptor upfront */
	if ((cd.flags & IORING_ASYNC_CANCEL_FD) &&
	   !(cd.flags & IORING_ASYNC_CANCEL_FD_FIXED)) {
		file = fget(sc.fd);
		if (!file)
			return -EBADF;
		cd.file = file;
	}

	ret = __io_sync_cancel(current->io_uring, &cd, sc.fd);

	/* found something, done! */
	if (ret != -EALREADY)
		goto out;

	if (sc.timeout.tv_sec != -1UL || sc.timeout.tv_nsec != -1UL) {
		struct timespec64 ts = {
			.tv_sec		= sc.timeout.tv_sec,
			.tv_nsec	= sc.timeout.tv_nsec
		};

		timeout = ktime_add_ns(timespec64_to_ktime(ts), ktime_get_ns());
	}
	/*
	 * Keep looking until we get -ENOENT. We'll get woken every time
	 * a request completes and will retry the cancellation.
	 */
	do {
		cd.seq = atomic_inc_return(&ctx->cancel_seq);

		prepare_to_wait(&ctx->cq_wait, &wait, TASK_INTERRUPTIBLE);

		ret = __io_sync_cancel(current->io_uring, &cd, sc.fd);

		mutex_unlock(&ctx->uring_lock);
		if (ret != -EALREADY)
			break;

		ret = io_run_task_work_sig(ctx);
		if (ret < 0)
			break;
		ret = schedule_hrtimeout(&timeout, HRTIMER_MODE_ABS);
		if (!ret) {
			ret = -ETIME;
			break;
		}
		mutex_lock(&ctx->uring_lock);
	} while (1);

	finish_wait(&ctx->cq_wait, &wait);
	mutex_lock(&ctx->uring_lock);

	if (ret == -ENOENT || ret > 0)
		ret = 0;
out:
	if (file)
		fput(file);
	return ret;
}
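/*
 * Editor's example (not part of cancel.c): driving io_sync_cancel() above
 * through the registration interface rather than an SQE. Assumes
 * liburing >= 2.3 for io_uring_register_sync_cancel(). A timeout of
 * {-1, -1} means wait indefinitely, matching the KTIME_MAX default above.
 */
#include <liburing.h>
#include <string.h>

static int sync_cancel_by_user_data(struct io_uring *ring, __u64 user_data)
{
	struct io_uring_sync_cancel_reg reg;

	memset(&reg, 0, sizeof(reg));
	reg.addr = user_data;
	reg.flags = IORING_ASYNC_CANCEL_ALL;
	reg.timeout.tv_sec = -1;	/* {-1, -1}: no timeout */
	reg.timeout.tv_nsec = -1;

	/* 0 on success (including "nothing left to cancel"), -ETIME on timeout */
	return io_uring_register_sync_cancel(ring, &reg);
}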
