1 /* 1 /* 2 * An async IO implementation for Linux 2 * An async IO implementation for Linux 3 * Written by Benjamin LaHaise <bcrl@kvac 3 * Written by Benjamin LaHaise <bcrl@kvack.org> 4 * 4 * 5 * Implements an efficient asynchronous i 5 * Implements an efficient asynchronous io interface. 6 * 6 * 7 * Copyright 2000, 2001, 2002 Red Hat, In 7 * Copyright 2000, 2001, 2002 Red Hat, Inc. All Rights Reserved. 8 * Copyright 2018 Christoph Hellwig. << 9 * 8 * 10 * See ../COPYING for licensing terms. 9 * See ../COPYING for licensing terms. 11 */ 10 */ 12 #define pr_fmt(fmt) "%s: " fmt, __func__ 11 #define pr_fmt(fmt) "%s: " fmt, __func__ 13 12 14 #include <linux/kernel.h> 13 #include <linux/kernel.h> 15 #include <linux/init.h> 14 #include <linux/init.h> 16 #include <linux/errno.h> 15 #include <linux/errno.h> 17 #include <linux/time.h> 16 #include <linux/time.h> 18 #include <linux/aio_abi.h> 17 #include <linux/aio_abi.h> 19 #include <linux/export.h> 18 #include <linux/export.h> 20 #include <linux/syscalls.h> 19 #include <linux/syscalls.h> 21 #include <linux/backing-dev.h> 20 #include <linux/backing-dev.h> 22 #include <linux/refcount.h> << 23 #include <linux/uio.h> 21 #include <linux/uio.h> 24 22 25 #include <linux/sched/signal.h> !! 23 #include <linux/sched.h> 26 #include <linux/fs.h> 24 #include <linux/fs.h> 27 #include <linux/file.h> 25 #include <linux/file.h> 28 #include <linux/mm.h> 26 #include <linux/mm.h> 29 #include <linux/mman.h> 27 #include <linux/mman.h> 30 #include <linux/percpu.h> !! 28 #include <linux/mmu_context.h> 31 #include <linux/slab.h> 29 #include <linux/slab.h> 32 #include <linux/timer.h> 30 #include <linux/timer.h> 33 #include <linux/aio.h> 31 #include <linux/aio.h> 34 #include <linux/highmem.h> 32 #include <linux/highmem.h> 35 #include <linux/workqueue.h> 33 #include <linux/workqueue.h> 36 #include <linux/security.h> 34 #include <linux/security.h> 37 #include <linux/eventfd.h> 35 #include <linux/eventfd.h> 38 #include <linux/blkdev.h> 36 #include <linux/blkdev.h> 39 #include <linux/compat.h> 37 #include <linux/compat.h> 40 #include <linux/migrate.h> << 41 #include <linux/ramfs.h> << 42 #include <linux/percpu-refcount.h> << 43 #include <linux/mount.h> << 44 #include <linux/pseudo_fs.h> << 45 38 46 #include <linux/uaccess.h> !! 39 #include <asm/kmap_types.h> 47 #include <linux/nospec.h> !! 40 #include <asm/uaccess.h> 48 << 49 #include "internal.h" << 50 << 51 #define KIOCB_KEY 0 << 52 41 53 #define AIO_RING_MAGIC 0xa10a 42 #define AIO_RING_MAGIC 0xa10a10a1 54 #define AIO_RING_COMPAT_FEATURES 1 43 #define AIO_RING_COMPAT_FEATURES 1 55 #define AIO_RING_INCOMPAT_FEATURES 0 44 #define AIO_RING_INCOMPAT_FEATURES 0 56 struct aio_ring { 45 struct aio_ring { 57 unsigned id; /* kernel inte 46 unsigned id; /* kernel internal index number */ 58 unsigned nr; /* number of i 47 unsigned nr; /* number of io_events */ 59 unsigned head; /* Written to !! 48 unsigned head; 60 * mutex by ai << 61 unsigned tail; 49 unsigned tail; 62 50 63 unsigned magic; 51 unsigned magic; 64 unsigned compat_features; 52 unsigned compat_features; 65 unsigned incompat_features; 53 unsigned incompat_features; 66 unsigned header_length; /* siz 54 unsigned header_length; /* size of aio_ring */ 67 55 68 56 69 struct io_event io_events[]; !! 
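	/*
	 * head is advanced by userspace (or by aio_read_events_ring() under
	 * ring_lock) as events are consumed; tail is advanced by aio_complete()
	 * under completion_lock as events are posted.  Both index the io_events
	 * array below, wrapping at nr.
	 */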
57 struct io_event io_events[0]; 70 }; /* 128 bytes + ring size */ 58 }; /* 128 bytes + ring size */ 71 59 72 /* << 73 * Plugging is meant to work with larger batch << 74 * have more than the below, then don't bother << 75 */ << 76 #define AIO_PLUG_THRESHOLD 2 << 77 << 78 #define AIO_RING_PAGES 8 60 #define AIO_RING_PAGES 8 79 61 80 struct kioctx_table { << 81 struct rcu_head rcu; << 82 unsigned nr; << 83 struct kioctx __rcu *table[] __cou << 84 }; << 85 << 86 struct kioctx_cpu { << 87 unsigned reqs_available << 88 }; << 89 << 90 struct ctx_rq_wait { << 91 struct completion comp; << 92 atomic_t count; << 93 }; << 94 << 95 struct kioctx { 62 struct kioctx { 96 struct percpu_ref users; !! 63 atomic_t users; 97 atomic_t dead; 64 atomic_t dead; 98 65 99 struct percpu_ref reqs; !! 66 /* This needs improving */ 100 << 101 unsigned long user_id; 67 unsigned long user_id; 102 !! 68 struct hlist_node list; 103 struct kioctx_cpu __percpu *cpu; << 104 69 105 /* 70 /* 106 * For percpu reqs_available, number o << 107 * counter at a time: << 108 */ << 109 unsigned req_batch; << 110 /* << 111 * This is what userspace passed to io 71 * This is what userspace passed to io_setup(), it's not used for 112 * anything but counting against the g 72 * anything but counting against the global max_reqs quota. 113 * 73 * 114 * The real limit is nr_events - 1, wh 74 * The real limit is nr_events - 1, which will be larger (see 115 * aio_setup_ring()) 75 * aio_setup_ring()) 116 */ 76 */ 117 unsigned max_reqs; 77 unsigned max_reqs; 118 78 119 /* Size of ringbuffer, in units of str 79 /* Size of ringbuffer, in units of struct io_event */ 120 unsigned nr_events; 80 unsigned nr_events; 121 81 122 unsigned long mmap_base; 82 unsigned long mmap_base; 123 unsigned long mmap_size; 83 unsigned long mmap_size; 124 84 125 struct folio **ring_folios; !! 85 struct page **ring_pages; 126 long nr_pages; 86 long nr_pages; 127 87 128 struct rcu_work free_rwork; !! 88 struct rcu_head rcu_head; 129 !! 89 struct work_struct rcu_work; 130 /* << 131 * signals when all in-flight requests << 132 */ << 133 struct ctx_rq_wait *rq_wait; << 134 90 135 struct { 91 struct { 136 /* !! 92 atomic_t reqs_active; 137 * This counts the number of a << 138 * so we avoid overflowing it: << 139 * when allocating a kiocb and << 140 * io_event is pulled off the << 141 * << 142 * We batch accesses to it wit << 143 */ << 144 atomic_t reqs_available << 145 } ____cacheline_aligned_in_smp; 93 } ____cacheline_aligned_in_smp; 146 94 147 struct { 95 struct { 148 spinlock_t ctx_lock; 96 spinlock_t ctx_lock; 149 struct list_head active_reqs; 97 struct list_head active_reqs; /* used for cancellation */ 150 } ____cacheline_aligned_in_smp; 98 } ____cacheline_aligned_in_smp; 151 99 152 struct { 100 struct { 153 struct mutex ring_lock; 101 struct mutex ring_lock; 154 wait_queue_head_t wait; 102 wait_queue_head_t wait; 155 } ____cacheline_aligned_in_smp; 103 } ____cacheline_aligned_in_smp; 156 104 157 struct { 105 struct { 158 unsigned tail; 106 unsigned tail; 159 unsigned completed_even << 160 spinlock_t completion_loc 107 spinlock_t completion_lock; 161 } ____cacheline_aligned_in_smp; 108 } ____cacheline_aligned_in_smp; 162 109 163 struct folio *internal_foli !! 110 struct page *internal_pages[AIO_RING_PAGES]; 164 struct file *aio_ring_file << 165 << 166 unsigned id; << 167 }; << 168 << 169 /* << 170 * First field must be the file pointer in all << 171 * iocb unions! 
See also 'struct kiocb' in <li << 172 */ << 173 struct fsync_iocb { << 174 struct file *file; << 175 struct work_struct work; << 176 bool datasync; << 177 struct cred *creds; << 178 }; << 179 << 180 struct poll_iocb { << 181 struct file *file; << 182 struct wait_queue_head *head; << 183 __poll_t events; << 184 bool cancelled; << 185 bool work_scheduled << 186 bool work_need_resc << 187 struct wait_queue_entry wait; << 188 struct work_struct work; << 189 }; << 190 << 191 /* << 192 * NOTE! Each of the iocb union members has th << 193 * as the first entry in their struct definiti << 194 * access the file pointer through any of the << 195 * or directly as just 'ki_filp' in this struc << 196 */ << 197 struct aio_kiocb { << 198 union { << 199 struct file *ki_fi << 200 struct kiocb rw; << 201 struct fsync_iocb fsync; << 202 struct poll_iocb poll; << 203 }; << 204 << 205 struct kioctx *ki_ctx; << 206 kiocb_cancel_fn *ki_cancel; << 207 << 208 struct io_event ki_res; << 209 << 210 struct list_head ki_list; << 211 << 212 refcount_t ki_refcnt; << 213 << 214 /* << 215 * If the aio_resfd field of the users << 216 * this is the underlying eventfd cont << 217 */ << 218 struct eventfd_ctx *ki_eventfd; << 219 }; 111 }; 220 112 221 /*------ sysctl variables----*/ 113 /*------ sysctl variables----*/ 222 static DEFINE_SPINLOCK(aio_nr_lock); 114 static DEFINE_SPINLOCK(aio_nr_lock); 223 static unsigned long aio_nr; /* cur !! 115 unsigned long aio_nr; /* current system wide number of aio requests */ 224 static unsigned long aio_max_nr = 0x10000; /* !! 116 unsigned long aio_max_nr = 0x10000; /* system wide maximum number of aio requests */ 225 /*----end sysctl variables---*/ 117 /*----end sysctl variables---*/ 226 #ifdef CONFIG_SYSCTL << 227 static struct ctl_table aio_sysctls[] = { << 228 { << 229 .procname = "aio-nr", << 230 .data = &aio_nr, << 231 .maxlen = sizeof(aio_n << 232 .mode = 0444, << 233 .proc_handler = proc_doulong << 234 }, << 235 { << 236 .procname = "aio-max-nr" << 237 .data = &aio_max_nr, << 238 .maxlen = sizeof(aio_m << 239 .mode = 0644, << 240 .proc_handler = proc_doulong << 241 }, << 242 }; << 243 << 244 static void __init aio_sysctl_init(void) << 245 { << 246 register_sysctl_init("fs", aio_sysctls << 247 } << 248 #else << 249 #define aio_sysctl_init() do { } while (0) << 250 #endif << 251 118 252 static struct kmem_cache *kiocb_cachep; 119 static struct kmem_cache *kiocb_cachep; 253 static struct kmem_cache *kioctx_cachep 120 static struct kmem_cache *kioctx_cachep; 254 121 255 static struct vfsmount *aio_mnt; << 256 << 257 static const struct file_operations aio_ring_f << 258 static const struct address_space_operations a << 259 << 260 static struct file *aio_private_file(struct ki << 261 { << 262 struct file *file; << 263 struct inode *inode = alloc_anon_inode << 264 if (IS_ERR(inode)) << 265 return ERR_CAST(inode); << 266 << 267 inode->i_mapping->a_ops = &aio_ctx_aop << 268 inode->i_mapping->i_private_data = ctx << 269 inode->i_size = PAGE_SIZE * nr_pages; << 270 << 271 file = alloc_file_pseudo(inode, aio_mn << 272 O_RDWR, &aio_r << 273 if (IS_ERR(file)) << 274 iput(inode); << 275 return file; << 276 } << 277 << 278 static int aio_init_fs_context(struct fs_conte << 279 { << 280 if (!init_pseudo(fc, AIO_RING_MAGIC)) << 281 return -ENOMEM; << 282 fc->s_iflags |= SB_I_NOEXEC; << 283 return 0; << 284 } << 285 << 286 /* aio_setup 122 /* aio_setup 287 * Creates the slab caches used by the ai 123 * Creates the slab caches used by the aio routines, panic on 288 * failure as this is done early 
during t 124 * failure as this is done early during the boot sequence. 289 */ 125 */ 290 static int __init aio_setup(void) 126 static int __init aio_setup(void) 291 { 127 { 292 static struct file_system_type aio_fs !! 128 kiocb_cachep = KMEM_CACHE(kiocb, SLAB_HWCACHE_ALIGN|SLAB_PANIC); 293 .name = "aio", << 294 .init_fs_context = aio_init_fs << 295 .kill_sb = kill_anon_su << 296 }; << 297 aio_mnt = kern_mount(&aio_fs); << 298 if (IS_ERR(aio_mnt)) << 299 panic("Failed to create aio fs << 300 << 301 kiocb_cachep = KMEM_CACHE(aio_kiocb, S << 302 kioctx_cachep = KMEM_CACHE(kioctx,SLAB 129 kioctx_cachep = KMEM_CACHE(kioctx,SLAB_HWCACHE_ALIGN|SLAB_PANIC); 303 aio_sysctl_init(); << 304 return 0; << 305 } << 306 __initcall(aio_setup); << 307 << 308 static void put_aio_ring_file(struct kioctx *c << 309 { << 310 struct file *aio_ring_file = ctx->aio_ << 311 struct address_space *i_mapping; << 312 130 313 if (aio_ring_file) { !! 131 pr_debug("sizeof(struct page) = %zu\n", sizeof(struct page)); 314 truncate_setsize(file_inode(ai << 315 132 316 /* Prevent further access to t !! 133 return 0; 317 i_mapping = aio_ring_file->f_m << 318 spin_lock(&i_mapping->i_privat << 319 i_mapping->i_private_data = NU << 320 ctx->aio_ring_file = NULL; << 321 spin_unlock(&i_mapping->i_priv << 322 << 323 fput(aio_ring_file); << 324 } << 325 } 134 } >> 135 __initcall(aio_setup); 326 136 327 static void aio_free_ring(struct kioctx *ctx) 137 static void aio_free_ring(struct kioctx *ctx) 328 { 138 { 329 int i; !! 139 long i; 330 << 331 /* Disconnect the kiotx from the ring << 332 * accesses to the kioctx from page mi << 333 */ << 334 put_aio_ring_file(ctx); << 335 140 336 for (i = 0; i < ctx->nr_pages; i++) { !! 141 for (i = 0; i < ctx->nr_pages; i++) 337 struct folio *folio = ctx->rin !! 142 put_page(ctx->ring_pages[i]); 338 143 339 if (!folio) !! 144 if (ctx->ring_pages && ctx->ring_pages != ctx->internal_pages) 340 continue; !! 145 kfree(ctx->ring_pages); 341 << 342 pr_debug("pid(%d) [%d] folio-> << 343 folio_ref_count(folio << 344 ctx->ring_folios[i] = NULL; << 345 folio_put(folio); << 346 } << 347 << 348 if (ctx->ring_folios && ctx->ring_foli << 349 kfree(ctx->ring_folios); << 350 ctx->ring_folios = NULL; << 351 } << 352 } 146 } 353 147 354 static int aio_ring_mremap(struct vm_area_stru !! 
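/*
 * aio_ring_mremap() keeps ctx->user_id pointing at the ring if userspace
 * moves the mapping with mremap(); lookup_ioctx() matches a context by
 * that address, so the io_* syscalls keep working afterwards.
 */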
148 static int aio_setup_ring(struct kioctx *ctx) 355 { << 356 struct file *file = vma->vm_file; << 357 struct mm_struct *mm = vma->vm_mm; << 358 struct kioctx_table *table; << 359 int i, res = -EINVAL; << 360 << 361 spin_lock(&mm->ioctx_lock); << 362 rcu_read_lock(); << 363 table = rcu_dereference(mm->ioctx_tabl << 364 if (!table) << 365 goto out_unlock; << 366 << 367 for (i = 0; i < table->nr; i++) { << 368 struct kioctx *ctx; << 369 << 370 ctx = rcu_dereference(table->t << 371 if (ctx && ctx->aio_ring_file << 372 if (!atomic_read(&ctx- << 373 ctx->user_id = << 374 res = 0; << 375 } << 376 break; << 377 } << 378 } << 379 << 380 out_unlock: << 381 rcu_read_unlock(); << 382 spin_unlock(&mm->ioctx_lock); << 383 return res; << 384 } << 385 << 386 static const struct vm_operations_struct aio_r << 387 .mremap = aio_ring_mremap, << 388 #if IS_ENABLED(CONFIG_MMU) << 389 .fault = filemap_fault, << 390 .map_pages = filemap_map_pages, << 391 .page_mkwrite = filemap_page_mkwrite << 392 #endif << 393 }; << 394 << 395 static int aio_ring_mmap(struct file *file, st << 396 { << 397 vm_flags_set(vma, VM_DONTEXPAND); << 398 vma->vm_ops = &aio_ring_vm_ops; << 399 return 0; << 400 } << 401 << 402 static const struct file_operations aio_ring_f << 403 .mmap = aio_ring_mmap, << 404 }; << 405 << 406 #if IS_ENABLED(CONFIG_MIGRATION) << 407 static int aio_migrate_folio(struct address_sp << 408 struct folio *src, enu << 409 { << 410 struct kioctx *ctx; << 411 unsigned long flags; << 412 pgoff_t idx; << 413 int rc = 0; << 414 << 415 /* mapping->i_private_lock here protec << 416 spin_lock(&mapping->i_private_lock); << 417 ctx = mapping->i_private_data; << 418 if (!ctx) { << 419 rc = -EINVAL; << 420 goto out; << 421 } << 422 << 423 /* The ring_lock mutex. The prevents << 424 * to the ring's head, and prevents pa << 425 * a partially initialized kiotx. << 426 */ << 427 if (!mutex_trylock(&ctx->ring_lock)) { << 428 rc = -EAGAIN; << 429 goto out; << 430 } << 431 << 432 idx = src->index; << 433 if (idx < (pgoff_t)ctx->nr_pages) { << 434 /* Make sure the old folio has << 435 if (ctx->ring_folios[idx] != s << 436 rc = -EAGAIN; << 437 } else << 438 rc = -EINVAL; << 439 << 440 if (rc != 0) << 441 goto out_unlock; << 442 << 443 /* Writeback must be complete */ << 444 BUG_ON(folio_test_writeback(src)); << 445 folio_get(dst); << 446 << 447 rc = folio_migrate_mapping(mapping, ds << 448 if (rc != MIGRATEPAGE_SUCCESS) { << 449 folio_put(dst); << 450 goto out_unlock; << 451 } << 452 << 453 /* Take completion_lock to prevent oth << 454 * while the old folio is copied to th << 455 * events from being lost. 
<< 456 */ << 457 spin_lock_irqsave(&ctx->completion_loc << 458 folio_copy(dst, src); << 459 folio_migrate_flags(dst, src); << 460 BUG_ON(ctx->ring_folios[idx] != src); << 461 ctx->ring_folios[idx] = dst; << 462 spin_unlock_irqrestore(&ctx->completio << 463 << 464 /* The old folio is no longer accessib << 465 folio_put(src); << 466 << 467 out_unlock: << 468 mutex_unlock(&ctx->ring_lock); << 469 out: << 470 spin_unlock(&mapping->i_private_lock); << 471 return rc; << 472 } << 473 #else << 474 #define aio_migrate_folio NULL << 475 #endif << 476 << 477 static const struct address_space_operations a << 478 .dirty_folio = noop_dirty_folio, << 479 .migrate_folio = aio_migrate_folio, << 480 }; << 481 << 482 static int aio_setup_ring(struct kioctx *ctx, << 483 { 149 { 484 struct aio_ring *ring; 150 struct aio_ring *ring; >> 151 unsigned nr_events = ctx->max_reqs; 485 struct mm_struct *mm = current->mm; 152 struct mm_struct *mm = current->mm; 486 unsigned long size, unused; !! 153 unsigned long size, populate; 487 int nr_pages; 154 int nr_pages; 488 int i; << 489 struct file *file; << 490 155 491 /* Compensate for the ring buffer's he 156 /* Compensate for the ring buffer's head/tail overlap entry */ 492 nr_events += 2; /* 1 is required, 2 fo 157 nr_events += 2; /* 1 is required, 2 for good luck */ 493 158 494 size = sizeof(struct aio_ring); 159 size = sizeof(struct aio_ring); 495 size += sizeof(struct io_event) * nr_e 160 size += sizeof(struct io_event) * nr_events; >> 161 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT; 496 162 497 nr_pages = PFN_UP(size); << 498 if (nr_pages < 0) 163 if (nr_pages < 0) 499 return -EINVAL; 164 return -EINVAL; 500 165 501 file = aio_private_file(ctx, nr_pages) !! 166 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event); 502 if (IS_ERR(file)) { << 503 ctx->aio_ring_file = NULL; << 504 return -ENOMEM; << 505 } << 506 167 507 ctx->aio_ring_file = file; !! 168 ctx->nr_events = 0; 508 nr_events = (PAGE_SIZE * nr_pages - si !! 169 ctx->ring_pages = ctx->internal_pages; 509 / sizeof(struct io_eve << 510 << 511 ctx->ring_folios = ctx->internal_folio << 512 if (nr_pages > AIO_RING_PAGES) { 170 if (nr_pages > AIO_RING_PAGES) { 513 ctx->ring_folios = kcalloc(nr_ !! 171 ctx->ring_pages = kcalloc(nr_pages, sizeof(struct page *), 514 GFP !! 172 GFP_KERNEL); 515 if (!ctx->ring_folios) { !! 173 if (!ctx->ring_pages) 516 put_aio_ring_file(ctx) << 517 return -ENOMEM; 174 return -ENOMEM; 518 } << 519 } << 520 << 521 for (i = 0; i < nr_pages; i++) { << 522 struct folio *folio; << 523 << 524 folio = __filemap_get_folio(fi << 525 FG << 526 GF << 527 if (IS_ERR(folio)) << 528 break; << 529 << 530 pr_debug("pid(%d) [%d] folio-> << 531 folio_ref_count(folio << 532 folio_end_read(folio, true); << 533 << 534 ctx->ring_folios[i] = folio; << 535 } << 536 ctx->nr_pages = i; << 537 << 538 if (unlikely(i != nr_pages)) { << 539 aio_free_ring(ctx); << 540 return -ENOMEM; << 541 } 175 } 542 176 543 ctx->mmap_size = nr_pages * PAGE_SIZE; 177 ctx->mmap_size = nr_pages * PAGE_SIZE; 544 pr_debug("attempting mmap of %lu bytes 178 pr_debug("attempting mmap of %lu bytes\n", ctx->mmap_size); 545 !! 179 down_write(&mm->mmap_sem); 546 if (mmap_write_lock_killable(mm)) { !! 180 ctx->mmap_base = do_mmap_pgoff(NULL, 0, ctx->mmap_size, 547 ctx->mmap_size = 0; !! 181 PROT_READ|PROT_WRITE, 548 aio_free_ring(ctx); !! 
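	/*
	 * The ring pages are mapped into the owning task, and the mapping
	 * address becomes the context handle handed back to userspace
	 * (ctx->user_id = ctx->mmap_base below).  nr_events was recomputed
	 * from the pages actually allocated, so the ring can hold more
	 * events than the caller asked io_setup() for.
	 */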
182 MAP_ANONYMOUS|MAP_PRIVATE, 0, &populate); 549 return -EINTR; << 550 } << 551 << 552 ctx->mmap_base = do_mmap(ctx->aio_ring << 553 PROT_READ | P << 554 MAP_SHARED, 0 << 555 mmap_write_unlock(mm); << 556 if (IS_ERR((void *)ctx->mmap_base)) { 183 if (IS_ERR((void *)ctx->mmap_base)) { >> 184 up_write(&mm->mmap_sem); 557 ctx->mmap_size = 0; 185 ctx->mmap_size = 0; 558 aio_free_ring(ctx); 186 aio_free_ring(ctx); 559 return -ENOMEM; !! 187 return -EAGAIN; 560 } 188 } 561 189 562 pr_debug("mmap address: 0x%08lx\n", ct 190 pr_debug("mmap address: 0x%08lx\n", ctx->mmap_base); >> 191 ctx->nr_pages = get_user_pages(current, mm, ctx->mmap_base, nr_pages, >> 192 1, 0, ctx->ring_pages, NULL); >> 193 up_write(&mm->mmap_sem); >> 194 >> 195 if (unlikely(ctx->nr_pages != nr_pages)) { >> 196 aio_free_ring(ctx); >> 197 return -EAGAIN; >> 198 } >> 199 if (populate) >> 200 mm_populate(ctx->mmap_base, populate); 563 201 564 ctx->user_id = ctx->mmap_base; 202 ctx->user_id = ctx->mmap_base; 565 ctx->nr_events = nr_events; /* trusted 203 ctx->nr_events = nr_events; /* trusted copy */ 566 204 567 ring = folio_address(ctx->ring_folios[ !! 205 ring = kmap_atomic(ctx->ring_pages[0]); 568 ring->nr = nr_events; /* user copy * 206 ring->nr = nr_events; /* user copy */ 569 ring->id = ~0U; !! 207 ring->id = ctx->user_id; 570 ring->head = ring->tail = 0; 208 ring->head = ring->tail = 0; 571 ring->magic = AIO_RING_MAGIC; 209 ring->magic = AIO_RING_MAGIC; 572 ring->compat_features = AIO_RING_COMPA 210 ring->compat_features = AIO_RING_COMPAT_FEATURES; 573 ring->incompat_features = AIO_RING_INC 211 ring->incompat_features = AIO_RING_INCOMPAT_FEATURES; 574 ring->header_length = sizeof(struct ai 212 ring->header_length = sizeof(struct aio_ring); 575 flush_dcache_folio(ctx->ring_folios[0] !! 213 kunmap_atomic(ring); >> 214 flush_dcache_page(ctx->ring_pages[0]); 576 215 577 return 0; 216 return 0; 578 } 217 } 579 218 580 #define AIO_EVENTS_PER_PAGE (PAGE_SIZE / s 219 #define AIO_EVENTS_PER_PAGE (PAGE_SIZE / sizeof(struct io_event)) 581 #define AIO_EVENTS_FIRST_PAGE ((PAGE_SIZE - 220 #define AIO_EVENTS_FIRST_PAGE ((PAGE_SIZE - sizeof(struct aio_ring)) / sizeof(struct io_event)) 582 #define AIO_EVENTS_OFFSET (AIO_EVENTS_PE 221 #define AIO_EVENTS_OFFSET (AIO_EVENTS_PER_PAGE - AIO_EVENTS_FIRST_PAGE) 583 222 584 void kiocb_set_cancel_fn(struct kiocb *iocb, k !! 223 void kiocb_set_cancel_fn(struct kiocb *req, kiocb_cancel_fn *cancel) 585 { 224 { 586 struct aio_kiocb *req; !! 225 struct kioctx *ctx = req->ki_ctx; 587 struct kioctx *ctx; << 588 unsigned long flags; 226 unsigned long flags; 589 227 590 /* !! 228 spin_lock_irqsave(&ctx->ctx_lock, flags); 591 * kiocb didn't come from aio or is ne << 592 * ignore it. << 593 */ << 594 if (!(iocb->ki_flags & IOCB_AIO_RW)) << 595 return; << 596 << 597 req = container_of(iocb, struct aio_ki << 598 << 599 if (WARN_ON_ONCE(!list_empty(&req->ki_ << 600 return; << 601 229 602 ctx = req->ki_ctx; !! 230 if (!req->ki_list.next) >> 231 list_add(&req->ki_list, &ctx->active_reqs); 603 232 604 spin_lock_irqsave(&ctx->ctx_lock, flag << 605 list_add_tail(&req->ki_list, &ctx->act << 606 req->ki_cancel = cancel; 233 req->ki_cancel = cancel; >> 234 607 spin_unlock_irqrestore(&ctx->ctx_lock, 235 spin_unlock_irqrestore(&ctx->ctx_lock, flags); 608 } 236 } 609 EXPORT_SYMBOL(kiocb_set_cancel_fn); 237 EXPORT_SYMBOL(kiocb_set_cancel_fn); 610 238 611 /* !! 239 static int kiocb_cancel(struct kioctx *ctx, struct kiocb *kiocb, 612 * free_ioctx() should be RCU delayed to synch !! 
240 struct io_event *res) 613 * protected lookup_ioctx() and also needs pro << 614 * aio_free_ring(). Use rcu_work. << 615 */ << 616 static void free_ioctx(struct work_struct *wor << 617 { 241 { 618 struct kioctx *ctx = container_of(to_r !! 242 kiocb_cancel_fn *old, *cancel; 619 free !! 243 int ret = -EINVAL; 620 pr_debug("freeing %p\n", ctx); << 621 244 622 aio_free_ring(ctx); !! 245 /* 623 free_percpu(ctx->cpu); !! 246 * Don't want to set kiocb->ki_cancel = KIOCB_CANCELLED unless it 624 percpu_ref_exit(&ctx->reqs); !! 247 * actually has a cancel function, hence the cmpxchg() 625 percpu_ref_exit(&ctx->users); !! 248 */ 626 kmem_cache_free(kioctx_cachep, ctx); << 627 } << 628 249 629 static void free_ioctx_reqs(struct percpu_ref !! 250 cancel = ACCESS_ONCE(kiocb->ki_cancel); 630 { !! 251 do { 631 struct kioctx *ctx = container_of(ref, !! 252 if (!cancel || cancel == KIOCB_CANCELLED) >> 253 return ret; >> 254 >> 255 old = cancel; >> 256 cancel = cmpxchg(&kiocb->ki_cancel, old, KIOCB_CANCELLED); >> 257 } while (cancel != old); >> 258 >> 259 atomic_inc(&kiocb->ki_users); >> 260 spin_unlock_irq(&ctx->ctx_lock); >> 261 >> 262 memset(res, 0, sizeof(*res)); >> 263 res->obj = (u64)(unsigned long)kiocb->ki_obj.user; >> 264 res->data = kiocb->ki_user_data; >> 265 ret = cancel(kiocb, res); >> 266 >> 267 spin_lock_irq(&ctx->ctx_lock); 632 268 633 /* At this point we know that there ar !! 269 return ret; 634 if (ctx->rq_wait && atomic_dec_and_tes !! 270 } 635 complete(&ctx->rq_wait->comp); << 636 271 637 /* Synchronize against RCU protected t !! 272 static void free_ioctx_rcu(struct rcu_head *head) 638 INIT_RCU_WORK(&ctx->free_rwork, free_i !! 273 { 639 queue_rcu_work(system_wq, &ctx->free_r !! 274 struct kioctx *ctx = container_of(head, struct kioctx, rcu_head); >> 275 kmem_cache_free(kioctx_cachep, ctx); 640 } 276 } 641 277 642 /* 278 /* 643 * When this function runs, the kioctx has bee 279 * When this function runs, the kioctx has been removed from the "hash table" 644 * and ctx->users has dropped to 0, so we know 280 * and ctx->users has dropped to 0, so we know no more kiocbs can be submitted - 645 * now it's safe to cancel any that need to be 281 * now it's safe to cancel any that need to be. 646 */ 282 */ 647 static void free_ioctx_users(struct percpu_ref !! 283 static void free_ioctx(struct kioctx *ctx) 648 { 284 { 649 struct kioctx *ctx = container_of(ref, !! 285 struct aio_ring *ring; 650 struct aio_kiocb *req; !! 286 struct io_event res; >> 287 struct kiocb *req; >> 288 unsigned head, avail; 651 289 652 spin_lock_irq(&ctx->ctx_lock); 290 spin_lock_irq(&ctx->ctx_lock); 653 291 654 while (!list_empty(&ctx->active_reqs)) 292 while (!list_empty(&ctx->active_reqs)) { 655 req = list_first_entry(&ctx->a 293 req = list_first_entry(&ctx->active_reqs, 656 struct !! 294 struct kiocb, ki_list); 657 req->ki_cancel(&req->rw); !! 295 658 list_del_init(&req->ki_list); 296 list_del_init(&req->ki_list); >> 297 kiocb_cancel(ctx, req, &res); 659 } 298 } 660 299 661 spin_unlock_irq(&ctx->ctx_lock); 300 spin_unlock_irq(&ctx->ctx_lock); 662 301 663 percpu_ref_kill(&ctx->reqs); !! 302 ring = kmap_atomic(ctx->ring_pages[0]); 664 percpu_ref_put(&ctx->reqs); !! 303 head = ring->head; 665 } !! 304 kunmap_atomic(ring); 666 << 667 static int ioctx_add_table(struct kioctx *ctx, << 668 { << 669 unsigned i, new_nr; << 670 struct kioctx_table *table, *old; << 671 struct aio_ring *ring; << 672 305 673 spin_lock(&mm->ioctx_lock); !! 306 while (atomic_read(&ctx->reqs_active) > 0) { 674 table = rcu_dereference_raw(mm->ioctx_ !! 
307 wait_event(ctx->wait, >> 308 head != ctx->tail || >> 309 atomic_read(&ctx->reqs_active) <= 0); 675 310 676 while (1) { !! 311 avail = (head <= ctx->tail ? ctx->tail : ctx->nr_events) - head; 677 if (table) << 678 for (i = 0; i < table- << 679 if (!rcu_acces << 680 ctx->i << 681 rcu_as << 682 spin_u << 683 << 684 /* Whi << 685 * we << 686 * cha << 687 */ << 688 ring = << 689 ring-> << 690 return << 691 } << 692 312 693 new_nr = (table ? table->nr : !! 313 head += avail; 694 spin_unlock(&mm->ioctx_lock); !! 314 head %= ctx->nr_events; >> 315 } 695 316 696 table = kzalloc(struct_size(ta !! 317 WARN_ON(atomic_read(&ctx->reqs_active) < 0); 697 if (!table) << 698 return -ENOMEM; << 699 318 700 table->nr = new_nr; !! 319 aio_free_ring(ctx); 701 320 702 spin_lock(&mm->ioctx_lock); !! 321 pr_debug("freeing %p\n", ctx); 703 old = rcu_dereference_raw(mm-> << 704 322 705 if (!old) { !! 323 /* 706 rcu_assign_pointer(mm- !! 324 * Here the call_rcu() is between the wait_event() for reqs_active to 707 } else if (table->nr > old->nr !! 325 * hit 0, and freeing the ioctx. 708 memcpy(table->table, o !! 326 * 709 old->nr * sizeo !! 327 * aio_complete() decrements reqs_active, but it has to touch the ioctx 710 !! 328 * after to issue a wakeup so we use rcu. 711 rcu_assign_pointer(mm- !! 329 */ 712 kfree_rcu(old, rcu); !! 330 call_rcu(&ctx->rcu_head, free_ioctx_rcu); 713 } else { << 714 kfree(table); << 715 table = old; << 716 } << 717 } << 718 } 331 } 719 332 720 static void aio_nr_sub(unsigned nr) !! 333 static void put_ioctx(struct kioctx *ctx) 721 { 334 { 722 spin_lock(&aio_nr_lock); !! 335 if (unlikely(atomic_dec_and_test(&ctx->users))) 723 if (WARN_ON(aio_nr - nr > aio_nr)) !! 336 free_ioctx(ctx); 724 aio_nr = 0; << 725 else << 726 aio_nr -= nr; << 727 spin_unlock(&aio_nr_lock); << 728 } 337 } 729 338 730 /* ioctx_alloc 339 /* ioctx_alloc 731 * Allocates and initializes an ioctx. R 340 * Allocates and initializes an ioctx. Returns an ERR_PTR if it failed. 732 */ 341 */ 733 static struct kioctx *ioctx_alloc(unsigned nr_ 342 static struct kioctx *ioctx_alloc(unsigned nr_events) 734 { 343 { 735 struct mm_struct *mm = current->mm; 344 struct mm_struct *mm = current->mm; 736 struct kioctx *ctx; 345 struct kioctx *ctx; 737 int err = -ENOMEM; 346 int err = -ENOMEM; 738 347 739 /* << 740 * Store the original nr_events -- wha << 741 * for counting against the global lim << 742 */ << 743 unsigned int max_reqs = nr_events; << 744 << 745 /* << 746 * We keep track of the number of avai << 747 * overflow (reqs_available), and we a << 748 * << 749 * So since up to half the slots might << 750 * and unavailable, double nr_events s << 751 * expected: additionally, we move req << 752 * counters at a time, so make sure th << 753 */ << 754 nr_events = max(nr_events, num_possibl << 755 nr_events *= 2; << 756 << 757 /* Prevent overflows */ 348 /* Prevent overflows */ 758 if (nr_events > (0x10000000U / sizeof( !! 349 if ((nr_events > (0x10000000U / sizeof(struct io_event))) || >> 350 (nr_events > (0x10000000U / sizeof(struct kiocb)))) { 759 pr_debug("ENOMEM: nr_events to 351 pr_debug("ENOMEM: nr_events too high\n"); 760 return ERR_PTR(-EINVAL); 352 return ERR_PTR(-EINVAL); 761 } 353 } 762 354 763 if (!nr_events || (unsigned long)max_r !! 
355 if (!nr_events || (unsigned long)nr_events > aio_max_nr) 764 return ERR_PTR(-EAGAIN); 356 return ERR_PTR(-EAGAIN); 765 357 766 ctx = kmem_cache_zalloc(kioctx_cachep, 358 ctx = kmem_cache_zalloc(kioctx_cachep, GFP_KERNEL); 767 if (!ctx) 359 if (!ctx) 768 return ERR_PTR(-ENOMEM); 360 return ERR_PTR(-ENOMEM); 769 361 770 ctx->max_reqs = max_reqs; !! 362 ctx->max_reqs = nr_events; 771 363 >> 364 atomic_set(&ctx->users, 2); >> 365 atomic_set(&ctx->dead, 0); 772 spin_lock_init(&ctx->ctx_lock); 366 spin_lock_init(&ctx->ctx_lock); 773 spin_lock_init(&ctx->completion_lock); 367 spin_lock_init(&ctx->completion_lock); 774 mutex_init(&ctx->ring_lock); 368 mutex_init(&ctx->ring_lock); 775 /* Protect against page migration thro << 776 * the ring_lock mutex held until setu << 777 mutex_lock(&ctx->ring_lock); << 778 init_waitqueue_head(&ctx->wait); 369 init_waitqueue_head(&ctx->wait); 779 370 780 INIT_LIST_HEAD(&ctx->active_reqs); 371 INIT_LIST_HEAD(&ctx->active_reqs); 781 372 782 if (percpu_ref_init(&ctx->users, free_ !! 373 if (aio_setup_ring(ctx) < 0) 783 goto err; !! 374 goto out_freectx; 784 << 785 if (percpu_ref_init(&ctx->reqs, free_i << 786 goto err; << 787 << 788 ctx->cpu = alloc_percpu(struct kioctx_ << 789 if (!ctx->cpu) << 790 goto err; << 791 << 792 err = aio_setup_ring(ctx, nr_events); << 793 if (err < 0) << 794 goto err; << 795 << 796 atomic_set(&ctx->reqs_available, ctx-> << 797 ctx->req_batch = (ctx->nr_events - 1) << 798 if (ctx->req_batch < 1) << 799 ctx->req_batch = 1; << 800 375 801 /* limit the number of system wide aio 376 /* limit the number of system wide aios */ 802 spin_lock(&aio_nr_lock); 377 spin_lock(&aio_nr_lock); 803 if (aio_nr + ctx->max_reqs > aio_max_n !! 378 if (aio_nr + nr_events > aio_max_nr || 804 aio_nr + ctx->max_reqs < aio_nr) { !! 379 aio_nr + nr_events < aio_nr) { 805 spin_unlock(&aio_nr_lock); 380 spin_unlock(&aio_nr_lock); 806 err = -EAGAIN; !! 381 goto out_cleanup; 807 goto err_ctx; << 808 } 382 } 809 aio_nr += ctx->max_reqs; 383 aio_nr += ctx->max_reqs; 810 spin_unlock(&aio_nr_lock); 384 spin_unlock(&aio_nr_lock); 811 385 812 percpu_ref_get(&ctx->users); /* io_ !! 386 /* now link into global list. */ 813 percpu_ref_get(&ctx->reqs); /* fre !! 387 spin_lock(&mm->ioctx_lock); 814 !! 388 hlist_add_head_rcu(&ctx->list, &mm->ioctx_list); 815 err = ioctx_add_table(ctx, mm); !! 389 spin_unlock(&mm->ioctx_lock); 816 if (err) << 817 goto err_cleanup; << 818 << 819 /* Release the ring_lock mutex now tha << 820 mutex_unlock(&ctx->ring_lock); << 821 390 822 pr_debug("allocated ioctx %p[%ld]: mm= 391 pr_debug("allocated ioctx %p[%ld]: mm=%p mask=0x%x\n", 823 ctx, ctx->user_id, mm, ctx->n 392 ctx, ctx->user_id, mm, ctx->nr_events); 824 return ctx; 393 return ctx; 825 394 826 err_cleanup: !! 395 out_cleanup: 827 aio_nr_sub(ctx->max_reqs); !! 396 err = -EAGAIN; 828 err_ctx: << 829 atomic_set(&ctx->dead, 1); << 830 if (ctx->mmap_size) << 831 vm_munmap(ctx->mmap_base, ctx- << 832 aio_free_ring(ctx); 397 aio_free_ring(ctx); 833 err: !! 
398 out_freectx: 834 mutex_unlock(&ctx->ring_lock); << 835 free_percpu(ctx->cpu); << 836 percpu_ref_exit(&ctx->reqs); << 837 percpu_ref_exit(&ctx->users); << 838 kmem_cache_free(kioctx_cachep, ctx); 399 kmem_cache_free(kioctx_cachep, ctx); 839 pr_debug("error allocating ioctx %d\n" 400 pr_debug("error allocating ioctx %d\n", err); 840 return ERR_PTR(err); 401 return ERR_PTR(err); 841 } 402 } 842 403 >> 404 static void kill_ioctx_work(struct work_struct *work) >> 405 { >> 406 struct kioctx *ctx = container_of(work, struct kioctx, rcu_work); >> 407 >> 408 wake_up_all(&ctx->wait); >> 409 put_ioctx(ctx); >> 410 } >> 411 >> 412 static void kill_ioctx_rcu(struct rcu_head *head) >> 413 { >> 414 struct kioctx *ctx = container_of(head, struct kioctx, rcu_head); >> 415 >> 416 INIT_WORK(&ctx->rcu_work, kill_ioctx_work); >> 417 schedule_work(&ctx->rcu_work); >> 418 } >> 419 843 /* kill_ioctx 420 /* kill_ioctx 844 * Cancels all outstanding aio requests o 421 * Cancels all outstanding aio requests on an aio context. Used 845 * when the processes owning a context ha 422 * when the processes owning a context have all exited to encourage 846 * the rapid destruction of the kioctx. 423 * the rapid destruction of the kioctx. 847 */ 424 */ 848 static int kill_ioctx(struct mm_struct *mm, st !! 425 static void kill_ioctx(struct mm_struct *mm, struct kioctx *ctx) 849 struct ctx_rq_wait *wait << 850 { 426 { 851 struct kioctx_table *table; !! 427 if (!atomic_xchg(&ctx->dead, 1)) { 852 !! 428 spin_lock(&mm->ioctx_lock); 853 spin_lock(&mm->ioctx_lock); !! 429 hlist_del_rcu(&ctx->list); 854 if (atomic_xchg(&ctx->dead, 1)) { << 855 spin_unlock(&mm->ioctx_lock); 430 spin_unlock(&mm->ioctx_lock); 856 return -EINVAL; << 857 } << 858 431 859 table = rcu_dereference_raw(mm->ioctx_ !! 432 /* 860 WARN_ON(ctx != rcu_access_pointer(tabl !! 433 * It'd be more correct to do this in free_ioctx(), after all 861 RCU_INIT_POINTER(table->table[ctx->id] !! 434 * the outstanding kiocbs have finished - but by then io_destroy 862 spin_unlock(&mm->ioctx_lock); !! 435 * has already returned, so io_setup() could potentially return 863 !! 436 * -EAGAIN with no ioctxs actually in use (as far as userspace 864 /* free_ioctx_reqs() will do the neces !! 437 * could tell). 865 wake_up_all(&ctx->wait); !! 438 */ >> 439 spin_lock(&aio_nr_lock); >> 440 BUG_ON(aio_nr - ctx->max_reqs > aio_nr); >> 441 aio_nr -= ctx->max_reqs; >> 442 spin_unlock(&aio_nr_lock); 866 443 867 /* !! 444 if (ctx->mmap_size) 868 * It'd be more correct to do this in !! 445 vm_munmap(ctx->mmap_base, ctx->mmap_size); 869 * the outstanding kiocbs have finishe << 870 * has already returned, so io_setup() << 871 * -EAGAIN with no ioctxs actually in << 872 * could tell). << 873 */ << 874 aio_nr_sub(ctx->max_reqs); << 875 446 876 if (ctx->mmap_size) !! 447 /* Between hlist_del_rcu() and dropping the initial ref */ 877 vm_munmap(ctx->mmap_base, ctx- !! 448 call_rcu(&ctx->rcu_head, kill_ioctx_rcu); >> 449 } >> 450 } 878 451 879 ctx->rq_wait = wait; !! 452 /* wait_on_sync_kiocb: 880 percpu_ref_kill(&ctx->users); !! 453 * Waits on the given sync kiocb to complete. 881 return 0; !! 
454 */ >> 455 ssize_t wait_on_sync_kiocb(struct kiocb *iocb) >> 456 { >> 457 while (atomic_read(&iocb->ki_users)) { >> 458 set_current_state(TASK_UNINTERRUPTIBLE); >> 459 if (!atomic_read(&iocb->ki_users)) >> 460 break; >> 461 io_schedule(); >> 462 } >> 463 __set_current_state(TASK_RUNNING); >> 464 return iocb->ki_user_data; 882 } 465 } >> 466 EXPORT_SYMBOL(wait_on_sync_kiocb); 883 467 884 /* 468 /* 885 * exit_aio: called when the last user of mm g 469 * exit_aio: called when the last user of mm goes away. At this point, there is 886 * no way for any new requests to be submited 470 * no way for any new requests to be submited or any of the io_* syscalls to be 887 * called on the context. 471 * called on the context. 888 * 472 * 889 * There may be outstanding kiocbs, but free_i 473 * There may be outstanding kiocbs, but free_ioctx() will explicitly wait on 890 * them. 474 * them. 891 */ 475 */ 892 void exit_aio(struct mm_struct *mm) 476 void exit_aio(struct mm_struct *mm) 893 { 477 { 894 struct kioctx_table *table = rcu_deref !! 478 struct kioctx *ctx; 895 struct ctx_rq_wait wait; !! 479 struct hlist_node *n; 896 int i, skipped; << 897 << 898 if (!table) << 899 return; << 900 << 901 atomic_set(&wait.count, table->nr); << 902 init_completion(&wait.comp); << 903 << 904 skipped = 0; << 905 for (i = 0; i < table->nr; ++i) { << 906 struct kioctx *ctx = << 907 rcu_dereference_protec << 908 << 909 if (!ctx) { << 910 skipped++; << 911 continue; << 912 } << 913 480 >> 481 hlist_for_each_entry_safe(ctx, n, &mm->ioctx_list, list) { >> 482 if (1 != atomic_read(&ctx->users)) >> 483 printk(KERN_DEBUG >> 484 "exit_aio:ioctx still alive: %d %d %d\n", >> 485 atomic_read(&ctx->users), >> 486 atomic_read(&ctx->dead), >> 487 atomic_read(&ctx->reqs_active)); 914 /* 488 /* 915 * We don't need to bother wit !! 489 * We don't need to bother with munmap() here - 916 * is coming and it'll unmap e !! 490 * exit_mmap(mm) is coming and it'll unmap everything. 917 * this is not necessarily our !! 491 * Since aio_free_ring() uses non-zero ->mmap_size 918 * Since kill_ioctx() uses non !! 492 * as indicator that it needs to unmap the area, 919 * that it needs to unmap the !! 493 * just set it to 0; aio_free_ring() is the only >> 494 * place that uses ->mmap_size, so it's safe. 920 */ 495 */ 921 ctx->mmap_size = 0; 496 ctx->mmap_size = 0; 922 kill_ioctx(mm, ctx, &wait); << 923 } << 924 << 925 if (!atomic_sub_and_test(skipped, &wai << 926 /* Wait until all IO for the c << 927 wait_for_completion(&wait.comp << 928 } << 929 << 930 RCU_INIT_POINTER(mm->ioctx_table, NULL << 931 kfree(table); << 932 } << 933 << 934 static void put_reqs_available(struct kioctx * << 935 { << 936 struct kioctx_cpu *kcpu; << 937 unsigned long flags; << 938 497 939 local_irq_save(flags); !! 498 kill_ioctx(mm, ctx); 940 kcpu = this_cpu_ptr(ctx->cpu); << 941 kcpu->reqs_available += nr; << 942 << 943 while (kcpu->reqs_available >= ctx->re << 944 kcpu->reqs_available -= ctx->r << 945 atomic_add(ctx->req_batch, &ct << 946 } 499 } 947 << 948 local_irq_restore(flags); << 949 } 500 } 950 501 951 static bool __get_reqs_available(struct kioctx !! 502 /* aio_get_req 952 { !! 503 * Allocate a slot for an aio request. Increments the ki_users count 953 struct kioctx_cpu *kcpu; !! 504 * of the kioctx so that the kioctx stays around until all requests are 954 bool ret = false; !! 505 * complete. Returns NULL if no requests are free. 955 unsigned long flags; !! 506 * 956 !! 507 * Returns with kiocb->ki_users set to 2. 
The io submit code path holds 957 local_irq_save(flags); !! 508 * an extra reference while submitting the i/o. 958 kcpu = this_cpu_ptr(ctx->cpu); !! 509 * This prevents races between the aio code path referencing the 959 if (!kcpu->reqs_available) { !! 510 * req (after submitting it) and aio_complete() freeing the req. 960 int avail = atomic_read(&ctx-> << 961 << 962 do { << 963 if (avail < ctx->req_b << 964 goto out; << 965 } while (!atomic_try_cmpxchg(& << 966 & << 967 << 968 kcpu->reqs_available += ctx->r << 969 } << 970 << 971 ret = true; << 972 kcpu->reqs_available--; << 973 out: << 974 local_irq_restore(flags); << 975 return ret; << 976 } << 977 << 978 /* refill_reqs_available << 979 * Updates the reqs_available reference c << 980 * number of free slots in the completion << 981 * from aio_complete() (to optimistically << 982 * from aio_get_req() (the we're out of e << 983 * called holding ctx->completion_lock. << 984 */ 511 */ 985 static void refill_reqs_available(struct kioct !! 512 static inline struct kiocb *aio_get_req(struct kioctx *ctx) 986 unsigned tai << 987 { 513 { 988 unsigned events_in_ring, completed; !! 514 struct kiocb *req; 989 << 990 /* Clamp head since userland can write << 991 head %= ctx->nr_events; << 992 if (head <= tail) << 993 events_in_ring = tail - head; << 994 else << 995 events_in_ring = ctx->nr_event << 996 515 997 completed = ctx->completed_events; !! 516 if (atomic_read(&ctx->reqs_active) >= ctx->nr_events) 998 if (events_in_ring < completed) !! 517 return NULL; 999 completed -= events_in_ring; << 1000 else << 1001 completed = 0; << 1002 << 1003 if (!completed) << 1004 return; << 1005 518 1006 ctx->completed_events -= completed; !! 519 if (atomic_inc_return(&ctx->reqs_active) > ctx->nr_events - 1) 1007 put_reqs_available(ctx, completed); !! 520 goto out_put; 1008 } << 1009 521 1010 /* user_refill_reqs_available !! 522 req = kmem_cache_alloc(kiocb_cachep, GFP_KERNEL|__GFP_ZERO); 1011 * Called to refill reqs_available when !! 523 if (unlikely(!req)) 1012 * out of space in the completion ring. !! 524 goto out_put; 1013 */ << 1014 static void user_refill_reqs_available(struct << 1015 { << 1016 spin_lock_irq(&ctx->completion_lock); << 1017 if (ctx->completed_events) { << 1018 struct aio_ring *ring; << 1019 unsigned head; << 1020 << 1021 /* Access of ring->head may r << 1022 * here, but that's okay sinc << 1023 * or the new version, and ei << 1024 * part is that head cannot p << 1025 * aio_complete() from updati << 1026 * ctx->completion_lock. Eve << 1027 * against ctx->completed_eve << 1028 * safe/right thing. << 1029 */ << 1030 ring = folio_address(ctx->rin << 1031 head = ring->head; << 1032 525 1033 refill_reqs_available(ctx, he !! 526 atomic_set(&req->ki_users, 2); 1034 } !! 527 req->ki_ctx = ctx; 1035 528 1036 spin_unlock_irq(&ctx->completion_lock !! 529 return req; >> 530 out_put: >> 531 atomic_dec(&ctx->reqs_active); >> 532 return NULL; 1037 } 533 } 1038 534 1039 static bool get_reqs_available(struct kioctx !! 535 static void kiocb_free(struct kiocb *req) 1040 { 536 { 1041 if (__get_reqs_available(ctx)) !! 537 if (req->ki_filp) 1042 return true; !! 538 fput(req->ki_filp); 1043 user_refill_reqs_available(ctx); !! 539 if (req->ki_eventfd != NULL) 1044 return __get_reqs_available(ctx); !! 540 eventfd_ctx_put(req->ki_eventfd); >> 541 if (req->ki_dtor) >> 542 req->ki_dtor(req); >> 543 if (req->ki_iovec != &req->ki_inline_vec) >> 544 kfree(req->ki_iovec); >> 545 kmem_cache_free(kiocb_cachep, req); 1045 } 546 } 1046 547 1047 /* aio_get_req !! 
548 void aio_put_req(struct kiocb *req) 1048 * Allocate a slot for an aio request. << 1049 * Returns NULL if no requests are free. << 1050 * << 1051 * The refcount is initialized to 2 - one for << 1052 * one for the synchronous code that does thi << 1053 */ << 1054 static inline struct aio_kiocb *aio_get_req(s << 1055 { 549 { 1056 struct aio_kiocb *req; !! 550 if (atomic_dec_and_test(&req->ki_users)) 1057 !! 551 kiocb_free(req); 1058 req = kmem_cache_alloc(kiocb_cachep, << 1059 if (unlikely(!req)) << 1060 return NULL; << 1061 << 1062 if (unlikely(!get_reqs_available(ctx) << 1063 kmem_cache_free(kiocb_cachep, << 1064 return NULL; << 1065 } << 1066 << 1067 percpu_ref_get(&ctx->reqs); << 1068 req->ki_ctx = ctx; << 1069 INIT_LIST_HEAD(&req->ki_list); << 1070 refcount_set(&req->ki_refcnt, 2); << 1071 req->ki_eventfd = NULL; << 1072 return req; << 1073 } 552 } >> 553 EXPORT_SYMBOL(aio_put_req); 1074 554 1075 static struct kioctx *lookup_ioctx(unsigned l 555 static struct kioctx *lookup_ioctx(unsigned long ctx_id) 1076 { 556 { 1077 struct aio_ring __user *ring = (void << 1078 struct mm_struct *mm = current->mm; 557 struct mm_struct *mm = current->mm; 1079 struct kioctx *ctx, *ret = NULL; 558 struct kioctx *ctx, *ret = NULL; 1080 struct kioctx_table *table; << 1081 unsigned id; << 1082 << 1083 if (get_user(id, &ring->id)) << 1084 return NULL; << 1085 559 1086 rcu_read_lock(); 560 rcu_read_lock(); 1087 table = rcu_dereference(mm->ioctx_tab << 1088 << 1089 if (!table || id >= table->nr) << 1090 goto out; << 1091 561 1092 id = array_index_nospec(id, table->nr !! 562 hlist_for_each_entry_rcu(ctx, &mm->ioctx_list, list) { 1093 ctx = rcu_dereference(table->table[id !! 563 if (ctx->user_id == ctx_id) { 1094 if (ctx && ctx->user_id == ctx_id) { !! 564 atomic_inc(&ctx->users); 1095 if (percpu_ref_tryget_live(&c << 1096 ret = ctx; 565 ret = ctx; >> 566 break; >> 567 } 1097 } 568 } 1098 out: !! 569 1099 rcu_read_unlock(); 570 rcu_read_unlock(); 1100 return ret; 571 return ret; 1101 } 572 } 1102 573 1103 static inline void iocb_destroy(struct aio_ki << 1104 { << 1105 if (iocb->ki_eventfd) << 1106 eventfd_ctx_put(iocb->ki_even << 1107 if (iocb->ki_filp) << 1108 fput(iocb->ki_filp); << 1109 percpu_ref_put(&iocb->ki_ctx->reqs); << 1110 kmem_cache_free(kiocb_cachep, iocb); << 1111 } << 1112 << 1113 struct aio_waiter { << 1114 struct wait_queue_entry w; << 1115 size_t min_nr; << 1116 }; << 1117 << 1118 /* aio_complete 574 /* aio_complete 1119 * Called when the io request on the giv 575 * Called when the io request on the given iocb is complete. 1120 */ 576 */ 1121 static void aio_complete(struct aio_kiocb *io !! 
577 void aio_complete(struct kiocb *iocb, long res, long res2) 1122 { 578 { 1123 struct kioctx *ctx = iocb->ki_ctx; 579 struct kioctx *ctx = iocb->ki_ctx; 1124 struct aio_ring *ring; 580 struct aio_ring *ring; 1125 struct io_event *ev_page, *event; 581 struct io_event *ev_page, *event; 1126 unsigned tail, pos, head, avail; << 1127 unsigned long flags; 582 unsigned long flags; >> 583 unsigned tail, pos; >> 584 >> 585 /* >> 586 * Special case handling for sync iocbs: >> 587 * - events go directly into the iocb for fast handling >> 588 * - the sync task with the iocb in its stack holds the single iocb >> 589 * ref, no other paths have a way to get another ref >> 590 * - the sync task helpfully left a reference to itself in the iocb >> 591 */ >> 592 if (is_sync_kiocb(iocb)) { >> 593 BUG_ON(atomic_read(&iocb->ki_users) != 1); >> 594 iocb->ki_user_data = res; >> 595 atomic_set(&iocb->ki_users, 0); >> 596 wake_up_process(iocb->ki_obj.tsk); >> 597 return; >> 598 } >> 599 >> 600 /* >> 601 * Take rcu_read_lock() in case the kioctx is being destroyed, as we >> 602 * need to issue a wakeup after decrementing reqs_active. >> 603 */ >> 604 rcu_read_lock(); >> 605 >> 606 if (iocb->ki_list.next) { >> 607 unsigned long flags; >> 608 >> 609 spin_lock_irqsave(&ctx->ctx_lock, flags); >> 610 list_del(&iocb->ki_list); >> 611 spin_unlock_irqrestore(&ctx->ctx_lock, flags); >> 612 } >> 613 >> 614 /* >> 615 * cancelled requests don't get events, userland was given one >> 616 * when the event got cancelled. >> 617 */ >> 618 if (unlikely(xchg(&iocb->ki_cancel, >> 619 KIOCB_CANCELLED) == KIOCB_CANCELLED)) { >> 620 atomic_dec(&ctx->reqs_active); >> 621 /* Still need the wake_up in case free_ioctx is waiting */ >> 622 goto put_rq; >> 623 } 1128 624 1129 /* 625 /* 1130 * Add a completion event to the ring 626 * Add a completion event to the ring buffer. Must be done holding 1131 * ctx->completion_lock to prevent ot !! 627 * ctx->ctx_lock to prevent other code from messing with the tail 1132 * pointer since we might be called f 628 * pointer since we might be called from irq context. 1133 */ 629 */ 1134 spin_lock_irqsave(&ctx->completion_lo 630 spin_lock_irqsave(&ctx->completion_lock, flags); 1135 631 1136 tail = ctx->tail; 632 tail = ctx->tail; 1137 pos = tail + AIO_EVENTS_OFFSET; 633 pos = tail + AIO_EVENTS_OFFSET; 1138 634 1139 if (++tail >= ctx->nr_events) 635 if (++tail >= ctx->nr_events) 1140 tail = 0; 636 tail = 0; 1141 637 1142 ev_page = folio_address(ctx->ring_fol !! 638 ev_page = kmap_atomic(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]); 1143 event = ev_page + pos % AIO_EVENTS_PE 639 event = ev_page + pos % AIO_EVENTS_PER_PAGE; 1144 640 1145 *event = iocb->ki_res; !! 641 event->obj = (u64)(unsigned long)iocb->ki_obj.user; 1146 !! 642 event->data = iocb->ki_user_data; 1147 flush_dcache_folio(ctx->ring_folios[p !! 643 event->res = res; 1148 !! 644 event->res2 = res2; 1149 pr_debug("%p[%u]: %p: %p %Lx %Lx %Lx\ !! 645 1150 (void __user *)(unsigned lon !! 646 kunmap_atomic(ev_page); 1151 iocb->ki_res.data, iocb->ki_ !! 
647 flush_dcache_page(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]); >> 648 >> 649 pr_debug("%p[%u]: %p: %p %Lx %lx %lx\n", >> 650 ctx, tail, iocb, iocb->ki_obj.user, iocb->ki_user_data, >> 651 res, res2); 1152 652 1153 /* after flagging the request as done 653 /* after flagging the request as done, we 1154 * must never even look at it again 654 * must never even look at it again 1155 */ 655 */ 1156 smp_wmb(); /* make event visible 656 smp_wmb(); /* make event visible before updating tail */ 1157 657 1158 ctx->tail = tail; 658 ctx->tail = tail; 1159 659 1160 ring = folio_address(ctx->ring_folios !! 660 ring = kmap_atomic(ctx->ring_pages[0]); 1161 head = ring->head; << 1162 ring->tail = tail; 661 ring->tail = tail; 1163 flush_dcache_folio(ctx->ring_folios[0 !! 662 kunmap_atomic(ring); >> 663 flush_dcache_page(ctx->ring_pages[0]); 1164 664 1165 ctx->completed_events++; << 1166 if (ctx->completed_events > 1) << 1167 refill_reqs_available(ctx, he << 1168 << 1169 avail = tail > head << 1170 ? tail - head << 1171 : tail + ctx->nr_events - hea << 1172 spin_unlock_irqrestore(&ctx->completi 665 spin_unlock_irqrestore(&ctx->completion_lock, flags); 1173 666 1174 pr_debug("added to ring %p at [%u]\n" 667 pr_debug("added to ring %p at [%u]\n", iocb, tail); 1175 668 1176 /* 669 /* 1177 * Check if the user asked us to deli 670 * Check if the user asked us to deliver the result through an 1178 * eventfd. The eventfd_signal() func 671 * eventfd. The eventfd_signal() function is safe to be called 1179 * from IRQ context. 672 * from IRQ context. 1180 */ 673 */ 1181 if (iocb->ki_eventfd) !! 674 if (iocb->ki_eventfd != NULL) 1182 eventfd_signal(iocb->ki_event !! 675 eventfd_signal(iocb->ki_eventfd, 1); >> 676 >> 677 put_rq: >> 678 /* everything turned out well, dispose of the aiocb. */ >> 679 aio_put_req(iocb); >> 680 atomic_dec(&ctx->reqs_active); 1183 681 1184 /* 682 /* 1185 * We have to order our ring_info tai 683 * We have to order our ring_info tail store above and test 1186 * of the wait list below outside the 684 * of the wait list below outside the wait lock. This is 1187 * like in wake_up_bit() where cleari 685 * like in wake_up_bit() where clearing a bit has to be 1188 * ordered with the unlocked test. 686 * ordered with the unlocked test. 1189 */ 687 */ 1190 smp_mb(); 688 smp_mb(); 1191 689 1192 if (waitqueue_active(&ctx->wait)) { !! 690 if (waitqueue_active(&ctx->wait)) 1193 struct aio_waiter *curr, *nex !! 691 wake_up(&ctx->wait); 1194 unsigned long flags; << 1195 << 1196 spin_lock_irqsave(&ctx->wait. << 1197 list_for_each_entry_safe(curr << 1198 if (avail >= curr->mi << 1199 wake_up_proce << 1200 list_del_init << 1201 } << 1202 spin_unlock_irqrestore(&ctx-> << 1203 } << 1204 } << 1205 692 1206 static inline void iocb_put(struct aio_kiocb !! 693 rcu_read_unlock(); 1207 { << 1208 if (refcount_dec_and_test(&iocb->ki_r << 1209 aio_complete(iocb); << 1210 iocb_destroy(iocb); << 1211 } << 1212 } 694 } >> 695 EXPORT_SYMBOL(aio_complete); 1213 696 1214 /* aio_read_events_ring !! 697 /* aio_read_events 1215 * Pull an event off of the ioctx's even 698 * Pull an event off of the ioctx's event ring. Returns the number of 1216 * events fetched 699 * events fetched 1217 */ 700 */ 1218 static long aio_read_events_ring(struct kioct 701 static long aio_read_events_ring(struct kioctx *ctx, 1219 struct io_ev 702 struct io_event __user *event, long nr) 1220 { 703 { 1221 struct aio_ring *ring; 704 struct aio_ring *ring; 1222 unsigned head, tail, pos; !! 
705 unsigned head, pos; 1223 long ret = 0; 706 long ret = 0; 1224 int copy_ret; 707 int copy_ret; 1225 708 1226 /* << 1227 * The mutex can block and wake us up << 1228 * wait_event_interruptible_hrtimeout << 1229 * and repeat. This should be rare en << 1230 * peformance issues. See the comment << 1231 */ << 1232 sched_annotate_sleep(); << 1233 mutex_lock(&ctx->ring_lock); 709 mutex_lock(&ctx->ring_lock); 1234 710 1235 /* Access to ->ring_folios here is pr !! 711 ring = kmap_atomic(ctx->ring_pages[0]); 1236 ring = folio_address(ctx->ring_folios << 1237 head = ring->head; 712 head = ring->head; 1238 tail = ring->tail; !! 713 kunmap_atomic(ring); 1239 << 1240 /* << 1241 * Ensure that once we've read the cu << 1242 * we also see the events that were s << 1243 */ << 1244 smp_rmb(); << 1245 714 1246 pr_debug("h%u t%u m%u\n", head, tail, !! 715 pr_debug("h%u t%u m%u\n", head, ctx->tail, ctx->nr_events); 1247 716 1248 if (head == tail) !! 717 if (head == ctx->tail) 1249 goto out; 718 goto out; 1250 719 1251 head %= ctx->nr_events; 720 head %= ctx->nr_events; 1252 tail %= ctx->nr_events; << 1253 721 1254 while (ret < nr) { 722 while (ret < nr) { 1255 long avail; 723 long avail; 1256 struct io_event *ev; 724 struct io_event *ev; 1257 struct folio *folio; !! 725 struct page *page; 1258 726 1259 avail = (head <= tail ? tail !! 727 avail = (head <= ctx->tail ? ctx->tail : ctx->nr_events) - head; 1260 if (head == tail) !! 728 if (head == ctx->tail) 1261 break; 729 break; 1262 730 >> 731 avail = min(avail, nr - ret); >> 732 avail = min_t(long, avail, AIO_EVENTS_PER_PAGE - >> 733 ((head + AIO_EVENTS_OFFSET) % AIO_EVENTS_PER_PAGE)); >> 734 1263 pos = head + AIO_EVENTS_OFFSE 735 pos = head + AIO_EVENTS_OFFSET; 1264 folio = ctx->ring_folios[pos !! 736 page = ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]; 1265 pos %= AIO_EVENTS_PER_PAGE; 737 pos %= AIO_EVENTS_PER_PAGE; 1266 738 1267 avail = min(avail, nr - ret); !! 739 ev = kmap(page); 1268 avail = min_t(long, avail, AI << 1269 << 1270 ev = folio_address(folio); << 1271 copy_ret = copy_to_user(event 740 copy_ret = copy_to_user(event + ret, ev + pos, 1272 sizeo 741 sizeof(*ev) * avail); >> 742 kunmap(page); 1273 743 1274 if (unlikely(copy_ret)) { 744 if (unlikely(copy_ret)) { 1275 ret = -EFAULT; 745 ret = -EFAULT; 1276 goto out; 746 goto out; 1277 } 747 } 1278 748 1279 ret += avail; 749 ret += avail; 1280 head += avail; 750 head += avail; 1281 head %= ctx->nr_events; 751 head %= ctx->nr_events; 1282 } 752 } 1283 753 1284 ring = folio_address(ctx->ring_folios !! 754 ring = kmap_atomic(ctx->ring_pages[0]); 1285 ring->head = head; 755 ring->head = head; 1286 flush_dcache_folio(ctx->ring_folios[0 !! 756 kunmap_atomic(ring); >> 757 flush_dcache_page(ctx->ring_pages[0]); 1287 758 1288 pr_debug("%li h%u t%u\n", ret, head, !! 
759 pr_debug("%li h%u t%u\n", ret, head, ctx->tail); 1289 out: 760 out: 1290 mutex_unlock(&ctx->ring_lock); 761 mutex_unlock(&ctx->ring_lock); 1291 762 1292 return ret; 763 return ret; 1293 } 764 } 1294 765 1295 static bool aio_read_events(struct kioctx *ct 766 static bool aio_read_events(struct kioctx *ctx, long min_nr, long nr, 1296 struct io_event _ 767 struct io_event __user *event, long *i) 1297 { 768 { 1298 long ret = aio_read_events_ring(ctx, 769 long ret = aio_read_events_ring(ctx, event + *i, nr - *i); 1299 770 1300 if (ret > 0) 771 if (ret > 0) 1301 *i += ret; 772 *i += ret; 1302 773 1303 if (unlikely(atomic_read(&ctx->dead)) 774 if (unlikely(atomic_read(&ctx->dead))) 1304 ret = -EINVAL; 775 ret = -EINVAL; 1305 776 1306 if (!*i) 777 if (!*i) 1307 *i = ret; 778 *i = ret; 1308 779 1309 return ret < 0 || *i >= min_nr; 780 return ret < 0 || *i >= min_nr; 1310 } 781 } 1311 782 1312 static long read_events(struct kioctx *ctx, l 783 static long read_events(struct kioctx *ctx, long min_nr, long nr, 1313 struct io_event __use 784 struct io_event __user *event, 1314 ktime_t until) !! 785 struct timespec __user *timeout) 1315 { 786 { 1316 struct hrtimer_sleeper t; !! 787 ktime_t until = { .tv64 = KTIME_MAX }; 1317 struct aio_waiter w; !! 788 long ret = 0; 1318 long ret = 0, ret2 = 0; !! 789 >> 790 if (timeout) { >> 791 struct timespec ts; >> 792 >> 793 if (unlikely(copy_from_user(&ts, timeout, sizeof(ts)))) >> 794 return -EFAULT; >> 795 >> 796 until = timespec_to_ktime(ts); >> 797 } 1319 798 1320 /* 799 /* 1321 * Note that aio_read_events() is bei 800 * Note that aio_read_events() is being called as the conditional - i.e. 1322 * we're calling it after prepare_to_ 801 * we're calling it after prepare_to_wait() has set task state to 1323 * TASK_INTERRUPTIBLE. 802 * TASK_INTERRUPTIBLE. 1324 * 803 * 1325 * But aio_read_events() can block, a 804 * But aio_read_events() can block, and if it blocks it's going to flip 1326 * the task state back to TASK_RUNNIN 805 * the task state back to TASK_RUNNING. 1327 * 806 * 1328 * This should be ok, provided it doe 807 * This should be ok, provided it doesn't flip the state back to 1329 * TASK_RUNNING and return 0 too much 808 * TASK_RUNNING and return 0 too much - that causes us to spin. That 1330 * will only happen if the mutex_lock 809 * will only happen if the mutex_lock() call blocks, and we then find 1331 * the ringbuffer empty. So in practi 810 * the ringbuffer empty. So in practice we should be ok, but it's 1332 * something to be aware of when touc 811 * something to be aware of when touching this code. 1333 */ 812 */ 1334 aio_read_events(ctx, min_nr, nr, even !! 813 wait_event_interruptible_hrtimeout(ctx->wait, 1335 if (until == 0 || ret < 0 || ret >= m !! 814 aio_read_events(ctx, min_nr, nr, event, &ret), until); 1336 return ret; << 1337 << 1338 hrtimer_init_sleeper_on_stack(&t, CLO << 1339 if (until != KTIME_MAX) { << 1340 hrtimer_set_expires_range_ns( << 1341 hrtimer_sleeper_start_expires << 1342 } << 1343 << 1344 init_wait(&w.w); << 1345 << 1346 while (1) { << 1347 unsigned long nr_got = ret; << 1348 << 1349 w.min_nr = min_nr - ret; << 1350 << 1351 ret2 = prepare_to_wait_event( << 1352 if (!ret2 && !t.task) << 1353 ret2 = -ETIME; << 1354 << 1355 if (aio_read_events(ctx, min_ << 1356 break; << 1357 << 1358 if (nr_got == ret) << 1359 schedule(); << 1360 } << 1361 815 1362 finish_wait(&ctx->wait, &w.w); !! 816 if (!ret && signal_pending(current)) 1363 hrtimer_cancel(&t.timer); !! 
817 ret = -EINTR; 1364 destroy_hrtimer_on_stack(&t.timer); << 1365 818 1366 return ret; 819 return ret; 1367 } 820 } 1368 821 1369 /* sys_io_setup: 822 /* sys_io_setup: 1370 * Create an aio_context capable of rece 823 * Create an aio_context capable of receiving at least nr_events. 1371 * ctxp must not point to an aio_context 824 * ctxp must not point to an aio_context that already exists, and 1372 * must be initialized to 0 prior to the 825 * must be initialized to 0 prior to the call. On successful 1373 * creation of the aio_context, *ctxp is 826 * creation of the aio_context, *ctxp is filled in with the resulting 1374 * handle. May fail with -EINVAL if *ct 827 * handle. May fail with -EINVAL if *ctxp is not initialized, 1375 * if the specified nr_events exceeds in 828 * if the specified nr_events exceeds internal limits. May fail 1376 * with -EAGAIN if the specified nr_even 829 * with -EAGAIN if the specified nr_events exceeds the user's limit 1377 * of available events. May fail with - 830 * of available events. May fail with -ENOMEM if insufficient kernel 1378 * resources are available. May fail wi 831 * resources are available. May fail with -EFAULT if an invalid 1379 * pointer is passed for ctxp. Will fai 832 * pointer is passed for ctxp. Will fail with -ENOSYS if not 1380 * implemented. 833 * implemented. 1381 */ 834 */ 1382 SYSCALL_DEFINE2(io_setup, unsigned, nr_events 835 SYSCALL_DEFINE2(io_setup, unsigned, nr_events, aio_context_t __user *, ctxp) 1383 { 836 { 1384 struct kioctx *ioctx = NULL; 837 struct kioctx *ioctx = NULL; 1385 unsigned long ctx; 838 unsigned long ctx; 1386 long ret; 839 long ret; 1387 840 1388 ret = get_user(ctx, ctxp); 841 ret = get_user(ctx, ctxp); 1389 if (unlikely(ret)) 842 if (unlikely(ret)) 1390 goto out; 843 goto out; 1391 844 1392 ret = -EINVAL; 845 ret = -EINVAL; 1393 if (unlikely(ctx || nr_events == 0)) 846 if (unlikely(ctx || nr_events == 0)) { 1394 pr_debug("EINVAL: ctx %lu nr_ !! 847 pr_debug("EINVAL: io_setup: ctx %lu nr_events %u\n", 1395 ctx, nr_events); 848 ctx, nr_events); 1396 goto out; 849 goto out; 1397 } 850 } 1398 851 1399 ioctx = ioctx_alloc(nr_events); 852 ioctx = ioctx_alloc(nr_events); 1400 ret = PTR_ERR(ioctx); 853 ret = PTR_ERR(ioctx); 1401 if (!IS_ERR(ioctx)) { 854 if (!IS_ERR(ioctx)) { 1402 ret = put_user(ioctx->user_id 855 ret = put_user(ioctx->user_id, ctxp); 1403 if (ret) 856 if (ret) 1404 kill_ioctx(current->m !! 857 kill_ioctx(current->mm, ioctx); 1405 percpu_ref_put(&ioctx->users) !! 858 put_ioctx(ioctx); 1406 } << 1407 << 1408 out: << 1409 return ret; << 1410 } << 1411 << 1412 #ifdef CONFIG_COMPAT << 1413 COMPAT_SYSCALL_DEFINE2(io_setup, unsigned, nr << 1414 { << 1415 struct kioctx *ioctx = NULL; << 1416 unsigned long ctx; << 1417 long ret; << 1418 << 1419 ret = get_user(ctx, ctx32p); << 1420 if (unlikely(ret)) << 1421 goto out; << 1422 << 1423 ret = -EINVAL; << 1424 if (unlikely(ctx || nr_events == 0)) << 1425 pr_debug("EINVAL: ctx %lu nr_ << 1426 ctx, nr_events); << 1427 goto out; << 1428 } << 1429 << 1430 ioctx = ioctx_alloc(nr_events); << 1431 ret = PTR_ERR(ioctx); << 1432 if (!IS_ERR(ioctx)) { << 1433 /* truncating is ok because i << 1434 ret = put_user((u32)ioctx->us << 1435 if (ret) << 1436 kill_ioctx(current->m << 1437 percpu_ref_put(&ioctx->users) << 1438 } 859 } 1439 860 1440 out: 861 out: 1441 return ret; 862 return ret; 1442 } 863 } 1443 #endif << 1444 864 1445 /* sys_io_destroy: 865 /* sys_io_destroy: 1446 * Destroy the aio_context specified. M 866 * Destroy the aio_context specified. 
May cancel any outstanding 1447 * AIOs and block on completion. Will f 867 * AIOs and block on completion. Will fail with -ENOSYS if not 1448 * implemented. May fail with -EINVAL i 868 * implemented. May fail with -EINVAL if the context pointed to 1449 * is invalid. 869 * is invalid. 1450 */ 870 */ 1451 SYSCALL_DEFINE1(io_destroy, aio_context_t, ct 871 SYSCALL_DEFINE1(io_destroy, aio_context_t, ctx) 1452 { 872 { 1453 struct kioctx *ioctx = lookup_ioctx(c 873 struct kioctx *ioctx = lookup_ioctx(ctx); 1454 if (likely(NULL != ioctx)) { 874 if (likely(NULL != ioctx)) { 1455 struct ctx_rq_wait wait; !! 875 kill_ioctx(current->mm, ioctx); 1456 int ret; !! 876 put_ioctx(ioctx); 1457 !! 877 return 0; 1458 init_completion(&wait.comp); << 1459 atomic_set(&wait.count, 1); << 1460 << 1461 /* Pass requests_done to kill << 1462 * in a thread-safe way. If w << 1463 * a race condition if two io << 1464 */ << 1465 ret = kill_ioctx(current->mm, << 1466 percpu_ref_put(&ioctx->users) << 1467 << 1468 /* Wait until all IO for the << 1469 * keep using user-space buff << 1470 * is destroyed. << 1471 */ << 1472 if (!ret) << 1473 wait_for_completion(& << 1474 << 1475 return ret; << 1476 } 878 } 1477 pr_debug("EINVAL: invalid context id\ !! 879 pr_debug("EINVAL: io_destroy: invalid context id\n"); 1478 return -EINVAL; 880 return -EINVAL; 1479 } 881 } 1480 882 1481 static void aio_remove_iocb(struct aio_kiocb !! 883 static void aio_advance_iovec(struct kiocb *iocb, ssize_t ret) 1482 { 884 { 1483 struct kioctx *ctx = iocb->ki_ctx; !! 885 struct iovec *iov = &iocb->ki_iovec[iocb->ki_cur_seg]; 1484 unsigned long flags; << 1485 << 1486 spin_lock_irqsave(&ctx->ctx_lock, fla << 1487 list_del(&iocb->ki_list); << 1488 spin_unlock_irqrestore(&ctx->ctx_lock << 1489 } << 1490 << 1491 static void aio_complete_rw(struct kiocb *kio << 1492 { << 1493 struct aio_kiocb *iocb = container_of << 1494 886 1495 if (!list_empty_careful(&iocb->ki_lis !! 887 BUG_ON(ret <= 0); 1496 aio_remove_iocb(iocb); << 1497 888 1498 if (kiocb->ki_flags & IOCB_WRITE) { !! 889 while (iocb->ki_cur_seg < iocb->ki_nr_segs && ret > 0) { 1499 struct inode *inode = file_in !! 890 ssize_t this = min((ssize_t)iov->iov_len, ret); 1500 !! 891 iov->iov_base += this; 1501 if (S_ISREG(inode->i_mode)) !! 892 iov->iov_len -= this; 1502 kiocb_end_write(kiocb !! 893 iocb->ki_left -= this; 1503 } !! 894 ret -= this; 1504 !! 895 if (iov->iov_len == 0) { 1505 iocb->ki_res.res = res; !! 896 iocb->ki_cur_seg++; 1506 iocb->ki_res.res2 = 0; !! 897 iov++; 1507 iocb_put(iocb); << 1508 } << 1509 << 1510 static int aio_prep_rw(struct kiocb *req, con << 1511 { << 1512 int ret; << 1513 << 1514 req->ki_complete = aio_complete_rw; << 1515 req->private = NULL; << 1516 req->ki_pos = iocb->aio_offset; << 1517 req->ki_flags = req->ki_filp->f_iocb_ << 1518 if (iocb->aio_flags & IOCB_FLAG_RESFD << 1519 req->ki_flags |= IOCB_EVENTFD << 1520 if (iocb->aio_flags & IOCB_FLAG_IOPRI << 1521 /* << 1522 * If the IOCB_FLAG_IOPRIO fl << 1523 * aio_reqprio is interpreted << 1524 * class and priority. 
<< 1525 */ << 1526 ret = ioprio_check_cap(iocb-> << 1527 if (ret) { << 1528 pr_debug("aio ioprio << 1529 return ret; << 1530 } 898 } 1531 << 1532 req->ki_ioprio = iocb->aio_re << 1533 } else << 1534 req->ki_ioprio = get_current_ << 1535 << 1536 ret = kiocb_set_rw_flags(req, iocb->a << 1537 if (unlikely(ret)) << 1538 return ret; << 1539 << 1540 req->ki_flags &= ~IOCB_HIPRI; /* no o << 1541 return 0; << 1542 } << 1543 << 1544 static ssize_t aio_setup_rw(int rw, const str << 1545 struct iovec **iovec, bool ve << 1546 struct iov_iter *iter) << 1547 { << 1548 void __user *buf = (void __user *)(ui << 1549 size_t len = iocb->aio_nbytes; << 1550 << 1551 if (!vectored) { << 1552 ssize_t ret = import_ubuf(rw, << 1553 *iovec = NULL; << 1554 return ret; << 1555 } 899 } 1556 900 1557 return __import_iovec(rw, buf, len, U !! 901 /* the caller should not have done more io than what fit in >> 902 * the remaining iovecs */ >> 903 BUG_ON(ret > 0 && iocb->ki_left == 0); 1558 } 904 } 1559 905 1560 static inline void aio_rw_done(struct kiocb * !! 906 typedef ssize_t (aio_rw_op)(struct kiocb *, const struct iovec *, 1561 { !! 907 unsigned long, loff_t); 1562 switch (ret) { << 1563 case -EIOCBQUEUED: << 1564 break; << 1565 case -ERESTARTSYS: << 1566 case -ERESTARTNOINTR: << 1567 case -ERESTARTNOHAND: << 1568 case -ERESTART_RESTARTBLOCK: << 1569 /* << 1570 * There's no easy way to res << 1571 * may be already running. Ju << 1572 */ << 1573 ret = -EINTR; << 1574 fallthrough; << 1575 default: << 1576 req->ki_complete(req, ret); << 1577 } << 1578 } << 1579 908 1580 static int aio_read(struct kiocb *req, const !! 909 static ssize_t aio_rw_vect_retry(struct kiocb *iocb, int rw, aio_rw_op *rw_op) 1581 bool vectored, bool c << 1582 { 910 { 1583 struct iovec inline_vecs[UIO_FASTIOV] !! 911 struct file *file = iocb->ki_filp; 1584 struct iov_iter iter; !! 912 struct address_space *mapping = file->f_mapping; 1585 struct file *file; !! 913 struct inode *inode = mapping->host; 1586 int ret; !! 914 ssize_t ret = 0; 1587 915 1588 ret = aio_prep_rw(req, iocb, READ); !! 916 /* This matches the pread()/pwrite() logic */ 1589 if (ret) !! 917 if (iocb->ki_pos < 0) 1590 return ret; << 1591 file = req->ki_filp; << 1592 if (unlikely(!(file->f_mode & FMODE_R << 1593 return -EBADF; << 1594 if (unlikely(!file->f_op->read_iter)) << 1595 return -EINVAL; 918 return -EINVAL; 1596 919 1597 ret = aio_setup_rw(ITER_DEST, iocb, & !! 920 if (rw == WRITE) 1598 if (ret < 0) !! 921 file_start_write(file); 1599 return ret; !! 922 do { 1600 ret = rw_verify_area(READ, file, &req !! 923 ret = rw_op(iocb, &iocb->ki_iovec[iocb->ki_cur_seg], 1601 if (!ret) !! 924 iocb->ki_nr_segs - iocb->ki_cur_seg, 1602 aio_rw_done(req, file->f_op-> !! 925 iocb->ki_pos); 1603 kfree(iovec); !! 926 if (ret > 0) >> 927 aio_advance_iovec(iocb, ret); >> 928 >> 929 /* retry all partial writes. retry partial reads as long as its a >> 930 * regular file. */ >> 931 } while (ret > 0 && iocb->ki_left > 0 && >> 932 (rw == WRITE || >> 933 (!S_ISFIFO(inode->i_mode) && !S_ISSOCK(inode->i_mode)))); >> 934 if (rw == WRITE) >> 935 file_end_write(file); >> 936 >> 937 /* This means we must have transferred all that we could */ >> 938 /* No need to retry anymore */ >> 939 if ((ret == 0) || (iocb->ki_left == 0)) >> 940 ret = iocb->ki_nbytes - iocb->ki_left; >> 941 >> 942 /* If we managed to write some out we return that, rather than >> 943 * the eventual error. 
*/ >> 944 if (rw == WRITE >> 945 && ret < 0 && ret != -EIOCBQUEUED >> 946 && iocb->ki_nbytes - iocb->ki_left) >> 947 ret = iocb->ki_nbytes - iocb->ki_left; >> 948 1604 return ret; 949 return ret; 1605 } 950 } 1606 951 1607 static int aio_write(struct kiocb *req, const !! 952 static ssize_t aio_setup_vectored_rw(int rw, struct kiocb *kiocb, bool compat) 1608 bool vectored, bool << 1609 { 953 { 1610 struct iovec inline_vecs[UIO_FASTIOV] !! 954 ssize_t ret; 1611 struct iov_iter iter; << 1612 struct file *file; << 1613 int ret; << 1614 << 1615 ret = aio_prep_rw(req, iocb, WRITE); << 1616 if (ret) << 1617 return ret; << 1618 file = req->ki_filp; << 1619 955 1620 if (unlikely(!(file->f_mode & FMODE_W !! 956 kiocb->ki_nr_segs = kiocb->ki_nbytes; 1621 return -EBADF; << 1622 if (unlikely(!file->f_op->write_iter) << 1623 return -EINVAL; << 1624 957 1625 ret = aio_setup_rw(ITER_SOURCE, iocb, !! 958 #ifdef CONFIG_COMPAT >> 959 if (compat) >> 960 ret = compat_rw_copy_check_uvector(rw, >> 961 (struct compat_iovec __user *)kiocb->ki_buf, >> 962 kiocb->ki_nr_segs, 1, &kiocb->ki_inline_vec, >> 963 &kiocb->ki_iovec); >> 964 else >> 965 #endif >> 966 ret = rw_copy_check_uvector(rw, >> 967 (struct iovec __user *)kiocb->ki_buf, >> 968 kiocb->ki_nr_segs, 1, &kiocb->ki_inline_vec, >> 969 &kiocb->ki_iovec); 1626 if (ret < 0) 970 if (ret < 0) 1627 return ret; 971 return ret; 1628 ret = rw_verify_area(WRITE, file, &re << 1629 if (!ret) { << 1630 if (S_ISREG(file_inode(file)- << 1631 kiocb_start_write(req << 1632 req->ki_flags |= IOCB_WRITE; << 1633 aio_rw_done(req, file->f_op-> << 1634 } << 1635 kfree(iovec); << 1636 return ret; << 1637 } << 1638 972 1639 static void aio_fsync_work(struct work_struct !! 973 /* ki_nbytes now reflect bytes instead of segs */ 1640 { !! 974 kiocb->ki_nbytes = ret; 1641 struct aio_kiocb *iocb = container_of << 1642 const struct cred *old_cred = overrid << 1643 << 1644 iocb->ki_res.res = vfs_fsync(iocb->fs << 1645 revert_creds(old_cred); << 1646 put_cred(iocb->fsync.creds); << 1647 iocb_put(iocb); << 1648 } << 1649 << 1650 static int aio_fsync(struct fsync_iocb *req, << 1651 bool datasync) << 1652 { << 1653 if (unlikely(iocb->aio_buf || iocb->a << 1654 iocb->aio_rw_flags)) << 1655 return -EINVAL; << 1656 << 1657 if (unlikely(!req->file->f_op->fsync) << 1658 return -EINVAL; << 1659 << 1660 req->creds = prepare_creds(); << 1661 if (!req->creds) << 1662 return -ENOMEM; << 1663 << 1664 req->datasync = datasync; << 1665 INIT_WORK(&req->work, aio_fsync_work) << 1666 schedule_work(&req->work); << 1667 return 0; 975 return 0; 1668 } 976 } 1669 977 1670 static void aio_poll_put_work(struct work_str !! 978 static ssize_t aio_setup_single_vector(int rw, struct kiocb *kiocb) 1671 { 979 { 1672 struct poll_iocb *req = container_of( !! 980 size_t len = kiocb->ki_nbytes; 1673 struct aio_kiocb *iocb = container_of << 1674 981 1675 iocb_put(iocb); !! 982 if (len > MAX_RW_COUNT) 1676 } !! 983 len = MAX_RW_COUNT; 1677 << 1678 /* << 1679 * Safely lock the waitqueue which the reques << 1680 * case where the ->poll() provider decides t << 1681 * << 1682 * Returns true on success, meaning that req- << 1683 * is on req->head, and an RCU read lock was << 1684 * request was already removed from its waitq << 1685 */ << 1686 static bool poll_iocb_lock_wq(struct poll_ioc << 1687 { << 1688 wait_queue_head_t *head; << 1689 984 1690 /* !! 985 if (unlikely(!access_ok(!rw, kiocb->ki_buf, len))) 1691 * While we hold the waitqueue lock a !! 
986 return -EFAULT; 1692 * wake_up_pollfree() will wait for u << 1693 * lock in the first place can race w << 1694 * << 1695 * We solve this as eventpoll does: b << 1696 * all users of wake_up_pollfree() wi << 1697 * we enter rcu_read_lock() and see t << 1698 * non-NULL, we can then lock it with << 1699 * under us, then check whether the r << 1700 * << 1701 * Keep holding rcu_read_lock() as lo << 1702 * case the caller deletes the entry << 1703 * In that case, only RCU prevents th << 1704 */ << 1705 rcu_read_lock(); << 1706 head = smp_load_acquire(&req->head); << 1707 if (head) { << 1708 spin_lock(&head->lock); << 1709 if (!list_empty(&req->wait.en << 1710 return true; << 1711 spin_unlock(&head->lock); << 1712 } << 1713 rcu_read_unlock(); << 1714 return false; << 1715 } << 1716 << 1717 static void poll_iocb_unlock_wq(struct poll_i << 1718 { << 1719 spin_unlock(&req->head->lock); << 1720 rcu_read_unlock(); << 1721 } << 1722 << 1723 static void aio_poll_complete_work(struct wor << 1724 { << 1725 struct poll_iocb *req = container_of( << 1726 struct aio_kiocb *iocb = container_of << 1727 struct poll_table_struct pt = { ._key << 1728 struct kioctx *ctx = iocb->ki_ctx; << 1729 __poll_t mask = 0; << 1730 << 1731 if (!READ_ONCE(req->cancelled)) << 1732 mask = vfs_poll(req->file, &p << 1733 << 1734 /* << 1735 * Note that ->ki_cancel callers also << 1736 * calling ->ki_cancel. We need the << 1737 * synchronize with them. In the can << 1738 * itself is not actually needed, but << 1739 * avoid further branches in the fast << 1740 */ << 1741 spin_lock_irq(&ctx->ctx_lock); << 1742 if (poll_iocb_lock_wq(req)) { << 1743 if (!mask && !READ_ONCE(req-> << 1744 /* << 1745 * The request isn't << 1746 * Reschedule complet << 1747 */ << 1748 if (req->work_need_re << 1749 schedule_work << 1750 req->work_nee << 1751 } else { << 1752 req->work_sch << 1753 } << 1754 poll_iocb_unlock_wq(r << 1755 spin_unlock_irq(&ctx- << 1756 return; << 1757 } << 1758 list_del_init(&req->wait.entr << 1759 poll_iocb_unlock_wq(req); << 1760 } /* else, POLLFREE has freed the wai << 1761 list_del_init(&iocb->ki_list); << 1762 iocb->ki_res.res = mangle_poll(mask); << 1763 spin_unlock_irq(&ctx->ctx_lock); << 1764 << 1765 iocb_put(iocb); << 1766 } << 1767 << 1768 /* assumes we are called with irqs disabled * << 1769 static int aio_poll_cancel(struct kiocb *iocb << 1770 { << 1771 struct aio_kiocb *aiocb = container_o << 1772 struct poll_iocb *req = &aiocb->poll; << 1773 << 1774 if (poll_iocb_lock_wq(req)) { << 1775 WRITE_ONCE(req->cancelled, tr << 1776 if (!req->work_scheduled) { << 1777 schedule_work(&aiocb- << 1778 req->work_scheduled = << 1779 } << 1780 poll_iocb_unlock_wq(req); << 1781 } /* else, the request was force-canc << 1782 987 >> 988 kiocb->ki_iovec = &kiocb->ki_inline_vec; >> 989 kiocb->ki_iovec->iov_base = kiocb->ki_buf; >> 990 kiocb->ki_iovec->iov_len = len; >> 991 kiocb->ki_nr_segs = 1; 1783 return 0; 992 return 0; 1784 } 993 } 1785 994 1786 static int aio_poll_wake(struct wait_queue_en !! 995 /* 1787 void *key) !! 996 * aio_setup_iocb: 1788 { !! 997 * Performs the initial checks and aio retry method 1789 struct poll_iocb *req = container_of( !! 998 * setup for the kiocb at the time of io submission. 1790 struct aio_kiocb *iocb = container_of !! 999 */ 1791 __poll_t mask = key_to_poll(key); !! 1000 static ssize_t aio_run_iocb(struct kiocb *req, bool compat) 1792 unsigned long flags; !! 1001 { 1793 !! 1002 struct file *file = req->ki_filp; 1794 /* for instances that support it chec !! 
1003 ssize_t ret; 1795 if (mask && !(mask & req->events)) !! 1004 int rw; 1796 return 0; !! 1005 fmode_t mode; 1797 !! 1006 aio_rw_op *rw_op; 1798 /* << 1799 * Complete the request inline if pos << 1800 * conditions be met: << 1801 * 1. An event mask must have been << 1802 * instead, then mask == 0 and w << 1803 * the events, so inline complet << 1804 * 2. The completion work must not << 1805 * 3. ctx_lock must not be busy. W << 1806 * already hold the waitqueue lo << 1807 * locking order. Use irqsave/i << 1808 * filesystems (e.g. fuse) call << 1809 * yet IRQs have to be disabled << 1810 */ << 1811 if (mask && !req->work_scheduled && << 1812 spin_trylock_irqsave(&iocb->ki_ct << 1813 struct kioctx *ctx = iocb->ki << 1814 1007 1815 list_del_init(&req->wait.entr !! 1008 switch (req->ki_opcode) { 1816 list_del(&iocb->ki_list); !! 1009 case IOCB_CMD_PREAD: 1817 iocb->ki_res.res = mangle_pol !! 1010 case IOCB_CMD_PREADV: 1818 if (iocb->ki_eventfd && !even !! 1011 mode = FMODE_READ; 1819 iocb = NULL; !! 1012 rw = READ; 1820 INIT_WORK(&req->work, !! 1013 rw_op = file->f_op->aio_read; 1821 schedule_work(&req->w !! 1014 goto rw_common; 1822 } << 1823 spin_unlock_irqrestore(&ctx-> << 1824 if (iocb) << 1825 iocb_put(iocb); << 1826 } else { << 1827 /* << 1828 * Schedule the completion wo << 1829 * scheduled, record that ano << 1830 * << 1831 * Don't remove the request f << 1832 * not actually be complete y << 1833 * is called), and we must no << 1834 * exception to this; see bel << 1835 */ << 1836 if (req->work_scheduled) { << 1837 req->work_need_resche << 1838 } else { << 1839 schedule_work(&req->w << 1840 req->work_scheduled = << 1841 } << 1842 1015 1843 /* !! 1016 case IOCB_CMD_PWRITE: 1844 * If the waitqueue is being !! 1017 case IOCB_CMD_PWRITEV: 1845 * the request inline, we hav !! 1018 mode = FMODE_WRITE; 1846 * we can. That means immedi !! 1019 rw = WRITE; 1847 * waitqueue and preventing a !! 1020 rw_op = file->f_op->aio_write; 1848 * waitqueue via the request. !! 1021 goto rw_common; 1849 * completion work (done abov !! 1022 rw_common: 1850 * cancelled, to potentially !! 1023 if (unlikely(!(file->f_mode & mode))) 1851 */ !! 1024 return -EBADF; 1852 if (mask & POLLFREE) { !! 1025 1853 WRITE_ONCE(req->cance !! 1026 if (!rw_op) 1854 list_del_init(&req->w !! 1027 return -EINVAL; 1855 !! 1028 1856 /* !! 1029 ret = (req->ki_opcode == IOCB_CMD_PREADV || 1857 * Careful: this *mus !! 1030 req->ki_opcode == IOCB_CMD_PWRITEV) 1858 * as req->head is NU !! 1031 ? aio_setup_vectored_rw(rw, req, compat) 1859 * completed and free !! 1032 : aio_setup_single_vector(rw, req); 1860 * will no longer nee !! 1033 if (ret) 1861 */ !! 1034 return ret; 1862 smp_store_release(&re << 1863 } << 1864 } << 1865 return 1; << 1866 } << 1867 1035 1868 struct aio_poll_table { !! 1036 ret = rw_verify_area(rw, file, &req->ki_pos, req->ki_nbytes); 1869 struct poll_table_struct pt; !! 1037 if (ret < 0) 1870 struct aio_kiocb *iocb !! 1038 return ret; 1871 bool queue << 1872 int error << 1873 }; << 1874 1039 1875 static void !! 1040 req->ki_nbytes = ret; 1876 aio_poll_queue_proc(struct file *file, struct !! 1041 req->ki_left = ret; 1877 struct poll_table_struct *p) << 1878 { << 1879 struct aio_poll_table *pt = container << 1880 << 1881 /* multiple wait queues per file are << 1882 if (unlikely(pt->queued)) { << 1883 pt->error = -EINVAL; << 1884 return; << 1885 } << 1886 1042 1887 pt->queued = true; !! 1043 ret = aio_rw_vect_retry(req, rw, rw_op); 1888 pt->error = 0; !! 
1044 break; 1889 pt->iocb->poll.head = head; << 1890 add_wait_queue(head, &pt->iocb->poll. << 1891 } << 1892 << 1893 static int aio_poll(struct aio_kiocb *aiocb, << 1894 { << 1895 struct kioctx *ctx = aiocb->ki_ctx; << 1896 struct poll_iocb *req = &aiocb->poll; << 1897 struct aio_poll_table apt; << 1898 bool cancel = false; << 1899 __poll_t mask; << 1900 1045 1901 /* reject any unknown events outside !! 1046 case IOCB_CMD_FDSYNC: 1902 if ((u16)iocb->aio_buf != iocb->aio_b !! 1047 if (!file->f_op->aio_fsync) 1903 return -EINVAL; !! 1048 return -EINVAL; 1904 /* reject fields that are not defined << 1905 if (iocb->aio_offset || iocb->aio_nby << 1906 return -EINVAL; << 1907 1049 1908 INIT_WORK(&req->work, aio_poll_comple !! 1050 ret = file->f_op->aio_fsync(req, 1); 1909 req->events = demangle_poll(iocb->aio !! 1051 break; 1910 1052 1911 req->head = NULL; !! 1053 case IOCB_CMD_FSYNC: 1912 req->cancelled = false; !! 1054 if (!file->f_op->aio_fsync) 1913 req->work_scheduled = false; !! 1055 return -EINVAL; 1914 req->work_need_resched = false; << 1915 << 1916 apt.pt._qproc = aio_poll_queue_proc; << 1917 apt.pt._key = req->events; << 1918 apt.iocb = aiocb; << 1919 apt.queued = false; << 1920 apt.error = -EINVAL; /* same as no su << 1921 << 1922 /* initialized the list so that we ca << 1923 INIT_LIST_HEAD(&req->wait.entry); << 1924 init_waitqueue_func_entry(&req->wait, << 1925 1056 1926 mask = vfs_poll(req->file, &apt.pt) & !! 1057 ret = file->f_op->aio_fsync(req, 0); 1927 spin_lock_irq(&ctx->ctx_lock); !! 1058 break; 1928 if (likely(apt.queued)) { << 1929 bool on_queue = poll_iocb_loc << 1930 1059 1931 if (!on_queue || req->work_sc !! 1060 default: 1932 /* !! 1061 pr_debug("EINVAL: no operation provided\n"); 1933 * aio_poll_wake() al !! 1062 return -EINVAL; 1934 * completion work, o << 1935 */ << 1936 if (apt.error) /* uns << 1937 cancel = true << 1938 apt.error = 0; << 1939 mask = 0; << 1940 } << 1941 if (mask || apt.error) { << 1942 /* Steal to complete << 1943 list_del_init(&req->w << 1944 } else if (cancel) { << 1945 /* Cancel if possible << 1946 WRITE_ONCE(req->cance << 1947 } else if (on_queue) { << 1948 /* << 1949 * Actually waiting f << 1950 * active_reqs so tha << 1951 */ << 1952 list_add_tail(&aiocb- << 1953 aiocb->ki_cancel = ai << 1954 } << 1955 if (on_queue) << 1956 poll_iocb_unlock_wq(r << 1957 } << 1958 if (mask) { /* no async, we'd stolen << 1959 aiocb->ki_res.res = mangle_po << 1960 apt.error = 0; << 1961 } 1063 } 1962 spin_unlock_irq(&ctx->ctx_lock); << 1963 if (mask) << 1964 iocb_put(aiocb); << 1965 return apt.error; << 1966 } << 1967 << 1968 static int __io_submit_one(struct kioctx *ctx << 1969 struct iocb __user << 1970 bool compat) << 1971 { << 1972 req->ki_filp = fget(iocb->aio_fildes) << 1973 if (unlikely(!req->ki_filp)) << 1974 return -EBADF; << 1975 1064 1976 if (iocb->aio_flags & IOCB_FLAG_RESFD !! 1065 if (ret != -EIOCBQUEUED) { 1977 struct eventfd_ctx *eventfd; << 1978 /* 1066 /* 1979 * If the IOCB_FLAG_RESFD fla !! 1067 * There's no easy way to restart the syscall since other AIO's 1980 * instance of the file* now. !! 1068 * may be already running. Just fail this IO with EINTR. 1981 * an eventfd() fd, and will << 1982 * event using the eventfd_si << 1983 */ 1069 */ 1984 eventfd = eventfd_ctx_fdget(i !! 1070 if (unlikely(ret == -ERESTARTSYS || ret == -ERESTARTNOINTR || 1985 if (IS_ERR(eventfd)) !! 1071 ret == -ERESTARTNOHAND || 1986 return PTR_ERR(eventf !! 1072 ret == -ERESTART_RESTARTBLOCK)) 1987 !! 1073 ret = -EINTR; 1988 req->ki_eventfd = eventfd; !! 
1074 aio_complete(req, ret, 0); 1989 } 1075 } 1990 1076 1991 if (unlikely(put_user(KIOCB_KEY, &use !! 1077 return 0; 1992 pr_debug("EFAULT: aio_key\n") << 1993 return -EFAULT; << 1994 } << 1995 << 1996 req->ki_res.obj = (u64)(unsigned long << 1997 req->ki_res.data = iocb->aio_data; << 1998 req->ki_res.res = 0; << 1999 req->ki_res.res2 = 0; << 2000 << 2001 switch (iocb->aio_lio_opcode) { << 2002 case IOCB_CMD_PREAD: << 2003 return aio_read(&req->rw, ioc << 2004 case IOCB_CMD_PWRITE: << 2005 return aio_write(&req->rw, io << 2006 case IOCB_CMD_PREADV: << 2007 return aio_read(&req->rw, ioc << 2008 case IOCB_CMD_PWRITEV: << 2009 return aio_write(&req->rw, io << 2010 case IOCB_CMD_FSYNC: << 2011 return aio_fsync(&req->fsync, << 2012 case IOCB_CMD_FDSYNC: << 2013 return aio_fsync(&req->fsync, << 2014 case IOCB_CMD_POLL: << 2015 return aio_poll(req, iocb); << 2016 default: << 2017 pr_debug("invalid aio operati << 2018 return -EINVAL; << 2019 } << 2020 } 1078 } 2021 1079 2022 static int io_submit_one(struct kioctx *ctx, 1080 static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb, 2023 bool compat) !! 1081 struct iocb *iocb, bool compat) 2024 { 1082 { 2025 struct aio_kiocb *req; !! 1083 struct kiocb *req; 2026 struct iocb iocb; !! 1084 ssize_t ret; 2027 int err; << 2028 << 2029 if (unlikely(copy_from_user(&iocb, us << 2030 return -EFAULT; << 2031 1085 2032 /* enforce forwards compatibility on 1086 /* enforce forwards compatibility on users */ 2033 if (unlikely(iocb.aio_reserved2)) { !! 1087 if (unlikely(iocb->aio_reserved1 || iocb->aio_reserved2)) { 2034 pr_debug("EINVAL: reserve fie 1088 pr_debug("EINVAL: reserve field set\n"); 2035 return -EINVAL; 1089 return -EINVAL; 2036 } 1090 } 2037 1091 2038 /* prevent overflows */ 1092 /* prevent overflows */ 2039 if (unlikely( 1093 if (unlikely( 2040 (iocb.aio_buf != (unsigned long)i !! 1094 (iocb->aio_buf != (unsigned long)iocb->aio_buf) || 2041 (iocb.aio_nbytes != (size_t)iocb. !! 1095 (iocb->aio_nbytes != (size_t)iocb->aio_nbytes) || 2042 ((ssize_t)iocb.aio_nbytes < 0) !! 1096 ((ssize_t)iocb->aio_nbytes < 0) 2043 )) { 1097 )) { 2044 pr_debug("EINVAL: overflow ch !! 1098 pr_debug("EINVAL: io_submit: overflow check\n"); 2045 return -EINVAL; 1099 return -EINVAL; 2046 } 1100 } 2047 1101 2048 req = aio_get_req(ctx); 1102 req = aio_get_req(ctx); 2049 if (unlikely(!req)) 1103 if (unlikely(!req)) 2050 return -EAGAIN; 1104 return -EAGAIN; 2051 1105 2052 err = __io_submit_one(ctx, &iocb, use !! 1106 req->ki_filp = fget(iocb->aio_fildes); >> 1107 if (unlikely(!req->ki_filp)) { >> 1108 ret = -EBADF; >> 1109 goto out_put_req; >> 1110 } 2053 1111 2054 /* Done with the synchronous referenc !! 1112 if (iocb->aio_flags & IOCB_FLAG_RESFD) { 2055 iocb_put(req); !! 1113 /* >> 1114 * If the IOCB_FLAG_RESFD flag of aio_flags is set, get an >> 1115 * instance of the file* now. The file descriptor must be >> 1116 * an eventfd() fd, and will be signaled for each completed >> 1117 * event using the eventfd_signal() function. >> 1118 */ >> 1119 req->ki_eventfd = eventfd_ctx_fdget((int) iocb->aio_resfd); >> 1120 if (IS_ERR(req->ki_eventfd)) { >> 1121 ret = PTR_ERR(req->ki_eventfd); >> 1122 req->ki_eventfd = NULL; >> 1123 goto out_put_req; >> 1124 } >> 1125 } 2056 1126 2057 /* !! 1127 ret = put_user(KIOCB_KEY, &user_iocb->aio_key); 2058 * If err is 0, we'd either done aio_ !! 1128 if (unlikely(ret)) { 2059 * arranged for that to be done async !! 1129 pr_debug("EFAULT: aio_key\n"); 2060 * means that we need to destroy req !! 
1130 goto out_put_req; 2061 */ << 2062 if (unlikely(err)) { << 2063 iocb_destroy(req); << 2064 put_reqs_available(ctx, 1); << 2065 } 1131 } 2066 return err; !! 1132 >> 1133 req->ki_obj.user = user_iocb; >> 1134 req->ki_user_data = iocb->aio_data; >> 1135 req->ki_pos = iocb->aio_offset; >> 1136 >> 1137 req->ki_buf = (char __user *)(unsigned long)iocb->aio_buf; >> 1138 req->ki_left = req->ki_nbytes = iocb->aio_nbytes; >> 1139 req->ki_opcode = iocb->aio_lio_opcode; >> 1140 >> 1141 ret = aio_run_iocb(req, compat); >> 1142 if (ret) >> 1143 goto out_put_req; >> 1144 >> 1145 aio_put_req(req); /* drop extra ref to req */ >> 1146 return 0; >> 1147 out_put_req: >> 1148 atomic_dec(&ctx->reqs_active); >> 1149 aio_put_req(req); /* drop extra ref to req */ >> 1150 aio_put_req(req); /* drop i/o ref to req */ >> 1151 return ret; 2067 } 1152 } 2068 1153 2069 /* sys_io_submit: !! 1154 long do_io_submit(aio_context_t ctx_id, long nr, 2070 * Queue the nr iocbs pointed to by iocb !! 1155 struct iocb __user *__user *iocbpp, bool compat) 2071 * the number of iocbs queued. May retu << 2072 * specified by ctx_id is invalid, if nr << 2073 * *iocbpp[0] is not properly initialize << 2074 * is invalid for the file descriptor in << 2075 * -EFAULT if any of the data structures << 2076 * fail with -EBADF if the file descript << 2077 * iocb is invalid. May fail with -EAGA << 2078 * are available to queue any iocbs. Wi << 2079 * fail with -ENOSYS if not implemented. << 2080 */ << 2081 SYSCALL_DEFINE3(io_submit, aio_context_t, ctx << 2082 struct iocb __user * __user * << 2083 { 1156 { 2084 struct kioctx *ctx; 1157 struct kioctx *ctx; 2085 long ret = 0; 1158 long ret = 0; 2086 int i = 0; 1159 int i = 0; 2087 struct blk_plug plug; 1160 struct blk_plug plug; 2088 1161 2089 if (unlikely(nr < 0)) 1162 if (unlikely(nr < 0)) 2090 return -EINVAL; 1163 return -EINVAL; 2091 1164 >> 1165 if (unlikely(nr > LONG_MAX/sizeof(*iocbpp))) >> 1166 nr = LONG_MAX/sizeof(*iocbpp); >> 1167 >> 1168 if (unlikely(!access_ok(VERIFY_READ, iocbpp, (nr*sizeof(*iocbpp))))) >> 1169 return -EFAULT; >> 1170 2092 ctx = lookup_ioctx(ctx_id); 1171 ctx = lookup_ioctx(ctx_id); 2093 if (unlikely(!ctx)) { 1172 if (unlikely(!ctx)) { 2094 pr_debug("EINVAL: invalid con 1173 pr_debug("EINVAL: invalid context id\n"); 2095 return -EINVAL; 1174 return -EINVAL; 2096 } 1175 } 2097 1176 2098 if (nr > ctx->nr_events) !! 1177 blk_start_plug(&plug); 2099 nr = ctx->nr_events; << 2100 1178 2101 if (nr > AIO_PLUG_THRESHOLD) !! 1179 /* 2102 blk_start_plug(&plug); !! 1180 * AKPM: should this return a partial result if some of the IOs were 2103 for (i = 0; i < nr; i++) { !! 1181 * successfully submitted? >> 1182 */ >> 1183 for (i=0; i<nr; i++) { 2104 struct iocb __user *user_iocb 1184 struct iocb __user *user_iocb; >> 1185 struct iocb tmp; >> 1186 >> 1187 if (unlikely(__get_user(user_iocb, iocbpp + i))) { >> 1188 ret = -EFAULT; >> 1189 break; >> 1190 } 2105 1191 2106 if (unlikely(get_user(user_io !! 1192 if (unlikely(copy_from_user(&tmp, user_iocb, sizeof(tmp)))) { 2107 ret = -EFAULT; 1193 ret = -EFAULT; 2108 break; 1194 break; 2109 } 1195 } 2110 1196 2111 ret = io_submit_one(ctx, user !! 1197 ret = io_submit_one(ctx, user_iocb, &tmp, compat); 2112 if (ret) 1198 if (ret) 2113 break; 1199 break; 2114 } 1200 } 2115 if (nr > AIO_PLUG_THRESHOLD) !! 1201 blk_finish_plug(&plug); 2116 blk_finish_plug(&plug); << 2117 1202 2118 percpu_ref_put(&ctx->users); !! 1203 put_ioctx(ctx); 2119 return i ? i : ret; 1204 return i ? i : ret; 2120 } 1205 } 2121 1206 2122 #ifdef CONFIG_COMPAT !! 
1207 /* sys_io_submit: 2123 COMPAT_SYSCALL_DEFINE3(io_submit, compat_aio_ !! 1208 * Queue the nr iocbs pointed to by iocbpp for processing. Returns 2124 int, nr, compat_uptr_t !! 1209 * the number of iocbs queued. May return -EINVAL if the aio_context >> 1210 * specified by ctx_id is invalid, if nr is < 0, if the iocb at >> 1211 * *iocbpp[0] is not properly initialized, if the operation specified >> 1212 * is invalid for the file descriptor in the iocb. May fail with >> 1213 * -EFAULT if any of the data structures point to invalid data. May >> 1214 * fail with -EBADF if the file descriptor specified in the first >> 1215 * iocb is invalid. May fail with -EAGAIN if insufficient resources >> 1216 * are available to queue any iocbs. Will return 0 if nr is 0. Will >> 1217 * fail with -ENOSYS if not implemented. >> 1218 */ >> 1219 SYSCALL_DEFINE3(io_submit, aio_context_t, ctx_id, long, nr, >> 1220 struct iocb __user * __user *, iocbpp) 2125 { 1221 { 2126 struct kioctx *ctx; !! 1222 return do_io_submit(ctx_id, nr, iocbpp, 0); 2127 long ret = 0; !! 1223 } 2128 int i = 0; << 2129 struct blk_plug plug; << 2130 << 2131 if (unlikely(nr < 0)) << 2132 return -EINVAL; << 2133 << 2134 ctx = lookup_ioctx(ctx_id); << 2135 if (unlikely(!ctx)) { << 2136 pr_debug("EINVAL: invalid con << 2137 return -EINVAL; << 2138 } << 2139 1224 2140 if (nr > ctx->nr_events) !! 1225 /* lookup_kiocb 2141 nr = ctx->nr_events; !! 1226 * Finds a given iocb for cancellation. >> 1227 */ >> 1228 static struct kiocb *lookup_kiocb(struct kioctx *ctx, struct iocb __user *iocb, >> 1229 u32 key) >> 1230 { >> 1231 struct list_head *pos; 2142 1232 2143 if (nr > AIO_PLUG_THRESHOLD) !! 1233 assert_spin_locked(&ctx->ctx_lock); 2144 blk_start_plug(&plug); << 2145 for (i = 0; i < nr; i++) { << 2146 compat_uptr_t user_iocb; << 2147 1234 2148 if (unlikely(get_user(user_io !! 1235 if (key != KIOCB_KEY) 2149 ret = -EFAULT; !! 1236 return NULL; 2150 break; << 2151 } << 2152 1237 2153 ret = io_submit_one(ctx, comp !! 1238 /* TODO: use a hash or array, this sucks. */ 2154 if (ret) !! 1239 list_for_each(pos, &ctx->active_reqs) { 2155 break; !! 1240 struct kiocb *kiocb = list_kiocb(pos); >> 1241 if (kiocb->ki_obj.user == iocb) >> 1242 return kiocb; 2156 } 1243 } 2157 if (nr > AIO_PLUG_THRESHOLD) !! 1244 return NULL; 2158 blk_finish_plug(&plug); << 2159 << 2160 percpu_ref_put(&ctx->users); << 2161 return i ? i : ret; << 2162 } 1245 } 2163 #endif << 2164 1246 2165 /* sys_io_cancel: 1247 /* sys_io_cancel: 2166 * Attempts to cancel an iocb previously 1248 * Attempts to cancel an iocb previously passed to io_submit. If 2167 * the operation is successfully cancell 1249 * the operation is successfully cancelled, the resulting event is 2168 * copied into the memory pointed to by 1250 * copied into the memory pointed to by result without being placed 2169 * into the completion queue and 0 is re 1251 * into the completion queue and 0 is returned. May fail with 2170 * -EFAULT if any of the data structures 1252 * -EFAULT if any of the data structures pointed to are invalid. 2171 * May fail with -EINVAL if aio_context 1253 * May fail with -EINVAL if aio_context specified by ctx_id is 2172 * invalid. May fail with -EAGAIN if th 1254 * invalid. May fail with -EAGAIN if the iocb specified was not 2173 * cancelled. Will fail with -ENOSYS if 1255 * cancelled. Will fail with -ENOSYS if not implemented. 
2174 */ 1256 */ 2175 SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx 1257 SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb, 2176 struct io_event __user *, res 1258 struct io_event __user *, result) 2177 { 1259 { >> 1260 struct io_event res; 2178 struct kioctx *ctx; 1261 struct kioctx *ctx; 2179 struct aio_kiocb *kiocb; !! 1262 struct kiocb *kiocb; 2180 int ret = -EINVAL; << 2181 u32 key; 1263 u32 key; 2182 u64 obj = (u64)(unsigned long)iocb; !! 1264 int ret; 2183 1265 2184 if (unlikely(get_user(key, &iocb->aio !! 1266 ret = get_user(key, &iocb->aio_key); >> 1267 if (unlikely(ret)) 2185 return -EFAULT; 1268 return -EFAULT; 2186 if (unlikely(key != KIOCB_KEY)) << 2187 return -EINVAL; << 2188 1269 2189 ctx = lookup_ioctx(ctx_id); 1270 ctx = lookup_ioctx(ctx_id); 2190 if (unlikely(!ctx)) 1271 if (unlikely(!ctx)) 2191 return -EINVAL; 1272 return -EINVAL; 2192 1273 2193 spin_lock_irq(&ctx->ctx_lock); 1274 spin_lock_irq(&ctx->ctx_lock); 2194 /* TODO: use a hash or array, this su !! 1275 2195 list_for_each_entry(kiocb, &ctx->acti !! 1276 kiocb = lookup_kiocb(ctx, iocb, key); 2196 if (kiocb->ki_res.obj == obj) !! 1277 if (kiocb) 2197 ret = kiocb->ki_cance !! 1278 ret = kiocb_cancel(ctx, kiocb, &res); 2198 list_del_init(&kiocb- !! 1279 else 2199 break; !! 1280 ret = -EINVAL; 2200 } !! 1281 2201 } << 2202 spin_unlock_irq(&ctx->ctx_lock); 1282 spin_unlock_irq(&ctx->ctx_lock); 2203 1283 2204 if (!ret) { 1284 if (!ret) { 2205 /* !! 1285 /* Cancellation succeeded -- copy the result 2206 * The result argument is no !! 1286 * into the user's buffer. 2207 * always delivered via the r << 2208 * cancellation is progress: << 2209 */ 1287 */ 2210 ret = -EINPROGRESS; !! 1288 if (copy_to_user(result, &res, sizeof(res))) >> 1289 ret = -EFAULT; 2211 } 1290 } 2212 1291 2213 percpu_ref_put(&ctx->users); !! 1292 put_ioctx(ctx); 2214 << 2215 return ret; << 2216 } << 2217 << 2218 static long do_io_getevents(aio_context_t ctx << 2219 long min_nr, << 2220 long nr, << 2221 struct io_event __user *event << 2222 struct timespec64 *ts) << 2223 { << 2224 ktime_t until = ts ? timespec64_to_kt << 2225 struct kioctx *ioctx = lookup_ioctx(c << 2226 long ret = -EINVAL; << 2227 << 2228 if (likely(ioctx)) { << 2229 if (likely(min_nr <= nr && mi << 2230 ret = read_events(ioc << 2231 percpu_ref_put(&ioctx->users) << 2232 } << 2233 1293 2234 return ret; 1294 return ret; 2235 } 1295 } 2236 1296 2237 /* io_getevents: 1297 /* io_getevents: 2238 * Attempts to read at least min_nr even 1298 * Attempts to read at least min_nr events and up to nr events from 2239 * the completion queue for the aio_cont 1299 * the completion queue for the aio_context specified by ctx_id. If 2240 * it succeeds, the number of read event 1300 * it succeeds, the number of read events is returned. May fail with 2241 * -EINVAL if ctx_id is invalid, if min_ 1301 * -EINVAL if ctx_id is invalid, if min_nr is out of range, if nr is 2242 * out of range, if timeout is out of ra 1302 * out of range, if timeout is out of range. May fail with -EFAULT 2243 * if any of the memory specified is inv 1303 * if any of the memory specified is invalid. May return 0 or 2244 * < min_nr if the timeout specified by 1304 * < min_nr if the timeout specified by timeout has elapsed 2245 * before sufficient events are availabl 1305 * before sufficient events are available, where timeout == NULL 2246 * specifies an infinite timeout. Note t 1306 * specifies an infinite timeout. Note that the timeout pointed to by 2247 * timeout is relative. 
Will fail with 1307 * timeout is relative. Will fail with -ENOSYS if not implemented. 2248 */ 1308 */ 2249 #ifdef CONFIG_64BIT << 2250 << 2251 SYSCALL_DEFINE5(io_getevents, aio_context_t, 1309 SYSCALL_DEFINE5(io_getevents, aio_context_t, ctx_id, 2252 long, min_nr, 1310 long, min_nr, 2253 long, nr, 1311 long, nr, 2254 struct io_event __user *, eve 1312 struct io_event __user *, events, 2255 struct __kernel_timespec __us !! 1313 struct timespec __user *, timeout) 2256 { << 2257 struct timespec64 ts; << 2258 int ret; << 2259 << 2260 if (timeout && unlikely(get_timespec6 << 2261 return -EFAULT; << 2262 << 2263 ret = do_io_getevents(ctx_id, min_nr, << 2264 if (!ret && signal_pending(current)) << 2265 ret = -EINTR; << 2266 return ret; << 2267 } << 2268 << 2269 #endif << 2270 << 2271 struct __aio_sigset { << 2272 const sigset_t __user *sigmask; << 2273 size_t sigsetsize; << 2274 }; << 2275 << 2276 SYSCALL_DEFINE6(io_pgetevents, << 2277 aio_context_t, ctx_id, << 2278 long, min_nr, << 2279 long, nr, << 2280 struct io_event __user *, eve << 2281 struct __kernel_timespec __us << 2282 const struct __aio_sigset __u << 2283 { << 2284 struct __aio_sigset ksig = { NULL << 2285 struct timespec64 ts; << 2286 bool interrupted; << 2287 int ret; << 2288 << 2289 if (timeout && unlikely(get_timespec6 << 2290 return -EFAULT; << 2291 << 2292 if (usig && copy_from_user(&ksig, usi << 2293 return -EFAULT; << 2294 << 2295 ret = set_user_sigmask(ksig.sigmask, << 2296 if (ret) << 2297 return ret; << 2298 << 2299 ret = do_io_getevents(ctx_id, min_nr, << 2300 << 2301 interrupted = signal_pending(current) << 2302 restore_saved_sigmask_unless(interrup << 2303 if (interrupted && !ret) << 2304 ret = -ERESTARTNOHAND; << 2305 << 2306 return ret; << 2307 } << 2308 << 2309 #if defined(CONFIG_COMPAT_32BIT_TIME) && !def << 2310 << 2311 SYSCALL_DEFINE6(io_pgetevents_time32, << 2312 aio_context_t, ctx_id, << 2313 long, min_nr, << 2314 long, nr, << 2315 struct io_event __user *, eve << 2316 struct old_timespec32 __user << 2317 const struct __aio_sigset __u << 2318 { 1314 { 2319 struct __aio_sigset ksig = { NULL !! 1315 struct kioctx *ioctx = lookup_ioctx(ctx_id); 2320 struct timespec64 ts; !! 
1316 long ret = -EINVAL; 2321 bool interrupted; << 2322 int ret; << 2323 << 2324 if (timeout && unlikely(get_old_times << 2325 return -EFAULT; << 2326 << 2327 if (usig && copy_from_user(&ksig, usi << 2328 return -EFAULT; << 2329 << 2330 << 2331 ret = set_user_sigmask(ksig.sigmask, << 2332 if (ret) << 2333 return ret; << 2334 << 2335 ret = do_io_getevents(ctx_id, min_nr, << 2336 << 2337 interrupted = signal_pending(current) << 2338 restore_saved_sigmask_unless(interrup << 2339 if (interrupted && !ret) << 2340 ret = -ERESTARTNOHAND; << 2341 << 2342 return ret; << 2343 } << 2344 << 2345 #endif << 2346 << 2347 #if defined(CONFIG_COMPAT_32BIT_TIME) << 2348 << 2349 SYSCALL_DEFINE5(io_getevents_time32, __u32, c << 2350 __s32, min_nr, << 2351 __s32, nr, << 2352 struct io_event __user *, eve << 2353 struct old_timespec32 __user << 2354 { << 2355 struct timespec64 t; << 2356 int ret; << 2357 << 2358 if (timeout && get_old_timespec32(&t, << 2359 return -EFAULT; << 2360 << 2361 ret = do_io_getevents(ctx_id, min_nr, << 2362 if (!ret && signal_pending(current)) << 2363 ret = -EINTR; << 2364 return ret; << 2365 } << 2366 << 2367 #endif << 2368 << 2369 #ifdef CONFIG_COMPAT << 2370 << 2371 struct __compat_aio_sigset { << 2372 compat_uptr_t sigmask; << 2373 compat_size_t sigsetsize; << 2374 }; << 2375 << 2376 #if defined(CONFIG_COMPAT_32BIT_TIME) << 2377 << 2378 COMPAT_SYSCALL_DEFINE6(io_pgetevents, << 2379 compat_aio_context_t, ctx_id, << 2380 compat_long_t, min_nr, << 2381 compat_long_t, nr, << 2382 struct io_event __user *, eve << 2383 struct old_timespec32 __user << 2384 const struct __compat_aio_sig << 2385 { << 2386 struct __compat_aio_sigset ksig = { 0 << 2387 struct timespec64 t; << 2388 bool interrupted; << 2389 int ret; << 2390 << 2391 if (timeout && get_old_timespec32(&t, << 2392 return -EFAULT; << 2393 << 2394 if (usig && copy_from_user(&ksig, usi << 2395 return -EFAULT; << 2396 << 2397 ret = set_compat_user_sigmask(compat_ << 2398 if (ret) << 2399 return ret; << 2400 << 2401 ret = do_io_getevents(ctx_id, min_nr, << 2402 << 2403 interrupted = signal_pending(current) << 2404 restore_saved_sigmask_unless(interrup << 2405 if (interrupted && !ret) << 2406 ret = -ERESTARTNOHAND; << 2407 << 2408 return ret; << 2409 } << 2410 << 2411 #endif << 2412 << 2413 COMPAT_SYSCALL_DEFINE6(io_pgetevents_time64, << 2414 compat_aio_context_t, ctx_id, << 2415 compat_long_t, min_nr, << 2416 compat_long_t, nr, << 2417 struct io_event __user *, eve << 2418 struct __kernel_timespec __us << 2419 const struct __compat_aio_sig << 2420 { << 2421 struct __compat_aio_sigset ksig = { 0 << 2422 struct timespec64 t; << 2423 bool interrupted; << 2424 int ret; << 2425 << 2426 if (timeout && get_timespec64(&t, tim << 2427 return -EFAULT; << 2428 << 2429 if (usig && copy_from_user(&ksig, usi << 2430 return -EFAULT; << 2431 << 2432 ret = set_compat_user_sigmask(compat_ << 2433 if (ret) << 2434 return ret; << 2435 << 2436 ret = do_io_getevents(ctx_id, min_nr, << 2437 << 2438 interrupted = signal_pending(current) << 2439 restore_saved_sigmask_unless(interrup << 2440 if (interrupted && !ret) << 2441 ret = -ERESTARTNOHAND; << 2442 1317 >> 1318 if (likely(ioctx)) { >> 1319 if (likely(min_nr <= nr && min_nr >= 0)) >> 1320 ret = read_events(ioctx, min_nr, nr, events, timeout); >> 1321 put_ioctx(ioctx); >> 1322 } 2443 return ret; 1323 return ret; 2444 } 1324 } 2445 #endif << 2446 1325
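The comment blocks above spell out the userspace contract of the syscalls implemented in this file: io_setup() creates a context (the handle must be 0 beforehand), io_submit() queues iocbs, io_getevents() pulls completed io_events out of the ring via read_events()/aio_read_events_ring(), and io_destroy() tears the context down. As an illustration only, here is a minimal userspace sketch of that flow using raw syscall(2) wrappers, since glibc does not export these calls directly; the wrapper names, the 128-event ring size and the /etc/hostname test path are arbitrary choices for the example, not anything taken from the code above.

#include <linux/aio_abi.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <time.h>

/* Thin wrappers around the raw syscalls documented above (no glibc wrappers exist). */
static long io_setup(unsigned nr_events, aio_context_t *ctx)
{
	return syscall(SYS_io_setup, nr_events, ctx);
}
static long io_submit(aio_context_t ctx, long nr, struct iocb **iocbpp)
{
	return syscall(SYS_io_submit, ctx, nr, iocbpp);
}
static long io_getevents(aio_context_t ctx, long min_nr, long nr,
			 struct io_event *events, struct timespec *timeout)
{
	return syscall(SYS_io_getevents, ctx, min_nr, nr, events, timeout);
}
static long io_destroy(aio_context_t ctx)
{
	return syscall(SYS_io_destroy, ctx);
}

int main(void)
{
	aio_context_t ctx = 0;			/* must be initialized to 0 before io_setup() */
	struct iocb cb;
	struct iocb *cbs[1] = { &cb };
	struct io_event ev;
	char buf[4096];
	int fd = open("/etc/hostname", O_RDONLY);	/* hypothetical test file */

	if (fd < 0 || io_setup(128, &ctx) < 0) {
		perror("setup");
		return 1;
	}

	/* One IOCB_CMD_PREAD request; reserved fields stay zero or io_submit_one() rejects it. */
	memset(&cb, 0, sizeof(cb));
	cb.aio_fildes = fd;
	cb.aio_lio_opcode = IOCB_CMD_PREAD;
	cb.aio_buf = (__u64)(unsigned long)buf;
	cb.aio_nbytes = sizeof(buf);
	cb.aio_offset = 0;

	if (io_submit(ctx, 1, cbs) != 1) {
		perror("io_submit");
		return 1;
	}

	/* Blocks in read_events() until at least one completion is in the ring. */
	if (io_getevents(ctx, 1, 1, &ev, NULL) == 1)
		printf("read %lld bytes\n", (long long)ev.res);

	io_destroy(ctx);
	close(fd);
	return 0;
}

When io_getevents() returns here, the io_event it fills in is the one that aio_read_events_ring() copied out of the shared completion ring with copy_to_user() in the code above.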