/*
 *	An async IO implementation for Linux
 *	Written by Benjamin LaHaise <bcrl@kvack.org>
 *
 *	Implements an efficient asynchronous io interface.
 *
 *	Copyright 2000, 2001, 2002 Red Hat, Inc.  All Rights Reserved.
 *	Copyright 2018 Christoph Hellwig.
 *
 *	See ../COPYING for licensing terms.
 */
#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/time.h>
#include <linux/aio_abi.h>
#include <linux/export.h>
#include <linux/syscalls.h>
#include <linux/backing-dev.h>
#include <linux/refcount.h>
#include <linux/uio.h>

#include <linux/sched/signal.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/aio.h>
#include <linux/highmem.h>
#include <linux/workqueue.h>
#include <linux/security.h>
#include <linux/eventfd.h>
#include <linux/blkdev.h>
#include <linux/compat.h>
#include <linux/migrate.h>
#include <linux/ramfs.h>
#include <linux/percpu-refcount.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>

#include <linux/uaccess.h>
#include <linux/nospec.h>

#include "internal.h"

#define KIOCB_KEY		0

#define AIO_RING_MAGIC			0xa10a10a1
#define AIO_RING_COMPAT_FEATURES	1
#define AIO_RING_INCOMPAT_FEATURES	0
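/*
 * Header of the completion ring.  It sits at the start of the ring mapping
 * that aio_setup_ring() maps into the task's address space (ctx->user_id),
 * so userspace can reap events by reading head/tail directly; only head is
 * advanced from userland, tail is advanced by aio_complete().
 */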
struct aio_ring {
        unsigned        id;     /* kernel internal index number */
        unsigned        nr;     /* number of io_events */
        unsigned        head;   /* Written to by userland or under ring_lock
                                 * mutex by aio_read_events_ring(). */
        unsigned        tail;

        unsigned        magic;
        unsigned        compat_features;
        unsigned        incompat_features;
        unsigned        header_length;  /* size of aio_ring */


        struct io_event         io_events[];
}; /* 128 bytes + ring size */

/*
 * Plugging is meant to work with larger batches of IOs. If we don't
 * have more than the below, then don't bother setting up a plug.
 */
#define AIO_PLUG_THRESHOLD      2

#define AIO_RING_PAGES  8

struct kioctx_table {
        struct rcu_head rcu;
        unsigned        nr;
        struct kioctx __rcu     *table[] __counted_by(nr);
};

struct kioctx_cpu {
        unsigned                reqs_available;
};

struct ctx_rq_wait {
        struct completion comp;
        atomic_t count;
};

struct kioctx {
        struct percpu_ref       users;
        atomic_t                dead;

        struct percpu_ref       reqs;

        unsigned long           user_id;

        struct kioctx_cpu __percpu *cpu;

        /*
         * For percpu reqs_available, number of slots we move to/from global
         * counter at a time:
         */
        unsigned                req_batch;
        /*
         * This is what userspace passed to io_setup(), it's not used for
         * anything but counting against the global max_reqs quota.
         *
         * The real limit is nr_events - 1, which will be larger (see
         * aio_setup_ring())
         */
        unsigned                max_reqs;

        /* Size of ringbuffer, in units of struct io_event */
        unsigned                nr_events;

        unsigned long           mmap_base;
        unsigned long           mmap_size;

        struct folio            **ring_folios;
        long                    nr_pages;

        struct rcu_work         free_rwork;     /* see free_ioctx() */

        /*
         * signals when all in-flight requests are done
         */
        struct ctx_rq_wait      *rq_wait;
        struct {
                /*
                 * This counts the number of available slots in the ringbuffer,
                 * so we avoid overflowing it: it's decremented (if positive)
                 * when allocating a kiocb and incremented when the resulting
                 * io_event is pulled off the ringbuffer.
                 *
                 * We batch accesses to it with a percpu version.
                 */
                atomic_t        reqs_available;
        } ____cacheline_aligned_in_smp;

        struct {
                spinlock_t      ctx_lock;
                struct list_head active_reqs;   /* used for cancellation */
        } ____cacheline_aligned_in_smp;

        struct {
                struct mutex    ring_lock;
                wait_queue_head_t wait;
        } ____cacheline_aligned_in_smp;

        struct {
                unsigned        tail;
                unsigned        completed_events;
                spinlock_t      completion_lock;
        } ____cacheline_aligned_in_smp;

        struct folio            *internal_folios[AIO_RING_PAGES];
        struct file             *aio_ring_file;

        unsigned                id;
};

/*
 * First field must be the file pointer in all the
 * iocb unions! See also 'struct kiocb' in <linux/fs.h>
 */
struct fsync_iocb {
        struct file             *file;
        struct work_struct      work;
        bool                    datasync;
        struct cred             *creds;
};

struct poll_iocb {
        struct file             *file;
        struct wait_queue_head  *head;
        __poll_t                events;
        bool                    cancelled;
        bool                    work_scheduled;
        bool                    work_need_resched;
        struct wait_queue_entry wait;
        struct work_struct      work;
};

/*
 * NOTE! Each of the iocb union members has the file pointer
 * as the first entry in their struct definition. So you can
 * access the file pointer through any of the sub-structs,
 * or directly as just 'ki_filp' in this struct.
 */
struct aio_kiocb {
        union {
                struct file             *ki_filp;
                struct kiocb            rw;
                struct fsync_iocb       fsync;
                struct poll_iocb        poll;
        };

        struct kioctx           *ki_ctx;
        kiocb_cancel_fn         *ki_cancel;

        struct io_event         ki_res;

        struct list_head        ki_list;        /* the aio core uses this
                                                 * for cancellation */
        refcount_t              ki_refcnt;

        /*
         * If the aio_resfd field of the userspace iocb is not zero,
         * this is the underlying eventfd context to deliver events to.
         */
        struct eventfd_ctx      *ki_eventfd;
};
/*------ sysctl variables----*/
static DEFINE_SPINLOCK(aio_nr_lock);
static unsigned long aio_nr;            /* current system wide number of aio requests */
static unsigned long aio_max_nr = 0x10000; /* system wide maximum number of aio requests */
/*----end sysctl variables---*/
#ifdef CONFIG_SYSCTL
static struct ctl_table aio_sysctls[] = {
        {
                .procname       = "aio-nr",
                .data           = &aio_nr,
                .maxlen         = sizeof(aio_nr),
                .mode           = 0444,
                .proc_handler   = proc_doulongvec_minmax,
        },
        {
                .procname       = "aio-max-nr",
                .data           = &aio_max_nr,
                .maxlen         = sizeof(aio_max_nr),
                .mode           = 0644,
                .proc_handler   = proc_doulongvec_minmax,
        },
};

static void __init aio_sysctl_init(void)
{
        register_sysctl_init("fs", aio_sysctls);
}
#else
#define aio_sysctl_init() do { } while (0)
#endif

static struct kmem_cache        *kiocb_cachep;
static struct kmem_cache        *kioctx_cachep;

static struct vfsmount *aio_mnt;

static const struct file_operations aio_ring_fops;
static const struct address_space_operations aio_ctx_aops;
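/*
 * The completion ring is backed by ordinary page-cache folios of this
 * per-ioctx pseudo file, which is what lets aio_ring_mmap() map them into
 * userspace and aio_migrate_folio() move them during page migration.
 */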
static struct file *aio_private_file(struct kioctx *ctx, loff_t nr_pages)
{
        struct file *file;
        struct inode *inode = alloc_anon_inode(aio_mnt->mnt_sb);
        if (IS_ERR(inode))
                return ERR_CAST(inode);

        inode->i_mapping->a_ops = &aio_ctx_aops;
        inode->i_mapping->i_private_data = ctx;
        inode->i_size = PAGE_SIZE * nr_pages;

        file = alloc_file_pseudo(inode, aio_mnt, "[aio]",
                                O_RDWR, &aio_ring_fops);
        if (IS_ERR(file))
                iput(inode);
        return file;
}

static int aio_init_fs_context(struct fs_context *fc)
{
        if (!init_pseudo(fc, AIO_RING_MAGIC))
                return -ENOMEM;
        fc->s_iflags |= SB_I_NOEXEC;
        return 0;
}

/* aio_setup
 *	Creates the slab caches used by the aio routines, panic on
 *	failure as this is done early during the boot sequence.
 */
static int __init aio_setup(void)
{
        static struct file_system_type aio_fs = {
                .name           = "aio",
                .init_fs_context = aio_init_fs_context,
                .kill_sb        = kill_anon_super,
        };
        aio_mnt = kern_mount(&aio_fs);
        if (IS_ERR(aio_mnt))
                panic("Failed to create aio fs mount.");

        kiocb_cachep = KMEM_CACHE(aio_kiocb, SLAB_HWCACHE_ALIGN|SLAB_PANIC);
        kioctx_cachep = KMEM_CACHE(kioctx,SLAB_HWCACHE_ALIGN|SLAB_PANIC);
        aio_sysctl_init();
        return 0;
}
__initcall(aio_setup);

static void put_aio_ring_file(struct kioctx *ctx)
{
        struct file *aio_ring_file = ctx->aio_ring_file;
        struct address_space *i_mapping;

        if (aio_ring_file) {
                truncate_setsize(file_inode(aio_ring_file), 0);

                /* Prevent further access to the kioctx from migratepages */
                i_mapping = aio_ring_file->f_mapping;
                spin_lock(&i_mapping->i_private_lock);
                i_mapping->i_private_data = NULL;
                ctx->aio_ring_file = NULL;
                spin_unlock(&i_mapping->i_private_lock);

                fput(aio_ring_file);
        }
}
static void aio_free_ring(struct kioctx *ctx)
{
        int i;

        /* Disconnect the kioctx from the ring file.  This prevents future
         * accesses to the kioctx from page migration.
         */
        put_aio_ring_file(ctx);

        for (i = 0; i < ctx->nr_pages; i++) {
                struct folio *folio = ctx->ring_folios[i];

                if (!folio)
                        continue;

                pr_debug("pid(%d) [%d] folio->count=%d\n", current->pid, i,
                         folio_ref_count(folio));
                ctx->ring_folios[i] = NULL;
                folio_put(folio);
        }

        if (ctx->ring_folios && ctx->ring_folios != ctx->internal_folios) {
                kfree(ctx->ring_folios);
                ctx->ring_folios = NULL;
        }
}

static int aio_ring_mremap(struct vm_area_struct *vma)
{
        struct file *file = vma->vm_file;
        struct mm_struct *mm = vma->vm_mm;
        struct kioctx_table *table;
        int i, res = -EINVAL;

        spin_lock(&mm->ioctx_lock);
        rcu_read_lock();
        table = rcu_dereference(mm->ioctx_table);
        if (!table)
                goto out_unlock;

        for (i = 0; i < table->nr; i++) {
                struct kioctx *ctx;

                ctx = rcu_dereference(table->table[i]);
                if (ctx && ctx->aio_ring_file == file) {
                        if (!atomic_read(&ctx->dead)) {
                                ctx->user_id = ctx->mmap_base = vma->vm_start;
                                res = 0;
                        }
                        break;
                }
        }

out_unlock:
        rcu_read_unlock();
        spin_unlock(&mm->ioctx_lock);
        return res;
}

static const struct vm_operations_struct aio_ring_vm_ops = {
        .mremap         = aio_ring_mremap,
#if IS_ENABLED(CONFIG_MMU)
        .fault          = filemap_fault,
        .map_pages      = filemap_map_pages,
        .page_mkwrite   = filemap_page_mkwrite,
#endif
};

static int aio_ring_mmap(struct file *file, struct vm_area_struct *vma)
{
        vm_flags_set(vma, VM_DONTEXPAND);
        vma->vm_ops = &aio_ring_vm_ops;
        return 0;
}

static const struct file_operations aio_ring_fops = {
        .mmap = aio_ring_mmap,
};
#if IS_ENABLED(CONFIG_MIGRATION)
static int aio_migrate_folio(struct address_space *mapping, struct folio *dst,
                        struct folio *src, enum migrate_mode mode)
{
        struct kioctx *ctx;
        unsigned long flags;
        pgoff_t idx;
        int rc = 0;

        /* mapping->i_private_lock here protects against the kioctx teardown */
        spin_lock(&mapping->i_private_lock);
        ctx = mapping->i_private_data;
        if (!ctx) {
                rc = -EINVAL;
                goto out;
        }

        /* The ring_lock mutex.  This prevents aio_read_events() from writing
         * to the ring's head, and prevents page migration from mucking in
         * a partially initialized kioctx.
         */
        if (!mutex_trylock(&ctx->ring_lock)) {
                rc = -EAGAIN;
                goto out;
        }

        idx = src->index;
        if (idx < (pgoff_t)ctx->nr_pages) {
                /* Make sure the old folio hasn't already been changed */
                if (ctx->ring_folios[idx] != src)
                        rc = -EAGAIN;
        } else
                rc = -EINVAL;

        if (rc != 0)
                goto out_unlock;

        /* Writeback must be complete */
        BUG_ON(folio_test_writeback(src));
        folio_get(dst);

        rc = folio_migrate_mapping(mapping, dst, src, 1);
        if (rc != MIGRATEPAGE_SUCCESS) {
                folio_put(dst);
                goto out_unlock;
        }

        /* Take completion_lock to prevent other writes to the ring buffer
         * while the old folio is copied to the new.  This prevents new
         * events from being lost.
         */
        spin_lock_irqsave(&ctx->completion_lock, flags);
        folio_copy(dst, src);
        folio_migrate_flags(dst, src);
        BUG_ON(ctx->ring_folios[idx] != src);
        ctx->ring_folios[idx] = dst;
        spin_unlock_irqrestore(&ctx->completion_lock, flags);

        /* The old folio is no longer accessible. */
        folio_put(src);

out_unlock:
        mutex_unlock(&ctx->ring_lock);
out:
        spin_unlock(&mapping->i_private_lock);
        return rc;
}
#else
#define aio_migrate_folio NULL
#endif

static const struct address_space_operations aio_ctx_aops = {
        .dirty_folio    = noop_dirty_folio,
        .migrate_folio  = aio_migrate_folio,
};
static int aio_setup_ring(struct kioctx *ctx, unsigned int nr_events)
{
        struct aio_ring *ring;
        struct mm_struct *mm = current->mm;
        unsigned long size, unused;
        int nr_pages;
        int i;
        struct file *file;

        /* Compensate for the ring buffer's head/tail overlap entry */
        nr_events += 2; /* 1 is required, 2 for good luck */

        size = sizeof(struct aio_ring);
        size += sizeof(struct io_event) * nr_events;

        nr_pages = PFN_UP(size);
        if (nr_pages < 0)
                return -EINVAL;

        file = aio_private_file(ctx, nr_pages);
        if (IS_ERR(file)) {
                ctx->aio_ring_file = NULL;
                return -ENOMEM;
        }

        ctx->aio_ring_file = file;
        nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring))
                        / sizeof(struct io_event);

        ctx->ring_folios = ctx->internal_folios;
        if (nr_pages > AIO_RING_PAGES) {
                ctx->ring_folios = kcalloc(nr_pages, sizeof(struct folio *),
                                           GFP_KERNEL);
                if (!ctx->ring_folios) {
                        put_aio_ring_file(ctx);
                        return -ENOMEM;
                }
        }

        for (i = 0; i < nr_pages; i++) {
                struct folio *folio;

                folio = __filemap_get_folio(file->f_mapping, i,
                                            FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
                                            GFP_USER | __GFP_ZERO);
                if (IS_ERR(folio))
                        break;

                pr_debug("pid(%d) [%d] folio->count=%d\n", current->pid, i,
                         folio_ref_count(folio));
                folio_end_read(folio, true);

                ctx->ring_folios[i] = folio;
        }
        ctx->nr_pages = i;
        if (unlikely(i != nr_pages)) {
                aio_free_ring(ctx);
                return -ENOMEM;
        }

        ctx->mmap_size = nr_pages * PAGE_SIZE;
        pr_debug("attempting mmap of %lu bytes\n", ctx->mmap_size);

        if (mmap_write_lock_killable(mm)) {
                ctx->mmap_size = 0;
                aio_free_ring(ctx);
                return -EINTR;
        }

        ctx->mmap_base = do_mmap(ctx->aio_ring_file, 0, ctx->mmap_size,
                                 PROT_READ | PROT_WRITE,
                                 MAP_SHARED, 0, 0, &unused, NULL);
        mmap_write_unlock(mm);
        if (IS_ERR((void *)ctx->mmap_base)) {
                ctx->mmap_size = 0;
                aio_free_ring(ctx);
                return -ENOMEM;
        }

        pr_debug("mmap address: 0x%08lx\n", ctx->mmap_base);

        ctx->user_id = ctx->mmap_base;
        ctx->nr_events = nr_events; /* trusted copy */

        ring = folio_address(ctx->ring_folios[0]);
        ring->nr = nr_events;   /* user copy */
        ring->id = ~0U;
        ring->head = ring->tail = 0;
        ring->magic = AIO_RING_MAGIC;
        ring->compat_features = AIO_RING_COMPAT_FEATURES;
        ring->incompat_features = AIO_RING_INCOMPAT_FEATURES;
        ring->header_length = sizeof(struct aio_ring);
        flush_dcache_folio(ctx->ring_folios[0]);

        return 0;
}

#define AIO_EVENTS_PER_PAGE     (PAGE_SIZE / sizeof(struct io_event))
#define AIO_EVENTS_FIRST_PAGE   ((PAGE_SIZE - sizeof(struct aio_ring)) / sizeof(struct io_event))
#define AIO_EVENTS_OFFSET       (AIO_EVENTS_PER_PAGE - AIO_EVENTS_FIRST_PAGE)
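/*
 * An event index biased by AIO_EVENTS_OFFSET (see "pos" in aio_complete())
 * selects the ring folio via pos / AIO_EVENTS_PER_PAGE and the slot within
 * it via pos % AIO_EVENTS_PER_PAGE; the bias accounts for the struct
 * aio_ring header occupying the start of the first folio, which therefore
 * only holds AIO_EVENTS_FIRST_PAGE events.
 */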
void kiocb_set_cancel_fn(struct kiocb *iocb, kiocb_cancel_fn *cancel)
{
        struct aio_kiocb *req;
        struct kioctx *ctx;
        unsigned long flags;

        /*
         * kiocb didn't come from aio or is neither a read nor a write, hence
         * ignore it.
         */
        if (!(iocb->ki_flags & IOCB_AIO_RW))
                return;

        req = container_of(iocb, struct aio_kiocb, rw);

        if (WARN_ON_ONCE(!list_empty(&req->ki_list)))
                return;

        ctx = req->ki_ctx;

        spin_lock_irqsave(&ctx->ctx_lock, flags);
        list_add_tail(&req->ki_list, &ctx->active_reqs);
        req->ki_cancel = cancel;
        spin_unlock_irqrestore(&ctx->ctx_lock, flags);
}
EXPORT_SYMBOL(kiocb_set_cancel_fn);

/*
 * free_ioctx() should be RCU delayed to synchronize against the RCU
 * protected lookup_ioctx() and also needs process context to call
 * aio_free_ring().  Use rcu_work.
 */
static void free_ioctx(struct work_struct *work)
{
        struct kioctx *ctx = container_of(to_rcu_work(work), struct kioctx,
                                          free_rwork);
        pr_debug("freeing %p\n", ctx);

        aio_free_ring(ctx);
        free_percpu(ctx->cpu);
        percpu_ref_exit(&ctx->reqs);
        percpu_ref_exit(&ctx->users);
        kmem_cache_free(kioctx_cachep, ctx);
}

static void free_ioctx_reqs(struct percpu_ref *ref)
{
        struct kioctx *ctx = container_of(ref, struct kioctx, reqs);

        /* At this point we know that there are no in-flight requests */
        if (ctx->rq_wait && atomic_dec_and_test(&ctx->rq_wait->count))
                complete(&ctx->rq_wait->comp);

        /* Synchronize against RCU protected table->table[] dereferences */
        INIT_RCU_WORK(&ctx->free_rwork, free_ioctx);
        queue_rcu_work(system_wq, &ctx->free_rwork);
}
/*
 * When this function runs, the kioctx has been removed from the "hash table"
 * and ctx->users has dropped to 0, so we know no more kiocbs can be submitted -
 * now it's safe to cancel any that need to be.
 */
static void free_ioctx_users(struct percpu_ref *ref)
{
        struct kioctx *ctx = container_of(ref, struct kioctx, users);
        struct aio_kiocb *req;

        spin_lock_irq(&ctx->ctx_lock);

        while (!list_empty(&ctx->active_reqs)) {
                req = list_first_entry(&ctx->active_reqs,
                                       struct aio_kiocb, ki_list);
                req->ki_cancel(&req->rw);
                list_del_init(&req->ki_list);
        }

        spin_unlock_irq(&ctx->ctx_lock);

        percpu_ref_kill(&ctx->reqs);
        percpu_ref_put(&ctx->reqs);
}

static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm)
{
        unsigned i, new_nr;
        struct kioctx_table *table, *old;
        struct aio_ring *ring;

        spin_lock(&mm->ioctx_lock);
        table = rcu_dereference_raw(mm->ioctx_table);

        while (1) {
                if (table)
                        for (i = 0; i < table->nr; i++)
                                if (!rcu_access_pointer(table->table[i])) {
                                        ctx->id = i;
                                        rcu_assign_pointer(table->table[i], ctx);
                                        spin_unlock(&mm->ioctx_lock);

                                        /* While kioctx setup is in progress,
                                         * we are protected from page migration
                                         * changing ring_folios by ->ring_lock.
                                         */
                                        ring = folio_address(ctx->ring_folios[0]);
                                        ring->id = ctx->id;
                                        return 0;
                                }
                new_nr = (table ? table->nr : 1) * 4;
                spin_unlock(&mm->ioctx_lock);

                table = kzalloc(struct_size(table, table, new_nr), GFP_KERNEL);
                if (!table)
                        return -ENOMEM;

                table->nr = new_nr;

                spin_lock(&mm->ioctx_lock);
                old = rcu_dereference_raw(mm->ioctx_table);

                if (!old) {
                        rcu_assign_pointer(mm->ioctx_table, table);
                } else if (table->nr > old->nr) {
                        memcpy(table->table, old->table,
                               old->nr * sizeof(struct kioctx *));

                        rcu_assign_pointer(mm->ioctx_table, table);
                        kfree_rcu(old, rcu);
                } else {
                        kfree(table);
                        table = old;
                }
        }
}

static void aio_nr_sub(unsigned nr)
{
        spin_lock(&aio_nr_lock);
        if (WARN_ON(aio_nr - nr > aio_nr))
                aio_nr = 0;
        else
                aio_nr -= nr;
        spin_unlock(&aio_nr_lock);
}

/* ioctx_alloc
 *	Allocates and initializes an ioctx.  Returns an ERR_PTR if it failed.
 */
static struct kioctx *ioctx_alloc(unsigned nr_events)
{
        struct mm_struct *mm = current->mm;
        struct kioctx *ctx;
        int err = -ENOMEM;

        /*
         * Store the original nr_events -- what userspace passed to io_setup(),
         * for counting against the global limit -- before it changes.
         */
        unsigned int max_reqs = nr_events;
        /*
         * We keep track of the number of available ringbuffer slots, to prevent
         * overflow (reqs_available), and we also use percpu counters for this.
         *
         * So since up to half the slots might be on other cpu's percpu counters
         * and unavailable, double nr_events so userspace sees what they
         * expected: additionally, we move req_batch slots to/from percpu
         * counters at a time, so make sure that isn't 0:
         */
        nr_events = max(nr_events, num_possible_cpus() * 4);
        nr_events *= 2;

        /* Prevent overflows */
        if (nr_events > (0x10000000U / sizeof(struct io_event))) {
                pr_debug("ENOMEM: nr_events too high\n");
                return ERR_PTR(-EINVAL);
        }

        if (!nr_events || (unsigned long)max_reqs > aio_max_nr)
                return ERR_PTR(-EAGAIN);

        ctx = kmem_cache_zalloc(kioctx_cachep, GFP_KERNEL);
        if (!ctx)
                return ERR_PTR(-ENOMEM);

        ctx->max_reqs = max_reqs;

        spin_lock_init(&ctx->ctx_lock);
        spin_lock_init(&ctx->completion_lock);
        mutex_init(&ctx->ring_lock);
        /* Protect against page migration throughout kioctx setup by keeping
         * the ring_lock mutex held until setup is complete. */
        mutex_lock(&ctx->ring_lock);
        init_waitqueue_head(&ctx->wait);

        INIT_LIST_HEAD(&ctx->active_reqs);

        if (percpu_ref_init(&ctx->users, free_ioctx_users, 0, GFP_KERNEL))
                goto err;

        if (percpu_ref_init(&ctx->reqs, free_ioctx_reqs, 0, GFP_KERNEL))
                goto err;

        ctx->cpu = alloc_percpu(struct kioctx_cpu);
        if (!ctx->cpu)
                goto err;

        err = aio_setup_ring(ctx, nr_events);
        if (err < 0)
                goto err;

        atomic_set(&ctx->reqs_available, ctx->nr_events - 1);
        ctx->req_batch = (ctx->nr_events - 1) / (num_possible_cpus() * 4);
        if (ctx->req_batch < 1)
                ctx->req_batch = 1;

        /* limit the number of system wide aios */
        spin_lock(&aio_nr_lock);
        if (aio_nr + ctx->max_reqs > aio_max_nr ||
            aio_nr + ctx->max_reqs < aio_nr) {
                spin_unlock(&aio_nr_lock);
                err = -EAGAIN;
                goto err_ctx;
        }
        aio_nr += ctx->max_reqs;
        spin_unlock(&aio_nr_lock);

        percpu_ref_get(&ctx->users);    /* io_setup() will drop this ref */
        percpu_ref_get(&ctx->reqs);     /* free_ioctx_users() will drop this */

        err = ioctx_add_table(ctx, mm);
        if (err)
                goto err_cleanup;

        /* Release the ring_lock mutex now that all setup is complete. */
        mutex_unlock(&ctx->ring_lock);

        pr_debug("allocated ioctx %p[%ld]: mm=%p mask=0x%x\n",
                 ctx, ctx->user_id, mm, ctx->nr_events);
        return ctx;

err_cleanup:
        aio_nr_sub(ctx->max_reqs);
err_ctx:
        atomic_set(&ctx->dead, 1);
        if (ctx->mmap_size)
                vm_munmap(ctx->mmap_base, ctx->mmap_size);
        aio_free_ring(ctx);
err:
        mutex_unlock(&ctx->ring_lock);
        free_percpu(ctx->cpu);
        percpu_ref_exit(&ctx->reqs);
        percpu_ref_exit(&ctx->users);
        kmem_cache_free(kioctx_cachep, ctx);
        pr_debug("error allocating ioctx %d\n", err);
        return ERR_PTR(err);
}

/* kill_ioctx
 *	Cancels all outstanding aio requests on an aio context.  Used
 *	when the processes owning a context have all exited to encourage
 *	the rapid destruction of the kioctx.
 */
static int kill_ioctx(struct mm_struct *mm, struct kioctx *ctx,
                      struct ctx_rq_wait *wait)
{
        struct kioctx_table *table;

        spin_lock(&mm->ioctx_lock);
        if (atomic_xchg(&ctx->dead, 1)) {
                spin_unlock(&mm->ioctx_lock);
                return -EINVAL;
        }

        table = rcu_dereference_raw(mm->ioctx_table);
        WARN_ON(ctx != rcu_access_pointer(table->table[ctx->id]));
        RCU_INIT_POINTER(table->table[ctx->id], NULL);
        spin_unlock(&mm->ioctx_lock);

        /* free_ioctx_reqs() will do the necessary RCU synchronization */
        wake_up_all(&ctx->wait);
        /*
         * It'd be more correct to do this in free_ioctx(), after all
         * the outstanding kiocbs have finished - but by then io_destroy
         * has already returned, so io_setup() could potentially return
         * -EAGAIN with no ioctxs actually in use (as far as userspace
         * could tell).
         */
        aio_nr_sub(ctx->max_reqs);

        if (ctx->mmap_size)
                vm_munmap(ctx->mmap_base, ctx->mmap_size);

        ctx->rq_wait = wait;
        percpu_ref_kill(&ctx->users);
        return 0;
}

/*
 * exit_aio: called when the last user of mm goes away.  At this point, there is
 * no way for any new requests to be submitted or any of the io_* syscalls to be
 * called on the context.
 *
 * There may be outstanding kiocbs, but free_ioctx() will explicitly wait on
 * them.
 */
void exit_aio(struct mm_struct *mm)
{
        struct kioctx_table *table = rcu_dereference_raw(mm->ioctx_table);
        struct ctx_rq_wait wait;
        int i, skipped;

        if (!table)
                return;

        atomic_set(&wait.count, table->nr);
        init_completion(&wait.comp);

        skipped = 0;
        for (i = 0; i < table->nr; ++i) {
                struct kioctx *ctx =
                        rcu_dereference_protected(table->table[i], true);

                if (!ctx) {
                        skipped++;
                        continue;
                }

                /*
                 * We don't need to bother with munmap() here - exit_mmap(mm)
                 * is coming and it'll unmap everything. And we simply can't,
                 * this is not necessarily our ->mm.
                 * Since kill_ioctx() uses non-zero ->mmap_size as indicator
                 * that it needs to unmap the area, just set it to 0.
                 */
                ctx->mmap_size = 0;
                kill_ioctx(mm, ctx, &wait);
        }

        if (!atomic_sub_and_test(skipped, &wait.count)) {
                /* Wait until all IO for the context are done. */
                wait_for_completion(&wait.comp);
        }

        RCU_INIT_POINTER(mm->ioctx_table, NULL);
        kfree(table);
}

static void put_reqs_available(struct kioctx *ctx, unsigned nr)
{
        struct kioctx_cpu *kcpu;
        unsigned long flags;

        local_irq_save(flags);
        kcpu = this_cpu_ptr(ctx->cpu);
        kcpu->reqs_available += nr;

        while (kcpu->reqs_available >= ctx->req_batch * 2) {
                kcpu->reqs_available -= ctx->req_batch;
                atomic_add(ctx->req_batch, &ctx->reqs_available);
        }

        local_irq_restore(flags);
}

static bool __get_reqs_available(struct kioctx *ctx)
{
        struct kioctx_cpu *kcpu;
        bool ret = false;
        unsigned long flags;

        local_irq_save(flags);
        kcpu = this_cpu_ptr(ctx->cpu);
        if (!kcpu->reqs_available) {
                int avail = atomic_read(&ctx->reqs_available);

                do {
                        if (avail < ctx->req_batch)
                                goto out;
                } while (!atomic_try_cmpxchg(&ctx->reqs_available,
                                             &avail, avail - ctx->req_batch));

                kcpu->reqs_available += ctx->req_batch;
        }

        ret = true;
        kcpu->reqs_available--;
out:
        local_irq_restore(flags);
        return ret;
}

/* refill_reqs_available
 *	Updates the reqs_available reference counts used for tracking the
 *	number of free slots in the completion ring.  This can be called
 *	from aio_complete() (to optimistically update reqs_available) or
 *	from aio_get_req() (the "we're out of events" case).  It must be
 *	called holding ctx->completion_lock.
 */
static void refill_reqs_available(struct kioctx *ctx, unsigned head,
                                  unsigned tail)
{
        unsigned events_in_ring, completed;

        /* Clamp head since userland can write to it. */
        head %= ctx->nr_events;
        if (head <= tail)
                events_in_ring = tail - head;
        else
                events_in_ring = ctx->nr_events - (head - tail);

        completed = ctx->completed_events;
        if (events_in_ring < completed)
                completed -= events_in_ring;
        else
                completed = 0;

        if (!completed)
                return;

        ctx->completed_events -= completed;
        put_reqs_available(ctx, completed);
}

/* user_refill_reqs_available
 *	Called to refill reqs_available when aio_get_req() runs out of space
 *	in the completion ring.
 */
static void user_refill_reqs_available(struct kioctx *ctx)
{
        spin_lock_irq(&ctx->completion_lock);
        if (ctx->completed_events) {
                struct aio_ring *ring;
                unsigned head;

                /* Access of ring->head may race with aio_read_events_ring()
                 * here, but that's okay since we may read either the old or
                 * the new version, and either will be valid.  The important
                 * part is that head cannot pass tail since we prevent
                 * aio_complete() from updating tail by holding
                 * ctx->completion_lock.  Even if head is invalid, the check
                 * against ctx->completed_events below will make sure we do the
                 * safe/right thing.
                 */
                ring = folio_address(ctx->ring_folios[0]);
                head = ring->head;

                refill_reqs_available(ctx, head, ctx->tail);
        }

        spin_unlock_irq(&ctx->completion_lock);
}

static bool get_reqs_available(struct kioctx *ctx)
{
        if (__get_reqs_available(ctx))
                return true;
        user_refill_reqs_available(ctx);
        return __get_reqs_available(ctx);
}

/* aio_get_req
 *	Allocate a slot for an aio request.
 * Returns NULL if no requests are free.
 *
 * The refcount is initialized to 2 - one for the async op completion,
 * one for the synchronous code that does this.
 */
static inline struct aio_kiocb *aio_get_req(struct kioctx *ctx)
{
        struct aio_kiocb *req;

        req = kmem_cache_alloc(kiocb_cachep, GFP_KERNEL);
        if (unlikely(!req))
                return NULL;

        if (unlikely(!get_reqs_available(ctx))) {
                kmem_cache_free(kiocb_cachep, req);
                return NULL;
        }

        percpu_ref_get(&ctx->reqs);
        req->ki_ctx = ctx;
        INIT_LIST_HEAD(&req->ki_list);
        refcount_set(&req->ki_refcnt, 2);
        req->ki_eventfd = NULL;
        return req;
}

static struct kioctx *lookup_ioctx(unsigned long ctx_id)
{
        struct aio_ring __user *ring  = (void __user *)ctx_id;
        struct mm_struct *mm = current->mm;
        struct kioctx *ctx, *ret = NULL;
        struct kioctx_table *table;
        unsigned id;

        if (get_user(id, &ring->id))
                return NULL;

        rcu_read_lock();
        table = rcu_dereference(mm->ioctx_table);

        if (!table || id >= table->nr)
                goto out;

        id = array_index_nospec(id, table->nr);
        ctx = rcu_dereference(table->table[id]);
        if (ctx && ctx->user_id == ctx_id) {
                if (percpu_ref_tryget_live(&ctx->users))
                        ret = ctx;
        }
out:
        rcu_read_unlock();
        return ret;
}

static inline void iocb_destroy(struct aio_kiocb *iocb)
{
        if (iocb->ki_eventfd)
                eventfd_ctx_put(iocb->ki_eventfd);
        if (iocb->ki_filp)
                fput(iocb->ki_filp);
        percpu_ref_put(&iocb->ki_ctx->reqs);
        kmem_cache_free(kiocb_cachep, iocb);
}

struct aio_waiter {
        struct wait_queue_entry w;
        size_t                  min_nr;
};

/* aio_complete
 *	Called when the io request on the given iocb is complete.
 */
1120 */ 1125 */ 1121 static void aio_complete(struct aio_kiocb *io 1126 static void aio_complete(struct aio_kiocb *iocb) 1122 { 1127 { 1123 struct kioctx *ctx = iocb->ki_ctx; 1128 struct kioctx *ctx = iocb->ki_ctx; 1124 struct aio_ring *ring; 1129 struct aio_ring *ring; 1125 struct io_event *ev_page, *event; 1130 struct io_event *ev_page, *event; 1126 unsigned tail, pos, head, avail; !! 1131 unsigned tail, pos, head; 1127 unsigned long flags; 1132 unsigned long flags; 1128 1133 1129 /* 1134 /* 1130 * Add a completion event to the ring 1135 * Add a completion event to the ring buffer. Must be done holding 1131 * ctx->completion_lock to prevent ot 1136 * ctx->completion_lock to prevent other code from messing with the tail 1132 * pointer since we might be called f 1137 * pointer since we might be called from irq context. 1133 */ 1138 */ 1134 spin_lock_irqsave(&ctx->completion_lo 1139 spin_lock_irqsave(&ctx->completion_lock, flags); 1135 1140 1136 tail = ctx->tail; 1141 tail = ctx->tail; 1137 pos = tail + AIO_EVENTS_OFFSET; 1142 pos = tail + AIO_EVENTS_OFFSET; 1138 1143 1139 if (++tail >= ctx->nr_events) 1144 if (++tail >= ctx->nr_events) 1140 tail = 0; 1145 tail = 0; 1141 1146 1142 ev_page = folio_address(ctx->ring_fol !! 1147 ev_page = kmap_atomic(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]); 1143 event = ev_page + pos % AIO_EVENTS_PE 1148 event = ev_page + pos % AIO_EVENTS_PER_PAGE; 1144 1149 1145 *event = iocb->ki_res; 1150 *event = iocb->ki_res; 1146 1151 1147 flush_dcache_folio(ctx->ring_folios[p !! 1152 kunmap_atomic(ev_page); >> 1153 flush_dcache_page(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]); 1148 1154 1149 pr_debug("%p[%u]: %p: %p %Lx %Lx %Lx\ 1155 pr_debug("%p[%u]: %p: %p %Lx %Lx %Lx\n", ctx, tail, iocb, 1150 (void __user *)(unsigned lon 1156 (void __user *)(unsigned long)iocb->ki_res.obj, 1151 iocb->ki_res.data, iocb->ki_ 1157 iocb->ki_res.data, iocb->ki_res.res, iocb->ki_res.res2); 1152 1158 1153 /* after flagging the request as done 1159 /* after flagging the request as done, we 1154 * must never even look at it again 1160 * must never even look at it again 1155 */ 1161 */ 1156 smp_wmb(); /* make event visible 1162 smp_wmb(); /* make event visible before updating tail */ 1157 1163 1158 ctx->tail = tail; 1164 ctx->tail = tail; 1159 1165 1160 ring = folio_address(ctx->ring_folios !! 1166 ring = kmap_atomic(ctx->ring_pages[0]); 1161 head = ring->head; 1167 head = ring->head; 1162 ring->tail = tail; 1168 ring->tail = tail; 1163 flush_dcache_folio(ctx->ring_folios[0 !! 1169 kunmap_atomic(ring); >> 1170 flush_dcache_page(ctx->ring_pages[0]); 1164 1171 1165 ctx->completed_events++; 1172 ctx->completed_events++; 1166 if (ctx->completed_events > 1) 1173 if (ctx->completed_events > 1) 1167 refill_reqs_available(ctx, he 1174 refill_reqs_available(ctx, head, tail); 1168 << 1169 avail = tail > head << 1170 ? tail - head << 1171 : tail + ctx->nr_events - hea << 1172 spin_unlock_irqrestore(&ctx->completi 1175 spin_unlock_irqrestore(&ctx->completion_lock, flags); 1173 1176 1174 pr_debug("added to ring %p at [%u]\n" 1177 pr_debug("added to ring %p at [%u]\n", iocb, tail); 1175 1178 1176 /* 1179 /* 1177 * Check if the user asked us to deli 1180 * Check if the user asked us to deliver the result through an 1178 * eventfd. The eventfd_signal() func 1181 * eventfd. The eventfd_signal() function is safe to be called 1179 * from IRQ context. 1182 * from IRQ context. 1180 */ 1183 */ 1181 if (iocb->ki_eventfd) 1184 if (iocb->ki_eventfd) 1182 eventfd_signal(iocb->ki_event !! 
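A side note on the indexing just above: aio_complete() turns the ring index into a (page, slot) pair with one division and one modulo. The helper below is an illustrative stand-in (names invented here), with events_per_page and events_offset playing the roles of AIO_EVENTS_PER_PAGE and AIO_EVENTS_OFFSET defined earlier in this file.

/* Map a ring index to the ring page that holds it and the io_event slot
 * within that page; the first page loses a few slots to the aio_ring
 * header, which is what the offset accounts for. */
static void ring_index_to_slot(unsigned int tail, unsigned int events_per_page,
			       unsigned int events_offset,
			       unsigned int *page_nr, unsigned int *slot)
{
	unsigned int pos = tail + events_offset;

	*page_nr = pos / events_per_page;	/* index into the ring pages */
	*slot    = pos % events_per_page;	/* io_event slot within it   */
}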
1185 eventfd_signal(iocb->ki_eventfd, 1); 1183 1186 1184 /* 1187 /* 1185 * We have to order our ring_info tai 1188 * We have to order our ring_info tail store above and test 1186 * of the wait list below outside the 1189 * of the wait list below outside the wait lock. This is 1187 * like in wake_up_bit() where cleari 1190 * like in wake_up_bit() where clearing a bit has to be 1188 * ordered with the unlocked test. 1191 * ordered with the unlocked test. 1189 */ 1192 */ 1190 smp_mb(); 1193 smp_mb(); 1191 1194 1192 if (waitqueue_active(&ctx->wait)) { !! 1195 if (waitqueue_active(&ctx->wait)) 1193 struct aio_waiter *curr, *nex !! 1196 wake_up(&ctx->wait); 1194 unsigned long flags; << 1195 << 1196 spin_lock_irqsave(&ctx->wait. << 1197 list_for_each_entry_safe(curr << 1198 if (avail >= curr->mi << 1199 wake_up_proce << 1200 list_del_init << 1201 } << 1202 spin_unlock_irqrestore(&ctx-> << 1203 } << 1204 } 1197 } 1205 1198 1206 static inline void iocb_put(struct aio_kiocb 1199 static inline void iocb_put(struct aio_kiocb *iocb) 1207 { 1200 { 1208 if (refcount_dec_and_test(&iocb->ki_r 1201 if (refcount_dec_and_test(&iocb->ki_refcnt)) { 1209 aio_complete(iocb); 1202 aio_complete(iocb); 1210 iocb_destroy(iocb); 1203 iocb_destroy(iocb); 1211 } 1204 } 1212 } 1205 } 1213 1206 1214 /* aio_read_events_ring 1207 /* aio_read_events_ring 1215 * Pull an event off of the ioctx's even 1208 * Pull an event off of the ioctx's event ring. Returns the number of 1216 * events fetched 1209 * events fetched 1217 */ 1210 */ 1218 static long aio_read_events_ring(struct kioct 1211 static long aio_read_events_ring(struct kioctx *ctx, 1219 struct io_ev 1212 struct io_event __user *event, long nr) 1220 { 1213 { 1221 struct aio_ring *ring; 1214 struct aio_ring *ring; 1222 unsigned head, tail, pos; 1215 unsigned head, tail, pos; 1223 long ret = 0; 1216 long ret = 0; 1224 int copy_ret; 1217 int copy_ret; 1225 1218 1226 /* 1219 /* 1227 * The mutex can block and wake us up 1220 * The mutex can block and wake us up and that will cause 1228 * wait_event_interruptible_hrtimeout 1221 * wait_event_interruptible_hrtimeout() to schedule without sleeping 1229 * and repeat. This should be rare en 1222 * and repeat. This should be rare enough that it doesn't cause 1230 * peformance issues. See the comment 1223 * peformance issues. See the comment in read_events() for more detail. 1231 */ 1224 */ 1232 sched_annotate_sleep(); 1225 sched_annotate_sleep(); 1233 mutex_lock(&ctx->ring_lock); 1226 mutex_lock(&ctx->ring_lock); 1234 1227 1235 /* Access to ->ring_folios here is pr !! 1228 /* Access to ->ring_pages here is protected by ctx->ring_lock. */ 1236 ring = folio_address(ctx->ring_folios !! 1229 ring = kmap_atomic(ctx->ring_pages[0]); 1237 head = ring->head; 1230 head = ring->head; 1238 tail = ring->tail; 1231 tail = ring->tail; >> 1232 kunmap_atomic(ring); 1239 1233 1240 /* 1234 /* 1241 * Ensure that once we've read the cu 1235 * Ensure that once we've read the current tail pointer, that 1242 * we also see the events that were s 1236 * we also see the events that were stored up to the tail. 
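The smp_wmb() in aio_complete() and the smp_rmb() here pair up in the usual single-producer ring-buffer way: the producer publishes the events before it publishes the new tail, and the consumer must read the tail before it trusts the events. A rough userspace analogue using C11 release/acquire atomics (purely illustrative; single producer, single consumer, and the full-ring check is omitted for brevity):

#include <stdatomic.h>

#define RING_SIZE	64	/* divides 2^32, so wrap-around stays consistent */

struct demo_ring {
	_Atomic unsigned int	tail;		/* written by the producer */
	unsigned int		head;		/* private to the consumer */
	int			slots[RING_SIZE];
};

/* Producer: fill the slot, then publish it by moving tail.  The release
 * store plays the role of smp_wmb() + the plain tail store above. */
static void demo_produce(struct demo_ring *r, int v)
{
	unsigned int t = atomic_load_explicit(&r->tail, memory_order_relaxed);

	r->slots[t % RING_SIZE] = v;
	atomic_store_explicit(&r->tail, t + 1, memory_order_release);
}

/* Consumer: an acquire load of tail (the smp_rmb() counterpart) guarantees
 * every slot written before that tail value is visible when read below. */
static int demo_consume(struct demo_ring *r, int *v)
{
	unsigned int t = atomic_load_explicit(&r->tail, memory_order_acquire);

	if (r->head == t)
		return 0;			/* nothing to reap */
	*v = r->slots[r->head++ % RING_SIZE];
	return 1;
}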
1243 */ 1237 */ 1244 smp_rmb(); 1238 smp_rmb(); 1245 1239 1246 pr_debug("h%u t%u m%u\n", head, tail, 1240 pr_debug("h%u t%u m%u\n", head, tail, ctx->nr_events); 1247 1241 1248 if (head == tail) 1242 if (head == tail) 1249 goto out; 1243 goto out; 1250 1244 1251 head %= ctx->nr_events; 1245 head %= ctx->nr_events; 1252 tail %= ctx->nr_events; 1246 tail %= ctx->nr_events; 1253 1247 1254 while (ret < nr) { 1248 while (ret < nr) { 1255 long avail; 1249 long avail; 1256 struct io_event *ev; 1250 struct io_event *ev; 1257 struct folio *folio; !! 1251 struct page *page; 1258 1252 1259 avail = (head <= tail ? tail 1253 avail = (head <= tail ? tail : ctx->nr_events) - head; 1260 if (head == tail) 1254 if (head == tail) 1261 break; 1255 break; 1262 1256 1263 pos = head + AIO_EVENTS_OFFSE 1257 pos = head + AIO_EVENTS_OFFSET; 1264 folio = ctx->ring_folios[pos !! 1258 page = ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]; 1265 pos %= AIO_EVENTS_PER_PAGE; 1259 pos %= AIO_EVENTS_PER_PAGE; 1266 1260 1267 avail = min(avail, nr - ret); 1261 avail = min(avail, nr - ret); 1268 avail = min_t(long, avail, AI 1262 avail = min_t(long, avail, AIO_EVENTS_PER_PAGE - pos); 1269 1263 1270 ev = folio_address(folio); !! 1264 ev = kmap(page); 1271 copy_ret = copy_to_user(event 1265 copy_ret = copy_to_user(event + ret, ev + pos, 1272 sizeo 1266 sizeof(*ev) * avail); >> 1267 kunmap(page); 1273 1268 1274 if (unlikely(copy_ret)) { 1269 if (unlikely(copy_ret)) { 1275 ret = -EFAULT; 1270 ret = -EFAULT; 1276 goto out; 1271 goto out; 1277 } 1272 } 1278 1273 1279 ret += avail; 1274 ret += avail; 1280 head += avail; 1275 head += avail; 1281 head %= ctx->nr_events; 1276 head %= ctx->nr_events; 1282 } 1277 } 1283 1278 1284 ring = folio_address(ctx->ring_folios !! 1279 ring = kmap_atomic(ctx->ring_pages[0]); 1285 ring->head = head; 1280 ring->head = head; 1286 flush_dcache_folio(ctx->ring_folios[0 !! 1281 kunmap_atomic(ring); >> 1282 flush_dcache_page(ctx->ring_pages[0]); 1287 1283 1288 pr_debug("%li h%u t%u\n", ret, head, 1284 pr_debug("%li h%u t%u\n", ret, head, tail); 1289 out: 1285 out: 1290 mutex_unlock(&ctx->ring_lock); 1286 mutex_unlock(&ctx->ring_lock); 1291 1287 1292 return ret; 1288 return ret; 1293 } 1289 } 1294 1290 1295 static bool aio_read_events(struct kioctx *ct 1291 static bool aio_read_events(struct kioctx *ctx, long min_nr, long nr, 1296 struct io_event _ 1292 struct io_event __user *event, long *i) 1297 { 1293 { 1298 long ret = aio_read_events_ring(ctx, 1294 long ret = aio_read_events_ring(ctx, event + *i, nr - *i); 1299 1295 1300 if (ret > 0) 1296 if (ret > 0) 1301 *i += ret; 1297 *i += ret; 1302 1298 1303 if (unlikely(atomic_read(&ctx->dead)) 1299 if (unlikely(atomic_read(&ctx->dead))) 1304 ret = -EINVAL; 1300 ret = -EINVAL; 1305 1301 1306 if (!*i) 1302 if (!*i) 1307 *i = ret; 1303 *i = ret; 1308 1304 1309 return ret < 0 || *i >= min_nr; 1305 return ret < 0 || *i >= min_nr; 1310 } 1306 } 1311 1307 1312 static long read_events(struct kioctx *ctx, l 1308 static long read_events(struct kioctx *ctx, long min_nr, long nr, 1313 struct io_event __use 1309 struct io_event __user *event, 1314 ktime_t until) 1310 ktime_t until) 1315 { 1311 { 1316 struct hrtimer_sleeper t; !! 1312 long ret = 0; 1317 struct aio_waiter w; << 1318 long ret = 0, ret2 = 0; << 1319 1313 1320 /* 1314 /* 1321 * Note that aio_read_events() is bei 1315 * Note that aio_read_events() is being called as the conditional - i.e. 
1322 * we're calling it after prepare_to_ 1316 * we're calling it after prepare_to_wait() has set task state to 1323 * TASK_INTERRUPTIBLE. 1317 * TASK_INTERRUPTIBLE. 1324 * 1318 * 1325 * But aio_read_events() can block, a 1319 * But aio_read_events() can block, and if it blocks it's going to flip 1326 * the task state back to TASK_RUNNIN 1320 * the task state back to TASK_RUNNING. 1327 * 1321 * 1328 * This should be ok, provided it doe 1322 * This should be ok, provided it doesn't flip the state back to 1329 * TASK_RUNNING and return 0 too much 1323 * TASK_RUNNING and return 0 too much - that causes us to spin. That 1330 * will only happen if the mutex_lock 1324 * will only happen if the mutex_lock() call blocks, and we then find 1331 * the ringbuffer empty. So in practi 1325 * the ringbuffer empty. So in practice we should be ok, but it's 1332 * something to be aware of when touc 1326 * something to be aware of when touching this code. 1333 */ 1327 */ 1334 aio_read_events(ctx, min_nr, nr, even !! 1328 if (until == 0) 1335 if (until == 0 || ret < 0 || ret >= m !! 1329 aio_read_events(ctx, min_nr, nr, event, &ret); 1336 return ret; !! 1330 else 1337 !! 1331 wait_event_interruptible_hrtimeout(ctx->wait, 1338 hrtimer_init_sleeper_on_stack(&t, CLO !! 1332 aio_read_events(ctx, min_nr, nr, event, &ret), 1339 if (until != KTIME_MAX) { !! 1333 until); 1340 hrtimer_set_expires_range_ns( << 1341 hrtimer_sleeper_start_expires << 1342 } << 1343 << 1344 init_wait(&w.w); << 1345 << 1346 while (1) { << 1347 unsigned long nr_got = ret; << 1348 << 1349 w.min_nr = min_nr - ret; << 1350 << 1351 ret2 = prepare_to_wait_event( << 1352 if (!ret2 && !t.task) << 1353 ret2 = -ETIME; << 1354 << 1355 if (aio_read_events(ctx, min_ << 1356 break; << 1357 << 1358 if (nr_got == ret) << 1359 schedule(); << 1360 } << 1361 << 1362 finish_wait(&ctx->wait, &w.w); << 1363 hrtimer_cancel(&t.timer); << 1364 destroy_hrtimer_on_stack(&t.timer); << 1365 << 1366 return ret; 1334 return ret; 1367 } 1335 } 1368 1336 1369 /* sys_io_setup: 1337 /* sys_io_setup: 1370 * Create an aio_context capable of rece 1338 * Create an aio_context capable of receiving at least nr_events. 1371 * ctxp must not point to an aio_context 1339 * ctxp must not point to an aio_context that already exists, and 1372 * must be initialized to 0 prior to the 1340 * must be initialized to 0 prior to the call. On successful 1373 * creation of the aio_context, *ctxp is 1341 * creation of the aio_context, *ctxp is filled in with the resulting 1374 * handle. May fail with -EINVAL if *ct 1342 * handle. May fail with -EINVAL if *ctxp is not initialized, 1375 * if the specified nr_events exceeds in 1343 * if the specified nr_events exceeds internal limits. May fail 1376 * with -EAGAIN if the specified nr_even 1344 * with -EAGAIN if the specified nr_events exceeds the user's limit 1377 * of available events. May fail with - 1345 * of available events. May fail with -ENOMEM if insufficient kernel 1378 * resources are available. May fail wi 1346 * resources are available. May fail with -EFAULT if an invalid 1379 * pointer is passed for ctxp. Will fai 1347 * pointer is passed for ctxp. Will fail with -ENOSYS if not 1380 * implemented. 1348 * implemented. 
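From userspace, the min_nr handling in read_events()/aio_read_events() above means that io_getevents() with min_nr == 0 and a zero timeout never sleeps: it just reaps whatever has already completed. A hedged sketch using the raw syscall (assuming a 64-bit build, where struct timespec has the layout the syscall expects):

#include <linux/aio_abi.h>
#include <sys/syscall.h>
#include <time.h>
#include <unistd.h>

/* Non-blocking reap: returns the number of already-completed events
 * (possibly 0), or -1 with errno set. */
static long reap_completed(aio_context_t ctx, struct io_event *evs, long max)
{
	struct timespec zero = { 0, 0 };

	return syscall(__NR_io_getevents, ctx, 0, max, evs, &zero);
}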
1381 */ 1349 */ 1382 SYSCALL_DEFINE2(io_setup, unsigned, nr_events 1350 SYSCALL_DEFINE2(io_setup, unsigned, nr_events, aio_context_t __user *, ctxp) 1383 { 1351 { 1384 struct kioctx *ioctx = NULL; 1352 struct kioctx *ioctx = NULL; 1385 unsigned long ctx; 1353 unsigned long ctx; 1386 long ret; 1354 long ret; 1387 1355 1388 ret = get_user(ctx, ctxp); 1356 ret = get_user(ctx, ctxp); 1389 if (unlikely(ret)) 1357 if (unlikely(ret)) 1390 goto out; 1358 goto out; 1391 1359 1392 ret = -EINVAL; 1360 ret = -EINVAL; 1393 if (unlikely(ctx || nr_events == 0)) 1361 if (unlikely(ctx || nr_events == 0)) { 1394 pr_debug("EINVAL: ctx %lu nr_ 1362 pr_debug("EINVAL: ctx %lu nr_events %u\n", 1395 ctx, nr_events); 1363 ctx, nr_events); 1396 goto out; 1364 goto out; 1397 } 1365 } 1398 1366 1399 ioctx = ioctx_alloc(nr_events); 1367 ioctx = ioctx_alloc(nr_events); 1400 ret = PTR_ERR(ioctx); 1368 ret = PTR_ERR(ioctx); 1401 if (!IS_ERR(ioctx)) { 1369 if (!IS_ERR(ioctx)) { 1402 ret = put_user(ioctx->user_id 1370 ret = put_user(ioctx->user_id, ctxp); 1403 if (ret) 1371 if (ret) 1404 kill_ioctx(current->m 1372 kill_ioctx(current->mm, ioctx, NULL); 1405 percpu_ref_put(&ioctx->users) 1373 percpu_ref_put(&ioctx->users); 1406 } 1374 } 1407 1375 1408 out: 1376 out: 1409 return ret; 1377 return ret; 1410 } 1378 } 1411 1379 1412 #ifdef CONFIG_COMPAT 1380 #ifdef CONFIG_COMPAT 1413 COMPAT_SYSCALL_DEFINE2(io_setup, unsigned, nr 1381 COMPAT_SYSCALL_DEFINE2(io_setup, unsigned, nr_events, u32 __user *, ctx32p) 1414 { 1382 { 1415 struct kioctx *ioctx = NULL; 1383 struct kioctx *ioctx = NULL; 1416 unsigned long ctx; 1384 unsigned long ctx; 1417 long ret; 1385 long ret; 1418 1386 1419 ret = get_user(ctx, ctx32p); 1387 ret = get_user(ctx, ctx32p); 1420 if (unlikely(ret)) 1388 if (unlikely(ret)) 1421 goto out; 1389 goto out; 1422 1390 1423 ret = -EINVAL; 1391 ret = -EINVAL; 1424 if (unlikely(ctx || nr_events == 0)) 1392 if (unlikely(ctx || nr_events == 0)) { 1425 pr_debug("EINVAL: ctx %lu nr_ 1393 pr_debug("EINVAL: ctx %lu nr_events %u\n", 1426 ctx, nr_events); 1394 ctx, nr_events); 1427 goto out; 1395 goto out; 1428 } 1396 } 1429 1397 1430 ioctx = ioctx_alloc(nr_events); 1398 ioctx = ioctx_alloc(nr_events); 1431 ret = PTR_ERR(ioctx); 1399 ret = PTR_ERR(ioctx); 1432 if (!IS_ERR(ioctx)) { 1400 if (!IS_ERR(ioctx)) { 1433 /* truncating is ok because i 1401 /* truncating is ok because it's a user address */ 1434 ret = put_user((u32)ioctx->us 1402 ret = put_user((u32)ioctx->user_id, ctx32p); 1435 if (ret) 1403 if (ret) 1436 kill_ioctx(current->m 1404 kill_ioctx(current->mm, ioctx, NULL); 1437 percpu_ref_put(&ioctx->users) 1405 percpu_ref_put(&ioctx->users); 1438 } 1406 } 1439 1407 1440 out: 1408 out: 1441 return ret; 1409 return ret; 1442 } 1410 } 1443 #endif 1411 #endif 1444 1412 1445 /* sys_io_destroy: 1413 /* sys_io_destroy: 1446 * Destroy the aio_context specified. M 1414 * Destroy the aio_context specified. May cancel any outstanding 1447 * AIOs and block on completion. Will f 1415 * AIOs and block on completion. Will fail with -ENOSYS if not 1448 * implemented. May fail with -EINVAL i 1416 * implemented. May fail with -EINVAL if the context pointed to 1449 * is invalid. 1417 * is invalid. 
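A minimal userspace counterpart to the io_setup()/io_destroy() documentation above; there is no glibc wrapper for these, so the raw syscalls are used. Illustrative sketch with error handling kept to a minimum.

#include <linux/aio_abi.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	aio_context_t ctx = 0;		/* must be zero before io_setup() */

	if (syscall(__NR_io_setup, 128, &ctx) < 0) {
		perror("io_setup");
		return 1;
	}
	printf("aio context: %#llx\n", (unsigned long long)ctx);

	/* ... io_submit() / io_getevents() would go here ... */

	if (syscall(__NR_io_destroy, ctx) < 0) {
		perror("io_destroy");
		return 1;
	}
	return 0;
}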
1450 */ 1418 */ 1451 SYSCALL_DEFINE1(io_destroy, aio_context_t, ct 1419 SYSCALL_DEFINE1(io_destroy, aio_context_t, ctx) 1452 { 1420 { 1453 struct kioctx *ioctx = lookup_ioctx(c 1421 struct kioctx *ioctx = lookup_ioctx(ctx); 1454 if (likely(NULL != ioctx)) { 1422 if (likely(NULL != ioctx)) { 1455 struct ctx_rq_wait wait; 1423 struct ctx_rq_wait wait; 1456 int ret; 1424 int ret; 1457 1425 1458 init_completion(&wait.comp); 1426 init_completion(&wait.comp); 1459 atomic_set(&wait.count, 1); 1427 atomic_set(&wait.count, 1); 1460 1428 1461 /* Pass requests_done to kill 1429 /* Pass requests_done to kill_ioctx() where it can be set 1462 * in a thread-safe way. If w 1430 * in a thread-safe way. If we try to set it here then we have 1463 * a race condition if two io 1431 * a race condition if two io_destroy() called simultaneously. 1464 */ 1432 */ 1465 ret = kill_ioctx(current->mm, 1433 ret = kill_ioctx(current->mm, ioctx, &wait); 1466 percpu_ref_put(&ioctx->users) 1434 percpu_ref_put(&ioctx->users); 1467 1435 1468 /* Wait until all IO for the 1436 /* Wait until all IO for the context are done. Otherwise kernel 1469 * keep using user-space buff 1437 * keep using user-space buffers even if user thinks the context 1470 * is destroyed. 1438 * is destroyed. 1471 */ 1439 */ 1472 if (!ret) 1440 if (!ret) 1473 wait_for_completion(& 1441 wait_for_completion(&wait.comp); 1474 1442 1475 return ret; 1443 return ret; 1476 } 1444 } 1477 pr_debug("EINVAL: invalid context id\ 1445 pr_debug("EINVAL: invalid context id\n"); 1478 return -EINVAL; 1446 return -EINVAL; 1479 } 1447 } 1480 1448 1481 static void aio_remove_iocb(struct aio_kiocb 1449 static void aio_remove_iocb(struct aio_kiocb *iocb) 1482 { 1450 { 1483 struct kioctx *ctx = iocb->ki_ctx; 1451 struct kioctx *ctx = iocb->ki_ctx; 1484 unsigned long flags; 1452 unsigned long flags; 1485 1453 1486 spin_lock_irqsave(&ctx->ctx_lock, fla 1454 spin_lock_irqsave(&ctx->ctx_lock, flags); 1487 list_del(&iocb->ki_list); 1455 list_del(&iocb->ki_list); 1488 spin_unlock_irqrestore(&ctx->ctx_lock 1456 spin_unlock_irqrestore(&ctx->ctx_lock, flags); 1489 } 1457 } 1490 1458 1491 static void aio_complete_rw(struct kiocb *kio 1459 static void aio_complete_rw(struct kiocb *kiocb, long res) 1492 { 1460 { 1493 struct aio_kiocb *iocb = container_of 1461 struct aio_kiocb *iocb = container_of(kiocb, struct aio_kiocb, rw); 1494 1462 1495 if (!list_empty_careful(&iocb->ki_lis 1463 if (!list_empty_careful(&iocb->ki_list)) 1496 aio_remove_iocb(iocb); 1464 aio_remove_iocb(iocb); 1497 1465 1498 if (kiocb->ki_flags & IOCB_WRITE) { 1466 if (kiocb->ki_flags & IOCB_WRITE) { 1499 struct inode *inode = file_in 1467 struct inode *inode = file_inode(kiocb->ki_filp); 1500 1468 >> 1469 /* >> 1470 * Tell lockdep we inherited freeze protection from submission >> 1471 * thread. >> 1472 */ 1501 if (S_ISREG(inode->i_mode)) 1473 if (S_ISREG(inode->i_mode)) 1502 kiocb_end_write(kiocb !! 1474 __sb_writers_acquired(inode->i_sb, SB_FREEZE_WRITE); >> 1475 file_end_write(kiocb->ki_filp); 1503 } 1476 } 1504 1477 1505 iocb->ki_res.res = res; 1478 iocb->ki_res.res = res; 1506 iocb->ki_res.res2 = 0; 1479 iocb->ki_res.res2 = 0; 1507 iocb_put(iocb); 1480 iocb_put(iocb); 1508 } 1481 } 1509 1482 1510 static int aio_prep_rw(struct kiocb *req, con !! 
1483 static int aio_prep_rw(struct kiocb *req, const struct iocb *iocb) 1511 { 1484 { 1512 int ret; 1485 int ret; 1513 1486 1514 req->ki_complete = aio_complete_rw; 1487 req->ki_complete = aio_complete_rw; 1515 req->private = NULL; 1488 req->private = NULL; 1516 req->ki_pos = iocb->aio_offset; 1489 req->ki_pos = iocb->aio_offset; 1517 req->ki_flags = req->ki_filp->f_iocb_ 1490 req->ki_flags = req->ki_filp->f_iocb_flags | IOCB_AIO_RW; 1518 if (iocb->aio_flags & IOCB_FLAG_RESFD 1491 if (iocb->aio_flags & IOCB_FLAG_RESFD) 1519 req->ki_flags |= IOCB_EVENTFD 1492 req->ki_flags |= IOCB_EVENTFD; 1520 if (iocb->aio_flags & IOCB_FLAG_IOPRI 1493 if (iocb->aio_flags & IOCB_FLAG_IOPRIO) { 1521 /* 1494 /* 1522 * If the IOCB_FLAG_IOPRIO fl 1495 * If the IOCB_FLAG_IOPRIO flag of aio_flags is set, then 1523 * aio_reqprio is interpreted 1496 * aio_reqprio is interpreted as an I/O scheduling 1524 * class and priority. 1497 * class and priority. 1525 */ 1498 */ 1526 ret = ioprio_check_cap(iocb-> 1499 ret = ioprio_check_cap(iocb->aio_reqprio); 1527 if (ret) { 1500 if (ret) { 1528 pr_debug("aio ioprio 1501 pr_debug("aio ioprio check cap error: %d\n", ret); 1529 return ret; 1502 return ret; 1530 } 1503 } 1531 1504 1532 req->ki_ioprio = iocb->aio_re 1505 req->ki_ioprio = iocb->aio_reqprio; 1533 } else 1506 } else 1534 req->ki_ioprio = get_current_ 1507 req->ki_ioprio = get_current_ioprio(); 1535 1508 1536 ret = kiocb_set_rw_flags(req, iocb->a !! 1509 ret = kiocb_set_rw_flags(req, iocb->aio_rw_flags); 1537 if (unlikely(ret)) 1510 if (unlikely(ret)) 1538 return ret; 1511 return ret; 1539 1512 1540 req->ki_flags &= ~IOCB_HIPRI; /* no o 1513 req->ki_flags &= ~IOCB_HIPRI; /* no one is going to poll for this I/O */ 1541 return 0; 1514 return 0; 1542 } 1515 } 1543 1516 1544 static ssize_t aio_setup_rw(int rw, const str 1517 static ssize_t aio_setup_rw(int rw, const struct iocb *iocb, 1545 struct iovec **iovec, bool ve 1518 struct iovec **iovec, bool vectored, bool compat, 1546 struct iov_iter *iter) 1519 struct iov_iter *iter) 1547 { 1520 { 1548 void __user *buf = (void __user *)(ui 1521 void __user *buf = (void __user *)(uintptr_t)iocb->aio_buf; 1549 size_t len = iocb->aio_nbytes; 1522 size_t len = iocb->aio_nbytes; 1550 1523 1551 if (!vectored) { 1524 if (!vectored) { 1552 ssize_t ret = import_ubuf(rw, !! 1525 ssize_t ret = import_single_range(rw, buf, len, *iovec, iter); 1553 *iovec = NULL; 1526 *iovec = NULL; 1554 return ret; 1527 return ret; 1555 } 1528 } 1556 1529 1557 return __import_iovec(rw, buf, len, U 1530 return __import_iovec(rw, buf, len, UIO_FASTIOV, iovec, iter, compat); 1558 } 1531 } 1559 1532 1560 static inline void aio_rw_done(struct kiocb * 1533 static inline void aio_rw_done(struct kiocb *req, ssize_t ret) 1561 { 1534 { 1562 switch (ret) { 1535 switch (ret) { 1563 case -EIOCBQUEUED: 1536 case -EIOCBQUEUED: 1564 break; 1537 break; 1565 case -ERESTARTSYS: 1538 case -ERESTARTSYS: 1566 case -ERESTARTNOINTR: 1539 case -ERESTARTNOINTR: 1567 case -ERESTARTNOHAND: 1540 case -ERESTARTNOHAND: 1568 case -ERESTART_RESTARTBLOCK: 1541 case -ERESTART_RESTARTBLOCK: 1569 /* 1542 /* 1570 * There's no easy way to res 1543 * There's no easy way to restart the syscall since other AIO's 1571 * may be already running. Ju 1544 * may be already running. Just fail this IO with EINTR. 
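As the IOCB_FLAG_IOPRIO branch in aio_prep_rw() above shows, a submitter can attach a per-request I/O priority in aio_reqprio. The sketch below assumes the ioprio_set(2)-style encoding (scheduling class above a 13-bit shift, best-effort class 2, levels 0-7); the DEMO_* names are local stand-ins rather than uapi constants.

#include <linux/aio_abi.h>

#define DEMO_IOPRIO_CLASS_SHIFT	13	/* assumed encoding, see ioprio_set(2) */
#define DEMO_IOPRIO_CLASS_BE	2	/* best-effort scheduling class */

static void iocb_set_best_effort_prio(struct iocb *cb, int level)
{
	cb->aio_flags  |= IOCB_FLAG_IOPRIO;
	cb->aio_reqprio = (DEMO_IOPRIO_CLASS_BE << DEMO_IOPRIO_CLASS_SHIFT) | level;
}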
1572 */ 1545 */ 1573 ret = -EINTR; 1546 ret = -EINTR; 1574 fallthrough; 1547 fallthrough; 1575 default: 1548 default: 1576 req->ki_complete(req, ret); 1549 req->ki_complete(req, ret); 1577 } 1550 } 1578 } 1551 } 1579 1552 1580 static int aio_read(struct kiocb *req, const 1553 static int aio_read(struct kiocb *req, const struct iocb *iocb, 1581 bool vectored, bool c 1554 bool vectored, bool compat) 1582 { 1555 { 1583 struct iovec inline_vecs[UIO_FASTIOV] 1556 struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs; 1584 struct iov_iter iter; 1557 struct iov_iter iter; 1585 struct file *file; 1558 struct file *file; 1586 int ret; 1559 int ret; 1587 1560 1588 ret = aio_prep_rw(req, iocb, READ); !! 1561 ret = aio_prep_rw(req, iocb); 1589 if (ret) 1562 if (ret) 1590 return ret; 1563 return ret; 1591 file = req->ki_filp; 1564 file = req->ki_filp; 1592 if (unlikely(!(file->f_mode & FMODE_R 1565 if (unlikely(!(file->f_mode & FMODE_READ))) 1593 return -EBADF; 1566 return -EBADF; 1594 if (unlikely(!file->f_op->read_iter)) 1567 if (unlikely(!file->f_op->read_iter)) 1595 return -EINVAL; 1568 return -EINVAL; 1596 1569 1597 ret = aio_setup_rw(ITER_DEST, iocb, & 1570 ret = aio_setup_rw(ITER_DEST, iocb, &iovec, vectored, compat, &iter); 1598 if (ret < 0) 1571 if (ret < 0) 1599 return ret; 1572 return ret; 1600 ret = rw_verify_area(READ, file, &req 1573 ret = rw_verify_area(READ, file, &req->ki_pos, iov_iter_count(&iter)); 1601 if (!ret) 1574 if (!ret) 1602 aio_rw_done(req, file->f_op-> !! 1575 aio_rw_done(req, call_read_iter(file, req, &iter)); 1603 kfree(iovec); 1576 kfree(iovec); 1604 return ret; 1577 return ret; 1605 } 1578 } 1606 1579 1607 static int aio_write(struct kiocb *req, const 1580 static int aio_write(struct kiocb *req, const struct iocb *iocb, 1608 bool vectored, bool 1581 bool vectored, bool compat) 1609 { 1582 { 1610 struct iovec inline_vecs[UIO_FASTIOV] 1583 struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs; 1611 struct iov_iter iter; 1584 struct iov_iter iter; 1612 struct file *file; 1585 struct file *file; 1613 int ret; 1586 int ret; 1614 1587 1615 ret = aio_prep_rw(req, iocb, WRITE); !! 1588 ret = aio_prep_rw(req, iocb); 1616 if (ret) 1589 if (ret) 1617 return ret; 1590 return ret; 1618 file = req->ki_filp; 1591 file = req->ki_filp; 1619 1592 1620 if (unlikely(!(file->f_mode & FMODE_W 1593 if (unlikely(!(file->f_mode & FMODE_WRITE))) 1621 return -EBADF; 1594 return -EBADF; 1622 if (unlikely(!file->f_op->write_iter) 1595 if (unlikely(!file->f_op->write_iter)) 1623 return -EINVAL; 1596 return -EINVAL; 1624 1597 1625 ret = aio_setup_rw(ITER_SOURCE, iocb, 1598 ret = aio_setup_rw(ITER_SOURCE, iocb, &iovec, vectored, compat, &iter); 1626 if (ret < 0) 1599 if (ret < 0) 1627 return ret; 1600 return ret; 1628 ret = rw_verify_area(WRITE, file, &re 1601 ret = rw_verify_area(WRITE, file, &req->ki_pos, iov_iter_count(&iter)); 1629 if (!ret) { 1602 if (!ret) { 1630 if (S_ISREG(file_inode(file)- !! 1603 /* 1631 kiocb_start_write(req !! 1604 * Open-code file_start_write here to grab freeze protection, >> 1605 * which will be released by another thread in >> 1606 * aio_complete_rw(). Fool lockdep by telling it the lock got >> 1607 * released so that it doesn't complain about the held lock when >> 1608 * we return to userspace. 
>> 1609 */ >> 1610 if (S_ISREG(file_inode(file)->i_mode)) { >> 1611 sb_start_write(file_inode(file)->i_sb); >> 1612 __sb_writers_release(file_inode(file)->i_sb, SB_FREEZE_WRITE); >> 1613 } 1632 req->ki_flags |= IOCB_WRITE; 1614 req->ki_flags |= IOCB_WRITE; 1633 aio_rw_done(req, file->f_op-> !! 1615 aio_rw_done(req, call_write_iter(file, req, &iter)); 1634 } 1616 } 1635 kfree(iovec); 1617 kfree(iovec); 1636 return ret; 1618 return ret; 1637 } 1619 } 1638 1620 1639 static void aio_fsync_work(struct work_struct 1621 static void aio_fsync_work(struct work_struct *work) 1640 { 1622 { 1641 struct aio_kiocb *iocb = container_of 1623 struct aio_kiocb *iocb = container_of(work, struct aio_kiocb, fsync.work); 1642 const struct cred *old_cred = overrid 1624 const struct cred *old_cred = override_creds(iocb->fsync.creds); 1643 1625 1644 iocb->ki_res.res = vfs_fsync(iocb->fs 1626 iocb->ki_res.res = vfs_fsync(iocb->fsync.file, iocb->fsync.datasync); 1645 revert_creds(old_cred); 1627 revert_creds(old_cred); 1646 put_cred(iocb->fsync.creds); 1628 put_cred(iocb->fsync.creds); 1647 iocb_put(iocb); 1629 iocb_put(iocb); 1648 } 1630 } 1649 1631 1650 static int aio_fsync(struct fsync_iocb *req, 1632 static int aio_fsync(struct fsync_iocb *req, const struct iocb *iocb, 1651 bool datasync) 1633 bool datasync) 1652 { 1634 { 1653 if (unlikely(iocb->aio_buf || iocb->a 1635 if (unlikely(iocb->aio_buf || iocb->aio_offset || iocb->aio_nbytes || 1654 iocb->aio_rw_flags)) 1636 iocb->aio_rw_flags)) 1655 return -EINVAL; 1637 return -EINVAL; 1656 1638 1657 if (unlikely(!req->file->f_op->fsync) 1639 if (unlikely(!req->file->f_op->fsync)) 1658 return -EINVAL; 1640 return -EINVAL; 1659 1641 1660 req->creds = prepare_creds(); 1642 req->creds = prepare_creds(); 1661 if (!req->creds) 1643 if (!req->creds) 1662 return -ENOMEM; 1644 return -ENOMEM; 1663 1645 1664 req->datasync = datasync; 1646 req->datasync = datasync; 1665 INIT_WORK(&req->work, aio_fsync_work) 1647 INIT_WORK(&req->work, aio_fsync_work); 1666 schedule_work(&req->work); 1648 schedule_work(&req->work); 1667 return 0; 1649 return 0; 1668 } 1650 } 1669 1651 1670 static void aio_poll_put_work(struct work_str 1652 static void aio_poll_put_work(struct work_struct *work) 1671 { 1653 { 1672 struct poll_iocb *req = container_of( 1654 struct poll_iocb *req = container_of(work, struct poll_iocb, work); 1673 struct aio_kiocb *iocb = container_of 1655 struct aio_kiocb *iocb = container_of(req, struct aio_kiocb, poll); 1674 1656 1675 iocb_put(iocb); 1657 iocb_put(iocb); 1676 } 1658 } 1677 1659 1678 /* 1660 /* 1679 * Safely lock the waitqueue which the reques 1661 * Safely lock the waitqueue which the request is on, synchronizing with the 1680 * case where the ->poll() provider decides t 1662 * case where the ->poll() provider decides to free its waitqueue early. 1681 * 1663 * 1682 * Returns true on success, meaning that req- 1664 * Returns true on success, meaning that req->head->lock was locked, req->wait 1683 * is on req->head, and an RCU read lock was 1665 * is on req->head, and an RCU read lock was taken. Returns false if the 1684 * request was already removed from its waitq 1666 * request was already removed from its waitqueue (which might no longer exist). 
1685 */ 1667 */ 1686 static bool poll_iocb_lock_wq(struct poll_ioc 1668 static bool poll_iocb_lock_wq(struct poll_iocb *req) 1687 { 1669 { 1688 wait_queue_head_t *head; 1670 wait_queue_head_t *head; 1689 1671 1690 /* 1672 /* 1691 * While we hold the waitqueue lock a 1673 * While we hold the waitqueue lock and the waitqueue is nonempty, 1692 * wake_up_pollfree() will wait for u 1674 * wake_up_pollfree() will wait for us. However, taking the waitqueue 1693 * lock in the first place can race w 1675 * lock in the first place can race with the waitqueue being freed. 1694 * 1676 * 1695 * We solve this as eventpoll does: b 1677 * We solve this as eventpoll does: by taking advantage of the fact that 1696 * all users of wake_up_pollfree() wi 1678 * all users of wake_up_pollfree() will RCU-delay the actual free. If 1697 * we enter rcu_read_lock() and see t 1679 * we enter rcu_read_lock() and see that the pointer to the queue is 1698 * non-NULL, we can then lock it with 1680 * non-NULL, we can then lock it without the memory being freed out from 1699 * under us, then check whether the r 1681 * under us, then check whether the request is still on the queue. 1700 * 1682 * 1701 * Keep holding rcu_read_lock() as lo 1683 * Keep holding rcu_read_lock() as long as we hold the queue lock, in 1702 * case the caller deletes the entry 1684 * case the caller deletes the entry from the queue, leaving it empty. 1703 * In that case, only RCU prevents th 1685 * In that case, only RCU prevents the queue memory from being freed. 1704 */ 1686 */ 1705 rcu_read_lock(); 1687 rcu_read_lock(); 1706 head = smp_load_acquire(&req->head); 1688 head = smp_load_acquire(&req->head); 1707 if (head) { 1689 if (head) { 1708 spin_lock(&head->lock); 1690 spin_lock(&head->lock); 1709 if (!list_empty(&req->wait.en 1691 if (!list_empty(&req->wait.entry)) 1710 return true; 1692 return true; 1711 spin_unlock(&head->lock); 1693 spin_unlock(&head->lock); 1712 } 1694 } 1713 rcu_read_unlock(); 1695 rcu_read_unlock(); 1714 return false; 1696 return false; 1715 } 1697 } 1716 1698 1717 static void poll_iocb_unlock_wq(struct poll_i 1699 static void poll_iocb_unlock_wq(struct poll_iocb *req) 1718 { 1700 { 1719 spin_unlock(&req->head->lock); 1701 spin_unlock(&req->head->lock); 1720 rcu_read_unlock(); 1702 rcu_read_unlock(); 1721 } 1703 } 1722 1704 1723 static void aio_poll_complete_work(struct wor 1705 static void aio_poll_complete_work(struct work_struct *work) 1724 { 1706 { 1725 struct poll_iocb *req = container_of( 1707 struct poll_iocb *req = container_of(work, struct poll_iocb, work); 1726 struct aio_kiocb *iocb = container_of 1708 struct aio_kiocb *iocb = container_of(req, struct aio_kiocb, poll); 1727 struct poll_table_struct pt = { ._key 1709 struct poll_table_struct pt = { ._key = req->events }; 1728 struct kioctx *ctx = iocb->ki_ctx; 1710 struct kioctx *ctx = iocb->ki_ctx; 1729 __poll_t mask = 0; 1711 __poll_t mask = 0; 1730 1712 1731 if (!READ_ONCE(req->cancelled)) 1713 if (!READ_ONCE(req->cancelled)) 1732 mask = vfs_poll(req->file, &p 1714 mask = vfs_poll(req->file, &pt) & req->events; 1733 1715 1734 /* 1716 /* 1735 * Note that ->ki_cancel callers also 1717 * Note that ->ki_cancel callers also delete iocb from active_reqs after 1736 * calling ->ki_cancel. We need the 1718 * calling ->ki_cancel. We need the ctx_lock roundtrip here to 1737 * synchronize with them. In the can 1719 * synchronize with them. 
In the cancellation case the list_del_init 1738 * itself is not actually needed, but 1720 * itself is not actually needed, but harmless so we keep it in to 1739 * avoid further branches in the fast 1721 * avoid further branches in the fast path. 1740 */ 1722 */ 1741 spin_lock_irq(&ctx->ctx_lock); 1723 spin_lock_irq(&ctx->ctx_lock); 1742 if (poll_iocb_lock_wq(req)) { 1724 if (poll_iocb_lock_wq(req)) { 1743 if (!mask && !READ_ONCE(req-> 1725 if (!mask && !READ_ONCE(req->cancelled)) { 1744 /* 1726 /* 1745 * The request isn't 1727 * The request isn't actually ready to be completed yet. 1746 * Reschedule complet 1728 * Reschedule completion if another wakeup came in. 1747 */ 1729 */ 1748 if (req->work_need_re 1730 if (req->work_need_resched) { 1749 schedule_work 1731 schedule_work(&req->work); 1750 req->work_nee 1732 req->work_need_resched = false; 1751 } else { 1733 } else { 1752 req->work_sch 1734 req->work_scheduled = false; 1753 } 1735 } 1754 poll_iocb_unlock_wq(r 1736 poll_iocb_unlock_wq(req); 1755 spin_unlock_irq(&ctx- 1737 spin_unlock_irq(&ctx->ctx_lock); 1756 return; 1738 return; 1757 } 1739 } 1758 list_del_init(&req->wait.entr 1740 list_del_init(&req->wait.entry); 1759 poll_iocb_unlock_wq(req); 1741 poll_iocb_unlock_wq(req); 1760 } /* else, POLLFREE has freed the wai 1742 } /* else, POLLFREE has freed the waitqueue, so we must complete */ 1761 list_del_init(&iocb->ki_list); 1743 list_del_init(&iocb->ki_list); 1762 iocb->ki_res.res = mangle_poll(mask); 1744 iocb->ki_res.res = mangle_poll(mask); 1763 spin_unlock_irq(&ctx->ctx_lock); 1745 spin_unlock_irq(&ctx->ctx_lock); 1764 1746 1765 iocb_put(iocb); 1747 iocb_put(iocb); 1766 } 1748 } 1767 1749 1768 /* assumes we are called with irqs disabled * 1750 /* assumes we are called with irqs disabled */ 1769 static int aio_poll_cancel(struct kiocb *iocb 1751 static int aio_poll_cancel(struct kiocb *iocb) 1770 { 1752 { 1771 struct aio_kiocb *aiocb = container_o 1753 struct aio_kiocb *aiocb = container_of(iocb, struct aio_kiocb, rw); 1772 struct poll_iocb *req = &aiocb->poll; 1754 struct poll_iocb *req = &aiocb->poll; 1773 1755 1774 if (poll_iocb_lock_wq(req)) { 1756 if (poll_iocb_lock_wq(req)) { 1775 WRITE_ONCE(req->cancelled, tr 1757 WRITE_ONCE(req->cancelled, true); 1776 if (!req->work_scheduled) { 1758 if (!req->work_scheduled) { 1777 schedule_work(&aiocb- 1759 schedule_work(&aiocb->poll.work); 1778 req->work_scheduled = 1760 req->work_scheduled = true; 1779 } 1761 } 1780 poll_iocb_unlock_wq(req); 1762 poll_iocb_unlock_wq(req); 1781 } /* else, the request was force-canc 1763 } /* else, the request was force-cancelled by POLLFREE already */ 1782 1764 1783 return 0; 1765 return 0; 1784 } 1766 } 1785 1767 1786 static int aio_poll_wake(struct wait_queue_en 1768 static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync, 1787 void *key) 1769 void *key) 1788 { 1770 { 1789 struct poll_iocb *req = container_of( 1771 struct poll_iocb *req = container_of(wait, struct poll_iocb, wait); 1790 struct aio_kiocb *iocb = container_of 1772 struct aio_kiocb *iocb = container_of(req, struct aio_kiocb, poll); 1791 __poll_t mask = key_to_poll(key); 1773 __poll_t mask = key_to_poll(key); 1792 unsigned long flags; 1774 unsigned long flags; 1793 1775 1794 /* for instances that support it chec 1776 /* for instances that support it check for an event match first: */ 1795 if (mask && !(mask & req->events)) 1777 if (mask && !(mask & req->events)) 1796 return 0; 1778 return 0; 1797 1779 1798 /* 1780 /* 1799 * Complete the request inline if pos 1781 * 
Complete the request inline if possible. This requires that three 1800 * conditions be met: 1782 * conditions be met: 1801 * 1. An event mask must have been 1783 * 1. An event mask must have been passed. If a plain wakeup was done 1802 * instead, then mask == 0 and w 1784 * instead, then mask == 0 and we have to call vfs_poll() to get 1803 * the events, so inline complet 1785 * the events, so inline completion isn't possible. 1804 * 2. The completion work must not 1786 * 2. The completion work must not have already been scheduled. 1805 * 3. ctx_lock must not be busy. W 1787 * 3. ctx_lock must not be busy. We have to use trylock because we 1806 * already hold the waitqueue lo 1788 * already hold the waitqueue lock, so this inverts the normal 1807 * locking order. Use irqsave/i 1789 * locking order. Use irqsave/irqrestore because not all 1808 * filesystems (e.g. fuse) call 1790 * filesystems (e.g. fuse) call this function with IRQs disabled, 1809 * yet IRQs have to be disabled 1791 * yet IRQs have to be disabled before ctx_lock is obtained. 1810 */ 1792 */ 1811 if (mask && !req->work_scheduled && 1793 if (mask && !req->work_scheduled && 1812 spin_trylock_irqsave(&iocb->ki_ct 1794 spin_trylock_irqsave(&iocb->ki_ctx->ctx_lock, flags)) { 1813 struct kioctx *ctx = iocb->ki 1795 struct kioctx *ctx = iocb->ki_ctx; 1814 1796 1815 list_del_init(&req->wait.entr 1797 list_del_init(&req->wait.entry); 1816 list_del(&iocb->ki_list); 1798 list_del(&iocb->ki_list); 1817 iocb->ki_res.res = mangle_pol 1799 iocb->ki_res.res = mangle_poll(mask); 1818 if (iocb->ki_eventfd && !even 1800 if (iocb->ki_eventfd && !eventfd_signal_allowed()) { 1819 iocb = NULL; 1801 iocb = NULL; 1820 INIT_WORK(&req->work, 1802 INIT_WORK(&req->work, aio_poll_put_work); 1821 schedule_work(&req->w 1803 schedule_work(&req->work); 1822 } 1804 } 1823 spin_unlock_irqrestore(&ctx-> 1805 spin_unlock_irqrestore(&ctx->ctx_lock, flags); 1824 if (iocb) 1806 if (iocb) 1825 iocb_put(iocb); 1807 iocb_put(iocb); 1826 } else { 1808 } else { 1827 /* 1809 /* 1828 * Schedule the completion wo 1810 * Schedule the completion work if needed. If it was already 1829 * scheduled, record that ano 1811 * scheduled, record that another wakeup came in. 1830 * 1812 * 1831 * Don't remove the request f 1813 * Don't remove the request from the waitqueue here, as it might 1832 * not actually be complete y 1814 * not actually be complete yet (we won't know until vfs_poll() 1833 * is called), and we must no 1815 * is called), and we must not miss any wakeups. POLLFREE is an 1834 * exception to this; see bel 1816 * exception to this; see below. 1835 */ 1817 */ 1836 if (req->work_scheduled) { 1818 if (req->work_scheduled) { 1837 req->work_need_resche 1819 req->work_need_resched = true; 1838 } else { 1820 } else { 1839 schedule_work(&req->w 1821 schedule_work(&req->work); 1840 req->work_scheduled = 1822 req->work_scheduled = true; 1841 } 1823 } 1842 1824 1843 /* 1825 /* 1844 * If the waitqueue is being 1826 * If the waitqueue is being freed early but we can't complete 1845 * the request inline, we hav 1827 * the request inline, we have to tear down the request as best 1846 * we can. That means immedi 1828 * we can. That means immediately removing the request from its 1847 * waitqueue and preventing a 1829 * waitqueue and preventing all further accesses to the 1848 * waitqueue via the request. 1830 * waitqueue via the request. We also need to schedule the 1849 * completion work (done abov 1831 * completion work (done above). 
Also mark the request as 1850 * cancelled, to potentially 1832 * cancelled, to potentially skip an unneeded call to ->poll(). 1851 */ 1833 */ 1852 if (mask & POLLFREE) { 1834 if (mask & POLLFREE) { 1853 WRITE_ONCE(req->cance 1835 WRITE_ONCE(req->cancelled, true); 1854 list_del_init(&req->w 1836 list_del_init(&req->wait.entry); 1855 1837 1856 /* 1838 /* 1857 * Careful: this *mus 1839 * Careful: this *must* be the last step, since as soon 1858 * as req->head is NU 1840 * as req->head is NULL'ed out, the request can be 1859 * completed and free 1841 * completed and freed, since aio_poll_complete_work() 1860 * will no longer nee 1842 * will no longer need to take the waitqueue lock. 1861 */ 1843 */ 1862 smp_store_release(&re 1844 smp_store_release(&req->head, NULL); 1863 } 1845 } 1864 } 1846 } 1865 return 1; 1847 return 1; 1866 } 1848 } 1867 1849 1868 struct aio_poll_table { 1850 struct aio_poll_table { 1869 struct poll_table_struct pt; 1851 struct poll_table_struct pt; 1870 struct aio_kiocb *iocb 1852 struct aio_kiocb *iocb; 1871 bool queue 1853 bool queued; 1872 int error 1854 int error; 1873 }; 1855 }; 1874 1856 1875 static void 1857 static void 1876 aio_poll_queue_proc(struct file *file, struct 1858 aio_poll_queue_proc(struct file *file, struct wait_queue_head *head, 1877 struct poll_table_struct *p) 1859 struct poll_table_struct *p) 1878 { 1860 { 1879 struct aio_poll_table *pt = container 1861 struct aio_poll_table *pt = container_of(p, struct aio_poll_table, pt); 1880 1862 1881 /* multiple wait queues per file are 1863 /* multiple wait queues per file are not supported */ 1882 if (unlikely(pt->queued)) { 1864 if (unlikely(pt->queued)) { 1883 pt->error = -EINVAL; 1865 pt->error = -EINVAL; 1884 return; 1866 return; 1885 } 1867 } 1886 1868 1887 pt->queued = true; 1869 pt->queued = true; 1888 pt->error = 0; 1870 pt->error = 0; 1889 pt->iocb->poll.head = head; 1871 pt->iocb->poll.head = head; 1890 add_wait_queue(head, &pt->iocb->poll. 1872 add_wait_queue(head, &pt->iocb->poll.wait); 1891 } 1873 } 1892 1874 1893 static int aio_poll(struct aio_kiocb *aiocb, 1875 static int aio_poll(struct aio_kiocb *aiocb, const struct iocb *iocb) 1894 { 1876 { 1895 struct kioctx *ctx = aiocb->ki_ctx; 1877 struct kioctx *ctx = aiocb->ki_ctx; 1896 struct poll_iocb *req = &aiocb->poll; 1878 struct poll_iocb *req = &aiocb->poll; 1897 struct aio_poll_table apt; 1879 struct aio_poll_table apt; 1898 bool cancel = false; 1880 bool cancel = false; 1899 __poll_t mask; 1881 __poll_t mask; 1900 1882 1901 /* reject any unknown events outside 1883 /* reject any unknown events outside the normal event mask. 
*/ 1902 if ((u16)iocb->aio_buf != iocb->aio_b 1884 if ((u16)iocb->aio_buf != iocb->aio_buf) 1903 return -EINVAL; 1885 return -EINVAL; 1904 /* reject fields that are not defined 1886 /* reject fields that are not defined for poll */ 1905 if (iocb->aio_offset || iocb->aio_nby 1887 if (iocb->aio_offset || iocb->aio_nbytes || iocb->aio_rw_flags) 1906 return -EINVAL; 1888 return -EINVAL; 1907 1889 1908 INIT_WORK(&req->work, aio_poll_comple 1890 INIT_WORK(&req->work, aio_poll_complete_work); 1909 req->events = demangle_poll(iocb->aio 1891 req->events = demangle_poll(iocb->aio_buf) | EPOLLERR | EPOLLHUP; 1910 1892 1911 req->head = NULL; 1893 req->head = NULL; 1912 req->cancelled = false; 1894 req->cancelled = false; 1913 req->work_scheduled = false; 1895 req->work_scheduled = false; 1914 req->work_need_resched = false; 1896 req->work_need_resched = false; 1915 1897 1916 apt.pt._qproc = aio_poll_queue_proc; 1898 apt.pt._qproc = aio_poll_queue_proc; 1917 apt.pt._key = req->events; 1899 apt.pt._key = req->events; 1918 apt.iocb = aiocb; 1900 apt.iocb = aiocb; 1919 apt.queued = false; 1901 apt.queued = false; 1920 apt.error = -EINVAL; /* same as no su 1902 apt.error = -EINVAL; /* same as no support for IOCB_CMD_POLL */ 1921 1903 1922 /* initialized the list so that we ca 1904 /* initialized the list so that we can do list_empty checks */ 1923 INIT_LIST_HEAD(&req->wait.entry); 1905 INIT_LIST_HEAD(&req->wait.entry); 1924 init_waitqueue_func_entry(&req->wait, 1906 init_waitqueue_func_entry(&req->wait, aio_poll_wake); 1925 1907 1926 mask = vfs_poll(req->file, &apt.pt) & 1908 mask = vfs_poll(req->file, &apt.pt) & req->events; 1927 spin_lock_irq(&ctx->ctx_lock); 1909 spin_lock_irq(&ctx->ctx_lock); 1928 if (likely(apt.queued)) { 1910 if (likely(apt.queued)) { 1929 bool on_queue = poll_iocb_loc 1911 bool on_queue = poll_iocb_lock_wq(req); 1930 1912 1931 if (!on_queue || req->work_sc 1913 if (!on_queue || req->work_scheduled) { 1932 /* 1914 /* 1933 * aio_poll_wake() al 1915 * aio_poll_wake() already either scheduled the async 1934 * completion work, o 1916 * completion work, or completed the request inline. 1935 */ 1917 */ 1936 if (apt.error) /* uns 1918 if (apt.error) /* unsupported case: multiple queues */ 1937 cancel = true 1919 cancel = true; 1938 apt.error = 0; 1920 apt.error = 0; 1939 mask = 0; 1921 mask = 0; 1940 } 1922 } 1941 if (mask || apt.error) { 1923 if (mask || apt.error) { 1942 /* Steal to complete 1924 /* Steal to complete synchronously. */ 1943 list_del_init(&req->w 1925 list_del_init(&req->wait.entry); 1944 } else if (cancel) { 1926 } else if (cancel) { 1945 /* Cancel if possible 1927 /* Cancel if possible (may be too late though). */ 1946 WRITE_ONCE(req->cance 1928 WRITE_ONCE(req->cancelled, true); 1947 } else if (on_queue) { 1929 } else if (on_queue) { 1948 /* 1930 /* 1949 * Actually waiting f 1931 * Actually waiting for an event, so add the request to 1950 * active_reqs so tha 1932 * active_reqs so that it can be cancelled if needed. 
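For reference, the userspace side of the IOCB_CMD_POLL path handled by aio_poll() above: the requested event mask travels in aio_buf, and the poll result comes back in io_event.res. A hedged sketch via the raw syscalls (one-shot, blocking until the fd is ready); note that the kernel always adds error/hangup events to the requested mask.

#include <linux/aio_abi.h>
#include <poll.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Arm a one-shot poll for readability on fd and wait for it; returns the
 * resulting event mask (e.g. POLLIN), or -1 on error. */
static long aio_wait_readable(aio_context_t ctx, int fd)
{
	struct iocb cb;
	struct iocb *cbs[1] = { &cb };
	struct io_event ev;

	memset(&cb, 0, sizeof(cb));
	cb.aio_lio_opcode = IOCB_CMD_POLL;
	cb.aio_fildes	  = fd;
	cb.aio_buf	  = POLLIN;	/* event mask, not a buffer, for POLL */

	if (syscall(__NR_io_submit, ctx, 1, cbs) != 1)
		return -1;
	if (syscall(__NR_io_getevents, ctx, 1, 1, &ev, NULL) != 1)
		return -1;
	return ev.res;
}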
1951 */ 1933 */ 1952 list_add_tail(&aiocb- 1934 list_add_tail(&aiocb->ki_list, &ctx->active_reqs); 1953 aiocb->ki_cancel = ai 1935 aiocb->ki_cancel = aio_poll_cancel; 1954 } 1936 } 1955 if (on_queue) 1937 if (on_queue) 1956 poll_iocb_unlock_wq(r 1938 poll_iocb_unlock_wq(req); 1957 } 1939 } 1958 if (mask) { /* no async, we'd stolen 1940 if (mask) { /* no async, we'd stolen it */ 1959 aiocb->ki_res.res = mangle_po 1941 aiocb->ki_res.res = mangle_poll(mask); 1960 apt.error = 0; 1942 apt.error = 0; 1961 } 1943 } 1962 spin_unlock_irq(&ctx->ctx_lock); 1944 spin_unlock_irq(&ctx->ctx_lock); 1963 if (mask) 1945 if (mask) 1964 iocb_put(aiocb); 1946 iocb_put(aiocb); 1965 return apt.error; 1947 return apt.error; 1966 } 1948 } 1967 1949 1968 static int __io_submit_one(struct kioctx *ctx 1950 static int __io_submit_one(struct kioctx *ctx, const struct iocb *iocb, 1969 struct iocb __user 1951 struct iocb __user *user_iocb, struct aio_kiocb *req, 1970 bool compat) 1952 bool compat) 1971 { 1953 { 1972 req->ki_filp = fget(iocb->aio_fildes) 1954 req->ki_filp = fget(iocb->aio_fildes); 1973 if (unlikely(!req->ki_filp)) 1955 if (unlikely(!req->ki_filp)) 1974 return -EBADF; 1956 return -EBADF; 1975 1957 1976 if (iocb->aio_flags & IOCB_FLAG_RESFD 1958 if (iocb->aio_flags & IOCB_FLAG_RESFD) { 1977 struct eventfd_ctx *eventfd; 1959 struct eventfd_ctx *eventfd; 1978 /* 1960 /* 1979 * If the IOCB_FLAG_RESFD fla 1961 * If the IOCB_FLAG_RESFD flag of aio_flags is set, get an 1980 * instance of the file* now. 1962 * instance of the file* now. The file descriptor must be 1981 * an eventfd() fd, and will 1963 * an eventfd() fd, and will be signaled for each completed 1982 * event using the eventfd_si 1964 * event using the eventfd_signal() function. 1983 */ 1965 */ 1984 eventfd = eventfd_ctx_fdget(i 1966 eventfd = eventfd_ctx_fdget(iocb->aio_resfd); 1985 if (IS_ERR(eventfd)) 1967 if (IS_ERR(eventfd)) 1986 return PTR_ERR(eventf 1968 return PTR_ERR(eventfd); 1987 1969 1988 req->ki_eventfd = eventfd; 1970 req->ki_eventfd = eventfd; 1989 } 1971 } 1990 1972 1991 if (unlikely(put_user(KIOCB_KEY, &use 1973 if (unlikely(put_user(KIOCB_KEY, &user_iocb->aio_key))) { 1992 pr_debug("EFAULT: aio_key\n") 1974 pr_debug("EFAULT: aio_key\n"); 1993 return -EFAULT; 1975 return -EFAULT; 1994 } 1976 } 1995 1977 1996 req->ki_res.obj = (u64)(unsigned long 1978 req->ki_res.obj = (u64)(unsigned long)user_iocb; 1997 req->ki_res.data = iocb->aio_data; 1979 req->ki_res.data = iocb->aio_data; 1998 req->ki_res.res = 0; 1980 req->ki_res.res = 0; 1999 req->ki_res.res2 = 0; 1981 req->ki_res.res2 = 0; 2000 1982 2001 switch (iocb->aio_lio_opcode) { 1983 switch (iocb->aio_lio_opcode) { 2002 case IOCB_CMD_PREAD: 1984 case IOCB_CMD_PREAD: 2003 return aio_read(&req->rw, ioc 1985 return aio_read(&req->rw, iocb, false, compat); 2004 case IOCB_CMD_PWRITE: 1986 case IOCB_CMD_PWRITE: 2005 return aio_write(&req->rw, io 1987 return aio_write(&req->rw, iocb, false, compat); 2006 case IOCB_CMD_PREADV: 1988 case IOCB_CMD_PREADV: 2007 return aio_read(&req->rw, ioc 1989 return aio_read(&req->rw, iocb, true, compat); 2008 case IOCB_CMD_PWRITEV: 1990 case IOCB_CMD_PWRITEV: 2009 return aio_write(&req->rw, io 1991 return aio_write(&req->rw, iocb, true, compat); 2010 case IOCB_CMD_FSYNC: 1992 case IOCB_CMD_FSYNC: 2011 return aio_fsync(&req->fsync, 1993 return aio_fsync(&req->fsync, iocb, false); 2012 case IOCB_CMD_FDSYNC: 1994 case IOCB_CMD_FDSYNC: 2013 return aio_fsync(&req->fsync, 1995 return aio_fsync(&req->fsync, iocb, true); 2014 case IOCB_CMD_POLL: 1996 case 
IOCB_CMD_POLL: 2015 return aio_poll(req, iocb); 1997 return aio_poll(req, iocb); 2016 default: 1998 default: 2017 pr_debug("invalid aio operati 1999 pr_debug("invalid aio operation %d\n", iocb->aio_lio_opcode); 2018 return -EINVAL; 2000 return -EINVAL; 2019 } 2001 } 2020 } 2002 } 2021 2003 2022 static int io_submit_one(struct kioctx *ctx, 2004 static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb, 2023 bool compat) 2005 bool compat) 2024 { 2006 { 2025 struct aio_kiocb *req; 2007 struct aio_kiocb *req; 2026 struct iocb iocb; 2008 struct iocb iocb; 2027 int err; 2009 int err; 2028 2010 2029 if (unlikely(copy_from_user(&iocb, us 2011 if (unlikely(copy_from_user(&iocb, user_iocb, sizeof(iocb)))) 2030 return -EFAULT; 2012 return -EFAULT; 2031 2013 2032 /* enforce forwards compatibility on 2014 /* enforce forwards compatibility on users */ 2033 if (unlikely(iocb.aio_reserved2)) { 2015 if (unlikely(iocb.aio_reserved2)) { 2034 pr_debug("EINVAL: reserve fie 2016 pr_debug("EINVAL: reserve field set\n"); 2035 return -EINVAL; 2017 return -EINVAL; 2036 } 2018 } 2037 2019 2038 /* prevent overflows */ 2020 /* prevent overflows */ 2039 if (unlikely( 2021 if (unlikely( 2040 (iocb.aio_buf != (unsigned long)i 2022 (iocb.aio_buf != (unsigned long)iocb.aio_buf) || 2041 (iocb.aio_nbytes != (size_t)iocb. 2023 (iocb.aio_nbytes != (size_t)iocb.aio_nbytes) || 2042 ((ssize_t)iocb.aio_nbytes < 0) 2024 ((ssize_t)iocb.aio_nbytes < 0) 2043 )) { 2025 )) { 2044 pr_debug("EINVAL: overflow ch 2026 pr_debug("EINVAL: overflow check\n"); 2045 return -EINVAL; 2027 return -EINVAL; 2046 } 2028 } 2047 2029 2048 req = aio_get_req(ctx); 2030 req = aio_get_req(ctx); 2049 if (unlikely(!req)) 2031 if (unlikely(!req)) 2050 return -EAGAIN; 2032 return -EAGAIN; 2051 2033 2052 err = __io_submit_one(ctx, &iocb, use 2034 err = __io_submit_one(ctx, &iocb, user_iocb, req, compat); 2053 2035 2054 /* Done with the synchronous referenc 2036 /* Done with the synchronous reference */ 2055 iocb_put(req); 2037 iocb_put(req); 2056 2038 2057 /* 2039 /* 2058 * If err is 0, we'd either done aio_ 2040 * If err is 0, we'd either done aio_complete() ourselves or have 2059 * arranged for that to be done async 2041 * arranged for that to be done asynchronously. Anything non-zero 2060 * means that we need to destroy req 2042 * means that we need to destroy req ourselves. 2061 */ 2043 */ 2062 if (unlikely(err)) { 2044 if (unlikely(err)) { 2063 iocb_destroy(req); 2045 iocb_destroy(req); 2064 put_reqs_available(ctx, 1); 2046 put_reqs_available(ctx, 1); 2065 } 2047 } 2066 return err; 2048 return err; 2067 } 2049 } 2068 2050 2069 /* sys_io_submit: 2051 /* sys_io_submit: 2070 * Queue the nr iocbs pointed to by iocb 2052 * Queue the nr iocbs pointed to by iocbpp for processing. Returns 2071 * the number of iocbs queued. May retu 2053 * the number of iocbs queued. May return -EINVAL if the aio_context 2072 * specified by ctx_id is invalid, if nr 2054 * specified by ctx_id is invalid, if nr is < 0, if the iocb at 2073 * *iocbpp[0] is not properly initialize 2055 * *iocbpp[0] is not properly initialized, if the operation specified 2074 * is invalid for the file descriptor in 2056 * is invalid for the file descriptor in the iocb. May fail with 2075 * -EFAULT if any of the data structures 2057 * -EFAULT if any of the data structures point to invalid data. May 2076 * fail with -EBADF if the file descript 2058 * fail with -EBADF if the file descriptor specified in the first 2077 * iocb is invalid. May fail with -EAGA 2059 * iocb is invalid. 
May fail with -EAGAIN if insufficient resources 2078 * are available to queue any iocbs. Wi 2060 * are available to queue any iocbs. Will return 0 if nr is 0. Will 2079 * fail with -ENOSYS if not implemented. 2061 * fail with -ENOSYS if not implemented. 2080 */ 2062 */ 2081 SYSCALL_DEFINE3(io_submit, aio_context_t, ctx 2063 SYSCALL_DEFINE3(io_submit, aio_context_t, ctx_id, long, nr, 2082 struct iocb __user * __user * 2064 struct iocb __user * __user *, iocbpp) 2083 { 2065 { 2084 struct kioctx *ctx; 2066 struct kioctx *ctx; 2085 long ret = 0; 2067 long ret = 0; 2086 int i = 0; 2068 int i = 0; 2087 struct blk_plug plug; 2069 struct blk_plug plug; 2088 2070 2089 if (unlikely(nr < 0)) 2071 if (unlikely(nr < 0)) 2090 return -EINVAL; 2072 return -EINVAL; 2091 2073 2092 ctx = lookup_ioctx(ctx_id); 2074 ctx = lookup_ioctx(ctx_id); 2093 if (unlikely(!ctx)) { 2075 if (unlikely(!ctx)) { 2094 pr_debug("EINVAL: invalid con 2076 pr_debug("EINVAL: invalid context id\n"); 2095 return -EINVAL; 2077 return -EINVAL; 2096 } 2078 } 2097 2079 2098 if (nr > ctx->nr_events) 2080 if (nr > ctx->nr_events) 2099 nr = ctx->nr_events; 2081 nr = ctx->nr_events; 2100 2082 2101 if (nr > AIO_PLUG_THRESHOLD) 2083 if (nr > AIO_PLUG_THRESHOLD) 2102 blk_start_plug(&plug); 2084 blk_start_plug(&plug); 2103 for (i = 0; i < nr; i++) { 2085 for (i = 0; i < nr; i++) { 2104 struct iocb __user *user_iocb 2086 struct iocb __user *user_iocb; 2105 2087 2106 if (unlikely(get_user(user_io 2088 if (unlikely(get_user(user_iocb, iocbpp + i))) { 2107 ret = -EFAULT; 2089 ret = -EFAULT; 2108 break; 2090 break; 2109 } 2091 } 2110 2092 2111 ret = io_submit_one(ctx, user 2093 ret = io_submit_one(ctx, user_iocb, false); 2112 if (ret) 2094 if (ret) 2113 break; 2095 break; 2114 } 2096 } 2115 if (nr > AIO_PLUG_THRESHOLD) 2097 if (nr > AIO_PLUG_THRESHOLD) 2116 blk_finish_plug(&plug); 2098 blk_finish_plug(&plug); 2117 2099 2118 percpu_ref_put(&ctx->users); 2100 percpu_ref_put(&ctx->users); 2119 return i ? i : ret; 2101 return i ? 
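Tying the submission paths above together from the userspace side: one IOCB_CMD_PREAD submitted through io_submit(), with IOCB_FLAG_RESFD requesting a completion signal on an eventfd created beforehand with eventfd(2). Illustrative sketch; error handling trimmed. The completed io_event must still be reaped with io_getevents(); the eventfd only tells the caller that something is ready.

#include <linux/aio_abi.h>
#include <stdint.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Submit one asynchronous read of len bytes at offset off; the kernel will
 * add 1 to the eventfd counter of efd when the read completes. */
static int submit_pread(aio_context_t ctx, int fd, void *buf, size_t len,
			long long off, int efd)
{
	struct iocb cb;
	struct iocb *cbs[1] = { &cb };

	memset(&cb, 0, sizeof(cb));
	cb.aio_lio_opcode = IOCB_CMD_PREAD;
	cb.aio_fildes	  = fd;
	cb.aio_buf	  = (uintptr_t)buf;
	cb.aio_nbytes	  = len;
	cb.aio_offset	  = off;
	cb.aio_flags	  = IOCB_FLAG_RESFD;	/* notify via eventfd */
	cb.aio_resfd	  = efd;

	return syscall(__NR_io_submit, ctx, 1, cbs);	/* 1 on success */
}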
i : ret; 2120 } 2102 } 2121 2103 2122 #ifdef CONFIG_COMPAT 2104 #ifdef CONFIG_COMPAT 2123 COMPAT_SYSCALL_DEFINE3(io_submit, compat_aio_ 2105 COMPAT_SYSCALL_DEFINE3(io_submit, compat_aio_context_t, ctx_id, 2124 int, nr, compat_uptr_t 2106 int, nr, compat_uptr_t __user *, iocbpp) 2125 { 2107 { 2126 struct kioctx *ctx; 2108 struct kioctx *ctx; 2127 long ret = 0; 2109 long ret = 0; 2128 int i = 0; 2110 int i = 0; 2129 struct blk_plug plug; 2111 struct blk_plug plug; 2130 2112 2131 if (unlikely(nr < 0)) 2113 if (unlikely(nr < 0)) 2132 return -EINVAL; 2114 return -EINVAL; 2133 2115 2134 ctx = lookup_ioctx(ctx_id); 2116 ctx = lookup_ioctx(ctx_id); 2135 if (unlikely(!ctx)) { 2117 if (unlikely(!ctx)) { 2136 pr_debug("EINVAL: invalid con 2118 pr_debug("EINVAL: invalid context id\n"); 2137 return -EINVAL; 2119 return -EINVAL; 2138 } 2120 } 2139 2121 2140 if (nr > ctx->nr_events) 2122 if (nr > ctx->nr_events) 2141 nr = ctx->nr_events; 2123 nr = ctx->nr_events; 2142 2124 2143 if (nr > AIO_PLUG_THRESHOLD) 2125 if (nr > AIO_PLUG_THRESHOLD) 2144 blk_start_plug(&plug); 2126 blk_start_plug(&plug); 2145 for (i = 0; i < nr; i++) { 2127 for (i = 0; i < nr; i++) { 2146 compat_uptr_t user_iocb; 2128 compat_uptr_t user_iocb; 2147 2129 2148 if (unlikely(get_user(user_io 2130 if (unlikely(get_user(user_iocb, iocbpp + i))) { 2149 ret = -EFAULT; 2131 ret = -EFAULT; 2150 break; 2132 break; 2151 } 2133 } 2152 2134 2153 ret = io_submit_one(ctx, comp 2135 ret = io_submit_one(ctx, compat_ptr(user_iocb), true); 2154 if (ret) 2136 if (ret) 2155 break; 2137 break; 2156 } 2138 } 2157 if (nr > AIO_PLUG_THRESHOLD) 2139 if (nr > AIO_PLUG_THRESHOLD) 2158 blk_finish_plug(&plug); 2140 blk_finish_plug(&plug); 2159 2141 2160 percpu_ref_put(&ctx->users); 2142 percpu_ref_put(&ctx->users); 2161 return i ? i : ret; 2143 return i ? i : ret; 2162 } 2144 } 2163 #endif 2145 #endif 2164 2146 2165 /* sys_io_cancel: 2147 /* sys_io_cancel: 2166 * Attempts to cancel an iocb previously 2148 * Attempts to cancel an iocb previously passed to io_submit. If 2167 * the operation is successfully cancell 2149 * the operation is successfully cancelled, the resulting event is 2168 * copied into the memory pointed to by 2150 * copied into the memory pointed to by result without being placed 2169 * into the completion queue and 0 is re 2151 * into the completion queue and 0 is returned. May fail with 2170 * -EFAULT if any of the data structures 2152 * -EFAULT if any of the data structures pointed to are invalid. 2171 * May fail with -EINVAL if aio_context 2153 * May fail with -EINVAL if aio_context specified by ctx_id is 2172 * invalid. May fail with -EAGAIN if th 2154 * invalid. May fail with -EAGAIN if the iocb specified was not 2173 * cancelled. Will fail with -ENOSYS if 2155 * cancelled. Will fail with -ENOSYS if not implemented. 
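A userspace sketch matching the io_cancel() documentation above. With the implementation shown here, a cancellation that was started reports EINPROGRESS and the final io_event is still delivered through the ring (the result argument is no longer filled in); raw syscall, illustrative only.

#include <errno.h>
#include <linux/aio_abi.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Returns 0 if cancellation was started (the completion still arrives via
 * io_getevents()), -1 with errno set otherwise (e.g. EINVAL if the iocb
 * is not in flight). */
static int try_cancel(aio_context_t ctx, struct iocb *cb)
{
	struct io_event unused;

	if (syscall(__NR_io_cancel, ctx, cb, &unused) == 0)
		return 0;
	return errno == EINPROGRESS ? 0 : -1;
}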
SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb,
		struct io_event __user *, result)
{
	struct kioctx *ctx;
	struct aio_kiocb *kiocb;
	int ret = -EINVAL;
	u32 key;
	u64 obj = (u64)(unsigned long)iocb;

	if (unlikely(get_user(key, &iocb->aio_key)))
		return -EFAULT;
	if (unlikely(key != KIOCB_KEY))
		return -EINVAL;

	ctx = lookup_ioctx(ctx_id);
	if (unlikely(!ctx))
		return -EINVAL;

	spin_lock_irq(&ctx->ctx_lock);
	/* TODO: use a hash or array, this sucks. */
	list_for_each_entry(kiocb, &ctx->active_reqs, ki_list) {
		if (kiocb->ki_res.obj == obj) {
			ret = kiocb->ki_cancel(&kiocb->rw);
			list_del_init(&kiocb->ki_list);
			break;
		}
	}
	spin_unlock_irq(&ctx->ctx_lock);

	if (!ret) {
		/*
		 * The result argument is no longer used - the io_event is
		 * always delivered via the ring buffer.  -EINPROGRESS
		 * indicates that cancellation is in progress:
		 */
		ret = -EINPROGRESS;
	}

	percpu_ref_put(&ctx->users);

	return ret;
}
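
/*
 * Illustrative userspace sketch (not part of fs/aio.c): how a caller might
 * request cancellation of a previously submitted iocb.  "ctx" and "cb" are
 * assumed to come from an io_setup()/io_submit() pair as in the sketch
 * further above.  As the function above shows, a successfully located
 * request reports -EINPROGRESS and its completion still arrives through the
 * ring buffer rather than through the "result" argument.
 */
#if 0	/* example only */
#include <linux/aio_abi.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <errno.h>
#include <stdio.h>

static void try_cancel(aio_context_t ctx, struct iocb *cb)
{
	struct io_event res;

	if (syscall(__NR_io_cancel, ctx, cb, &res) == 0) {
		/* not expected with current kernels, see above */
		printf("cancelled synchronously\n");
	} else if (errno == EINPROGRESS) {
		printf("cancellation requested; reap the event normally\n");
	} else {
		perror("io_cancel");	/* typically EAGAIN or EINVAL */
	}
}
#endif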

static long do_io_getevents(aio_context_t ctx_id,
		long min_nr,
		long nr,
		struct io_event __user *events,
		struct timespec64 *ts)
{
	ktime_t until = ts ? timespec64_to_ktime(*ts) : KTIME_MAX;
	struct kioctx *ioctx = lookup_ioctx(ctx_id);
	long ret = -EINVAL;

	if (likely(ioctx)) {
		if (likely(min_nr <= nr && min_nr >= 0))
			ret = read_events(ioctx, min_nr, nr, events, until);
		percpu_ref_put(&ioctx->users);
	}

	return ret;
}

/* io_getevents:
 *	Attempts to read at least min_nr events and up to nr events from
 *	the completion queue for the aio_context specified by ctx_id.  If
 *	it succeeds, the number of read events is returned.  May fail with
 *	-EINVAL if ctx_id is invalid, if min_nr is out of range, if nr is
 *	out of range, if timeout is out of range.  May fail with -EFAULT
 *	if any of the memory specified is invalid.  May return 0 or
 *	< min_nr if the timeout specified by timeout has elapsed
 *	before sufficient events are available, where timeout == NULL
 *	specifies an infinite timeout.  Note that the timeout pointed to by
 *	timeout is relative.  Will fail with -ENOSYS if not implemented.
 */
#ifdef CONFIG_64BIT

SYSCALL_DEFINE5(io_getevents, aio_context_t, ctx_id,
		long, min_nr,
		long, nr,
		struct io_event __user *, events,
		struct __kernel_timespec __user *, timeout)
{
	struct timespec64	ts;
	int			ret;

	if (timeout && unlikely(get_timespec64(&ts, timeout)))
		return -EFAULT;

	ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &ts : NULL);
	if (!ret && signal_pending(current))
		ret = -EINTR;
	return ret;
}

#endif
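
/*
 * Illustrative userspace sketch (not part of fs/aio.c): reaping completions
 * with io_getevents().  A NULL timeout would block until min_nr events are
 * available; the 1-second relative timeout used here means fewer than min_nr
 * events (including zero) may be returned, as described in the comment above.
 */
#if 0	/* example only */
#include <linux/aio_abi.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdio.h>
#include <time.h>

static void reap(aio_context_t ctx)
{
	struct io_event events[8];
	struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
	long i, n;

	n = syscall(__NR_io_getevents, ctx, 1, 8, events, &ts);
	if (n < 0) {
		perror("io_getevents");
		return;
	}
	for (i = 0; i < n; i++)
		printf("iocb %#llx completed, res=%lld\n",
		       (unsigned long long)events[i].obj,
		       (long long)events[i].res);
}
#endif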

struct __aio_sigset {
	const sigset_t __user	*sigmask;
	size_t		sigsetsize;
};

SYSCALL_DEFINE6(io_pgetevents,
		aio_context_t, ctx_id,
		long, min_nr,
		long, nr,
		struct io_event __user *, events,
		struct __kernel_timespec __user *, timeout,
		const struct __aio_sigset __user *, usig)
{
	struct __aio_sigset	ksig = { NULL, };
	struct timespec64	ts;
	bool interrupted;
	int ret;

	if (timeout && unlikely(get_timespec64(&ts, timeout)))
		return -EFAULT;

	if (usig && copy_from_user(&ksig, usig, sizeof(ksig)))
		return -EFAULT;

	ret = set_user_sigmask(ksig.sigmask, ksig.sigsetsize);
	if (ret)
		return ret;

	ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &ts : NULL);

	interrupted = signal_pending(current);
	restore_saved_sigmask_unless(interrupted);
	if (interrupted && !ret)
		ret = -ERESTARTNOHAND;

	return ret;
}
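
/*
 * Illustrative userspace sketch (not part of fs/aio.c): waiting for events
 * with io_pgetevents() while a temporary signal mask is installed, analogous
 * to pselect(2)/ppoll(2).  The local struct mirroring __aio_sigset, the
 * 8-byte kernel sigset size and the availability of __NR_io_pgetevents
 * (Linux 4.18+ headers) are assumptions of this example.
 */
#if 0	/* example only */
#include <linux/aio_abi.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <signal.h>
#include <time.h>

struct example_aio_sigset {		/* userspace mirror of __aio_sigset */
	const sigset_t *sigmask;
	size_t sigsetsize;
};

static long wait_one_event(aio_context_t ctx, struct io_event *ev)
{
	sigset_t allow;
	struct example_aio_sigset usig;
	struct timespec ts = { .tv_sec = 5, .tv_nsec = 0 };

	sigemptyset(&allow);		/* block nothing while waiting */
	usig.sigmask = &allow;
	usig.sigsetsize = 8;		/* kernel sigset size (assumption) */

	/* the mask is only in effect for the duration of the wait */
	return syscall(__NR_io_pgetevents, ctx, 1, 1, ev, &ts, &usig);
}
#endif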

#if defined(CONFIG_COMPAT_32BIT_TIME) && !defined(CONFIG_64BIT)

SYSCALL_DEFINE6(io_pgetevents_time32,
		aio_context_t, ctx_id,
		long, min_nr,
		long, nr,
		struct io_event __user *, events,
		struct old_timespec32 __user *, timeout,
		const struct __aio_sigset __user *, usig)
{
	struct __aio_sigset	ksig = { NULL, };
	struct timespec64	ts;
	bool interrupted;
	int ret;

	if (timeout && unlikely(get_old_timespec32(&ts, timeout)))
		return -EFAULT;

	if (usig && copy_from_user(&ksig, usig, sizeof(ksig)))
		return -EFAULT;

	ret = set_user_sigmask(ksig.sigmask, ksig.sigsetsize);
	if (ret)
		return ret;

	ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &ts : NULL);

	interrupted = signal_pending(current);
	restore_saved_sigmask_unless(interrupted);
	if (interrupted && !ret)
		ret = -ERESTARTNOHAND;

	return ret;
}

#endif

#if defined(CONFIG_COMPAT_32BIT_TIME)

SYSCALL_DEFINE5(io_getevents_time32, __u32, ctx_id,
		__s32, min_nr,
		__s32, nr,
		struct io_event __user *, events,
		struct old_timespec32 __user *, timeout)
{
	struct timespec64 t;
	int ret;

	if (timeout && get_old_timespec32(&t, timeout))
		return -EFAULT;

	ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &t : NULL);
	if (!ret && signal_pending(current))
		ret = -EINTR;
	return ret;
}

#endif

#ifdef CONFIG_COMPAT

struct __compat_aio_sigset {
	compat_uptr_t		sigmask;
	compat_size_t		sigsetsize;
};

#if defined(CONFIG_COMPAT_32BIT_TIME)

COMPAT_SYSCALL_DEFINE6(io_pgetevents,
		       compat_aio_context_t, ctx_id,
		       compat_long_t, min_nr,
		       compat_long_t, nr,
		       struct io_event __user *, events,
		       struct old_timespec32 __user *, timeout,
		       const struct __compat_aio_sigset __user *, usig)
{
	struct __compat_aio_sigset ksig = { 0, };
	struct timespec64 t;
	bool interrupted;
	int ret;

	if (timeout && get_old_timespec32(&t, timeout))
		return -EFAULT;

	if (usig && copy_from_user(&ksig, usig, sizeof(ksig)))
		return -EFAULT;

	ret = set_compat_user_sigmask(compat_ptr(ksig.sigmask), ksig.sigsetsize);
	if (ret)
		return ret;

	ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &t : NULL);

	interrupted = signal_pending(current);
	restore_saved_sigmask_unless(interrupted);
	if (interrupted && !ret)
		ret = -ERESTARTNOHAND;

	return ret;
}

#endif

COMPAT_SYSCALL_DEFINE6(io_pgetevents_time64,
		       compat_aio_context_t, ctx_id,
		       compat_long_t, min_nr,
		       compat_long_t, nr,
		       struct io_event __user *, events,
		       struct __kernel_timespec __user *, timeout,
		       const struct __compat_aio_sigset __user *, usig)
{
	struct __compat_aio_sigset ksig = { 0, };
	struct timespec64 t;
	bool interrupted;
	int ret;

	if (timeout && get_timespec64(&t, timeout))
		return -EFAULT;

	if (usig && copy_from_user(&ksig, usig, sizeof(ksig)))
		return -EFAULT;

	ret = set_compat_user_sigmask(compat_ptr(ksig.sigmask), ksig.sigsetsize);
	if (ret)
		return ret;

	ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &t : NULL);

	interrupted = signal_pending(current);
	restore_saved_sigmask_unless(interrupted);
	if (interrupted && !ret)
		ret = -ERESTARTNOHAND;

	return ret;
}
#endif