
TOMOYO Linux Cross Reference
Linux/fs/aio.c

Diff markup

Differences between /fs/aio.c (Version linux-6.12-rc7, left column) and /fs/aio.c (Version linux-5.11.22, right column). Markup: "!!" flags a line that differs between the two versions, "<<" a line present only in linux-6.12-rc7, and ">>" a line present only in linux-5.11.22.


  1 /*                                                  1 /*
  2  *      An async IO implementation for Linux        2  *      An async IO implementation for Linux
  3  *      Written by Benjamin LaHaise <bcrl@kvac      3  *      Written by Benjamin LaHaise <bcrl@kvack.org>
  4  *                                                  4  *
  5  *      Implements an efficient asynchronous i      5  *      Implements an efficient asynchronous io interface.
  6  *                                                  6  *
  7  *      Copyright 2000, 2001, 2002 Red Hat, In      7  *      Copyright 2000, 2001, 2002 Red Hat, Inc.  All Rights Reserved.
  8  *      Copyright 2018 Christoph Hellwig.           8  *      Copyright 2018 Christoph Hellwig.
  9  *                                                  9  *
 10  *      See ../COPYING for licensing terms.        10  *      See ../COPYING for licensing terms.
 11  */                                                11  */
 12 #define pr_fmt(fmt) "%s: " fmt, __func__           12 #define pr_fmt(fmt) "%s: " fmt, __func__
 13                                                    13 
 14 #include <linux/kernel.h>                          14 #include <linux/kernel.h>
 15 #include <linux/init.h>                            15 #include <linux/init.h>
 16 #include <linux/errno.h>                           16 #include <linux/errno.h>
 17 #include <linux/time.h>                            17 #include <linux/time.h>
 18 #include <linux/aio_abi.h>                         18 #include <linux/aio_abi.h>
 19 #include <linux/export.h>                          19 #include <linux/export.h>
 20 #include <linux/syscalls.h>                        20 #include <linux/syscalls.h>
 21 #include <linux/backing-dev.h>                     21 #include <linux/backing-dev.h>
 22 #include <linux/refcount.h>                        22 #include <linux/refcount.h>
 23 #include <linux/uio.h>                             23 #include <linux/uio.h>
 24                                                    24 
 25 #include <linux/sched/signal.h>                    25 #include <linux/sched/signal.h>
 26 #include <linux/fs.h>                              26 #include <linux/fs.h>
 27 #include <linux/file.h>                            27 #include <linux/file.h>
 28 #include <linux/mm.h>                              28 #include <linux/mm.h>
 29 #include <linux/mman.h>                            29 #include <linux/mman.h>
 30 #include <linux/percpu.h>                          30 #include <linux/percpu.h>
 31 #include <linux/slab.h>                            31 #include <linux/slab.h>
 32 #include <linux/timer.h>                           32 #include <linux/timer.h>
 33 #include <linux/aio.h>                             33 #include <linux/aio.h>
 34 #include <linux/highmem.h>                         34 #include <linux/highmem.h>
 35 #include <linux/workqueue.h>                       35 #include <linux/workqueue.h>
 36 #include <linux/security.h>                        36 #include <linux/security.h>
 37 #include <linux/eventfd.h>                         37 #include <linux/eventfd.h>
 38 #include <linux/blkdev.h>                          38 #include <linux/blkdev.h>
 39 #include <linux/compat.h>                          39 #include <linux/compat.h>
 40 #include <linux/migrate.h>                         40 #include <linux/migrate.h>
 41 #include <linux/ramfs.h>                           41 #include <linux/ramfs.h>
 42 #include <linux/percpu-refcount.h>                 42 #include <linux/percpu-refcount.h>
 43 #include <linux/mount.h>                           43 #include <linux/mount.h>
 44 #include <linux/pseudo_fs.h>                       44 #include <linux/pseudo_fs.h>
 45                                                    45 
 46 #include <linux/uaccess.h>                         46 #include <linux/uaccess.h>
 47 #include <linux/nospec.h>                          47 #include <linux/nospec.h>
 48                                                    48 
 49 #include "internal.h"                              49 #include "internal.h"
 50                                                    50 
 51 #define KIOCB_KEY               0                  51 #define KIOCB_KEY               0
 52                                                    52 
 53 #define AIO_RING_MAGIC                  0xa10a     53 #define AIO_RING_MAGIC                  0xa10a10a1
 54 #define AIO_RING_COMPAT_FEATURES        1          54 #define AIO_RING_COMPAT_FEATURES        1
 55 #define AIO_RING_INCOMPAT_FEATURES      0          55 #define AIO_RING_INCOMPAT_FEATURES      0
 56 struct aio_ring {                                  56 struct aio_ring {
 57         unsigned        id;     /* kernel inte     57         unsigned        id;     /* kernel internal index number */
 58         unsigned        nr;     /* number of i     58         unsigned        nr;     /* number of io_events */
 59         unsigned        head;   /* Written to      59         unsigned        head;   /* Written to by userland or under ring_lock
 60                                  * mutex by ai     60                                  * mutex by aio_read_events_ring(). */
 61         unsigned        tail;                      61         unsigned        tail;
 62                                                    62 
 63         unsigned        magic;                     63         unsigned        magic;
 64         unsigned        compat_features;           64         unsigned        compat_features;
 65         unsigned        incompat_features;         65         unsigned        incompat_features;
 66         unsigned        header_length;  /* siz     66         unsigned        header_length;  /* size of aio_ring */
 67                                                    67 
 68                                                    68 
 69         struct io_event         io_events[];       69         struct io_event         io_events[];
 70 }; /* 128 bytes + ring size */                     70 }; /* 128 bytes + ring size */
 71                                                    71 
 72 /*                                                 72 /*
 73  * Plugging is meant to work with larger batch     73  * Plugging is meant to work with larger batches of IOs. If we don't
 74  * have more than the below, then don't bother     74  * have more than the below, then don't bother setting up a plug.
 75  */                                                75  */
 76 #define AIO_PLUG_THRESHOLD      2                  76 #define AIO_PLUG_THRESHOLD      2
 77                                                    77 
 78 #define AIO_RING_PAGES  8                          78 #define AIO_RING_PAGES  8
 79                                                    79 
 80 struct kioctx_table {                              80 struct kioctx_table {
 81         struct rcu_head         rcu;               81         struct rcu_head         rcu;
 82         unsigned                nr;                82         unsigned                nr;
 83         struct kioctx __rcu     *table[] __counted_by(nr); !!  83         struct kioctx __rcu     *table[];
 84 };                                                 84 };
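
The linux-6.12-rc7 side annotates the flexible array with __counted_by(nr). A minimal sketch of the pattern, using a hypothetical example_table; with CONFIG_UBSAN_BOUNDS or FORTIFY_SOURCE, the compiler can bounds-check accesses to the array against the named counter member:

    /* Hypothetical example_table: __counted_by(nr) tells the compiler that
     * items[] holds nr valid elements, enabling run-time bounds checks. */
    struct example_table {
            unsigned nr;                            /* number of valid entries */
            void __rcu *items[] __counted_by(nr);   /* bounded flexible array */
    };
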
 85                                                    85 
 86 struct kioctx_cpu {                                86 struct kioctx_cpu {
 87         unsigned                reqs_available     87         unsigned                reqs_available;
 88 };                                                 88 };
 89                                                    89 
 90 struct ctx_rq_wait {                               90 struct ctx_rq_wait {
 91         struct completion comp;                    91         struct completion comp;
 92         atomic_t count;                            92         atomic_t count;
 93 };                                                 93 };
 94                                                    94 
 95 struct kioctx {                                    95 struct kioctx {
 96         struct percpu_ref       users;             96         struct percpu_ref       users;
 97         atomic_t                dead;              97         atomic_t                dead;
 98                                                    98 
 99         struct percpu_ref       reqs;              99         struct percpu_ref       reqs;
100                                                   100 
101         unsigned long           user_id;          101         unsigned long           user_id;
102                                                   102 
103         struct kioctx_cpu __percpu *cpu;        !! 103         struct __percpu kioctx_cpu *cpu;
104                                                   104 
105         /*                                        105         /*
106          * For percpu reqs_available, number o    106          * For percpu reqs_available, number of slots we move to/from global
107          * counter at a time:                     107          * counter at a time:
108          */                                       108          */
109         unsigned                req_batch;        109         unsigned                req_batch;
110         /*                                        110         /*
111          * This is what userspace passed to io    111          * This is what userspace passed to io_setup(), it's not used for
112          * anything but counting against the g    112          * anything but counting against the global max_reqs quota.
113          *                                        113          *
114          * The real limit is nr_events - 1, wh    114          * The real limit is nr_events - 1, which will be larger (see
115          * aio_setup_ring())                      115          * aio_setup_ring())
116          */                                       116          */
117         unsigned                max_reqs;         117         unsigned                max_reqs;
118                                                   118 
119         /* Size of ringbuffer, in units of str    119         /* Size of ringbuffer, in units of struct io_event */
120         unsigned                nr_events;        120         unsigned                nr_events;
121                                                   121 
122         unsigned long           mmap_base;        122         unsigned long           mmap_base;
123         unsigned long           mmap_size;        123         unsigned long           mmap_size;
124                                                   124 
125         struct folio            **ring_folios; !! 125         struct page             **ring_pages;
126         long                    nr_pages;         126         long                    nr_pages;
127                                                   127 
128         struct rcu_work         free_rwork;       128         struct rcu_work         free_rwork;     /* see free_ioctx() */
129                                                   129 
130         /*                                        130         /*
131          * signals when all in-flight requests    131          * signals when all in-flight requests are done
132          */                                       132          */
133         struct ctx_rq_wait      *rq_wait;         133         struct ctx_rq_wait      *rq_wait;
134                                                   134 
135         struct {                                  135         struct {
136                 /*                                136                 /*
137                  * This counts the number of a    137                  * This counts the number of available slots in the ringbuffer,
138                  * so we avoid overflowing it:    138                  * so we avoid overflowing it: it's decremented (if positive)
139                  * when allocating a kiocb and    139                  * when allocating a kiocb and incremented when the resulting
140                  * io_event is pulled off the     140                  * io_event is pulled off the ringbuffer.
141                  *                                141                  *
142                  * We batch accesses to it wit    142                  * We batch accesses to it with a percpu version.
143                  */                               143                  */
144                 atomic_t        reqs_available    144                 atomic_t        reqs_available;
145         } ____cacheline_aligned_in_smp;           145         } ____cacheline_aligned_in_smp;
146                                                   146 
147         struct {                                  147         struct {
148                 spinlock_t      ctx_lock;         148                 spinlock_t      ctx_lock;
149                 struct list_head active_reqs;     149                 struct list_head active_reqs;   /* used for cancellation */
150         } ____cacheline_aligned_in_smp;           150         } ____cacheline_aligned_in_smp;
151                                                   151 
152         struct {                                  152         struct {
153                 struct mutex    ring_lock;        153                 struct mutex    ring_lock;
154                 wait_queue_head_t wait;           154                 wait_queue_head_t wait;
155         } ____cacheline_aligned_in_smp;           155         } ____cacheline_aligned_in_smp;
156                                                   156 
157         struct {                                  157         struct {
158                 unsigned        tail;             158                 unsigned        tail;
159                 unsigned        completed_even    159                 unsigned        completed_events;
160                 spinlock_t      completion_loc    160                 spinlock_t      completion_lock;
161         } ____cacheline_aligned_in_smp;           161         } ____cacheline_aligned_in_smp;
162                                                   162 
163         struct folio            *internal_foli !! 163         struct page             *internal_pages[AIO_RING_PAGES];
164         struct file             *aio_ring_file    164         struct file             *aio_ring_file;
165                                                   165 
166         unsigned                id;               166         unsigned                id;
167 };                                                167 };
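
Two member-level changes are visible in struct kioctx: the __percpu attribute now follows the type (struct kioctx_cpu __percpu *cpu), and the ring bookkeeping moved from struct page to struct folio. A minimal sketch, not the kernel's exact flow, of how such a per-CPU member is allocated and updated (example_* names are hypothetical):

    static int example_percpu_setup(struct kioctx *ctx)
    {
            ctx->cpu = alloc_percpu(struct kioctx_cpu);     /* one slot per CPU */
            return ctx->cpu ? 0 : -ENOMEM;
    }

    static void example_put_reqs(struct kioctx *ctx, unsigned nr)
    {
            struct kioctx_cpu *kcpu;
            unsigned long flags;

            local_irq_save(flags);
            kcpu = this_cpu_ptr(ctx->cpu);  /* this CPU's private counter */
            kcpu->reqs_available += nr;
            local_irq_restore(flags);
    }
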
168                                                   168 
169 /*                                                169 /*
170  * First field must be the file pointer in all    170  * First field must be the file pointer in all the
171  * iocb unions! See also 'struct kiocb' in <li    171  * iocb unions! See also 'struct kiocb' in <linux/fs.h>
172  */                                               172  */
173 struct fsync_iocb {                               173 struct fsync_iocb {
174         struct file             *file;            174         struct file             *file;
175         struct work_struct      work;             175         struct work_struct      work;
176         bool                    datasync;         176         bool                    datasync;
177         struct cred             *creds;           177         struct cred             *creds;
178 };                                                178 };
179                                                   179 
180 struct poll_iocb {                                180 struct poll_iocb {
181         struct file             *file;            181         struct file             *file;
182         struct wait_queue_head  *head;            182         struct wait_queue_head  *head;
183         __poll_t                events;           183         __poll_t                events;
                                                   >> 184         bool                    done;
184         bool                    cancelled;        185         bool                    cancelled;
185         bool                    work_scheduled; <<
186         bool                    work_need_resched; <<
187         struct wait_queue_entry wait;             186         struct wait_queue_entry wait;
188         struct work_struct      work;             187         struct work_struct      work;
189 };                                                188 };
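
The single done flag of linux-5.11.22 was replaced by work_scheduled and work_need_resched, which let the wakeup path distinguish "completion work already queued" from "queued work must re-evaluate the poll mask". A simplified sketch of the decision these flags enable, not the kernel's exact code (example_poll_wake is hypothetical and assumes the caller holds the appropriate lock):

    static void example_poll_wake(struct poll_iocb *req)
    {
            if (!req->work_scheduled) {
                    schedule_work(&req->work);      /* first wakeup: queue completion work */
                    req->work_scheduled = true;
            } else {
                    req->work_need_resched = true;  /* already queued: ask it to re-check */
            }
    }
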
190                                                   189 
191 /*                                                190 /*
192  * NOTE! Each of the iocb union members has th    191  * NOTE! Each of the iocb union members has the file pointer
193  * as the first entry in their struct definiti    192  * as the first entry in their struct definition. So you can
194  * access the file pointer through any of the     193  * access the file pointer through any of the sub-structs,
195  * or directly as just 'ki_filp' in this struc    194  * or directly as just 'ki_filp' in this struct.
196  */                                               195  */
197 struct aio_kiocb {                                196 struct aio_kiocb {
198         union {                                   197         union {
199                 struct file             *ki_fi    198                 struct file             *ki_filp;
200                 struct kiocb            rw;       199                 struct kiocb            rw;
201                 struct fsync_iocb       fsync;    200                 struct fsync_iocb       fsync;
202                 struct poll_iocb        poll;     201                 struct poll_iocb        poll;
203         };                                        202         };
204                                                   203 
205         struct kioctx           *ki_ctx;          204         struct kioctx           *ki_ctx;
206         kiocb_cancel_fn         *ki_cancel;       205         kiocb_cancel_fn         *ki_cancel;
207                                                   206 
208         struct io_event         ki_res;           207         struct io_event         ki_res;
209                                                   208 
210         struct list_head        ki_list;          209         struct list_head        ki_list;        /* the aio core uses this
211                                                   210                                                  * for cancellation */
212         refcount_t              ki_refcnt;        211         refcount_t              ki_refcnt;
213                                                   212 
214         /*                                        213         /*
215          * If the aio_resfd field of the users    214          * If the aio_resfd field of the userspace iocb is not zero,
216          * this is the underlying eventfd cont    215          * this is the underlying eventfd context to deliver events to.
217          */                                       216          */
218         struct eventfd_ctx      *ki_eventfd;      217         struct eventfd_ctx      *ki_eventfd;
219 };                                                218 };
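
The comment above documents an invariant rather than a change: every union member begins with the struct file pointer, so the same storage can be read through any member. A sketch of what that guarantees (example_file is hypothetical):

    static struct file *example_file(struct aio_kiocb *req)
    {
            /* Every union member must keep the file pointer first. */
            BUILD_BUG_ON(offsetof(struct fsync_iocb, file) != 0);
            BUILD_BUG_ON(offsetof(struct poll_iocb, file) != 0);
            return req->ki_filp;    /* same slot as req->rw.ki_filp, req->fsync.file, ... */
    }
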
220                                                   219 
221 /*------ sysctl variables----*/                   220 /*------ sysctl variables----*/
222 static DEFINE_SPINLOCK(aio_nr_lock);              221 static DEFINE_SPINLOCK(aio_nr_lock);
223 static unsigned long aio_nr;            /* current system wide number of aio requests */ !! 222 unsigned long aio_nr;           /* current system wide number of aio requests */
224 static unsigned long aio_max_nr = 0x10000; /* system wide maximum number of aio requests */ !! 223 unsigned long aio_max_nr = 0x10000; /* system wide maximum number of aio requests */
225 /*----end sysctl variables---*/                   224 /*----end sysctl variables---*/
226 #ifdef CONFIG_SYSCTL                            <<
227 static struct ctl_table aio_sysctls[] = {       <<
228         {                                       <<
229                 .procname       = "aio-nr",     <<
230                 .data           = &aio_nr,      <<
231                 .maxlen         = sizeof(aio_nr), <<
232                 .mode           = 0444,         <<
233                 .proc_handler   = proc_doulongvec_minmax, <<
234         },                                      <<
235         {                                       <<
236                 .procname       = "aio-max-nr", <<
237                 .data           = &aio_max_nr,  <<
238                 .maxlen         = sizeof(aio_max_nr), <<
239                 .mode           = 0644,         <<
240                 .proc_handler   = proc_doulongvec_minmax, <<
241         },                                      <<
242 };                                              <<
243                                                 <<
244 static void __init aio_sysctl_init(void)        <<
245 {                                               <<
246         register_sysctl_init("fs", aio_sysctls); <<
247 }                                               <<
248 #else                                           <<
249 #define aio_sysctl_init() do { } while (0)      <<
250 #endif                                          <<
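
In linux-6.12-rc7 the aio-nr and aio-max-nr sysctls are registered from fs/aio.c itself via register_sysctl_init() instead of living in the central kernel/sysctl.c table. The same pattern for a hypothetical fs knob would look like this (example-knob and the identifiers around it are assumptions, not kernel names):

    static unsigned long example_knob;

    static struct ctl_table example_sysctls[] = {
            {
                    .procname       = "example-knob",   /* /proc/sys/fs/example-knob */
                    .data           = &example_knob,
                    .maxlen         = sizeof(example_knob),
                    .mode           = 0644,
                    .proc_handler   = proc_doulongvec_minmax,
            },
    };

    static void __init example_sysctl_init(void)
    {
            register_sysctl_init("fs", example_sysctls);
    }
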
251                                                   225 
252 static struct kmem_cache        *kiocb_cachep;    226 static struct kmem_cache        *kiocb_cachep;
253 static struct kmem_cache        *kioctx_cachep    227 static struct kmem_cache        *kioctx_cachep;
254                                                   228 
255 static struct vfsmount *aio_mnt;                  229 static struct vfsmount *aio_mnt;
256                                                   230 
257 static const struct file_operations aio_ring_f    231 static const struct file_operations aio_ring_fops;
258 static const struct address_space_operations a    232 static const struct address_space_operations aio_ctx_aops;
259                                                   233 
260 static struct file *aio_private_file(struct ki    234 static struct file *aio_private_file(struct kioctx *ctx, loff_t nr_pages)
261 {                                                 235 {
262         struct file *file;                        236         struct file *file;
263         struct inode *inode = alloc_anon_inode    237         struct inode *inode = alloc_anon_inode(aio_mnt->mnt_sb);
264         if (IS_ERR(inode))                        238         if (IS_ERR(inode))
265                 return ERR_CAST(inode);           239                 return ERR_CAST(inode);
266                                                   240 
267         inode->i_mapping->a_ops = &aio_ctx_aop    241         inode->i_mapping->a_ops = &aio_ctx_aops;
268         inode->i_mapping->i_private_data = ctx; !! 242         inode->i_mapping->private_data = ctx;
269         inode->i_size = PAGE_SIZE * nr_pages;     243         inode->i_size = PAGE_SIZE * nr_pages;
270                                                   244 
271         file = alloc_file_pseudo(inode, aio_mn    245         file = alloc_file_pseudo(inode, aio_mnt, "[aio]",
272                                 O_RDWR, &aio_r    246                                 O_RDWR, &aio_ring_fops);
273         if (IS_ERR(file))                         247         if (IS_ERR(file))
274                 iput(inode);                      248                 iput(inode);
275         return file;                              249         return file;
276 }                                                 250 }
277                                                   251 
278 static int aio_init_fs_context(struct fs_conte    252 static int aio_init_fs_context(struct fs_context *fc)
279 {                                                 253 {
280         if (!init_pseudo(fc, AIO_RING_MAGIC))     254         if (!init_pseudo(fc, AIO_RING_MAGIC))
281                 return -ENOMEM;                   255                 return -ENOMEM;
282         fc->s_iflags |= SB_I_NOEXEC;              256         fc->s_iflags |= SB_I_NOEXEC;
283         return 0;                                 257         return 0;
284 }                                                 258 }
285                                                   259 
286 /* aio_setup                                      260 /* aio_setup
287  *      Creates the slab caches used by the ai    261  *      Creates the slab caches used by the aio routines, panic on
288  *      failure as this is done early during t    262  *      failure as this is done early during the boot sequence.
289  */                                               263  */
290 static int __init aio_setup(void)                 264 static int __init aio_setup(void)
291 {                                                 265 {
292         static struct file_system_type aio_fs     266         static struct file_system_type aio_fs = {
293                 .name           = "aio",          267                 .name           = "aio",
294                 .init_fs_context = aio_init_fs    268                 .init_fs_context = aio_init_fs_context,
295                 .kill_sb        = kill_anon_su    269                 .kill_sb        = kill_anon_super,
296         };                                        270         };
297         aio_mnt = kern_mount(&aio_fs);            271         aio_mnt = kern_mount(&aio_fs);
298         if (IS_ERR(aio_mnt))                      272         if (IS_ERR(aio_mnt))
299                 panic("Failed to create aio fs    273                 panic("Failed to create aio fs mount.");
300                                                   274 
301         kiocb_cachep = KMEM_CACHE(aio_kiocb, S    275         kiocb_cachep = KMEM_CACHE(aio_kiocb, SLAB_HWCACHE_ALIGN|SLAB_PANIC);
302         kioctx_cachep = KMEM_CACHE(kioctx,SLAB    276         kioctx_cachep = KMEM_CACHE(kioctx,SLAB_HWCACHE_ALIGN|SLAB_PANIC);
303         aio_sysctl_init();                     << 
304         return 0;                                 277         return 0;
305 }                                                 278 }
306 __initcall(aio_setup);                            279 __initcall(aio_setup);
307                                                   280 
308 static void put_aio_ring_file(struct kioctx *c    281 static void put_aio_ring_file(struct kioctx *ctx)
309 {                                                 282 {
310         struct file *aio_ring_file = ctx->aio_    283         struct file *aio_ring_file = ctx->aio_ring_file;
311         struct address_space *i_mapping;          284         struct address_space *i_mapping;
312                                                   285 
313         if (aio_ring_file) {                      286         if (aio_ring_file) {
314                 truncate_setsize(file_inode(ai    287                 truncate_setsize(file_inode(aio_ring_file), 0);
315                                                   288 
316                 /* Prevent further access to t    289                 /* Prevent further access to the kioctx from migratepages */
317                 i_mapping = aio_ring_file->f_m    290                 i_mapping = aio_ring_file->f_mapping;
318                 spin_lock(&i_mapping->i_private_lock); !! 291                 spin_lock(&i_mapping->private_lock);
319                 i_mapping->i_private_data = NULL; !! 292                 i_mapping->private_data = NULL;
320                 ctx->aio_ring_file = NULL;        293                 ctx->aio_ring_file = NULL;
321                 spin_unlock(&i_mapping->i_private_lock); !! 294                 spin_unlock(&i_mapping->private_lock);
322                                                   295 
323                 fput(aio_ring_file);              296                 fput(aio_ring_file);
324         }                                         297         }
325 }                                                 298 }
326                                                   299 
327 static void aio_free_ring(struct kioctx *ctx)     300 static void aio_free_ring(struct kioctx *ctx)
328 {                                                 301 {
329         int i;                                    302         int i;
330                                                   303 
331         /* Disconnect the kiotx from the ring     304         /* Disconnect the kiotx from the ring file.  This prevents future
332          * accesses to the kioctx from page mi    305          * accesses to the kioctx from page migration.
333          */                                       306          */
334         put_aio_ring_file(ctx);                   307         put_aio_ring_file(ctx);
335                                                   308 
336         for (i = 0; i < ctx->nr_pages; i++) {     309         for (i = 0; i < ctx->nr_pages; i++) {
337                 struct folio *folio = ctx->ring_folios[i]; !! 310                 struct page *page;
338                                                !! 311                 pr_debug("pid(%d) [%d] page->count=%d\n", current->pid, i,
339                 if (!folio)                    !! 312                                 page_count(ctx->ring_pages[i]));
                                                   >> 313                 page = ctx->ring_pages[i];
                                                   >> 314                 if (!page)
340                         continue;                 315                         continue;
341                                                !! 316                 ctx->ring_pages[i] = NULL;
342                 pr_debug("pid(%d) [%d] folio->count=%d\n", current->pid, i, !! 317                 put_page(page);
343                          folio_ref_count(folio)); <<
344                 ctx->ring_folios[i] = NULL;    <<
345                 folio_put(folio);              <<
346         }                                         318         }
347                                                   319 
348         if (ctx->ring_folios && ctx->ring_folios != ctx->internal_folios) { !! 320         if (ctx->ring_pages && ctx->ring_pages != ctx->internal_pages) {
349                 kfree(ctx->ring_folios);       !! 321                 kfree(ctx->ring_pages);
350                 ctx->ring_folios = NULL;       !! 322                 ctx->ring_pages = NULL;
351         }                                         323         }
352 }                                                 324 }
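
The loop above is a straight page-to-folio conversion: page_count()/put_page() become folio_ref_count()/folio_put(). A hedged sketch of that mapping (example_drop is hypothetical):

    static void example_drop(struct folio *folio)
    {
            pr_debug("refs=%d\n", folio_ref_count(folio));  /* was page_count(page) */
            folio_put(folio);                               /* was put_page(page)   */
    }
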
353                                                   325 
354 static int aio_ring_mremap(struct vm_area_struct *vma) !! 326 static int aio_ring_mremap(struct vm_area_struct *vma, unsigned long flags)
355 {                                                 327 {
356         struct file *file = vma->vm_file;         328         struct file *file = vma->vm_file;
357         struct mm_struct *mm = vma->vm_mm;        329         struct mm_struct *mm = vma->vm_mm;
358         struct kioctx_table *table;               330         struct kioctx_table *table;
359         int i, res = -EINVAL;                     331         int i, res = -EINVAL;
360                                                   332 
                                                   >> 333         if (flags & MREMAP_DONTUNMAP)
                                                   >> 334                 return -EINVAL;
                                                   >> 335 
361         spin_lock(&mm->ioctx_lock);               336         spin_lock(&mm->ioctx_lock);
362         rcu_read_lock();                          337         rcu_read_lock();
363         table = rcu_dereference(mm->ioctx_tabl    338         table = rcu_dereference(mm->ioctx_table);
364         if (!table)                            << 
365                 goto out_unlock;               << 
366                                                << 
367         for (i = 0; i < table->nr; i++) {         339         for (i = 0; i < table->nr; i++) {
368                 struct kioctx *ctx;               340                 struct kioctx *ctx;
369                                                   341 
370                 ctx = rcu_dereference(table->t    342                 ctx = rcu_dereference(table->table[i]);
371                 if (ctx && ctx->aio_ring_file     343                 if (ctx && ctx->aio_ring_file == file) {
372                         if (!atomic_read(&ctx-    344                         if (!atomic_read(&ctx->dead)) {
373                                 ctx->user_id =    345                                 ctx->user_id = ctx->mmap_base = vma->vm_start;
374                                 res = 0;          346                                 res = 0;
375                         }                         347                         }
376                         break;                    348                         break;
377                 }                                 349                 }
378         }                                         350         }
379                                                   351 
380 out_unlock:                                    << 
381         rcu_read_unlock();                        352         rcu_read_unlock();
382         spin_unlock(&mm->ioctx_lock);             353         spin_unlock(&mm->ioctx_lock);
383         return res;                               354         return res;
384 }                                                 355 }
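
Two changes meet in aio_ring_mremap(): the ->mremap() hook lost its flags argument (the MREMAP_DONTUNMAP rejection moved into common mremap code), and a NULL check on the RCU-protected ioctx table was added for the case where exit_aio() has already torn it down. A minimal sketch of the newer hook shape (example_* names are hypothetical):

    static int example_mremap(struct vm_area_struct *vma)
    {
            /* No flags parameter any more; common code filters MREMAP_DONTUNMAP. */
            return 0;       /* accept the move */
    }

    static const struct vm_operations_struct example_vm_ops = {
            .mremap = example_mremap,
    };
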
385                                                   356 
386 static const struct vm_operations_struct aio_r    357 static const struct vm_operations_struct aio_ring_vm_ops = {
387         .mremap         = aio_ring_mremap,        358         .mremap         = aio_ring_mremap,
388 #if IS_ENABLED(CONFIG_MMU)                        359 #if IS_ENABLED(CONFIG_MMU)
389         .fault          = filemap_fault,          360         .fault          = filemap_fault,
390         .map_pages      = filemap_map_pages,      361         .map_pages      = filemap_map_pages,
391         .page_mkwrite   = filemap_page_mkwrite    362         .page_mkwrite   = filemap_page_mkwrite,
392 #endif                                            363 #endif
393 };                                                364 };
394                                                   365 
395 static int aio_ring_mmap(struct file *file, st    366 static int aio_ring_mmap(struct file *file, struct vm_area_struct *vma)
396 {                                                 367 {
397         vm_flags_set(vma, VM_DONTEXPAND);      !! 368         vma->vm_flags |= VM_DONTEXPAND;
398         vma->vm_ops = &aio_ring_vm_ops;           369         vma->vm_ops = &aio_ring_vm_ops;
399         return 0;                                 370         return 0;
400 }                                                 371 }
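
vma->vm_flags is no longer written directly; since vm_flags became write-protected, updates go through helpers such as vm_flags_set(), which can assert that the mmap write lock is held. A sketch (example_mmap is hypothetical):

    static int example_mmap(struct file *file, struct vm_area_struct *vma)
    {
            vm_flags_set(vma, VM_DONTEXPAND);   /* was: vma->vm_flags |= VM_DONTEXPAND; */
            return 0;
    }
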
401                                                   372 
402 static const struct file_operations aio_ring_f    373 static const struct file_operations aio_ring_fops = {
403         .mmap = aio_ring_mmap,                    374         .mmap = aio_ring_mmap,
404 };                                                375 };
405                                                   376 
406 #if IS_ENABLED(CONFIG_MIGRATION)                  377 #if IS_ENABLED(CONFIG_MIGRATION)
407 static int aio_migrate_folio(struct address_space *mapping, struct folio *dst, !! 378 static int aio_migratepage(struct address_space *mapping, struct page *new,
408                         struct folio *src, enum migrate_mode mode) !! 379                         struct page *old, enum migrate_mode mode)
409 {                                                 380 {
410         struct kioctx *ctx;                       381         struct kioctx *ctx;
411         unsigned long flags;                      382         unsigned long flags;
412         pgoff_t idx;                              383         pgoff_t idx;
413         int rc = 0;                            !! 384         int rc;
414                                                   385 
415         /* mapping->i_private_lock here protects against the kioctx teardown.  */ !! 386         /*
416         spin_lock(&mapping->i_private_lock);   !! 387          * We cannot support the _NO_COPY case here, because copy needs to
417         ctx = mapping->i_private_data;         !! 388          * happen under the ctx->completion_lock. That does not work with the
                                                   >> 389          * migration workflow of MIGRATE_SYNC_NO_COPY.
                                                   >> 390          */
                                                   >> 391         if (mode == MIGRATE_SYNC_NO_COPY)
                                                   >> 392                 return -EINVAL;
                                                   >> 393 
                                                   >> 394         rc = 0;
                                                   >> 395 
                                                   >> 396         /* mapping->private_lock here protects against the kioctx teardown.  */
                                                   >> 397         spin_lock(&mapping->private_lock);
                                                   >> 398         ctx = mapping->private_data;
418         if (!ctx) {                               399         if (!ctx) {
419                 rc = -EINVAL;                     400                 rc = -EINVAL;
420                 goto out;                         401                 goto out;
421         }                                         402         }
422                                                   403 
423         /* The ring_lock mutex.  The prevents     404         /* The ring_lock mutex.  The prevents aio_read_events() from writing
424          * to the ring's head, and prevents pa    405          * to the ring's head, and prevents page migration from mucking in
425          * a partially initialized kiotx.         406          * a partially initialized kiotx.
426          */                                       407          */
427         if (!mutex_trylock(&ctx->ring_lock)) {    408         if (!mutex_trylock(&ctx->ring_lock)) {
428                 rc = -EAGAIN;                     409                 rc = -EAGAIN;
429                 goto out;                         410                 goto out;
430         }                                         411         }
431                                                   412 
432         idx = src->index;                      !! 413         idx = old->index;
433         if (idx < (pgoff_t)ctx->nr_pages) {       414         if (idx < (pgoff_t)ctx->nr_pages) {
434                 /* Make sure the old folio hasn't already been changed */ !! 415                 /* Make sure the old page hasn't already been changed */
435                 if (ctx->ring_folios[idx] != src) !! 416                 if (ctx->ring_pages[idx] != old)
436                         rc = -EAGAIN;             417                         rc = -EAGAIN;
437         } else                                    418         } else
438                 rc = -EINVAL;                     419                 rc = -EINVAL;
439                                                   420 
440         if (rc != 0)                              421         if (rc != 0)
441                 goto out_unlock;                  422                 goto out_unlock;
442                                                   423 
443         /* Writeback must be complete */          424         /* Writeback must be complete */
444         BUG_ON(folio_test_writeback(src));     !! 425         BUG_ON(PageWriteback(old));
445         folio_get(dst);                        !! 426         get_page(new);
446                                                   427 
447         rc = folio_migrate_mapping(mapping, dst, src, 1); !! 428         rc = migrate_page_move_mapping(mapping, new, old, 1);
448         if (rc != MIGRATEPAGE_SUCCESS) {          429         if (rc != MIGRATEPAGE_SUCCESS) {
449                 folio_put(dst);                !! 430                 put_page(new);
450                 goto out_unlock;                  431                 goto out_unlock;
451         }                                         432         }
452                                                   433 
453         /* Take completion_lock to prevent oth    434         /* Take completion_lock to prevent other writes to the ring buffer
454          * while the old folio is copied to the new.  This prevents new !! 435          * while the old page is copied to the new.  This prevents new
455          * events from being lost.                436          * events from being lost.
456          */                                       437          */
457         spin_lock_irqsave(&ctx->completion_loc    438         spin_lock_irqsave(&ctx->completion_lock, flags);
458         folio_copy(dst, src);                  !! 439         migrate_page_copy(new, old);
459         folio_migrate_flags(dst, src);         !! 440         BUG_ON(ctx->ring_pages[idx] != old);
460         BUG_ON(ctx->ring_folios[idx] != src);  !! 441         ctx->ring_pages[idx] = new;
461         ctx->ring_folios[idx] = dst;           << 
462         spin_unlock_irqrestore(&ctx->completio    442         spin_unlock_irqrestore(&ctx->completion_lock, flags);
463                                                   443 
464         /* The old folio is no longer accessible. */ !! 444         /* The old page is no longer accessible. */
465         folio_put(src);                        !! 445         put_page(old);
466                                                   446 
467 out_unlock:                                       447 out_unlock:
468         mutex_unlock(&ctx->ring_lock);            448         mutex_unlock(&ctx->ring_lock);
469 out:                                              449 out:
470         spin_unlock(&mapping->i_private_lock); !! 450         spin_unlock(&mapping->private_lock);
471         return rc;                                451         return rc;
472 }                                                 452 }
473 #else                                          << 
474 #define aio_migrate_folio NULL                 << 
475 #endif                                            453 #endif
476                                                   454 
477 static const struct address_space_operations a    455 static const struct address_space_operations aio_ctx_aops = {
478         .dirty_folio    = noop_dirty_folio,    !! 456         .set_page_dirty = __set_page_dirty_no_writeback,
479         .migrate_folio  = aio_migrate_folio,   !! 457 #if IS_ENABLED(CONFIG_MIGRATION)
                                                   >> 458         .migratepage    = aio_migratepage,
                                                   >> 459 #endif
480 };                                                460 };
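
The address_space_operations switched from the page-based set_page_dirty/migratepage methods to the folio-based dirty_folio/migrate_folio, and defining the migrate method to NULL when CONFIG_MIGRATION is off lets the table itself drop its #ifdef. A sketch of that wiring (example_* names are hypothetical):

    #if IS_ENABLED(CONFIG_MIGRATION)
    static int example_migrate_folio(struct address_space *mapping,
                    struct folio *dst, struct folio *src, enum migrate_mode mode)
    {
            /* Move mapping entries from src to dst (caller holds 1 extra ref). */
            return folio_migrate_mapping(mapping, dst, src, 1);
    }
    #else
    #define example_migrate_folio NULL
    #endif

    static const struct address_space_operations example_aops = {
            .dirty_folio    = noop_dirty_folio,
            .migrate_folio  = example_migrate_folio,
    };
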
481                                                   461 
482 static int aio_setup_ring(struct kioctx *ctx,     462 static int aio_setup_ring(struct kioctx *ctx, unsigned int nr_events)
483 {                                                 463 {
484         struct aio_ring *ring;                    464         struct aio_ring *ring;
485         struct mm_struct *mm = current->mm;       465         struct mm_struct *mm = current->mm;
486         unsigned long size, unused;               466         unsigned long size, unused;
487         int nr_pages;                             467         int nr_pages;
488         int i;                                    468         int i;
489         struct file *file;                        469         struct file *file;
490                                                   470 
491         /* Compensate for the ring buffer's he    471         /* Compensate for the ring buffer's head/tail overlap entry */
492         nr_events += 2; /* 1 is required, 2 fo    472         nr_events += 2; /* 1 is required, 2 for good luck */
493                                                   473 
494         size = sizeof(struct aio_ring);           474         size = sizeof(struct aio_ring);
495         size += sizeof(struct io_event) * nr_e    475         size += sizeof(struct io_event) * nr_events;
496                                                   476 
497         nr_pages = PFN_UP(size);                  477         nr_pages = PFN_UP(size);
498         if (nr_pages < 0)                         478         if (nr_pages < 0)
499                 return -EINVAL;                   479                 return -EINVAL;
500                                                   480 
501         file = aio_private_file(ctx, nr_pages)    481         file = aio_private_file(ctx, nr_pages);
502         if (IS_ERR(file)) {                       482         if (IS_ERR(file)) {
503                 ctx->aio_ring_file = NULL;        483                 ctx->aio_ring_file = NULL;
504                 return -ENOMEM;                   484                 return -ENOMEM;
505         }                                         485         }
506                                                   486 
507         ctx->aio_ring_file = file;                487         ctx->aio_ring_file = file;
508         nr_events = (PAGE_SIZE * nr_pages - si    488         nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring))
509                         / sizeof(struct io_eve    489                         / sizeof(struct io_event);
510                                                   490 
511         ctx->ring_folios = ctx->internal_folios; !! 491         ctx->ring_pages = ctx->internal_pages;
512         if (nr_pages > AIO_RING_PAGES) {          492         if (nr_pages > AIO_RING_PAGES) {
513                 ctx->ring_folios = kcalloc(nr_pages, sizeof(struct folio *), !! 493                 ctx->ring_pages = kcalloc(nr_pages, sizeof(struct page *),
514                                            GFP_KERNEL); !! 494                                           GFP_KERNEL);
515                 if (!ctx->ring_folios) {       !! 495                 if (!ctx->ring_pages) {
516                         put_aio_ring_file(ctx)    496                         put_aio_ring_file(ctx);
517                         return -ENOMEM;           497                         return -ENOMEM;
518                 }                                 498                 }
519         }                                         499         }
520                                                   500 
521         for (i = 0; i < nr_pages; i++) {          501         for (i = 0; i < nr_pages; i++) {
522                 struct folio *folio;           !! 502                 struct page *page;
523                                                !! 503                 page = find_or_create_page(file->f_mapping,
524                 folio = __filemap_get_folio(file->f_mapping, i, !! 504                                            i, GFP_HIGHUSER | __GFP_ZERO);
525                                             FGP_LOCK | FGP_ACCESSED | FGP_CREAT, !! 505                 if (!page)
526                                             GFP_USER | __GFP_ZERO); <<
527                 if (IS_ERR(folio))             << 
528                         break;                    506                         break;
                                                   >> 507                 pr_debug("pid(%d) page[%d]->count=%d\n",
                                                   >> 508                          current->pid, i, page_count(page));
                                                   >> 509                 SetPageUptodate(page);
                                                   >> 510                 unlock_page(page);
529                                                   511 
530                 pr_debug("pid(%d) [%d] folio->count=%d\n", current->pid, i, !! 512                 ctx->ring_pages[i] = page;
531                          folio_ref_count(folio)); <<
532                 folio_end_read(folio, true);   <<
533                                                << 
534                 ctx->ring_folios[i] = folio;   << 
535         }                                         513         }
536         ctx->nr_pages = i;                        514         ctx->nr_pages = i;
537                                                   515 
538         if (unlikely(i != nr_pages)) {            516         if (unlikely(i != nr_pages)) {
539                 aio_free_ring(ctx);               517                 aio_free_ring(ctx);
540                 return -ENOMEM;                   518                 return -ENOMEM;
541         }                                         519         }
542                                                   520 
543         ctx->mmap_size = nr_pages * PAGE_SIZE;    521         ctx->mmap_size = nr_pages * PAGE_SIZE;
544         pr_debug("attempting mmap of %lu bytes    522         pr_debug("attempting mmap of %lu bytes\n", ctx->mmap_size);
545                                                   523 
546         if (mmap_write_lock_killable(mm)) {       524         if (mmap_write_lock_killable(mm)) {
547                 ctx->mmap_size = 0;               525                 ctx->mmap_size = 0;
548                 aio_free_ring(ctx);               526                 aio_free_ring(ctx);
549                 return -EINTR;                    527                 return -EINTR;
550         }                                         528         }
551                                                   529 
552         ctx->mmap_base = do_mmap(ctx->aio_ring    530         ctx->mmap_base = do_mmap(ctx->aio_ring_file, 0, ctx->mmap_size,
553                                  PROT_READ | P    531                                  PROT_READ | PROT_WRITE,
554                                  MAP_SHARED, 0, 0, &unused, NULL); !! 532                                  MAP_SHARED, 0, &unused, NULL);
555         mmap_write_unlock(mm);                    533         mmap_write_unlock(mm);
556         if (IS_ERR((void *)ctx->mmap_base)) {     534         if (IS_ERR((void *)ctx->mmap_base)) {
557                 ctx->mmap_size = 0;               535                 ctx->mmap_size = 0;
558                 aio_free_ring(ctx);               536                 aio_free_ring(ctx);
559                 return -ENOMEM;                   537                 return -ENOMEM;
560         }                                         538         }
561                                                   539 
562         pr_debug("mmap address: 0x%08lx\n", ct    540         pr_debug("mmap address: 0x%08lx\n", ctx->mmap_base);
563                                                   541 
564         ctx->user_id = ctx->mmap_base;            542         ctx->user_id = ctx->mmap_base;
565         ctx->nr_events = nr_events; /* trusted    543         ctx->nr_events = nr_events; /* trusted copy */
566                                                   544 
567         ring = folio_address(ctx->ring_folios[0]); !! 545         ring = kmap_atomic(ctx->ring_pages[0]);
568         ring->nr = nr_events;   /* user copy *    546         ring->nr = nr_events;   /* user copy */
569         ring->id = ~0U;                           547         ring->id = ~0U;
570         ring->head = ring->tail = 0;              548         ring->head = ring->tail = 0;
571         ring->magic = AIO_RING_MAGIC;             549         ring->magic = AIO_RING_MAGIC;
572         ring->compat_features = AIO_RING_COMPA    550         ring->compat_features = AIO_RING_COMPAT_FEATURES;
573         ring->incompat_features = AIO_RING_INC    551         ring->incompat_features = AIO_RING_INCOMPAT_FEATURES;
574         ring->header_length = sizeof(struct ai    552         ring->header_length = sizeof(struct aio_ring);
575         flush_dcache_folio(ctx->ring_folios[0]); !! 553         kunmap_atomic(ring);
                                                   >> 554         flush_dcache_page(ctx->ring_pages[0]);
576                                                   555 
577         return 0;                                 556         return 0;
578 }                                                 557 }
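
The kmap_atomic()/kunmap_atomic() pair around the ring header is gone: the ring folios are no longer allocated from highmem, so folio_address() yields a directly usable kernel pointer. A sketch of the newer idiom (example_init_ring is hypothetical):

    static void example_init_ring(struct folio *folio, unsigned nr_events)
    {
            struct aio_ring *ring = folio_address(folio);   /* valid: not highmem */

            ring->nr = nr_events;
            ring->magic = AIO_RING_MAGIC;
            flush_dcache_folio(folio);      /* keep the user mapping coherent */
    }
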
579                                                   558 
580 #define AIO_EVENTS_PER_PAGE     (PAGE_SIZE / s    559 #define AIO_EVENTS_PER_PAGE     (PAGE_SIZE / sizeof(struct io_event))
581 #define AIO_EVENTS_FIRST_PAGE   ((PAGE_SIZE -     560 #define AIO_EVENTS_FIRST_PAGE   ((PAGE_SIZE - sizeof(struct aio_ring)) / sizeof(struct io_event))
582 #define AIO_EVENTS_OFFSET       (AIO_EVENTS_PE    561 #define AIO_EVENTS_OFFSET       (AIO_EVENTS_PER_PAGE - AIO_EVENTS_FIRST_PAGE)
583                                                   562 
584 void kiocb_set_cancel_fn(struct kiocb *iocb, k    563 void kiocb_set_cancel_fn(struct kiocb *iocb, kiocb_cancel_fn *cancel)
585 {                                                 564 {
586         struct aio_kiocb *req;                 !! 565         struct aio_kiocb *req = container_of(iocb, struct aio_kiocb, rw);
587         struct kioctx *ctx;                    !! 566         struct kioctx *ctx = req->ki_ctx;
588         unsigned long flags;                      567         unsigned long flags;
589                                                   568 
590         /*                                     << 
591          * kiocb didn't come from aio or is neither a read nor a write, hence <<
592          * ignore it.                          << 
593          */                                    << 
594         if (!(iocb->ki_flags & IOCB_AIO_RW))   << 
595                 return;                        << 
596                                                << 
597         req = container_of(iocb, struct aio_kiocb, rw); <<
598                                                << 
599         if (WARN_ON_ONCE(!list_empty(&req->ki_    569         if (WARN_ON_ONCE(!list_empty(&req->ki_list)))
600                 return;                           570                 return;
601                                                   571 
602         ctx = req->ki_ctx;                     << 
603                                                << 
604         spin_lock_irqsave(&ctx->ctx_lock, flag    572         spin_lock_irqsave(&ctx->ctx_lock, flags);
605         list_add_tail(&req->ki_list, &ctx->act    573         list_add_tail(&req->ki_list, &ctx->active_reqs);
606         req->ki_cancel = cancel;                  574         req->ki_cancel = cancel;
607         spin_unlock_irqrestore(&ctx->ctx_lock,    575         spin_unlock_irqrestore(&ctx->ctx_lock, flags);
608 }                                                 576 }
609 EXPORT_SYMBOL(kiocb_set_cancel_fn);               577 EXPORT_SYMBOL(kiocb_set_cancel_fn);
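
The new IOCB_AIO_RW test exists because kiocb_set_cancel_fn() is exported and can be handed a kiocb embedded in another submitter's request structure; without the flag check, the container_of() would point into unrelated memory. A sketch of the guard pattern (example_set_cancel is hypothetical):

    static void example_set_cancel(struct kiocb *iocb)
    {
            struct aio_kiocb *req;

            if (!(iocb->ki_flags & IOCB_AIO_RW))
                    return;         /* not an aio read/write: container_of() would be bogus */

            req = container_of(iocb, struct aio_kiocb, rw);
            /* ... safe to touch aio-private fields of req here ... */
    }
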
610                                                   578 
611 /*                                                579 /*
612  * free_ioctx() should be RCU delayed to synch    580  * free_ioctx() should be RCU delayed to synchronize against the RCU
613  * protected lookup_ioctx() and also needs pro    581  * protected lookup_ioctx() and also needs process context to call
614  * aio_free_ring().  Use rcu_work.                582  * aio_free_ring().  Use rcu_work.
615  */                                               583  */
616 static void free_ioctx(struct work_struct *wor    584 static void free_ioctx(struct work_struct *work)
617 {                                                 585 {
618         struct kioctx *ctx = container_of(to_r    586         struct kioctx *ctx = container_of(to_rcu_work(work), struct kioctx,
619                                           free    587                                           free_rwork);
620         pr_debug("freeing %p\n", ctx);            588         pr_debug("freeing %p\n", ctx);
621                                                   589 
622         aio_free_ring(ctx);                       590         aio_free_ring(ctx);
623         free_percpu(ctx->cpu);                    591         free_percpu(ctx->cpu);
624         percpu_ref_exit(&ctx->reqs);              592         percpu_ref_exit(&ctx->reqs);
625         percpu_ref_exit(&ctx->users);             593         percpu_ref_exit(&ctx->users);
626         kmem_cache_free(kioctx_cachep, ctx);      594         kmem_cache_free(kioctx_cachep, ctx);
627 }                                                 595 }
628                                                   596 
629 static void free_ioctx_reqs(struct percpu_ref     597 static void free_ioctx_reqs(struct percpu_ref *ref)
630 {                                                 598 {
631         struct kioctx *ctx = container_of(ref,    599         struct kioctx *ctx = container_of(ref, struct kioctx, reqs);
632                                                   600 
633         /* At this point we know that there are no in-flight requests */    601         /* At this point we know that there are no in-flight requests */
634         if (ctx->rq_wait && atomic_dec_and_tes    602         if (ctx->rq_wait && atomic_dec_and_test(&ctx->rq_wait->count))
635                 complete(&ctx->rq_wait->comp);    603                 complete(&ctx->rq_wait->comp);
636                                                   604 
637         /* Synchronize against RCU protected t    605         /* Synchronize against RCU protected table->table[] dereferences */
638         INIT_RCU_WORK(&ctx->free_rwork, free_i    606         INIT_RCU_WORK(&ctx->free_rwork, free_ioctx);
639         queue_rcu_work(system_wq, &ctx->free_r    607         queue_rcu_work(system_wq, &ctx->free_rwork);
640 }                                                 608 }
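/*
 * [Editor's note] The rcu_work pattern used above, in isolation: unlike
 * call_rcu(), queue_rcu_work() runs the callback in process context
 * after the grace period, so it may sleep (as aio_free_ring() does).
 * struct my_obj is illustrative, not part of fs/aio.c.
 */
struct my_obj {
	struct rcu_work free_rwork;
	/* ... payload RCU readers may still be dereferencing ... */
};

static void my_obj_free(struct work_struct *work)
{
	struct my_obj *obj = container_of(to_rcu_work(work),
					  struct my_obj, free_rwork);

	kfree(obj);		/* grace period elapsed, process context */
}

static void my_obj_release(struct my_obj *obj)
{
	INIT_RCU_WORK(&obj->free_rwork, my_obj_free);
	queue_rcu_work(system_wq, &obj->free_rwork);
}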
641                                                   609 
642 /*                                                610 /*
643  * When this function runs, the kioctx has bee    611  * When this function runs, the kioctx has been removed from the "hash table"
644  * and ctx->users has dropped to 0, so we know    612  * and ctx->users has dropped to 0, so we know no more kiocbs can be submitted -
645  * now it's safe to cancel any that need to be    613  * now it's safe to cancel any that need to be.
646  */                                               614  */
647 static void free_ioctx_users(struct percpu_ref    615 static void free_ioctx_users(struct percpu_ref *ref)
648 {                                                 616 {
649         struct kioctx *ctx = container_of(ref,    617         struct kioctx *ctx = container_of(ref, struct kioctx, users);
650         struct aio_kiocb *req;                    618         struct aio_kiocb *req;
651                                                   619 
652         spin_lock_irq(&ctx->ctx_lock);            620         spin_lock_irq(&ctx->ctx_lock);
653                                                   621 
654         while (!list_empty(&ctx->active_reqs))    622         while (!list_empty(&ctx->active_reqs)) {
655                 req = list_first_entry(&ctx->a    623                 req = list_first_entry(&ctx->active_reqs,
656                                        struct     624                                        struct aio_kiocb, ki_list);
657                 req->ki_cancel(&req->rw);         625                 req->ki_cancel(&req->rw);
658                 list_del_init(&req->ki_list);     626                 list_del_init(&req->ki_list);
659         }                                         627         }
660                                                   628 
661         spin_unlock_irq(&ctx->ctx_lock);          629         spin_unlock_irq(&ctx->ctx_lock);
662                                                   630 
663         percpu_ref_kill(&ctx->reqs);              631         percpu_ref_kill(&ctx->reqs);
664         percpu_ref_put(&ctx->reqs);               632         percpu_ref_put(&ctx->reqs);
665 }                                                 633 }
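/*
 * [Editor's note] The two percpu_refs give a staged teardown: kill_ioctx()
 * kills ->users, whose release (this function) cancels in-flight requests
 * and kills ->reqs, whose release (free_ioctx_reqs, above) schedules
 * free_ioctx().  Generic shape of the percpu_ref API, for reference;
 * example_release() is an illustrative name:
 */
static void example_release(struct percpu_ref *ref)
{
	/* runs once, after percpu_ref_kill() and the last put */
}

static int example_lifecycle(struct percpu_ref *ref)
{
	int err = percpu_ref_init(ref, example_release, 0, GFP_KERNEL);

	if (err)
		return err;
	if (percpu_ref_tryget_live(ref))	/* fails once killed */
		percpu_ref_put(ref);
	percpu_ref_kill(ref);			/* release runs at zero */
	return 0;
}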
666                                                   634 
667 static int ioctx_add_table(struct kioctx *ctx,    635 static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm)
668 {                                                 636 {
669         unsigned i, new_nr;                       637         unsigned i, new_nr;
670         struct kioctx_table *table, *old;         638         struct kioctx_table *table, *old;
671         struct aio_ring *ring;                    639         struct aio_ring *ring;
672                                                   640 
673         spin_lock(&mm->ioctx_lock);               641         spin_lock(&mm->ioctx_lock);
674         table = rcu_dereference_raw(mm->ioctx_    642         table = rcu_dereference_raw(mm->ioctx_table);
675                                                   643 
676         while (1) {                               644         while (1) {
677                 if (table)                        645                 if (table)
678                         for (i = 0; i < table-    646                         for (i = 0; i < table->nr; i++)
679                                 if (!rcu_acces    647                                 if (!rcu_access_pointer(table->table[i])) {
680                                         ctx->i    648                                         ctx->id = i;
681                                         rcu_as    649                                         rcu_assign_pointer(table->table[i], ctx);
682                                         spin_u    650                                         spin_unlock(&mm->ioctx_lock);
683                                                   651 
684                                         /* While kioctx setup is in progress,    652                                         /* While kioctx setup is in progress,
685                                          * we are protected from page migration    653                                          * we are protected from page migration
686                                          * changing ring_folios by ->ring_lock. !! 654                                          * changes ring_pages by ->ring_lock.
687                                          */       655                                          */
688                                         ring = folio_address(ctx->ring_folios[0]); !! 656                                         ring = kmap_atomic(ctx->ring_pages[0]);
689                                         ring->    657                                         ring->id = ctx->id;
                                                   >> 658                                         kunmap_atomic(ring);
690                                         return    659                                         return 0;
691                                 }                 660                                 }
692                                                   661 
693                 new_nr = (table ? table->nr :     662                 new_nr = (table ? table->nr : 1) * 4;
694                 spin_unlock(&mm->ioctx_lock);     663                 spin_unlock(&mm->ioctx_lock);
695                                                   664 
696                 table = kzalloc(struct_size(table, table, new_nr), GFP_KERNEL); !! 665                 table = kzalloc(sizeof(*table) + sizeof(struct kioctx *) *
                                                   >> 666                                 new_nr, GFP_KERNEL);
697                 if (!table)                       667                 if (!table)
698                         return -ENOMEM;           668                         return -ENOMEM;
699                                                   669 
700                 table->nr = new_nr;               670                 table->nr = new_nr;
701                                                   671 
702                 spin_lock(&mm->ioctx_lock);       672                 spin_lock(&mm->ioctx_lock);
703                 old = rcu_dereference_raw(mm->    673                 old = rcu_dereference_raw(mm->ioctx_table);
704                                                   674 
705                 if (!old) {                       675                 if (!old) {
706                         rcu_assign_pointer(mm-    676                         rcu_assign_pointer(mm->ioctx_table, table);
707                 } else if (table->nr > old->nr    677                 } else if (table->nr > old->nr) {
708                         memcpy(table->table, o    678                         memcpy(table->table, old->table,
709                                old->nr * sizeo    679                                old->nr * sizeof(struct kioctx *));
710                                                   680 
711                         rcu_assign_pointer(mm-    681                         rcu_assign_pointer(mm->ioctx_table, table);
712                         kfree_rcu(old, rcu);      682                         kfree_rcu(old, rcu);
713                 } else {                          683                 } else {
714                         kfree(table);             684                         kfree(table);
715                         table = old;              685                         table = old;
716                 }                                 686                 }
717         }                                         687         }
718 }                                                 688 }
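/*
 * [Editor's note] The table grows 4x per pass: (table ? table->nr : 1) * 4
 * gives 4 slots on first use, then 16, 64, ...  A userspace sketch of the
 * same grow-and-copy flexible-array pattern (calloc standing in for
 * kzalloc(struct_size(...))); illustrative only:
 */
#include <stdlib.h>
#include <string.h>

struct xtable {
	unsigned nr;
	void *slot[];			/* flexible array, like kioctx_table */
};

static struct xtable *xtable_grow(const struct xtable *old)
{
	unsigned new_nr = (old ? old->nr : 1) * 4;
	struct xtable *t = calloc(1, sizeof(*t) + new_nr * sizeof(t->slot[0]));

	if (!t)
		return NULL;
	t->nr = new_nr;
	if (old)
		memcpy(t->slot, old->slot, old->nr * sizeof(t->slot[0]));
	return t;
}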
719                                                   689 
720 static void aio_nr_sub(unsigned nr)               690 static void aio_nr_sub(unsigned nr)
721 {                                                 691 {
722         spin_lock(&aio_nr_lock);                  692         spin_lock(&aio_nr_lock);
723         if (WARN_ON(aio_nr - nr > aio_nr))        693         if (WARN_ON(aio_nr - nr > aio_nr))
724                 aio_nr = 0;                       694                 aio_nr = 0;
725         else                                      695         else
726                 aio_nr -= nr;                     696                 aio_nr -= nr;
727         spin_unlock(&aio_nr_lock);                697         spin_unlock(&aio_nr_lock);
728 }                                                 698 }
729                                                   699 
730 /* ioctx_alloc                                    700 /* ioctx_alloc
731  *      Allocates and initializes an ioctx.  R    701  *      Allocates and initializes an ioctx.  Returns an ERR_PTR if it failed.
732  */                                               702  */
733 static struct kioctx *ioctx_alloc(unsigned nr_    703 static struct kioctx *ioctx_alloc(unsigned nr_events)
734 {                                                 704 {
735         struct mm_struct *mm = current->mm;       705         struct mm_struct *mm = current->mm;
736         struct kioctx *ctx;                       706         struct kioctx *ctx;
737         int err = -ENOMEM;                        707         int err = -ENOMEM;
738                                                   708 
739         /*                                        709         /*
740          * Store the original nr_events -- wha    710          * Store the original nr_events -- what userspace passed to io_setup(),
741          * for counting against the global lim    711          * for counting against the global limit -- before it changes.
742          */                                       712          */
743         unsigned int max_reqs = nr_events;        713         unsigned int max_reqs = nr_events;
744                                                   714 
745         /*                                        715         /*
746          * We keep track of the number of avai    716          * We keep track of the number of available ringbuffer slots, to prevent
747          * overflow (reqs_available), and we a    717          * overflow (reqs_available), and we also use percpu counters for this.
748          *                                        718          *
749          * So since up to half the slots might be on other CPUs' percpu counters    719          * So since up to half the slots might be on other CPUs' percpu counters
750          * and unavailable, double nr_events s    720          * and unavailable, double nr_events so userspace sees what they
751          * expected: additionally, we move req    721          * expected: additionally, we move req_batch slots to/from percpu
752          * counters at a time, so make sure th    722          * counters at a time, so make sure that isn't 0:
753          */                                       723          */
754         nr_events = max(nr_events, num_possibl    724         nr_events = max(nr_events, num_possible_cpus() * 4);
755         nr_events *= 2;                           725         nr_events *= 2;
756                                                   726 
757         /* Prevent overflows */                   727         /* Prevent overflows */
758         if (nr_events > (0x10000000U / sizeof(    728         if (nr_events > (0x10000000U / sizeof(struct io_event))) {
759                 pr_debug("ENOMEM: nr_events to    729                 pr_debug("ENOMEM: nr_events too high\n");
760                 return ERR_PTR(-EINVAL);          730                 return ERR_PTR(-EINVAL);
761         }                                         731         }
762                                                   732 
763         if (!nr_events || (unsigned long)max_r    733         if (!nr_events || (unsigned long)max_reqs > aio_max_nr)
764                 return ERR_PTR(-EAGAIN);          734                 return ERR_PTR(-EAGAIN);
765                                                   735 
766         ctx = kmem_cache_zalloc(kioctx_cachep,    736         ctx = kmem_cache_zalloc(kioctx_cachep, GFP_KERNEL);
767         if (!ctx)                                 737         if (!ctx)
768                 return ERR_PTR(-ENOMEM);          738                 return ERR_PTR(-ENOMEM);
769                                                   739 
770         ctx->max_reqs = max_reqs;                 740         ctx->max_reqs = max_reqs;
771                                                   741 
772         spin_lock_init(&ctx->ctx_lock);           742         spin_lock_init(&ctx->ctx_lock);
773         spin_lock_init(&ctx->completion_lock);    743         spin_lock_init(&ctx->completion_lock);
774         mutex_init(&ctx->ring_lock);              744         mutex_init(&ctx->ring_lock);
775         /* Protect against page migration thro    745         /* Protect against page migration throughout kiotx setup by keeping
776          * the ring_lock mutex held until setu    746          * the ring_lock mutex held until setup is complete. */
777         mutex_lock(&ctx->ring_lock);              747         mutex_lock(&ctx->ring_lock);
778         init_waitqueue_head(&ctx->wait);          748         init_waitqueue_head(&ctx->wait);
779                                                   749 
780         INIT_LIST_HEAD(&ctx->active_reqs);        750         INIT_LIST_HEAD(&ctx->active_reqs);
781                                                   751 
782         if (percpu_ref_init(&ctx->users, free_    752         if (percpu_ref_init(&ctx->users, free_ioctx_users, 0, GFP_KERNEL))
783                 goto err;                         753                 goto err;
784                                                   754 
785         if (percpu_ref_init(&ctx->reqs, free_i    755         if (percpu_ref_init(&ctx->reqs, free_ioctx_reqs, 0, GFP_KERNEL))
786                 goto err;                         756                 goto err;
787                                                   757 
788         ctx->cpu = alloc_percpu(struct kioctx_    758         ctx->cpu = alloc_percpu(struct kioctx_cpu);
789         if (!ctx->cpu)                            759         if (!ctx->cpu)
790                 goto err;                         760                 goto err;
791                                                   761 
792         err = aio_setup_ring(ctx, nr_events);     762         err = aio_setup_ring(ctx, nr_events);
793         if (err < 0)                              763         if (err < 0)
794                 goto err;                         764                 goto err;
795                                                   765 
796         atomic_set(&ctx->reqs_available, ctx->    766         atomic_set(&ctx->reqs_available, ctx->nr_events - 1);
797         ctx->req_batch = (ctx->nr_events - 1)     767         ctx->req_batch = (ctx->nr_events - 1) / (num_possible_cpus() * 4);
798         if (ctx->req_batch < 1)                   768         if (ctx->req_batch < 1)
799                 ctx->req_batch = 1;               769                 ctx->req_batch = 1;
800                                                   770 
801         /* limit the number of system wide aio    771         /* limit the number of system wide aios */
802         spin_lock(&aio_nr_lock);                  772         spin_lock(&aio_nr_lock);
803         if (aio_nr + ctx->max_reqs > aio_max_n    773         if (aio_nr + ctx->max_reqs > aio_max_nr ||
804             aio_nr + ctx->max_reqs < aio_nr) {    774             aio_nr + ctx->max_reqs < aio_nr) {
805                 spin_unlock(&aio_nr_lock);        775                 spin_unlock(&aio_nr_lock);
806                 err = -EAGAIN;                    776                 err = -EAGAIN;
807                 goto err_ctx;                     777                 goto err_ctx;
808         }                                         778         }
809         aio_nr += ctx->max_reqs;                  779         aio_nr += ctx->max_reqs;
810         spin_unlock(&aio_nr_lock);                780         spin_unlock(&aio_nr_lock);
811                                                   781 
812         percpu_ref_get(&ctx->users);    /* io_    782         percpu_ref_get(&ctx->users);    /* io_setup() will drop this ref */
813         percpu_ref_get(&ctx->reqs);     /* fre    783         percpu_ref_get(&ctx->reqs);     /* free_ioctx_users() will drop this */
814                                                   784 
815         err = ioctx_add_table(ctx, mm);           785         err = ioctx_add_table(ctx, mm);
816         if (err)                                  786         if (err)
817                 goto err_cleanup;                 787                 goto err_cleanup;
818                                                   788 
819         /* Release the ring_lock mutex now tha    789         /* Release the ring_lock mutex now that all setup is complete. */
820         mutex_unlock(&ctx->ring_lock);            790         mutex_unlock(&ctx->ring_lock);
821                                                   791 
822         pr_debug("allocated ioctx %p[%ld]: mm=    792         pr_debug("allocated ioctx %p[%ld]: mm=%p mask=0x%x\n",
823                  ctx, ctx->user_id, mm, ctx->n    793                  ctx, ctx->user_id, mm, ctx->nr_events);
824         return ctx;                               794         return ctx;
825                                                   795 
826 err_cleanup:                                      796 err_cleanup:
827         aio_nr_sub(ctx->max_reqs);                797         aio_nr_sub(ctx->max_reqs);
828 err_ctx:                                          798 err_ctx:
829         atomic_set(&ctx->dead, 1);                799         atomic_set(&ctx->dead, 1);
830         if (ctx->mmap_size)                       800         if (ctx->mmap_size)
831                 vm_munmap(ctx->mmap_base, ctx-    801                 vm_munmap(ctx->mmap_base, ctx->mmap_size);
832         aio_free_ring(ctx);                       802         aio_free_ring(ctx);
833 err:                                              803 err:
834         mutex_unlock(&ctx->ring_lock);            804         mutex_unlock(&ctx->ring_lock);
835         free_percpu(ctx->cpu);                    805         free_percpu(ctx->cpu);
836         percpu_ref_exit(&ctx->reqs);              806         percpu_ref_exit(&ctx->reqs);
837         percpu_ref_exit(&ctx->users);             807         percpu_ref_exit(&ctx->users);
838         kmem_cache_free(kioctx_cachep, ctx);      808         kmem_cache_free(kioctx_cachep, ctx);
839         pr_debug("error allocating ioctx %d\n"    809         pr_debug("error allocating ioctx %d\n", err);
840         return ERR_PTR(err);                      810         return ERR_PTR(err);
841 }                                                 811 }
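/*
 * [Editor's note] Worked example of the sizing above: io_setup(128, &ctx)
 * on a 4-CPU machine gives nr_events = max(128, 4 * 4) * 2 = 256 before
 * aio_setup_ring() rounds the ring up to whole pages.  A runnable
 * userspace sketch of the syscall pair this function backs:
 */
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/aio_abi.h>

int main(void)
{
	aio_context_t ctx = 0;

	if (syscall(SYS_io_setup, 128, &ctx)) {
		perror("io_setup");	/* e.g. EAGAIN once aio_max_nr is hit */
		return 1;
	}
	/* the handle is really ctx->user_id: the mmap base of the aio_ring */
	printf("ring mapped at %#lx\n", (unsigned long)ctx);
	syscall(SYS_io_destroy, ctx);
	return 0;
}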
842                                                   812 
843 /* kill_ioctx                                     813 /* kill_ioctx
844  *      Cancels all outstanding aio requests o    814  *      Cancels all outstanding aio requests on an aio context.  Used
845  *      when the processes owning a context ha    815  *      when the processes owning a context have all exited to encourage
846  *      the rapid destruction of the kioctx.      816  *      the rapid destruction of the kioctx.
847  */                                               817  */
848 static int kill_ioctx(struct mm_struct *mm, st    818 static int kill_ioctx(struct mm_struct *mm, struct kioctx *ctx,
849                       struct ctx_rq_wait *wait    819                       struct ctx_rq_wait *wait)
850 {                                                 820 {
851         struct kioctx_table *table;               821         struct kioctx_table *table;
852                                                   822 
853         spin_lock(&mm->ioctx_lock);               823         spin_lock(&mm->ioctx_lock);
854         if (atomic_xchg(&ctx->dead, 1)) {         824         if (atomic_xchg(&ctx->dead, 1)) {
855                 spin_unlock(&mm->ioctx_lock);     825                 spin_unlock(&mm->ioctx_lock);
856                 return -EINVAL;                   826                 return -EINVAL;
857         }                                         827         }
858                                                   828 
859         table = rcu_dereference_raw(mm->ioctx_    829         table = rcu_dereference_raw(mm->ioctx_table);
860         WARN_ON(ctx != rcu_access_pointer(tabl    830         WARN_ON(ctx != rcu_access_pointer(table->table[ctx->id]));
861         RCU_INIT_POINTER(table->table[ctx->id]    831         RCU_INIT_POINTER(table->table[ctx->id], NULL);
862         spin_unlock(&mm->ioctx_lock);             832         spin_unlock(&mm->ioctx_lock);
863                                                   833 
864         /* free_ioctx_reqs() will do the neces    834         /* free_ioctx_reqs() will do the necessary RCU synchronization */
865         wake_up_all(&ctx->wait);                  835         wake_up_all(&ctx->wait);
866                                                   836 
867         /*                                        837         /*
868          * It'd be more correct to do this in     838          * It'd be more correct to do this in free_ioctx(), after all
869          * the outstanding kiocbs have finishe    839          * the outstanding kiocbs have finished - but by then io_destroy
870          * has already returned, so io_setup()    840          * has already returned, so io_setup() could potentially return
871          * -EAGAIN with no ioctxs actually in     841          * -EAGAIN with no ioctxs actually in use (as far as userspace
872          *  could tell).                          842          *  could tell).
873          */                                       843          */
874         aio_nr_sub(ctx->max_reqs);                844         aio_nr_sub(ctx->max_reqs);
875                                                   845 
876         if (ctx->mmap_size)                       846         if (ctx->mmap_size)
877                 vm_munmap(ctx->mmap_base, ctx-    847                 vm_munmap(ctx->mmap_base, ctx->mmap_size);
878                                                   848 
879         ctx->rq_wait = wait;                      849         ctx->rq_wait = wait;
880         percpu_ref_kill(&ctx->users);             850         percpu_ref_kill(&ctx->users);
881         return 0;                                 851         return 0;
882 }                                                 852 }
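/*
 * [Editor's note] The atomic_xchg(&ctx->dead, 1) above is the usual
 * "claim teardown exactly once" idiom: concurrent io_destroy() callers
 * race, one wins, the rest get -EINVAL.  The same idiom in C11 form:
 */
#include <stdatomic.h>
#include <stdbool.h>

static bool claim_teardown(atomic_int *dead)
{
	/* only the caller that observes the old value 0 may proceed */
	return atomic_exchange(dead, 1) == 0;
}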
883                                                   853 
884 /*                                                854 /*
885  * exit_aio: called when the last user of mm goes away.  At this point, there is    855  * exit_aio: called when the last user of mm goes away.  At this point, there is
886  * no way for any new requests to be submitted or any of the io_* syscalls to be    856  * no way for any new requests to be submitted or any of the io_* syscalls to be
887  * called on the context.                         857  * called on the context.
888  *                                                858  *
889  * There may be outstanding kiocbs, but free_i    859  * There may be outstanding kiocbs, but free_ioctx() will explicitly wait on
890  * them.                                          860  * them.
891  */                                               861  */
892 void exit_aio(struct mm_struct *mm)               862 void exit_aio(struct mm_struct *mm)
893 {                                                 863 {
894         struct kioctx_table *table = rcu_deref    864         struct kioctx_table *table = rcu_dereference_raw(mm->ioctx_table);
895         struct ctx_rq_wait wait;                  865         struct ctx_rq_wait wait;
896         int i, skipped;                           866         int i, skipped;
897                                                   867 
898         if (!table)                               868         if (!table)
899                 return;                           869                 return;
900                                                   870 
901         atomic_set(&wait.count, table->nr);       871         atomic_set(&wait.count, table->nr);
902         init_completion(&wait.comp);              872         init_completion(&wait.comp);
903                                                   873 
904         skipped = 0;                              874         skipped = 0;
905         for (i = 0; i < table->nr; ++i) {         875         for (i = 0; i < table->nr; ++i) {
906                 struct kioctx *ctx =              876                 struct kioctx *ctx =
907                         rcu_dereference_protec    877                         rcu_dereference_protected(table->table[i], true);
908                                                   878 
909                 if (!ctx) {                       879                 if (!ctx) {
910                         skipped++;                880                         skipped++;
911                         continue;                 881                         continue;
912                 }                                 882                 }
913                                                   883 
914                 /*                                884                 /*
915                  * We don't need to bother wit    885                  * We don't need to bother with munmap() here - exit_mmap(mm)
916                  * is coming and it'll unmap e    886                  * is coming and it'll unmap everything. And we simply can't,
917                  * this is not necessarily our    887                  * this is not necessarily our ->mm.
918                  * Since kill_ioctx() uses non    888                  * Since kill_ioctx() uses non-zero ->mmap_size as indicator
919                  * that it needs to unmap the     889                  * that it needs to unmap the area, just set it to 0.
920                  */                               890                  */
921                 ctx->mmap_size = 0;               891                 ctx->mmap_size = 0;
922                 kill_ioctx(mm, ctx, &wait);       892                 kill_ioctx(mm, ctx, &wait);
923         }                                         893         }
924                                                   894 
925         if (!atomic_sub_and_test(skipped, &wai    895         if (!atomic_sub_and_test(skipped, &wait.count)) {
926                 /* Wait until all IO for the c    896                 /* Wait until all IO for the context are done. */
927                 wait_for_completion(&wait.comp    897                 wait_for_completion(&wait.comp);
928         }                                         898         }
929                                                   899 
930         RCU_INIT_POINTER(mm->ioctx_table, NULL    900         RCU_INIT_POINTER(mm->ioctx_table, NULL);
931         kfree(table);                             901         kfree(table);
932 }                                                 902 }
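/*
 * [Editor's note] wait.count acts as a countdown latch.  Worked example:
 * table->nr = 8 with three live contexts means skipped = 5, so the
 * atomic_sub_and_test(5, ...) above leaves 3 and the caller sleeps in
 * wait_for_completion() until each context's free_ioctx_reqs() has run
 * the put side, whose shape (from that function) is:
 */
static void latch_put(struct ctx_rq_wait *wait)
{
	if (atomic_dec_and_test(&wait->count))
		complete(&wait->comp);	/* last one out wakes exit_aio() */
}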
933                                                   903 
934 static void put_reqs_available(struct kioctx *    904 static void put_reqs_available(struct kioctx *ctx, unsigned nr)
935 {                                                 905 {
936         struct kioctx_cpu *kcpu;                  906         struct kioctx_cpu *kcpu;
937         unsigned long flags;                      907         unsigned long flags;
938                                                   908 
939         local_irq_save(flags);                    909         local_irq_save(flags);
940         kcpu = this_cpu_ptr(ctx->cpu);            910         kcpu = this_cpu_ptr(ctx->cpu);
941         kcpu->reqs_available += nr;               911         kcpu->reqs_available += nr;
942                                                   912 
943         while (kcpu->reqs_available >= ctx->re    913         while (kcpu->reqs_available >= ctx->req_batch * 2) {
944                 kcpu->reqs_available -= ctx->r    914                 kcpu->reqs_available -= ctx->req_batch;
945                 atomic_add(ctx->req_batch, &ct    915                 atomic_add(ctx->req_batch, &ctx->reqs_available);
946         }                                         916         }
947                                                   917 
948         local_irq_restore(flags);                 918         local_irq_restore(flags);
949 }                                                 919 }
950                                                   920 
951 static bool __get_reqs_available(struct kioctx    921 static bool __get_reqs_available(struct kioctx *ctx)
952 {                                                 922 {
953         struct kioctx_cpu *kcpu;                  923         struct kioctx_cpu *kcpu;
954         bool ret = false;                         924         bool ret = false;
955         unsigned long flags;                      925         unsigned long flags;
956                                                   926 
957         local_irq_save(flags);                    927         local_irq_save(flags);
958         kcpu = this_cpu_ptr(ctx->cpu);            928         kcpu = this_cpu_ptr(ctx->cpu);
959         if (!kcpu->reqs_available) {              929         if (!kcpu->reqs_available) {
960         int avail = atomic_read(&ctx->reqs_available); !! 930         int old, avail = atomic_read(&ctx->reqs_available);
961                                                   931 
962                 do {                              932                 do {
963                         if (avail < ctx->req_batch)    933                         if (avail < ctx->req_batch)
964                                 goto out;         934                                 goto out;
965                 } while (!atomic_try_cmpxchg(&ctx->reqs_available, !! 935 
966                                              &avail, avail - ctx->req_batch)); !! 936                         old = avail;
                                                   >> 937                         avail = atomic_cmpxchg(&ctx->reqs_available,
                                                   >> 938                                                avail, avail - ctx->req_batch);
                                                   >> 939                 } while (avail != old);
967                                                   940 
968                 kcpu->reqs_available += ctx->r    941                 kcpu->reqs_available += ctx->req_batch;
969         }                                         942         }
970                                                   943 
971         ret = true;                               944         ret = true;
972         kcpu->reqs_available--;                   945         kcpu->reqs_available--;
973 out:                                              946 out:
974         local_irq_restore(flags);                 947         local_irq_restore(flags);
975         return ret;                               948         return ret;
976 }                                                 949 }
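/*
 * [Editor's note] The atomic_try_cmpxchg() loop above as a standalone C11
 * sketch: take req_batch slots from the shared counter, or fail.  As with
 * the kernel primitive, compare_exchange reloads 'avail' with the current
 * value on failure, so the bound is re-checked on every retry.
 */
#include <stdatomic.h>
#include <stdbool.h>

static bool take_batch(atomic_int *reqs_available, int req_batch)
{
	int avail = atomic_load(reqs_available);

	do {
		if (avail < req_batch)
			return false;
	} while (!atomic_compare_exchange_weak(reqs_available, &avail,
					       avail - req_batch));
	return true;
}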
977                                                   950 
978 /* refill_reqs_available                          951 /* refill_reqs_available
979  *      Updates the reqs_available reference c    952  *      Updates the reqs_available reference counts used for tracking the
980  *      number of free slots in the completion    953  *      number of free slots in the completion ring.  This can be called
981  *      from aio_complete() (to optimistically    954  *      from aio_complete() (to optimistically update reqs_available) or
982  *      from aio_get_req() (the "we're out of events" case).  It must be    955  *      from aio_get_req() (the "we're out of events" case).  It must be
983  *      called holding ctx->completion_lock.      956  *      called holding ctx->completion_lock.
984  */                                               957  */
985 static void refill_reqs_available(struct kioct    958 static void refill_reqs_available(struct kioctx *ctx, unsigned head,
986                                   unsigned tai    959                                   unsigned tail)
987 {                                                 960 {
988         unsigned events_in_ring, completed;       961         unsigned events_in_ring, completed;
989                                                   962 
990         /* Clamp head since userland can write    963         /* Clamp head since userland can write to it. */
991         head %= ctx->nr_events;                   964         head %= ctx->nr_events;
992         if (head <= tail)                         965         if (head <= tail)
993                 events_in_ring = tail - head;     966                 events_in_ring = tail - head;
994         else                                      967         else
995                 events_in_ring = ctx->nr_event    968                 events_in_ring = ctx->nr_events - (head - tail);
996                                                   969 
997         completed = ctx->completed_events;        970         completed = ctx->completed_events;
998         if (events_in_ring < completed)           971         if (events_in_ring < completed)
999                 completed -= events_in_ring;      972                 completed -= events_in_ring;
1000         else                                     973         else
1001                 completed = 0;                   974                 completed = 0;
1002                                                  975 
1003         if (!completed)                          976         if (!completed)
1004                 return;                          977                 return;
1005                                                  978 
1006         ctx->completed_events -= completed;      979         ctx->completed_events -= completed;
1007         put_reqs_available(ctx, completed);      980         put_reqs_available(ctx, completed);
1008 }                                                981 }
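/*
 * [Editor's note] The occupancy arithmetic above as a pure function, with
 * a worked example: nr_events = 8, head = 6, tail = 2 gives
 * 8 - (6 - 2) = 4 events still in the ring, so only completions beyond
 * those 4 can be handed back to reqs_available.
 */
static unsigned events_in_ring(unsigned head, unsigned tail, unsigned nr)
{
	head %= nr;		/* clamp: userland can scribble on head */
	return head <= tail ? tail - head : nr - (head - tail);
}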
1009                                                  982 
1010 /* user_refill_reqs_available                    983 /* user_refill_reqs_available
1011  *      Called to refill reqs_available when aio_get_req() runs out of    984  *      Called to refill reqs_available when aio_get_req() runs out of
1012  *      space in the completion ring.    985  *      space in the completion ring.
1013  */                                              986  */
1014 static void user_refill_reqs_available(struct    987 static void user_refill_reqs_available(struct kioctx *ctx)
1015 {                                                988 {
1016         spin_lock_irq(&ctx->completion_lock);    989         spin_lock_irq(&ctx->completion_lock);
1017         if (ctx->completed_events) {             990         if (ctx->completed_events) {
1018                 struct aio_ring *ring;           991                 struct aio_ring *ring;
1019                 unsigned head;                   992                 unsigned head;
1020                                                  993 
1021                 /* Access of ring->head may r    994                 /* Access of ring->head may race with aio_read_events_ring()
1022                  * here, but that's okay since we may read either the old    995                  * here, but that's okay since we may read either the old
1023                  * version or the new, and either will be valid.  The important    996                  * version or the new, and either will be valid.  The important
1024                  * part is that head cannot p    997                  * part is that head cannot pass tail since we prevent
1025                  * aio_complete() from updati    998                  * aio_complete() from updating tail by holding
1026                  * ctx->completion_lock.  Eve    999                  * ctx->completion_lock.  Even if head is invalid, the check
1027                  * against ctx->completed_eve    1000                  * against ctx->completed_events below will make sure we do the
1028                  * safe/right thing.             1001                  * safe/right thing.
1029                  */                              1002                  */
1030                 ring = folio_address(ctx->ring_folios[0]); !! 1003                 ring = kmap_atomic(ctx->ring_pages[0]);
1031                 head = ring->head;               1004                 head = ring->head;
                                                   >> 1005                 kunmap_atomic(ring);
1032                                                  1006 
1033                 refill_reqs_available(ctx, he    1007                 refill_reqs_available(ctx, head, ctx->tail);
1034         }                                        1008         }
1035                                                  1009 
1036         spin_unlock_irq(&ctx->completion_lock    1010         spin_unlock_irq(&ctx->completion_lock);
1037 }                                                1011 }
1038                                                  1012 
1039 static bool get_reqs_available(struct kioctx     1013 static bool get_reqs_available(struct kioctx *ctx)
1040 {                                                1014 {
1041         if (__get_reqs_available(ctx))           1015         if (__get_reqs_available(ctx))
1042                 return true;                     1016                 return true;
1043         user_refill_reqs_available(ctx);         1017         user_refill_reqs_available(ctx);
1044         return __get_reqs_available(ctx);        1018         return __get_reqs_available(ctx);
1045 }                                                1019 }
1046                                                  1020 
1047 /* aio_get_req                                   1021 /* aio_get_req
1048  *      Allocate a slot for an aio request.      1022  *      Allocate a slot for an aio request.
1049  * Returns NULL if no requests are free.         1023  * Returns NULL if no requests are free.
1050  *                                               1024  *
1051  * The refcount is initialized to 2 - one for    1025  * The refcount is initialized to 2 - one for the async op completion,
1052  * one for the synchronous code that does thi    1026  * one for the synchronous code that does this.
1053  */                                              1027  */
1054 static inline struct aio_kiocb *aio_get_req(s    1028 static inline struct aio_kiocb *aio_get_req(struct kioctx *ctx)
1055 {                                                1029 {
1056         struct aio_kiocb *req;                   1030         struct aio_kiocb *req;
1057                                                  1031 
1058         req = kmem_cache_alloc(kiocb_cachep,     1032         req = kmem_cache_alloc(kiocb_cachep, GFP_KERNEL);
1059         if (unlikely(!req))                      1033         if (unlikely(!req))
1060                 return NULL;                     1034                 return NULL;
1061                                                  1035 
1062         if (unlikely(!get_reqs_available(ctx)    1036         if (unlikely(!get_reqs_available(ctx))) {
1063                 kmem_cache_free(kiocb_cachep,    1037                 kmem_cache_free(kiocb_cachep, req);
1064                 return NULL;                     1038                 return NULL;
1065         }                                        1039         }
1066                                                  1040 
1067         percpu_ref_get(&ctx->reqs);              1041         percpu_ref_get(&ctx->reqs);
1068         req->ki_ctx = ctx;                       1042         req->ki_ctx = ctx;
1069         INIT_LIST_HEAD(&req->ki_list);           1043         INIT_LIST_HEAD(&req->ki_list);
1070         refcount_set(&req->ki_refcnt, 2);        1044         refcount_set(&req->ki_refcnt, 2);
1071         req->ki_eventfd = NULL;                  1045         req->ki_eventfd = NULL;
1072         return req;                              1046         return req;
1073 }                                                1047 }
1074                                                  1048 
1075 static struct kioctx *lookup_ioctx(unsigned l    1049 static struct kioctx *lookup_ioctx(unsigned long ctx_id)
1076 {                                                1050 {
1077         struct aio_ring __user *ring  = (void    1051         struct aio_ring __user *ring  = (void __user *)ctx_id;
1078         struct mm_struct *mm = current->mm;      1052         struct mm_struct *mm = current->mm;
1079         struct kioctx *ctx, *ret = NULL;         1053         struct kioctx *ctx, *ret = NULL;
1080         struct kioctx_table *table;              1054         struct kioctx_table *table;
1081         unsigned id;                             1055         unsigned id;
1082                                                  1056 
1083         if (get_user(id, &ring->id))             1057         if (get_user(id, &ring->id))
1084                 return NULL;                     1058                 return NULL;
1085                                                  1059 
1086         rcu_read_lock();                         1060         rcu_read_lock();
1087         table = rcu_dereference(mm->ioctx_tab    1061         table = rcu_dereference(mm->ioctx_table);
1088                                                  1062 
1089         if (!table || id >= table->nr)           1063         if (!table || id >= table->nr)
1090                 goto out;                        1064                 goto out;
1091                                                  1065 
1092         id = array_index_nospec(id, table->nr    1066         id = array_index_nospec(id, table->nr);
1093         ctx = rcu_dereference(table->table[id    1067         ctx = rcu_dereference(table->table[id]);
1094         if (ctx && ctx->user_id == ctx_id) {     1068         if (ctx && ctx->user_id == ctx_id) {
1095                 if (percpu_ref_tryget_live(&c    1069                 if (percpu_ref_tryget_live(&ctx->users))
1096                         ret = ctx;               1070                         ret = ctx;
1097         }                                        1071         }
1098 out:                                             1072 out:
1099         rcu_read_unlock();                       1073         rcu_read_unlock();
1100         return ret;                              1074         return ret;
1101 }                                                1075 }
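/*
 * [Editor's note] Generic shape of the Spectre-v1 hardening used above:
 * array_index_nospec() (linux/nospec.h) clamps the index to [0, nr) even
 * under branch misspeculation, so a mispredicted bounds check cannot turn
 * the table load into a speculative out-of-bounds read.  Caller holds
 * rcu_read_lock(); lookup_slot() is an illustrative name.
 */
static void *lookup_slot(void __rcu **arr, unsigned int idx, unsigned int nr)
{
	if (idx >= nr)
		return NULL;
	idx = array_index_nospec(idx, nr);	/* architectural no-op */
	return rcu_dereference(arr[idx]);
}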
1102                                                  1076 
1103 static inline void iocb_destroy(struct aio_ki    1077 static inline void iocb_destroy(struct aio_kiocb *iocb)
1104 {                                                1078 {
1105         if (iocb->ki_eventfd)                    1079         if (iocb->ki_eventfd)
1106                 eventfd_ctx_put(iocb->ki_even    1080                 eventfd_ctx_put(iocb->ki_eventfd);
1107         if (iocb->ki_filp)                       1081         if (iocb->ki_filp)
1108                 fput(iocb->ki_filp);             1082                 fput(iocb->ki_filp);
1109         percpu_ref_put(&iocb->ki_ctx->reqs);     1083         percpu_ref_put(&iocb->ki_ctx->reqs);
1110         kmem_cache_free(kiocb_cachep, iocb);     1084         kmem_cache_free(kiocb_cachep, iocb);
1111 }                                                1085 }
1112                                                  1086 
1113 struct aio_waiter {                           << 
1114         struct wait_queue_entry w;            << 
1115         size_t                  min_nr;       << 
1116 };                                            << 
1117                                               << 
1118 /* aio_complete                                  1087 /* aio_complete
1119  *      Called when the io request on the giv    1088  *      Called when the io request on the given iocb is complete.
1120  */                                              1089  */
1121 static void aio_complete(struct aio_kiocb *io    1090 static void aio_complete(struct aio_kiocb *iocb)
1122 {                                                1091 {
1123         struct kioctx   *ctx = iocb->ki_ctx;     1092         struct kioctx   *ctx = iocb->ki_ctx;
1124         struct aio_ring *ring;                   1093         struct aio_ring *ring;
1125         struct io_event *ev_page, *event;        1094         struct io_event *ev_page, *event;
1126         unsigned tail, pos, head, avail;      !! 1095         unsigned tail, pos, head;
1127         unsigned long   flags;                   1096         unsigned long   flags;
1128                                                  1097 
1129         /*                                       1098         /*
1130          * Add a completion event to the ring    1099          * Add a completion event to the ring buffer. Must be done holding
1131          * ctx->completion_lock to prevent ot    1100          * ctx->completion_lock to prevent other code from messing with the tail
1132          * pointer since we might be called f    1101          * pointer since we might be called from irq context.
1133          */                                      1102          */
1134         spin_lock_irqsave(&ctx->completion_lo    1103         spin_lock_irqsave(&ctx->completion_lock, flags);
1135                                                  1104 
1136         tail = ctx->tail;                        1105         tail = ctx->tail;
1137         pos = tail + AIO_EVENTS_OFFSET;          1106         pos = tail + AIO_EVENTS_OFFSET;
1138                                                  1107 
1139         if (++tail >= ctx->nr_events)            1108         if (++tail >= ctx->nr_events)
1140                 tail = 0;                        1109                 tail = 0;
1141                                                  1110 
1142         ev_page = folio_address(ctx->ring_folios[pos / AIO_EVENTS_PER_PAGE]); !! 1111         ev_page = kmap_atomic(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]);
1143         event = ev_page + pos % AIO_EVENTS_PE    1112         event = ev_page + pos % AIO_EVENTS_PER_PAGE;
1144                                                  1113 
1145         *event = iocb->ki_res;                   1114         *event = iocb->ki_res;
1146                                                  1115 
1147         flush_dcache_folio(ctx->ring_folios[pos / AIO_EVENTS_PER_PAGE]); !! 1116         kunmap_atomic(ev_page);
                                                   >> 1117         flush_dcache_page(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]);
1148                                                  1118 
1149         pr_debug("%p[%u]: %p: %p %Lx %Lx %Lx\    1119         pr_debug("%p[%u]: %p: %p %Lx %Lx %Lx\n", ctx, tail, iocb,
1150                  (void __user *)(unsigned lon    1120                  (void __user *)(unsigned long)iocb->ki_res.obj,
1151                  iocb->ki_res.data, iocb->ki_    1121                  iocb->ki_res.data, iocb->ki_res.res, iocb->ki_res.res2);
1152                                                  1122 
1153         /* after flagging the request as done    1123         /* after flagging the request as done, we
1154          * must never even look at it again      1124          * must never even look at it again
1155          */                                      1125          */
1156         smp_wmb();      /* make event visible    1126         smp_wmb();      /* make event visible before updating tail */
1157                                                  1127 
1158         ctx->tail = tail;                        1128         ctx->tail = tail;
1159                                                  1129 
1160         ring = folio_address(ctx->ring_folios[0]); !! 1130         ring = kmap_atomic(ctx->ring_pages[0]);
1161         head = ring->head;                       1131         head = ring->head;
1162         ring->tail = tail;                       1132         ring->tail = tail;
1163         flush_dcache_folio(ctx->ring_folios[0]); !! 1133         kunmap_atomic(ring);
                                                   >> 1134         flush_dcache_page(ctx->ring_pages[0]);
1164                                                  1135 
1165         ctx->completed_events++;                 1136         ctx->completed_events++;
1166         if (ctx->completed_events > 1)           1137         if (ctx->completed_events > 1)
1167                 refill_reqs_available(ctx, he    1138                 refill_reqs_available(ctx, head, tail);
1168                                               << 
1169         avail = tail > head                   << 
1170                 ? tail - head                 << 
1171                 : tail + ctx->nr_events - hea << 
1172         spin_unlock_irqrestore(&ctx->completi    1139         spin_unlock_irqrestore(&ctx->completion_lock, flags);
1173                                                  1140 
1174         pr_debug("added to ring %p at [%u]\n"    1141         pr_debug("added to ring %p at [%u]\n", iocb, tail);
1175                                                  1142 
1176         /*                                       1143         /*
1177          * Check if the user asked us to deli    1144          * Check if the user asked us to deliver the result through an
1178          * eventfd. The eventfd_signal() func    1145          * eventfd. The eventfd_signal() function is safe to be called
1179          * from IRQ context.                     1146          * from IRQ context.
1180          */                                      1147          */
1181         if (iocb->ki_eventfd)                    1148         if (iocb->ki_eventfd)
1182                 eventfd_signal(iocb->ki_eventfd); !! 1149                 eventfd_signal(iocb->ki_eventfd, 1);
1183                                                  1150 
1184         /*                                       1151         /*
1185          * We have to order our ring_info tai    1152          * We have to order our ring_info tail store above and test
1186          * of the wait list below outside the    1153          * of the wait list below outside the wait lock.  This is
1187          * like in wake_up_bit() where cleari    1154          * like in wake_up_bit() where clearing a bit has to be
1188          * ordered with the unlocked test.       1155          * ordered with the unlocked test.
1189          */                                      1156          */
1190         smp_mb();                                1157         smp_mb();
1191                                                  1158 
1192         if (waitqueue_active(&ctx->wait)) {   !! 1159         if (waitqueue_active(&ctx->wait))
1193                 struct aio_waiter *curr, *next; !! 1160                 wake_up(&ctx->wait);
1194                 unsigned long flags;          << 
1195                                               << 
1196                 spin_lock_irqsave(&ctx->wait.lock, flags);  << 
1197                 list_for_each_entry_safe(curr, next, &ctx->wait.head, w.entry)  << 
1198                         if (avail >= curr->min_nr) {  << 
1199                                 wake_up_process(curr->w.private);  << 
1200                                 list_del_init_careful(&curr->w.entry);  << 
1201                         }                     << 
1202                 spin_unlock_irqrestore(&ctx->wait.lock, flags);  << 
1203         }                                     << 
1204 }                                                1161 }
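/*
 * [Editor's note] Runnable userspace sketch of the eventfd delivery path
 * above: IOCB_FLAG_RESFD makes aio_complete() signal an eventfd once the
 * event is in the ring, so a caller can sleep on the eventfd (or poll it)
 * instead of blocking in io_getevents().  Error handling trimmed.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/eventfd.h>
#include <sys/syscall.h>
#include <linux/aio_abi.h>

int main(void)
{
	aio_context_t ctx = 0;
	int efd = eventfd(0, 0);
	int fd = open("/tmp/aio-demo", O_RDWR | O_CREAT | O_TRUNC, 0600);
	static char buf[4096] = "hello";
	struct iocb cb = {
		.aio_data	= 0xfeed,
		.aio_lio_opcode	= IOCB_CMD_PWRITE,
		.aio_fildes	= fd,
		.aio_buf	= (uintptr_t)buf,
		.aio_nbytes	= sizeof(buf),
		.aio_flags	= IOCB_FLAG_RESFD,	/* -> eventfd_signal() */
		.aio_resfd	= efd,
	};
	struct iocb *cbs[] = { &cb };
	struct io_event ev;
	uint64_t n;

	syscall(SYS_io_setup, 8, &ctx);
	syscall(SYS_io_submit, ctx, 1, cbs);
	read(efd, &n, sizeof(n));	/* wakes when the completion lands */
	syscall(SYS_io_getevents, ctx, 1, 1, &ev, NULL);
	printf("data=%#llx res=%lld\n",
	       (unsigned long long)ev.data, (long long)ev.res);
	syscall(SYS_io_destroy, ctx);
	return 0;
}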
1205                                                  1162 
1206 static inline void iocb_put(struct aio_kiocb     1163 static inline void iocb_put(struct aio_kiocb *iocb)
1207 {                                                1164 {
1208         if (refcount_dec_and_test(&iocb->ki_r    1165         if (refcount_dec_and_test(&iocb->ki_refcnt)) {
1209                 aio_complete(iocb);              1166                 aio_complete(iocb);
1210                 iocb_destroy(iocb);              1167                 iocb_destroy(iocb);
1211         }                                        1168         }
1212 }                                                1169 }
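/*
 * [Editor's note] ki_refcnt starts at 2 (see aio_get_req() above): the
 * submission path and the completion path each drop one reference, and
 * only the second iocb_put() publishes the event and frees the iocb,
 * however the two paths interleave.  The bare idiom in C11:
 */
#include <stdatomic.h>

struct xreq {
	atomic_int refs;	/* initialised to 2 */
};

static void xreq_put(struct xreq *req)
{
	/* whoever drops the count to zero completes and frees */
	if (atomic_fetch_sub(&req->refs, 1) == 1) {
		/* complete(req); destroy(req); */
	}
}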
1213                                                  1170 
1214 /* aio_read_events_ring                          1171 /* aio_read_events_ring
1215  *      Pull an event off of the ioctx's even    1172  *      Pull an event off of the ioctx's event ring.  Returns the number of
1216  *      events fetched                           1173  *      events fetched
1217  */                                              1174  */
1218 static long aio_read_events_ring(struct kioct    1175 static long aio_read_events_ring(struct kioctx *ctx,
1219                                  struct io_ev    1176                                  struct io_event __user *event, long nr)
1220 {                                                1177 {
1221         struct aio_ring *ring;                   1178         struct aio_ring *ring;
1222         unsigned head, tail, pos;                1179         unsigned head, tail, pos;
1223         long ret = 0;                            1180         long ret = 0;
1224         int copy_ret;                            1181         int copy_ret;
1225                                                  1182 
1226         /*                                       1183         /*
1227          * The mutex can block and wake us up    1184          * The mutex can block and wake us up and that will cause
1228          * wait_event_interruptible_hrtimeout    1185          * wait_event_interruptible_hrtimeout() to schedule without sleeping
1229          * and repeat. This should be rare en    1186          * and repeat. This should be rare enough that it doesn't cause
1230          * peformance issues. See the comment    1187          * peformance issues. See the comment in read_events() for more detail.
1231          */                                      1188          */
1232         sched_annotate_sleep();                  1189         sched_annotate_sleep();
1233         mutex_lock(&ctx->ring_lock);             1190         mutex_lock(&ctx->ring_lock);
1234                                                  1191 
1235         /* Access to ->ring_folios here is protected by ctx->ring_lock. */ !! 1192         /* Access to ->ring_pages here is protected by ctx->ring_lock. */
1236         ring = folio_address(ctx->ring_folios[0]); !! 1193         ring = kmap_atomic(ctx->ring_pages[0]);
1237         head = ring->head;                       1194         head = ring->head;
1238         tail = ring->tail;                       1195         tail = ring->tail;
                                                   >> 1196         kunmap_atomic(ring);
1239                                                  1197 
1240         /*                                       1198         /*
1241          * Ensure that once we've read the cu    1199          * Ensure that once we've read the current tail pointer, that
1242          * we also see the events that were s    1200          * we also see the events that were stored up to the tail.
1243          */                                      1201          */
1244         smp_rmb();                               1202         smp_rmb();
1245                                                  1203 
1246         pr_debug("h%u t%u m%u\n", head, tail,    1204         pr_debug("h%u t%u m%u\n", head, tail, ctx->nr_events);
1247                                                  1205 
1248         if (head == tail)                        1206         if (head == tail)
1249                 goto out;                        1207                 goto out;
1250                                                  1208 
1251         head %= ctx->nr_events;                  1209         head %= ctx->nr_events;
1252         tail %= ctx->nr_events;                  1210         tail %= ctx->nr_events;
1253                                                  1211 
1254         while (ret < nr) {                       1212         while (ret < nr) {
1255                 long avail;                      1213                 long avail;
1256                 struct io_event *ev;             1214                 struct io_event *ev;
1257                 struct folio *folio;          !! 1215                 struct page *page;
1258                                                  1216 
1259                 avail = (head <= tail ?  tail    1217                 avail = (head <= tail ?  tail : ctx->nr_events) - head;
1260                 if (head == tail)                1218                 if (head == tail)
1261                         break;                   1219                         break;
1262                                                  1220 
1263                 pos = head + AIO_EVENTS_OFFSE    1221                 pos = head + AIO_EVENTS_OFFSET;
1264                 folio = ctx->ring_folios[pos  !! 1222                 page = ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE];
1265                 pos %= AIO_EVENTS_PER_PAGE;      1223                 pos %= AIO_EVENTS_PER_PAGE;
1266                                                  1224 
1267                 avail = min(avail, nr - ret);    1225                 avail = min(avail, nr - ret);
1268                 avail = min_t(long, avail, AI    1226                 avail = min_t(long, avail, AIO_EVENTS_PER_PAGE - pos);
1269                                                  1227 
1270                 ev = folio_address(folio);    !! 1228                 ev = kmap(page);
1271                 copy_ret = copy_to_user(event    1229                 copy_ret = copy_to_user(event + ret, ev + pos,
1272                                         sizeo    1230                                         sizeof(*ev) * avail);
                                                   >> 1231                 kunmap(page);
1273                                                  1232 
1274                 if (unlikely(copy_ret)) {        1233                 if (unlikely(copy_ret)) {
1275                         ret = -EFAULT;           1234                         ret = -EFAULT;
1276                         goto out;                1235                         goto out;
1277                 }                                1236                 }
1278                                                  1237 
1279                 ret += avail;                    1238                 ret += avail;
1280                 head += avail;                   1239                 head += avail;
1281                 head %= ctx->nr_events;          1240                 head %= ctx->nr_events;
1282         }                                        1241         }
1283                                                  1242 
1284         ring = folio_address(ctx->ring_folios !! 1243         ring = kmap_atomic(ctx->ring_pages[0]);
1285         ring->head = head;                       1244         ring->head = head;
1286         flush_dcache_folio(ctx->ring_folios[0 !! 1245         kunmap_atomic(ring);
                                                   >> 1246         flush_dcache_page(ctx->ring_pages[0]);
1287                                                  1247 
1288         pr_debug("%li  h%u t%u\n", ret, head,    1248         pr_debug("%li  h%u t%u\n", ret, head, tail);
1289 out:                                             1249 out:
1290         mutex_unlock(&ctx->ring_lock);           1250         mutex_unlock(&ctx->ring_lock);
1291                                                  1251 
1292         return ret;                              1252         return ret;
1293 }                                                1253 }
1294                                                  1254 
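The wraparound handling above is easy to misread: avail is first clamped to the contiguous run of events before the end of the ring, so a wrapped ring is drained in two passes (the additional per-page clamps are omitted here). A standalone sketch with hypothetical values; nr_events, head and tail are illustrative, not read from a real ring:

#include <stdio.h>

int main(void)
{
	/* nr_events = 128, head = 120, tail = 8: pass one copies slots
	 * 120..127, head wraps to 0, pass two copies slots 0..7. */
	unsigned nr_events = 128, head = 120, tail = 8;

	while (head != tail) {
		unsigned avail = (head <= tail ? tail : nr_events) - head;

		printf("copy %u events starting at slot %u\n", avail, head);
		head = (head + avail) % nr_events;
	}
	return 0;
}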
1295 static bool aio_read_events(struct kioctx *ct    1255 static bool aio_read_events(struct kioctx *ctx, long min_nr, long nr,
1296                             struct io_event _    1256                             struct io_event __user *event, long *i)
1297 {                                                1257 {
1298         long ret = aio_read_events_ring(ctx,     1258         long ret = aio_read_events_ring(ctx, event + *i, nr - *i);
1299                                                  1259 
1300         if (ret > 0)                             1260         if (ret > 0)
1301                 *i += ret;                       1261                 *i += ret;
1302                                                  1262 
1303         if (unlikely(atomic_read(&ctx->dead))    1263         if (unlikely(atomic_read(&ctx->dead)))
1304                 ret = -EINVAL;                   1264                 ret = -EINVAL;
1305                                                  1265 
1306         if (!*i)                                 1266         if (!*i)
1307                 *i = ret;                        1267                 *i = ret;
1308                                                  1268 
1309         return ret < 0 || *i >= min_nr;          1269         return ret < 0 || *i >= min_nr;
1310 }                                                1270 }
1311                                                  1271 
1312 static long read_events(struct kioctx *ctx, l    1272 static long read_events(struct kioctx *ctx, long min_nr, long nr,
1313                         struct io_event __use    1273                         struct io_event __user *event,
1314                         ktime_t until)           1274                         ktime_t until)
1315 {                                                1275 {
1316         struct hrtimer_sleeper  t;            !! 1276         long ret = 0;
1317         struct aio_waiter       w;            << 
1318         long ret = 0, ret2 = 0;               << 
1319                                                  1277 
1320         /*                                       1278         /*
1321          * Note that aio_read_events() is bei    1279          * Note that aio_read_events() is being called as the conditional - i.e.
1322          * we're calling it after prepare_to_    1280          * we're calling it after prepare_to_wait() has set task state to
1323          * TASK_INTERRUPTIBLE.                   1281          * TASK_INTERRUPTIBLE.
1324          *                                       1282          *
1325          * But aio_read_events() can block, a    1283          * But aio_read_events() can block, and if it blocks it's going to flip
1326          * the task state back to TASK_RUNNIN    1284          * the task state back to TASK_RUNNING.
1327          *                                       1285          *
1328          * This should be ok, provided it doe    1286          * This should be ok, provided it doesn't flip the state back to
1329          * TASK_RUNNING and return 0 too much    1287          * TASK_RUNNING and return 0 too much - that causes us to spin. That
1330          * will only happen if the mutex_lock    1288          * will only happen if the mutex_lock() call blocks, and we then find
1331          * the ringbuffer empty. So in practi    1289          * the ringbuffer empty. So in practice we should be ok, but it's
1332          * something to be aware of when touc    1290          * something to be aware of when touching this code.
1333          */                                      1291          */
1334         aio_read_events(ctx, min_nr, nr, even !! 1292         if (until == 0)
1335         if (until == 0 || ret < 0 || ret >= m !! 1293                 aio_read_events(ctx, min_nr, nr, event, &ret);
1336                 return ret;                   !! 1294         else
1337                                               !! 1295                 wait_event_interruptible_hrtimeout(ctx->wait,
1338         hrtimer_init_sleeper_on_stack(&t, CLO !! 1296                                 aio_read_events(ctx, min_nr, nr, event, &ret),
1339         if (until != KTIME_MAX) {             !! 1297                                 until);
1340                 hrtimer_set_expires_range_ns( << 
1341                 hrtimer_sleeper_start_expires << 
1342         }                                     << 
1343                                               << 
1344         init_wait(&w.w);                      << 
1345                                               << 
1346         while (1) {                           << 
1347                 unsigned long nr_got = ret;   << 
1348                                               << 
1349                 w.min_nr = min_nr - ret;      << 
1350                                               << 
1351                 ret2 = prepare_to_wait_event( << 
1352                 if (!ret2 && !t.task)         << 
1353                         ret2 = -ETIME;        << 
1354                                               << 
1355                 if (aio_read_events(ctx, min_ << 
1356                         break;                << 
1357                                               << 
1358                 if (nr_got == ret)            << 
1359                         schedule();           << 
1360         }                                     << 
1361                                               << 
1362         finish_wait(&ctx->wait, &w.w);        << 
1363         hrtimer_cancel(&t.timer);             << 
1364         destroy_hrtimer_on_stack(&t.timer);   << 
1365                                               << 
1366         return ret;                              1298         return ret;
1367 }                                                1299 }
1368                                                  1300 
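Userspace drives read_events() through io_getevents(2): min_nr bounds how long the caller may sleep, nr bounds how many events are copied out, and the timeout becomes the until argument above. A hedged sketch using the raw syscall, since glibc provides no wrapper; ctx is assumed to come from a prior io_setup(2) and reap_some is a hypothetical helper:

#include <linux/aio_abi.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <time.h>

/* Wait up to 100ms for at least one completion, reaping at most eight. */
static long reap_some(aio_context_t ctx, struct io_event *evs)
{
	struct timespec ts = { .tv_sec = 0, .tv_nsec = 100 * 1000 * 1000 };

	return syscall(SYS_io_getevents, ctx, 1 /* min_nr */, 8 /* nr */,
		       evs, &ts);
}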
1369 /* sys_io_setup:                                 1301 /* sys_io_setup:
1370  *      Create an aio_context capable of rece    1302  *      Create an aio_context capable of receiving at least nr_events.
1371  *      ctxp must not point to an aio_context    1303  *      ctxp must not point to an aio_context that already exists, and
1372  *      must be initialized to 0 prior to the    1304  *      must be initialized to 0 prior to the call.  On successful
1373  *      creation of the aio_context, *ctxp is    1305  *      creation of the aio_context, *ctxp is filled in with the resulting 
1374  *      handle.  May fail with -EINVAL if *ct    1306  *      handle.  May fail with -EINVAL if *ctxp is not initialized,
1375  *      or if the specified nr_events exceeds    1307  *      or if the specified nr_events exceeds internal limits.  May fail
1376  *      with -EAGAIN if the specified nr_even    1308  *      with -EAGAIN if the specified nr_events exceeds the user's limit 
1377  *      of available events.  May fail with -    1309  *      of available events.  May fail with -ENOMEM if insufficient kernel
1378  *      resources are available.  May fail wi    1310  *      resources are available.  May fail with -EFAULT if an invalid
1379  *      pointer is passed for ctxp.  Will fai    1311  *      pointer is passed for ctxp.  Will fail with -ENOSYS if not
1380  *      implemented.                             1312  *      implemented.
1381  */                                              1313  */
1382 SYSCALL_DEFINE2(io_setup, unsigned, nr_events    1314 SYSCALL_DEFINE2(io_setup, unsigned, nr_events, aio_context_t __user *, ctxp)
1383 {                                                1315 {
1384         struct kioctx *ioctx = NULL;             1316         struct kioctx *ioctx = NULL;
1385         unsigned long ctx;                       1317         unsigned long ctx;
1386         long ret;                                1318         long ret;
1387                                                  1319 
1388         ret = get_user(ctx, ctxp);               1320         ret = get_user(ctx, ctxp);
1389         if (unlikely(ret))                       1321         if (unlikely(ret))
1390                 goto out;                        1322                 goto out;
1391                                                  1323 
1392         ret = -EINVAL;                           1324         ret = -EINVAL;
1393         if (unlikely(ctx || nr_events == 0))     1325         if (unlikely(ctx || nr_events == 0)) {
1394                 pr_debug("EINVAL: ctx %lu nr_    1326                 pr_debug("EINVAL: ctx %lu nr_events %u\n",
1395                          ctx, nr_events);        1327                          ctx, nr_events);
1396                 goto out;                        1328                 goto out;
1397         }                                        1329         }
1398                                                  1330 
1399         ioctx = ioctx_alloc(nr_events);          1331         ioctx = ioctx_alloc(nr_events);
1400         ret = PTR_ERR(ioctx);                    1332         ret = PTR_ERR(ioctx);
1401         if (!IS_ERR(ioctx)) {                    1333         if (!IS_ERR(ioctx)) {
1402                 ret = put_user(ioctx->user_id    1334                 ret = put_user(ioctx->user_id, ctxp);
1403                 if (ret)                         1335                 if (ret)
1404                         kill_ioctx(current->m    1336                         kill_ioctx(current->mm, ioctx, NULL);
1405                 percpu_ref_put(&ioctx->users)    1337                 percpu_ref_put(&ioctx->users);
1406         }                                        1338         }
1407                                                  1339 
1408 out:                                             1340 out:
1409         return ret;                              1341         return ret;
1410 }                                                1342 }
1411                                                  1343 
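A minimal userspace sketch of the contract documented above: *ctxp must be zero going in, and the returned handle later feeds io_destroy(2). Raw syscalls are used throughout because glibc does not wrap the native AIO calls:

#include <linux/aio_abi.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdio.h>

int main(void)
{
	aio_context_t ctx = 0;	/* must be zeroed, else io_setup() returns -EINVAL */

	if (syscall(SYS_io_setup, 128, &ctx) < 0) {
		perror("io_setup");
		return 1;
	}
	/* ... submit with io_submit(2), reap with io_getevents(2) ... */
	syscall(SYS_io_destroy, ctx);
	return 0;
}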
1412 #ifdef CONFIG_COMPAT                             1344 #ifdef CONFIG_COMPAT
1413 COMPAT_SYSCALL_DEFINE2(io_setup, unsigned, nr    1345 COMPAT_SYSCALL_DEFINE2(io_setup, unsigned, nr_events, u32 __user *, ctx32p)
1414 {                                                1346 {
1415         struct kioctx *ioctx = NULL;             1347         struct kioctx *ioctx = NULL;
1416         unsigned long ctx;                       1348         unsigned long ctx;
1417         long ret;                                1349         long ret;
1418                                                  1350 
1419         ret = get_user(ctx, ctx32p);             1351         ret = get_user(ctx, ctx32p);
1420         if (unlikely(ret))                       1352         if (unlikely(ret))
1421                 goto out;                        1353                 goto out;
1422                                                  1354 
1423         ret = -EINVAL;                           1355         ret = -EINVAL;
1424         if (unlikely(ctx || nr_events == 0))     1356         if (unlikely(ctx || nr_events == 0)) {
1425                 pr_debug("EINVAL: ctx %lu nr_    1357                 pr_debug("EINVAL: ctx %lu nr_events %u\n",
1426                          ctx, nr_events);        1358                          ctx, nr_events);
1427                 goto out;                        1359                 goto out;
1428         }                                        1360         }
1429                                                  1361 
1430         ioctx = ioctx_alloc(nr_events);          1362         ioctx = ioctx_alloc(nr_events);
1431         ret = PTR_ERR(ioctx);                    1363         ret = PTR_ERR(ioctx);
1432         if (!IS_ERR(ioctx)) {                    1364         if (!IS_ERR(ioctx)) {
1433                 /* truncating is ok because i    1365                 /* truncating is ok because it's a user address */
1434                 ret = put_user((u32)ioctx->us    1366                 ret = put_user((u32)ioctx->user_id, ctx32p);
1435                 if (ret)                         1367                 if (ret)
1436                         kill_ioctx(current->m    1368                         kill_ioctx(current->mm, ioctx, NULL);
1437                 percpu_ref_put(&ioctx->users)    1369                 percpu_ref_put(&ioctx->users);
1438         }                                        1370         }
1439                                                  1371 
1440 out:                                             1372 out:
1441         return ret;                              1373         return ret;
1442 }                                                1374 }
1443 #endif                                           1375 #endif
1444                                                  1376 
1445 /* sys_io_destroy:                               1377 /* sys_io_destroy:
1446  *      Destroy the aio_context specified.  M    1378  *      Destroy the aio_context specified.  May cancel any outstanding 
1447  *      AIOs and block on completion.  Will f    1379  *      AIOs and block on completion.  Will fail with -ENOSYS if not
1448  *      implemented.  May fail with -EINVAL i    1380  *      implemented.  May fail with -EINVAL if the context pointed to
1449  *      is invalid.                              1381  *      is invalid.
1450  */                                              1382  */
1451 SYSCALL_DEFINE1(io_destroy, aio_context_t, ct    1383 SYSCALL_DEFINE1(io_destroy, aio_context_t, ctx)
1452 {                                                1384 {
1453         struct kioctx *ioctx = lookup_ioctx(c    1385         struct kioctx *ioctx = lookup_ioctx(ctx);
1454         if (likely(NULL != ioctx)) {             1386         if (likely(NULL != ioctx)) {
1455                 struct ctx_rq_wait wait;         1387                 struct ctx_rq_wait wait;
1456                 int ret;                         1388                 int ret;
1457                                                  1389 
1458                 init_completion(&wait.comp);     1390                 init_completion(&wait.comp);
1459                 atomic_set(&wait.count, 1);      1391                 atomic_set(&wait.count, 1);
1460                                                  1392 
1461                 /* Pass requests_done to kill    1393                 /* Pass requests_done to kill_ioctx() where it can be set
1462                  * in a thread-safe way. If w    1394                  * in a thread-safe way. If we try to set it here then we have
1463                  * a race condition if two io    1395                  * a race condition if two io_destroy() are called simultaneously.
1464                  */                              1396                  */
1465                 ret = kill_ioctx(current->mm,    1397                 ret = kill_ioctx(current->mm, ioctx, &wait);
1466                 percpu_ref_put(&ioctx->users)    1398                 percpu_ref_put(&ioctx->users);
1467                                                  1399 
1468                 /* Wait until all IO for the     1400                 /* Wait until all IO for the context is done. Otherwise the kernel
1469                  * keeps using user-space buf    1401                  * keeps using user-space buffers even if the user thinks the context
1470                  * is destroyed.                 1402                  * is destroyed.
1471                  */                              1403                  */
1472                 if (!ret)                        1404                 if (!ret)
1473                         wait_for_completion(&    1405                         wait_for_completion(&wait.comp);
1474                                                  1406 
1475                 return ret;                      1407                 return ret;
1476         }                                        1408         }
1477         pr_debug("EINVAL: invalid context id\    1409         pr_debug("EINVAL: invalid context id\n");
1478         return -EINVAL;                          1410         return -EINVAL;
1479 }                                                1411 }
1480                                                  1412 
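The blocking described above is visible from userspace as io_destroy(2) not returning until in-flight requests have drained. A small sketch; destroy_ctx is a hypothetical helper and ctx comes from io_setup(2):

#include <linux/aio_abi.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdio.h>

static int destroy_ctx(aio_context_t ctx)
{
	if (syscall(SYS_io_destroy, ctx) < 0) {
		perror("io_destroy");	/* EINVAL for a bogus context id */
		return -1;
	}
	return 0;	/* only returns after outstanding AIO has completed */
}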
1481 static void aio_remove_iocb(struct aio_kiocb     1413 static void aio_remove_iocb(struct aio_kiocb *iocb)
1482 {                                                1414 {
1483         struct kioctx *ctx = iocb->ki_ctx;       1415         struct kioctx *ctx = iocb->ki_ctx;
1484         unsigned long flags;                     1416         unsigned long flags;
1485                                                  1417 
1486         spin_lock_irqsave(&ctx->ctx_lock, fla    1418         spin_lock_irqsave(&ctx->ctx_lock, flags);
1487         list_del(&iocb->ki_list);                1419         list_del(&iocb->ki_list);
1488         spin_unlock_irqrestore(&ctx->ctx_lock    1420         spin_unlock_irqrestore(&ctx->ctx_lock, flags);
1489 }                                                1421 }
1490                                                  1422 
1491 static void aio_complete_rw(struct kiocb *kio !! 1423 static void aio_complete_rw(struct kiocb *kiocb, long res, long res2)
1492 {                                                1424 {
1493         struct aio_kiocb *iocb = container_of    1425         struct aio_kiocb *iocb = container_of(kiocb, struct aio_kiocb, rw);
1494                                                  1426 
1495         if (!list_empty_careful(&iocb->ki_lis    1427         if (!list_empty_careful(&iocb->ki_list))
1496                 aio_remove_iocb(iocb);           1428                 aio_remove_iocb(iocb);
1497                                                  1429 
1498         if (kiocb->ki_flags & IOCB_WRITE) {      1430         if (kiocb->ki_flags & IOCB_WRITE) {
1499                 struct inode *inode = file_in    1431                 struct inode *inode = file_inode(kiocb->ki_filp);
1500                                                  1432 
                                                   >> 1433                 /*
                                                   >> 1434                  * Tell lockdep we inherited freeze protection from submission
                                                   >> 1435                  * thread.
                                                   >> 1436                  */
1501                 if (S_ISREG(inode->i_mode))      1437                 if (S_ISREG(inode->i_mode))
1502                         kiocb_end_write(kiocb !! 1438                         __sb_writers_acquired(inode->i_sb, SB_FREEZE_WRITE);
                                                   >> 1439                 file_end_write(kiocb->ki_filp);
1503         }                                        1440         }
1504                                                  1441 
1505         iocb->ki_res.res = res;                  1442         iocb->ki_res.res = res;
1506         iocb->ki_res.res2 = 0;                !! 1443         iocb->ki_res.res2 = res2;
1507         iocb_put(iocb);                          1444         iocb_put(iocb);
1508 }                                                1445 }
1509                                                  1446 
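The value stored into ki_res.res above surfaces in userspace as io_event.res: a byte count on success or a negative errno. A sketch of checking it after reaping one event; reap_one is a hypothetical helper and ctx is assumed set up:

#include <linux/aio_abi.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <string.h>
#include <stdio.h>

static void reap_one(aio_context_t ctx)
{
	struct io_event ev;

	if (syscall(SYS_io_getevents, ctx, 1, 1, &ev, NULL) == 1) {
		if (ev.res < 0)	/* negative errno, e.g. -EINTR from aio_rw_done() */
			fprintf(stderr, "aio: %s\n", strerror((int)-ev.res));
		else
			printf("transferred %lld bytes\n", (long long)ev.res);
	}
}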
1510 static int aio_prep_rw(struct kiocb *req, con !! 1447 static int aio_prep_rw(struct kiocb *req, const struct iocb *iocb)
1511 {                                                1448 {
1512         int ret;                                 1449         int ret;
1513                                                  1450 
1514         req->ki_complete = aio_complete_rw;      1451         req->ki_complete = aio_complete_rw;
1515         req->private = NULL;                     1452         req->private = NULL;
1516         req->ki_pos = iocb->aio_offset;          1453         req->ki_pos = iocb->aio_offset;
1517         req->ki_flags = req->ki_filp->f_iocb_ !! 1454         req->ki_flags = iocb_flags(req->ki_filp);
1518         if (iocb->aio_flags & IOCB_FLAG_RESFD    1455         if (iocb->aio_flags & IOCB_FLAG_RESFD)
1519                 req->ki_flags |= IOCB_EVENTFD    1456                 req->ki_flags |= IOCB_EVENTFD;
                                                   >> 1457         req->ki_hint = ki_hint_validate(file_write_hint(req->ki_filp));
1520         if (iocb->aio_flags & IOCB_FLAG_IOPRI    1458         if (iocb->aio_flags & IOCB_FLAG_IOPRIO) {
1521                 /*                               1459                 /*
1522                  * If the IOCB_FLAG_IOPRIO fl    1460                  * If the IOCB_FLAG_IOPRIO flag of aio_flags is set, then
1523                  * aio_reqprio is interpreted    1461                  * aio_reqprio is interpreted as an I/O scheduling
1524                  * class and priority.           1462                  * class and priority.
1525                  */                              1463                  */
1526                 ret = ioprio_check_cap(iocb->    1464                 ret = ioprio_check_cap(iocb->aio_reqprio);
1527                 if (ret) {                       1465                 if (ret) {
1528                         pr_debug("aio ioprio     1466                         pr_debug("aio ioprio check cap error: %d\n", ret);
1529                         return ret;              1467                         return ret;
1530                 }                                1468                 }
1531                                                  1469 
1532                 req->ki_ioprio = iocb->aio_re    1470                 req->ki_ioprio = iocb->aio_reqprio;
1533         } else                                   1471         } else
1534                 req->ki_ioprio = get_current_    1472                 req->ki_ioprio = get_current_ioprio();
1535                                                  1473 
1536         ret = kiocb_set_rw_flags(req, iocb->a !! 1474         ret = kiocb_set_rw_flags(req, iocb->aio_rw_flags);
1537         if (unlikely(ret))                       1475         if (unlikely(ret))
1538                 return ret;                      1476                 return ret;
1539                                                  1477 
1540         req->ki_flags &= ~IOCB_HIPRI; /* no o    1478         req->ki_flags &= ~IOCB_HIPRI; /* no one is going to poll for this I/O */
1541         return 0;                                1479         return 0;
1542 }                                                1480 }
1543                                                  1481 
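The IOCB_FLAG_IOPRIO branch above is driven by filling aio_reqprio at submission time. A sketch, assuming the uapi linux/ioprio.h is available for the priority macros; submit_prio_read is hypothetical and fd/buf are placeholders:

#include <linux/aio_abi.h>
#include <linux/ioprio.h>
#include <sys/syscall.h>
#include <stdint.h>
#include <unistd.h>

/* Submit one read at best-effort priority 4; without the flag set,
 * aio_prep_rw() falls back to get_current_ioprio(). */
static long submit_prio_read(aio_context_t ctx, int fd, void *buf, size_t len)
{
	struct iocb cb = {
		.aio_fildes	= fd,
		.aio_lio_opcode	= IOCB_CMD_PREAD,
		.aio_buf	= (uint64_t)(uintptr_t)buf,
		.aio_nbytes	= len,
		.aio_flags	= IOCB_FLAG_IOPRIO,
		.aio_reqprio	= IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 4),
	};
	struct iocb *cbs[1] = { &cb };

	return syscall(SYS_io_submit, ctx, 1, cbs);
}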
1544 static ssize_t aio_setup_rw(int rw, const str    1482 static ssize_t aio_setup_rw(int rw, const struct iocb *iocb,
1545                 struct iovec **iovec, bool ve    1483                 struct iovec **iovec, bool vectored, bool compat,
1546                 struct iov_iter *iter)           1484                 struct iov_iter *iter)
1547 {                                                1485 {
1548         void __user *buf = (void __user *)(ui    1486         void __user *buf = (void __user *)(uintptr_t)iocb->aio_buf;
1549         size_t len = iocb->aio_nbytes;           1487         size_t len = iocb->aio_nbytes;
1550                                                  1488 
1551         if (!vectored) {                         1489         if (!vectored) {
1552                 ssize_t ret = import_ubuf(rw, !! 1490                 ssize_t ret = import_single_range(rw, buf, len, *iovec, iter);
1553                 *iovec = NULL;                   1491                 *iovec = NULL;
1554                 return ret;                      1492                 return ret;
1555         }                                        1493         }
1556                                                  1494 
1557         return __import_iovec(rw, buf, len, U    1495         return __import_iovec(rw, buf, len, UIO_FASTIOV, iovec, iter, compat);
1558 }                                                1496 }
1559                                                  1497 
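For the vectored opcodes, aio_setup_rw() reinterprets the (aio_buf, aio_nbytes) pair as an iovec array and its element count rather than a buffer and byte length. A sketch; submit_readv is hypothetical and ctx/fd/a/b are assumed:

#include <linux/aio_abi.h>
#include <sys/syscall.h>
#include <sys/uio.h>
#include <stdint.h>
#include <unistd.h>

static long submit_readv(aio_context_t ctx, int fd, void *a, void *b)
{
	/* The iovec array itself is copied at submission (__import_iovec()
	 * above); only the data buffers must outlive the request. */
	struct iovec iov[2] = {
		{ .iov_base = a, .iov_len = 512 },
		{ .iov_base = b, .iov_len = 512 },
	};
	struct iocb cb = {
		.aio_fildes	= fd,
		.aio_lio_opcode	= IOCB_CMD_PREADV,
		.aio_buf	= (uint64_t)(uintptr_t)iov,
		.aio_nbytes	= 2,	/* iovec count, not a byte count */
	};
	struct iocb *cbs[1] = { &cb };

	return syscall(SYS_io_submit, ctx, 1, cbs);
}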
1560 static inline void aio_rw_done(struct kiocb *    1498 static inline void aio_rw_done(struct kiocb *req, ssize_t ret)
1561 {                                                1499 {
1562         switch (ret) {                           1500         switch (ret) {
1563         case -EIOCBQUEUED:                       1501         case -EIOCBQUEUED:
1564                 break;                           1502                 break;
1565         case -ERESTARTSYS:                       1503         case -ERESTARTSYS:
1566         case -ERESTARTNOINTR:                    1504         case -ERESTARTNOINTR:
1567         case -ERESTARTNOHAND:                    1505         case -ERESTARTNOHAND:
1568         case -ERESTART_RESTARTBLOCK:             1506         case -ERESTART_RESTARTBLOCK:
1569                 /*                               1507                 /*
1570                  * There's no easy way to res    1508                  * There's no easy way to restart the syscall since other AIOs
1571                  * may already be running. Ju    1509                  * may already be running. Just fail this IO with EINTR.
1572                  */                              1510                  */
1573                 ret = -EINTR;                    1511                 ret = -EINTR;
1574                 fallthrough;                     1512                 fallthrough;
1575         default:                                 1513         default:
1576                 req->ki_complete(req, ret);   !! 1514                 req->ki_complete(req, ret, 0);
1577         }                                        1515         }
1578 }                                                1516 }
1579                                                  1517 
1580 static int aio_read(struct kiocb *req, const     1518 static int aio_read(struct kiocb *req, const struct iocb *iocb,
1581                         bool vectored, bool c    1519                         bool vectored, bool compat)
1582 {                                                1520 {
1583         struct iovec inline_vecs[UIO_FASTIOV]    1521         struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
1584         struct iov_iter iter;                    1522         struct iov_iter iter;
1585         struct file *file;                       1523         struct file *file;
1586         int ret;                                 1524         int ret;
1587                                                  1525 
1588         ret = aio_prep_rw(req, iocb, READ);   !! 1526         ret = aio_prep_rw(req, iocb);
1589         if (ret)                                 1527         if (ret)
1590                 return ret;                      1528                 return ret;
1591         file = req->ki_filp;                     1529         file = req->ki_filp;
1592         if (unlikely(!(file->f_mode & FMODE_R    1530         if (unlikely(!(file->f_mode & FMODE_READ)))
1593                 return -EBADF;                   1531                 return -EBADF;
                                                   >> 1532         ret = -EINVAL;
1594         if (unlikely(!file->f_op->read_iter))    1533         if (unlikely(!file->f_op->read_iter))
1595                 return -EINVAL;                  1534                 return -EINVAL;
1596                                                  1535 
1597         ret = aio_setup_rw(ITER_DEST, iocb, & !! 1536         ret = aio_setup_rw(READ, iocb, &iovec, vectored, compat, &iter);
1598         if (ret < 0)                             1537         if (ret < 0)
1599                 return ret;                      1538                 return ret;
1600         ret = rw_verify_area(READ, file, &req    1539         ret = rw_verify_area(READ, file, &req->ki_pos, iov_iter_count(&iter));
1601         if (!ret)                                1540         if (!ret)
1602                 aio_rw_done(req, file->f_op-> !! 1541                 aio_rw_done(req, call_read_iter(file, req, &iter));
1603         kfree(iovec);                            1542         kfree(iovec);
1604         return ret;                              1543         return ret;
1605 }                                                1544 }
1606                                                  1545 
1607 static int aio_write(struct kiocb *req, const    1546 static int aio_write(struct kiocb *req, const struct iocb *iocb,
1608                          bool vectored, bool     1547                          bool vectored, bool compat)
1609 {                                                1548 {
1610         struct iovec inline_vecs[UIO_FASTIOV]    1549         struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
1611         struct iov_iter iter;                    1550         struct iov_iter iter;
1612         struct file *file;                       1551         struct file *file;
1613         int ret;                                 1552         int ret;
1614                                                  1553 
1615         ret = aio_prep_rw(req, iocb, WRITE);  !! 1554         ret = aio_prep_rw(req, iocb);
1616         if (ret)                                 1555         if (ret)
1617                 return ret;                      1556                 return ret;
1618         file = req->ki_filp;                     1557         file = req->ki_filp;
1619                                                  1558 
1620         if (unlikely(!(file->f_mode & FMODE_W    1559         if (unlikely(!(file->f_mode & FMODE_WRITE)))
1621                 return -EBADF;                   1560                 return -EBADF;
1622         if (unlikely(!file->f_op->write_iter)    1561         if (unlikely(!file->f_op->write_iter))
1623                 return -EINVAL;                  1562                 return -EINVAL;
1624                                                  1563 
1625         ret = aio_setup_rw(ITER_SOURCE, iocb, !! 1564         ret = aio_setup_rw(WRITE, iocb, &iovec, vectored, compat, &iter);
1626         if (ret < 0)                             1565         if (ret < 0)
1627                 return ret;                      1566                 return ret;
1628         ret = rw_verify_area(WRITE, file, &re    1567         ret = rw_verify_area(WRITE, file, &req->ki_pos, iov_iter_count(&iter));
1629         if (!ret) {                              1568         if (!ret) {
1630                 if (S_ISREG(file_inode(file)- !! 1569                 /*
1631                         kiocb_start_write(req !! 1570                  * Open-code file_start_write here to grab freeze protection,
                                                   >> 1571                  * which will be released by another thread in
                                                   >> 1572                  * aio_complete_rw().  Fool lockdep by telling it the lock got
                                                   >> 1573                  * released so that it doesn't complain about the held lock when
                                                   >> 1574                  * we return to userspace.
                                                   >> 1575                  */
                                                   >> 1576                 if (S_ISREG(file_inode(file)->i_mode)) {
                                                   >> 1577                         sb_start_write(file_inode(file)->i_sb);
                                                   >> 1578                         __sb_writers_release(file_inode(file)->i_sb, SB_FREEZE_WRITE);
                                                   >> 1579                 }
1632                 req->ki_flags |= IOCB_WRITE;     1580                 req->ki_flags |= IOCB_WRITE;
1633                 aio_rw_done(req, file->f_op-> !! 1581                 aio_rw_done(req, call_write_iter(file, req, &iter));
1634         }                                        1582         }
1635         kfree(iovec);                            1583         kfree(iovec);
1636         return ret;                              1584         return ret;
1637 }                                                1585 }
1638                                                  1586 
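The aio_rw_flags consumed by kiocb_set_rw_flags() in aio_prep_rw() take the RWF_* constants from the uapi linux/fs.h; RWF_DSYNC, for instance, gives a single write O_DSYNC semantics. A sketch; submit_dsync_write is hypothetical and ctx/fd/buf are assumed:

#include <linux/aio_abi.h>
#include <linux/fs.h>		/* RWF_DSYNC */
#include <sys/syscall.h>
#include <stdint.h>
#include <unistd.h>

static long submit_dsync_write(aio_context_t ctx, int fd, void *buf, size_t len)
{
	struct iocb cb = {
		.aio_fildes	= fd,
		.aio_lio_opcode	= IOCB_CMD_PWRITE,
		.aio_buf	= (uint64_t)(uintptr_t)buf,
		.aio_nbytes	= len,
		.aio_offset	= 0,
		.aio_rw_flags	= RWF_DSYNC,	/* per-I/O O_DSYNC semantics */
	};
	struct iocb *cbs[1] = { &cb };

	return syscall(SYS_io_submit, ctx, 1, cbs);
}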
1639 static void aio_fsync_work(struct work_struct    1587 static void aio_fsync_work(struct work_struct *work)
1640 {                                                1588 {
1641         struct aio_kiocb *iocb = container_of    1589         struct aio_kiocb *iocb = container_of(work, struct aio_kiocb, fsync.work);
1642         const struct cred *old_cred = overrid    1590         const struct cred *old_cred = override_creds(iocb->fsync.creds);
1643                                                  1591 
1644         iocb->ki_res.res = vfs_fsync(iocb->fs    1592         iocb->ki_res.res = vfs_fsync(iocb->fsync.file, iocb->fsync.datasync);
1645         revert_creds(old_cred);                  1593         revert_creds(old_cred);
1646         put_cred(iocb->fsync.creds);             1594         put_cred(iocb->fsync.creds);
1647         iocb_put(iocb);                          1595         iocb_put(iocb);
1648 }                                                1596 }
1649                                                  1597 
1650 static int aio_fsync(struct fsync_iocb *req,     1598 static int aio_fsync(struct fsync_iocb *req, const struct iocb *iocb,
1651                      bool datasync)              1599                      bool datasync)
1652 {                                                1600 {
1653         if (unlikely(iocb->aio_buf || iocb->a    1601         if (unlikely(iocb->aio_buf || iocb->aio_offset || iocb->aio_nbytes ||
1654                         iocb->aio_rw_flags))     1602                         iocb->aio_rw_flags))
1655                 return -EINVAL;                  1603                 return -EINVAL;
1656                                                  1604 
1657         if (unlikely(!req->file->f_op->fsync)    1605         if (unlikely(!req->file->f_op->fsync))
1658                 return -EINVAL;                  1606                 return -EINVAL;
1659                                                  1607 
1660         req->creds = prepare_creds();            1608         req->creds = prepare_creds();
1661         if (!req->creds)                         1609         if (!req->creds)
1662                 return -ENOMEM;                  1610                 return -ENOMEM;
1663                                                  1611 
1664         req->datasync = datasync;                1612         req->datasync = datasync;
1665         INIT_WORK(&req->work, aio_fsync_work)    1613         INIT_WORK(&req->work, aio_fsync_work);
1666         schedule_work(&req->work);               1614         schedule_work(&req->work);
1667         return 0;                                1615         return 0;
1668 }                                                1616 }
1669                                                  1617 
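Note how strict the validation above is: every field other than the fd and opcode must be zero. A sketch of queueing an asynchronous fsync, which the kernel then runs from a workqueue via aio_fsync_work(); submit_fsync is a hypothetical helper:

#include <linux/aio_abi.h>
#include <sys/syscall.h>
#include <unistd.h>

static long submit_fsync(aio_context_t ctx, int fd, int datasync)
{
	struct iocb cb = {
		.aio_fildes	= fd,
		/* all other fields stay zero, as aio_fsync() requires */
		.aio_lio_opcode	= datasync ? IOCB_CMD_FDSYNC : IOCB_CMD_FSYNC,
	};
	struct iocb *cbs[1] = { &cb };

	return syscall(SYS_io_submit, ctx, 1, cbs);
}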
1670 static void aio_poll_put_work(struct work_str    1618 static void aio_poll_put_work(struct work_struct *work)
1671 {                                                1619 {
1672         struct poll_iocb *req = container_of(    1620         struct poll_iocb *req = container_of(work, struct poll_iocb, work);
1673         struct aio_kiocb *iocb = container_of    1621         struct aio_kiocb *iocb = container_of(req, struct aio_kiocb, poll);
1674                                                  1622 
1675         iocb_put(iocb);                          1623         iocb_put(iocb);
1676 }                                                1624 }
1677                                                  1625 
1678 /*                                            << 
1679  * Safely lock the waitqueue which the reques << 
1680  * case where the ->poll() provider decides t << 
1681  *                                            << 
1682  * Returns true on success, meaning that req- << 
1683  * is on req->head, and an RCU read lock was  << 
1684  * request was already removed from its waitq << 
1685  */                                           << 
1686 static bool poll_iocb_lock_wq(struct poll_ioc << 
1687 {                                             << 
1688         wait_queue_head_t *head;              << 
1689                                               << 
1690         /*                                    << 
1691          * While we hold the waitqueue lock a << 
1692          * wake_up_pollfree() will wait for u << 
1693          * lock in the first place can race w << 
1694          *                                    << 
1695          * We solve this as eventpoll does: b << 
1696          * all users of wake_up_pollfree() wi << 
1697          * we enter rcu_read_lock() and see t << 
1698          * non-NULL, we can then lock it with << 
1699          * under us, then check whether the r << 
1700          *                                    << 
1701          * Keep holding rcu_read_lock() as lo << 
1702          * case the caller deletes the entry  << 
1703          * In that case, only RCU prevents th << 
1704          */                                   << 
1705         rcu_read_lock();                      << 
1706         head = smp_load_acquire(&req->head);  << 
1707         if (head) {                           << 
1708                 spin_lock(&head->lock);       << 
1709                 if (!list_empty(&req->wait.en << 
1710                         return true;          << 
1711                 spin_unlock(&head->lock);     << 
1712         }                                     << 
1713         rcu_read_unlock();                    << 
1714         return false;                         << 
1715 }                                             << 
1716                                               << 
1717 static void poll_iocb_unlock_wq(struct poll_i << 
1718 {                                             << 
1719         spin_unlock(&req->head->lock);        << 
1720         rcu_read_unlock();                    << 
1721 }                                             << 
1722                                               << 
1723 static void aio_poll_complete_work(struct wor    1626 static void aio_poll_complete_work(struct work_struct *work)
1724 {                                                1627 {
1725         struct poll_iocb *req = container_of(    1628         struct poll_iocb *req = container_of(work, struct poll_iocb, work);
1726         struct aio_kiocb *iocb = container_of    1629         struct aio_kiocb *iocb = container_of(req, struct aio_kiocb, poll);
1727         struct poll_table_struct pt = { ._key    1630         struct poll_table_struct pt = { ._key = req->events };
1728         struct kioctx *ctx = iocb->ki_ctx;       1631         struct kioctx *ctx = iocb->ki_ctx;
1729         __poll_t mask = 0;                       1632         __poll_t mask = 0;
1730                                                  1633 
1731         if (!READ_ONCE(req->cancelled))          1634         if (!READ_ONCE(req->cancelled))
1732                 mask = vfs_poll(req->file, &p    1635                 mask = vfs_poll(req->file, &pt) & req->events;
1733                                                  1636 
1734         /*                                       1637         /*
1735          * Note that ->ki_cancel callers also    1638          * Note that ->ki_cancel callers also delete iocb from active_reqs after
1736          * calling ->ki_cancel.  We need the     1639          * calling ->ki_cancel.  We need the ctx_lock roundtrip here to
1737          * synchronize with them.  In the can    1640          * synchronize with them.  In the cancellation case the list_del_init
1738          * itself is not actually needed, but    1641          * itself is not actually needed, but harmless so we keep it in to
1739          * avoid further branches in the fast    1642          * avoid further branches in the fast path.
1740          */                                      1643          */
1741         spin_lock_irq(&ctx->ctx_lock);           1644         spin_lock_irq(&ctx->ctx_lock);
1742         if (poll_iocb_lock_wq(req)) {         !! 1645         if (!mask && !READ_ONCE(req->cancelled)) {
1743                 if (!mask && !READ_ONCE(req-> !! 1646                 add_wait_queue(req->head, &req->wait);
1744                         /*                    !! 1647                 spin_unlock_irq(&ctx->ctx_lock);
1745                          * The request isn't  !! 1648                 return;
1746                          * Reschedule complet !! 1649         }
1747                          */                   << 
1748                         if (req->work_need_re << 
1749                                 schedule_work << 
1750                                 req->work_nee << 
1751                         } else {              << 
1752                                 req->work_sch << 
1753                         }                     << 
1754                         poll_iocb_unlock_wq(r << 
1755                         spin_unlock_irq(&ctx- << 
1756                         return;               << 
1757                 }                             << 
1758                 list_del_init(&req->wait.entr << 
1759                 poll_iocb_unlock_wq(req);     << 
1760         } /* else, POLLFREE has freed the wai << 
1761         list_del_init(&iocb->ki_list);           1650         list_del_init(&iocb->ki_list);
1762         iocb->ki_res.res = mangle_poll(mask);    1651         iocb->ki_res.res = mangle_poll(mask);
                                                   >> 1652         req->done = true;
1763         spin_unlock_irq(&ctx->ctx_lock);         1653         spin_unlock_irq(&ctx->ctx_lock);
1764                                                  1654 
1765         iocb_put(iocb);                          1655         iocb_put(iocb);
1766 }                                                1656 }
1767                                                  1657 
1768 /* assumes we are called with irqs disabled *    1658 /* assumes we are called with irqs disabled */
1769 static int aio_poll_cancel(struct kiocb *iocb    1659 static int aio_poll_cancel(struct kiocb *iocb)
1770 {                                                1660 {
1771         struct aio_kiocb *aiocb = container_o    1661         struct aio_kiocb *aiocb = container_of(iocb, struct aio_kiocb, rw);
1772         struct poll_iocb *req = &aiocb->poll;    1662         struct poll_iocb *req = &aiocb->poll;
1773                                                  1663 
1774         if (poll_iocb_lock_wq(req)) {         !! 1664         spin_lock(&req->head->lock);
1775                 WRITE_ONCE(req->cancelled, tr !! 1665         WRITE_ONCE(req->cancelled, true);
1776                 if (!req->work_scheduled) {   !! 1666         if (!list_empty(&req->wait.entry)) {
1777                         schedule_work(&aiocb- !! 1667                 list_del_init(&req->wait.entry);
1778                         req->work_scheduled = !! 1668                 schedule_work(&aiocb->poll.work);
1779                 }                             !! 1669         }
1780                 poll_iocb_unlock_wq(req);     !! 1670         spin_unlock(&req->head->lock);
1781         } /* else, the request was force-canc << 
1782                                                  1671 
1783         return 0;                                1672         return 0;
1784 }                                                1673 }
1785                                                  1674 
1786 static int aio_poll_wake(struct wait_queue_en    1675 static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
1787                 void *key)                       1676                 void *key)
1788 {                                                1677 {
1789         struct poll_iocb *req = container_of(    1678         struct poll_iocb *req = container_of(wait, struct poll_iocb, wait);
1790         struct aio_kiocb *iocb = container_of    1679         struct aio_kiocb *iocb = container_of(req, struct aio_kiocb, poll);
1791         __poll_t mask = key_to_poll(key);        1680         __poll_t mask = key_to_poll(key);
1792         unsigned long flags;                     1681         unsigned long flags;
1793                                                  1682 
1794         /* for instances that support it chec    1683         /* for instances that support it check for an event match first: */
1795         if (mask && !(mask & req->events))       1684         if (mask && !(mask & req->events))
1796                 return 0;                        1685                 return 0;
1797                                                  1686 
1798         /*                                    !! 1687         list_del_init(&req->wait.entry);
1799          * Complete the request inline if pos !! 1688 
1800          * conditions be met:                 !! 1689         if (mask && spin_trylock_irqsave(&iocb->ki_ctx->ctx_lock, flags)) {
1801          *   1. An event mask must have been  << 
1802          *      instead, then mask == 0 and w << 
1803          *      the events, so inline complet << 
1804          *   2. The completion work must not  << 
1805          *   3. ctx_lock must not be busy.  W << 
1806          *      already hold the waitqueue lo << 
1807          *      locking order.  Use irqsave/i << 
1808          *      filesystems (e.g. fuse) call  << 
1809          *      yet IRQs have to be disabled  << 
1810          */                                   << 
1811         if (mask && !req->work_scheduled &&   << 
1812             spin_trylock_irqsave(&iocb->ki_ct << 
1813                 struct kioctx *ctx = iocb->ki    1690                 struct kioctx *ctx = iocb->ki_ctx;
1814                                                  1691 
1815                 list_del_init(&req->wait.entr !! 1692                 /*
                                                   >> 1693                  * Try to complete the iocb inline if we can. Use
                                                   >> 1694                  * irqsave/irqrestore because not all filesystems (e.g. fuse)
                                                   >> 1695                  * call this function with IRQs disabled and because IRQs
                                                   >> 1696                  * have to be disabled before ctx_lock is obtained.
                                                   >> 1697                  */
1816                 list_del(&iocb->ki_list);        1698                 list_del(&iocb->ki_list);
1817                 iocb->ki_res.res = mangle_pol    1699                 iocb->ki_res.res = mangle_poll(mask);
1818                 if (iocb->ki_eventfd && !even !! 1700                 req->done = true;
                                                   >> 1701                 if (iocb->ki_eventfd && eventfd_signal_count()) {
1819                         iocb = NULL;             1702                         iocb = NULL;
1820                         INIT_WORK(&req->work,    1703                         INIT_WORK(&req->work, aio_poll_put_work);
1821                         schedule_work(&req->w    1704                         schedule_work(&req->work);
1822                 }                                1705                 }
1823                 spin_unlock_irqrestore(&ctx->    1706                 spin_unlock_irqrestore(&ctx->ctx_lock, flags);
1824                 if (iocb)                        1707                 if (iocb)
1825                         iocb_put(iocb);          1708                         iocb_put(iocb);
1826         } else {                                 1709         } else {
1827                 /*                            !! 1710                 schedule_work(&req->work);
1828                  * Schedule the completion wo << 
1829                  * scheduled, record that ano << 
1830                  *                            << 
1831                  * Don't remove the request f << 
1832                  * not actually be complete y << 
1833                  * is called), and we must no << 
1834                  * exception to this; see bel << 
1835                  */                           << 
1836                 if (req->work_scheduled) {    << 
1837                         req->work_need_resche << 
1838                 } else {                      << 
1839                         schedule_work(&req->w << 
1840                         req->work_scheduled = << 
1841                 }                             << 
1842                                               << 
1843                 /*                            << 
1844                  * If the waitqueue is being  << 
1845                  * the request inline, we hav << 
1846                  * we can.  That means immedi << 
1847                  * waitqueue and preventing a << 
1848                  * waitqueue via the request. << 
1849                  * completion work (done abov << 
1850                  * cancelled, to potentially  << 
1851                  */                           << 
1852                 if (mask & POLLFREE) {        << 
1853                         WRITE_ONCE(req->cance << 
1854                         list_del_init(&req->w << 
1855                                               << 
1856                         /*                    << 
1857                          * Careful: this *mus << 
1858                          * as req->head is NU << 
1859                          * completed and free << 
1860                          * will no longer nee << 
1861                          */                   << 
1862                         smp_store_release(&re << 
1863                 }                             << 
1864         }                                        1711         }
1865         return 1;                                1712         return 1;
1866 }                                                1713 }
1867                                                  1714 
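From userspace this machinery implements a one-shot poll: the requested mask travels in the low 16 bits of aio_buf (hence the (u16) check in aio_poll() below), and the ready mask comes back mangled into io_event.res. A sketch; submit_poll_in is hypothetical and ctx/sockfd are assumed:

#include <linux/aio_abi.h>
#include <sys/syscall.h>
#include <poll.h>
#include <unistd.h>

static long submit_poll_in(aio_context_t ctx, int sockfd)
{
	struct iocb cb = {
		.aio_fildes	= sockfd,
		.aio_lio_opcode	= IOCB_CMD_POLL,
		.aio_buf	= POLLIN,	/* requested events, low 16 bits */
	};
	struct iocb *cbs[1] = { &cb };

	/* Completes once; io_event.res then holds the ready poll mask. */
	return syscall(SYS_io_submit, ctx, 1, cbs);
}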
1868 struct aio_poll_table {                                                1715 struct aio_poll_table {
1869         struct poll_table_struct        pt;                            1716         struct poll_table_struct        pt;
1870         struct aio_kiocb                *iocb;                         1717         struct aio_kiocb                *iocb;
1871         bool                            queued;                        << 
1872         int                             error;                         1718         int                             error;
1873 };                                                                     1719 };
1874                                                                        1720 
1875 static void                                                            1721 static void
1876 aio_poll_queue_proc(struct file *file, struct wait_queue_head *head,   1722 aio_poll_queue_proc(struct file *file, struct wait_queue_head *head,
1877                 struct poll_table_struct *p)                           1723                 struct poll_table_struct *p)
1878 {                                                                      1724 {
1879         struct aio_poll_table *pt = container_of(p, struct aio_poll_table, pt); 1725         struct aio_poll_table *pt = container_of(p, struct aio_poll_table, pt);
1880                                                                        1726 
1881         /* multiple wait queues per file are not supported */          1727         /* multiple wait queues per file are not supported */
1882         if (unlikely(pt->queued)) {                                    !! 1728         if (unlikely(pt->iocb->poll.head)) {
1883                 pt->error = -EINVAL;                                   1729                 pt->error = -EINVAL;
1884                 return;                                                1730                 return;
1885         }                                                              1731         }
1886                                                                        1732 
1887         pt->queued = true;                                             << 
1888         pt->error = 0;                                                 1733         pt->error = 0;
1889         pt->iocb->poll.head = head;                                    1734         pt->iocb->poll.head = head;
1890         add_wait_queue(head, &pt->iocb->poll.wait);                    1735         add_wait_queue(head, &pt->iocb->poll.wait);
1891 }                                                                      1736 }
1892                                                  1737 
1893 static int aio_poll(struct aio_kiocb *aiocb, const struct iocb *iocb)  1738 static int aio_poll(struct aio_kiocb *aiocb, const struct iocb *iocb)
1894 {                                                                      1739 {
1895         struct kioctx *ctx = aiocb->ki_ctx;                            1740         struct kioctx *ctx = aiocb->ki_ctx;
1896         struct poll_iocb *req = &aiocb->poll;                          1741         struct poll_iocb *req = &aiocb->poll;
1897         struct aio_poll_table apt;                                     1742         struct aio_poll_table apt;
1898         bool cancel = false;                                           1743         bool cancel = false;
1899         __poll_t mask;                                                 1744         __poll_t mask;
1900                                                                        1745 
1901         /* reject any unknown events outside the normal event mask. */ 1746         /* reject any unknown events outside the normal event mask. */
1902         if ((u16)iocb->aio_buf != iocb->aio_buf)                       1747         if ((u16)iocb->aio_buf != iocb->aio_buf)
1903                 return -EINVAL;                                        1748                 return -EINVAL;
1904         /* reject fields that are not defined for poll */              1749         /* reject fields that are not defined for poll */
1905         if (iocb->aio_offset || iocb->aio_nbytes || iocb->aio_rw_flags) 1750         if (iocb->aio_offset || iocb->aio_nbytes || iocb->aio_rw_flags)
1906                 return -EINVAL;                                        1751                 return -EINVAL;
1907                                                                        1752 
1908         INIT_WORK(&req->work, aio_poll_complete_work);                 1753         INIT_WORK(&req->work, aio_poll_complete_work);
1909         req->events = demangle_poll(iocb->aio_buf) | EPOLLERR | EPOLLHUP; 1754         req->events = demangle_poll(iocb->aio_buf) | EPOLLERR | EPOLLHUP;
1910                                                                        1755 
1911         req->head = NULL;                                              1756         req->head = NULL;
                                                                        >> 1757         req->done = false;
1912         req->cancelled = false;                                        1758         req->cancelled = false;
1913         req->work_scheduled = false;                                   << 
1914         req->work_need_resched = false;                                << 
1915                                                                        1759 
1916         apt.pt._qproc = aio_poll_queue_proc;                           1760         apt.pt._qproc = aio_poll_queue_proc;
1917         apt.pt._key = req->events;                                     1761         apt.pt._key = req->events;
1918         apt.iocb = aiocb;                                              1762         apt.iocb = aiocb;
1919         apt.queued = false;                                            << 
1920         apt.error = -EINVAL; /* same as no support for IOCB_CMD_POLL */ 1763         apt.error = -EINVAL; /* same as no support for IOCB_CMD_POLL */
1921                                                                        1764 
1922         /* initialized the list so that we can do list_empty checks */ 1765         /* initialized the list so that we can do list_empty checks */
1923         INIT_LIST_HEAD(&req->wait.entry);                              1766         INIT_LIST_HEAD(&req->wait.entry);
1924         init_waitqueue_func_entry(&req->wait, aio_poll_wake);          1767         init_waitqueue_func_entry(&req->wait, aio_poll_wake);
1925                                                                        1768 
1926         mask = vfs_poll(req->file, &apt.pt) & req->events;             1769         mask = vfs_poll(req->file, &apt.pt) & req->events;
1927         spin_lock_irq(&ctx->ctx_lock);                                 1770         spin_lock_irq(&ctx->ctx_lock);
1928         if (likely(apt.queued)) {                                      !! 1771         if (likely(req->head)) {
1929                 bool on_queue = poll_iocb_lock_wq(req);                !! 1772                 spin_lock(&req->head->lock);
1930                                                                        !! 1773                 if (unlikely(list_empty(&req->wait.entry))) {
1931                 if (!on_queue || req->work_scheduled) {                !! 1774                         if (apt.error)
1932                         /*                                             << 
1933                          * aio_poll_wake() already either scheduled the async << 
1934                          * completion work, or completed the request inline. << 
1935                          */                                            << 
1936                         if (apt.error) /* unsupported case: multiple queues */ << 
1937                                 cancel = true;                         1775                                 cancel = true;
1938                         apt.error = 0;                                 1776                         apt.error = 0;
1939                         mask = 0;                                      1777                         mask = 0;
1940                 }                                                      1778                 }
1941                 if (mask || apt.error) {                               1779                 if (mask || apt.error) {
1942                         /* Steal to complete synchronously. */         << 
1943                         list_del_init(&req->wait.entry);               1780                         list_del_init(&req->wait.entry);
1944                 } else if (cancel) {                                   1781                 } else if (cancel) {
1945                         /* Cancel if possible (may be too late though). */ << 
1946                         WRITE_ONCE(req->cancelled, true);              1782                         WRITE_ONCE(req->cancelled, true);
1947                 } else if (on_queue) {                                 !! 1783                 } else if (!req->done) { /* actually waiting for an event */
1948                         /*                                             << 
1949                          * Actually waiting for an event, so add the request to << 
1950                          * active_reqs so that it can be cancelled if needed. << 
1951                          */                                            << 
1952                         list_add_tail(&aiocb->ki_list, &ctx->active_reqs); 1784                         list_add_tail(&aiocb->ki_list, &ctx->active_reqs);
1953                         aiocb->ki_cancel = aio_poll_cancel;            1785                         aiocb->ki_cancel = aio_poll_cancel;
1954                 }                                                      1786                 }
1955                 if (on_queue)                                          !! 1787                 spin_unlock(&req->head->lock);
1956                         poll_iocb_unlock_wq(req);                      << 
1957         }                                                              1788         }
1958         if (mask) { /* no async, we'd stolen it */                     1789         if (mask) { /* no async, we'd stolen it */
1959                 aiocb->ki_res.res = mangle_poll(mask);                 1790                 aiocb->ki_res.res = mangle_poll(mask);
1960                 apt.error = 0;                                         1791                 apt.error = 0;
1961         }                                                              1792         }
1962         spin_unlock_irq(&ctx->ctx_lock);                               1793         spin_unlock_irq(&ctx->ctx_lock);
1963         if (mask)                                                      1794         if (mask)
1964                 iocb_put(aiocb);                                       1795                 iocb_put(aiocb);
1965         return apt.error;                                              1796         return apt.error;
1966 }                                                                      1797 }
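
For IOCB_CMD_POLL, aio_poll() above takes the requested event mask from iocb->aio_buf (the (u16) comparison rejects anything beyond the low 16 bits) and insists that aio_offset, aio_nbytes and aio_rw_flags stay zero. A minimal userspace sketch of the submission side, assuming raw syscall(2) since glibc does not wrap these calls; the helper name submit_poll is invented here:

    #include <linux/aio_abi.h>
    #include <poll.h>
    #include <string.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    /* Sketch: queue a one-shot IOCB_CMD_POLL for POLLIN on fd. */
    static long submit_poll(aio_context_t ctx, int fd)
    {
            struct iocb cb;
            struct iocb *cbs[1] = { &cb };

            memset(&cb, 0, sizeof(cb));     /* offset/nbytes/rw_flags must stay 0 */
            cb.aio_lio_opcode = IOCB_CMD_POLL;
            cb.aio_fildes = fd;
            cb.aio_buf = POLLIN;            /* event mask; only the low 16 bits are accepted */

            return syscall(SYS_io_submit, ctx, 1, cbs);
    }

The completion (readable via io_getevents below) carries the triggered mask in the io_event's res field, mangled back by mangle_poll() above.
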
1967                                                  1798 
1968 static int __io_submit_one(struct kioctx *ctx, const struct iocb *iocb, 1799 static int __io_submit_one(struct kioctx *ctx, const struct iocb *iocb,
1969                            struct iocb __user *user_iocb, struct aio_kiocb *req, 1800                            struct iocb __user *user_iocb, struct aio_kiocb *req,
1970                            bool compat)                                 1801                            bool compat)
1971 {                                                                      1802 {
1972         req->ki_filp = fget(iocb->aio_fildes);                         1803         req->ki_filp = fget(iocb->aio_fildes);
1973         if (unlikely(!req->ki_filp))                                   1804         if (unlikely(!req->ki_filp))
1974                 return -EBADF;                                         1805                 return -EBADF;
1975                                                                        1806 
1976         if (iocb->aio_flags & IOCB_FLAG_RESFD) {                       1807         if (iocb->aio_flags & IOCB_FLAG_RESFD) {
1977                 struct eventfd_ctx *eventfd;                           1808                 struct eventfd_ctx *eventfd;
1978                 /*                                                     1809                 /*
1979                  * If the IOCB_FLAG_RESFD flag of aio_flags is set, get an 1810                  * If the IOCB_FLAG_RESFD flag of aio_flags is set, get an
1980                  * instance of the file* now. The file descriptor must be 1811                  * instance of the file* now. The file descriptor must be
1981                  * an eventfd() fd, and will be signaled for each completed 1812                  * an eventfd() fd, and will be signaled for each completed
1982                  * event using the eventfd_signal() function.          1813                  * event using the eventfd_signal() function.
1983                  */                                                    1814                  */
1984                 eventfd = eventfd_ctx_fdget(iocb->aio_resfd);          1815                 eventfd = eventfd_ctx_fdget(iocb->aio_resfd);
1985                 if (IS_ERR(eventfd))                                   1816                 if (IS_ERR(eventfd))
1986                         return PTR_ERR(eventfd);                       1817                         return PTR_ERR(eventfd);
1987                                                                        1818 
1988                 req->ki_eventfd = eventfd;                             1819                 req->ki_eventfd = eventfd;
1989         }                                                              1820         }
1990                                                                        1821 
1991         if (unlikely(put_user(KIOCB_KEY, &user_iocb->aio_key))) {      1822         if (unlikely(put_user(KIOCB_KEY, &user_iocb->aio_key))) {
1992                 pr_debug("EFAULT: aio_key\n");                         1823                 pr_debug("EFAULT: aio_key\n");
1993                 return -EFAULT;                                        1824                 return -EFAULT;
1994         }                                                              1825         }
1995                                                                        1826 
1996         req->ki_res.obj = (u64)(unsigned long)user_iocb;               1827         req->ki_res.obj = (u64)(unsigned long)user_iocb;
1997         req->ki_res.data = iocb->aio_data;                             1828         req->ki_res.data = iocb->aio_data;
1998         req->ki_res.res = 0;                                           1829         req->ki_res.res = 0;
1999         req->ki_res.res2 = 0;                                          1830         req->ki_res.res2 = 0;
2000                                                                        1831 
2001         switch (iocb->aio_lio_opcode) {                                1832         switch (iocb->aio_lio_opcode) {
2002         case IOCB_CMD_PREAD:                                           1833         case IOCB_CMD_PREAD:
2003                 return aio_read(&req->rw, iocb, false, compat);        1834                 return aio_read(&req->rw, iocb, false, compat);
2004         case IOCB_CMD_PWRITE:                                          1835         case IOCB_CMD_PWRITE:
2005                 return aio_write(&req->rw, iocb, false, compat);       1836                 return aio_write(&req->rw, iocb, false, compat);
2006         case IOCB_CMD_PREADV:                                          1837         case IOCB_CMD_PREADV:
2007                 return aio_read(&req->rw, iocb, true, compat);         1838                 return aio_read(&req->rw, iocb, true, compat);
2008         case IOCB_CMD_PWRITEV:                                         1839         case IOCB_CMD_PWRITEV:
2009                 return aio_write(&req->rw, iocb, true, compat);        1840                 return aio_write(&req->rw, iocb, true, compat);
2010         case IOCB_CMD_FSYNC:                                           1841         case IOCB_CMD_FSYNC:
2011                 return aio_fsync(&req->fsync, iocb, false);            1842                 return aio_fsync(&req->fsync, iocb, false);
2012         case IOCB_CMD_FDSYNC:                                          1843         case IOCB_CMD_FDSYNC:
2013                 return aio_fsync(&req->fsync, iocb, true);             1844                 return aio_fsync(&req->fsync, iocb, true);
2014         case IOCB_CMD_POLL:                                            1845         case IOCB_CMD_POLL:
2015                 return aio_poll(req, iocb);                            1846                 return aio_poll(req, iocb);
2016         default:                                                       1847         default:
2017                 pr_debug("invalid aio operation %d\n", iocb->aio_lio_opcode); 1848                 pr_debug("invalid aio operation %d\n", iocb->aio_lio_opcode);
2018                 return -EINVAL;                                        1849                 return -EINVAL;
2019         }                                                              1850         }
2020 }                                                                      1851 }
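
The IOCB_FLAG_RESFD branch above is the eventfd notification hook: userspace stores an eventfd descriptor in aio_resfd and sets the flag, and the kernel signals that eventfd once per completed event. A hedged sketch of the userspace side (submit_with_resfd is an invented helper; the iocb is assumed to be otherwise filled in already):

    #include <linux/aio_abi.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    /* Sketch: request a completion ping on an eventfd via IOCB_FLAG_RESFD. */
    static long submit_with_resfd(aio_context_t ctx, struct iocb *cb, int efd)
    {
            struct iocb *cbs[1] = { cb };

            cb->aio_flags |= IOCB_FLAG_RESFD;
            cb->aio_resfd = efd;            /* must be an eventfd() descriptor */

            return syscall(SYS_io_submit, ctx, 1, cbs);
    }

Here efd would typically come from eventfd(0, 0); an 8-byte read(2) on it then yields the number of completions signalled since the last read, which makes the descriptor convenient to multiplex in an epoll loop.
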
2021                                                  1852 
2022 static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb, 1853 static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
2023                          bool compat)                                  1854                          bool compat)
2024 {                                                                      1855 {
2025         struct aio_kiocb *req;                                         1856         struct aio_kiocb *req;
2026         struct iocb iocb;                                              1857         struct iocb iocb;
2027         int err;                                                       1858         int err;
2028                                                                        1859 
2029         if (unlikely(copy_from_user(&iocb, user_iocb, sizeof(iocb))))  1860         if (unlikely(copy_from_user(&iocb, user_iocb, sizeof(iocb))))
2030                 return -EFAULT;                                        1861                 return -EFAULT;
2031                                                                        1862 
2032         /* enforce forwards compatibility on users */                  1863         /* enforce forwards compatibility on users */
2033         if (unlikely(iocb.aio_reserved2)) {                            1864         if (unlikely(iocb.aio_reserved2)) {
2034                 pr_debug("EINVAL: reserve field set\n");               1865                 pr_debug("EINVAL: reserve field set\n");
2035                 return -EINVAL;                                        1866                 return -EINVAL;
2036         }                                                              1867         }
2037                                                                        1868 
2038         /* prevent overflows */                                        1869         /* prevent overflows */
2039         if (unlikely(                                                  1870         if (unlikely(
2040             (iocb.aio_buf != (unsigned long)iocb.aio_buf) ||           1871             (iocb.aio_buf != (unsigned long)iocb.aio_buf) ||
2041             (iocb.aio_nbytes != (size_t)iocb.aio_nbytes) ||            1872             (iocb.aio_nbytes != (size_t)iocb.aio_nbytes) ||
2042             ((ssize_t)iocb.aio_nbytes < 0)                             1873             ((ssize_t)iocb.aio_nbytes < 0)
2043            )) {                                                        1874            )) {
2044                 pr_debug("EINVAL: overflow check\n");                  1875                 pr_debug("EINVAL: overflow check\n");
2045                 return -EINVAL;                                        1876                 return -EINVAL;
2046         }                                                              1877         }
2047                                                                        1878 
2048         req = aio_get_req(ctx);                                        1879         req = aio_get_req(ctx);
2049         if (unlikely(!req))                                            1880         if (unlikely(!req))
2050                 return -EAGAIN;                                        1881                 return -EAGAIN;
2051                                                                        1882 
2052         err = __io_submit_one(ctx, &iocb, user_iocb, req, compat);     1883         err = __io_submit_one(ctx, &iocb, user_iocb, req, compat);
2053                                                                        1884 
2054         /* Done with the synchronous reference */                      1885         /* Done with the synchronous reference */
2055         iocb_put(req);                                                 1886         iocb_put(req);
2056                                                                        1887 
2057         /*                                                             1888         /*
2058          * If err is 0, we'd either done aio_complete() ourselves or have 1889          * If err is 0, we'd either done aio_complete() ourselves or have
2059          * arranged for that to be done asynchronously.  Anything non-zero 1890          * arranged for that to be done asynchronously.  Anything non-zero
2060          * means that we need to destroy req ourselves.                1891          * means that we need to destroy req ourselves.
2061          */                                                            1892          */
2062         if (unlikely(err)) {                                           1893         if (unlikely(err)) {
2063                 iocb_destroy(req);                                     1894                 iocb_destroy(req);
2064                 put_reqs_available(ctx, 1);                            1895                 put_reqs_available(ctx, 1);
2065         }                                                              1896         }
2066         return err;                                                    1897         return err;
2067 }                                                                      1898 }
2068                                                  1899 
2069 /* sys_io_submit:                                                      1900 /* sys_io_submit:
2070  *      Queue the nr iocbs pointed to by iocbpp for processing.  Returns 1901  *      Queue the nr iocbs pointed to by iocbpp for processing.  Returns
2071  *      the number of iocbs queued.  May return -EINVAL if the aio_context 1902  *      the number of iocbs queued.  May return -EINVAL if the aio_context
2072  *      specified by ctx_id is invalid, if nr is < 0, if the iocb at    1903  *      specified by ctx_id is invalid, if nr is < 0, if the iocb at
2073  *      *iocbpp[0] is not properly initialized, if the operation specified 1904  *      *iocbpp[0] is not properly initialized, if the operation specified
2074  *      is invalid for the file descriptor in the iocb.  May fail with  1905  *      is invalid for the file descriptor in the iocb.  May fail with
2075  *      -EFAULT if any of the data structures point to invalid data.  May 1906  *      -EFAULT if any of the data structures point to invalid data.  May
2076  *      fail with -EBADF if the file descriptor specified in the first  1907  *      fail with -EBADF if the file descriptor specified in the first
2077  *      iocb is invalid.  May fail with -EAGAIN if insufficient resources 1908  *      iocb is invalid.  May fail with -EAGAIN if insufficient resources
2078  *      are available to queue any iocbs.  Will return 0 if nr is 0.  Will 1909  *      are available to queue any iocbs.  Will return 0 if nr is 0.  Will
2079  *      fail with -ENOSYS if not implemented.                           1910  *      fail with -ENOSYS if not implemented.
2080  */                                                                    1911  */
2081 SYSCALL_DEFINE3(io_submit, aio_context_t, ctx_id, long, nr,            1912 SYSCALL_DEFINE3(io_submit, aio_context_t, ctx_id, long, nr,
2082                 struct iocb __user * __user *, iocbpp)                 1913                 struct iocb __user * __user *, iocbpp)
2083 {                                                                      1914 {
2084         struct kioctx *ctx;                                            1915         struct kioctx *ctx;
2085         long ret = 0;                                                  1916         long ret = 0;
2086         int i = 0;                                                     1917         int i = 0;
2087         struct blk_plug plug;                                          1918         struct blk_plug plug;
2088                                                                        1919 
2089         if (unlikely(nr < 0))                                          1920         if (unlikely(nr < 0))
2090                 return -EINVAL;                                        1921                 return -EINVAL;
2091                                                                        1922 
2092         ctx = lookup_ioctx(ctx_id);                                    1923         ctx = lookup_ioctx(ctx_id);
2093         if (unlikely(!ctx)) {                                          1924         if (unlikely(!ctx)) {
2094                 pr_debug("EINVAL: invalid context id\n");              1925                 pr_debug("EINVAL: invalid context id\n");
2095                 return -EINVAL;                                        1926                 return -EINVAL;
2096         }                                                              1927         }
2097                                                                        1928 
2098         if (nr > ctx->nr_events)                                       1929         if (nr > ctx->nr_events)
2099                 nr = ctx->nr_events;                                   1930                 nr = ctx->nr_events;
2100                                                                        1931 
2101         if (nr > AIO_PLUG_THRESHOLD)                                   1932         if (nr > AIO_PLUG_THRESHOLD)
2102                 blk_start_plug(&plug);                                 1933                 blk_start_plug(&plug);
2103         for (i = 0; i < nr; i++) {                                     1934         for (i = 0; i < nr; i++) {
2104                 struct iocb __user *user_iocb;                         1935                 struct iocb __user *user_iocb;
2105                                                                        1936 
2106                 if (unlikely(get_user(user_iocb, iocbpp + i))) {       1937                 if (unlikely(get_user(user_iocb, iocbpp + i))) {
2107                         ret = -EFAULT;                                 1938                         ret = -EFAULT;
2108                         break;                                         1939                         break;
2109                 }                                                      1940                 }
2110                                                                        1941 
2111                 ret = io_submit_one(ctx, user_iocb, false);            1942                 ret = io_submit_one(ctx, user_iocb, false);
2112                 if (ret)                                               1943                 if (ret)
2113                         break;                                         1944                         break;
2114         }                                                              1945         }
2115         if (nr > AIO_PLUG_THRESHOLD)                                   1946         if (nr > AIO_PLUG_THRESHOLD)
2116                 blk_finish_plug(&plug);                                1947                 blk_finish_plug(&plug);
2117                                                                        1948 
2118         percpu_ref_put(&ctx->users);                                   1949         percpu_ref_put(&ctx->users);
2119         return i ? i : ret;                                            1950         return i ? i : ret;
2120 }                                                                      1951 }
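
Since glibc provides no wrappers for these syscalls, callers go through syscall(2) directly. The sketch below is an illustration only (file path and buffer size are arbitrary choices): io_setup creates the context, one IOCB_CMD_PREAD iocb is queued, and io_submit returns the number of iocbs queued, or the first error if nothing was queued (the `i ? i : ret` above):

    #include <fcntl.h>
    #include <linux/aio_abi.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    int main(void)
    {
            aio_context_t ctx = 0;          /* must be zero before io_setup */
            struct iocb cb;
            struct iocb *cbs[1] = { &cb };
            static char buf[4096];
            long ret;
            int fd;

            if (syscall(SYS_io_setup, 32, &ctx) < 0)
                    return 1;
            fd = open("/etc/os-release", O_RDONLY);
            if (fd < 0)
                    return 1;

            memset(&cb, 0, sizeof(cb));     /* keeps aio_reserved2 zero, see above */
            cb.aio_lio_opcode = IOCB_CMD_PREAD;
            cb.aio_fildes = fd;
            cb.aio_buf = (unsigned long)buf;
            cb.aio_nbytes = sizeof(buf);    /* negative/overflowing sizes are rejected */
            cb.aio_offset = 0;

            ret = syscall(SYS_io_submit, ctx, 1, cbs);
            printf("io_submit queued %ld iocb(s)\n", ret);

            syscall(SYS_io_destroy, ctx);
            return ret == 1 ? 0 : 1;
    }
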
2121                                                  1952 
2122 #ifdef CONFIG_COMPAT                             1953 #ifdef CONFIG_COMPAT
2123 COMPAT_SYSCALL_DEFINE3(io_submit, compat_aio_context_t, ctx_id,       1954 COMPAT_SYSCALL_DEFINE3(io_submit, compat_aio_context_t, ctx_id,
2124                        int, nr, compat_uptr_t __user *, iocbpp)       1955                        int, nr, compat_uptr_t __user *, iocbpp)
2125 {                                                1956 {
2126         struct kioctx *ctx;                      1957         struct kioctx *ctx;
2127         long ret = 0;                            1958         long ret = 0;
2128         int i = 0;                               1959         int i = 0;
2129         struct blk_plug plug;                    1960         struct blk_plug plug;
2130                                                  1961 
2131         if (unlikely(nr < 0))                    1962         if (unlikely(nr < 0))
2132                 return -EINVAL;                  1963                 return -EINVAL;
2133                                                  1964 
2134         ctx = lookup_ioctx(ctx_id);              1965         ctx = lookup_ioctx(ctx_id);
2135         if (unlikely(!ctx)) {                    1966         if (unlikely(!ctx)) {
2136                 pr_debug("EINVAL: invalid context id\n");             1967                 pr_debug("EINVAL: invalid context id\n");
2137                 return -EINVAL;                  1968                 return -EINVAL;
2138         }                                        1969         }
2139                                                  1970 
2140         if (nr > ctx->nr_events)                 1971         if (nr > ctx->nr_events)
2141                 nr = ctx->nr_events;             1972                 nr = ctx->nr_events;
2142                                                  1973 
2143         if (nr > AIO_PLUG_THRESHOLD)             1974         if (nr > AIO_PLUG_THRESHOLD)
2144                 blk_start_plug(&plug);           1975                 blk_start_plug(&plug);
2145         for (i = 0; i < nr; i++) {               1976         for (i = 0; i < nr; i++) {
2146                 compat_uptr_t user_iocb;         1977                 compat_uptr_t user_iocb;
2147                                                  1978 
2148                 if (unlikely(get_user(user_iocb, iocbpp + i))) {      1979                 if (unlikely(get_user(user_iocb, iocbpp + i))) {
2149                         ret = -EFAULT;           1980                         ret = -EFAULT;
2150                         break;                   1981                         break;
2151                 }                                1982                 }
2152                                                  1983 
2153                 ret = io_submit_one(ctx, compat_ptr(user_iocb), true); 1984                 ret = io_submit_one(ctx, compat_ptr(user_iocb), true);
2154                 if (ret)                         1985                 if (ret)
2155                         break;                   1986                         break;
2156         }                                        1987         }
2157         if (nr > AIO_PLUG_THRESHOLD)             1988         if (nr > AIO_PLUG_THRESHOLD)
2158                 blk_finish_plug(&plug);          1989                 blk_finish_plug(&plug);
2159                                                  1990 
2160         percpu_ref_put(&ctx->users);             1991         percpu_ref_put(&ctx->users);
2161         return i ? i : ret;                      1992         return i ? i : ret;
2162 }                                                1993 }
2163 #endif                                           1994 #endif
2164                                                  1995 
2165 /* sys_io_cancel:                                                      1996 /* sys_io_cancel:
2166  *      Attempts to cancel an iocb previously passed to io_submit.  If 1997  *      Attempts to cancel an iocb previously passed to io_submit.  If
2167  *      the operation is successfully cancelled, the resulting event is 1998  *      the operation is successfully cancelled, the resulting event is
2168  *      copied into the memory pointed to by result without being placed 1999  *      copied into the memory pointed to by result without being placed
2169  *      into the completion queue and 0 is returned.  May fail with    2000  *      into the completion queue and 0 is returned.  May fail with
2170  *      -EFAULT if any of the data structures pointed to are invalid.  2001  *      -EFAULT if any of the data structures pointed to are invalid.
2171  *      May fail with -EINVAL if aio_context specified by ctx_id is    2002  *      May fail with -EINVAL if aio_context specified by ctx_id is
2172  *      invalid.  May fail with -EAGAIN if the iocb specified was not  2003  *      invalid.  May fail with -EAGAIN if the iocb specified was not
2173  *      cancelled.  Will fail with -ENOSYS if not implemented.         2004  *      cancelled.  Will fail with -ENOSYS if not implemented.
2174  */                                                                    2005  */
2175 SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb, 2006 SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb,
2176                 struct io_event __user *, result)                      2007                 struct io_event __user *, result)
2177 {                                                                      2008 {
2178         struct kioctx *ctx;                                            2009         struct kioctx *ctx;
2179         struct aio_kiocb *kiocb;                                       2010         struct aio_kiocb *kiocb;
2180         int ret = -EINVAL;                                             2011         int ret = -EINVAL;
2181         u32 key;                                                       2012         u32 key;
2182         u64 obj = (u64)(unsigned long)iocb;                            2013         u64 obj = (u64)(unsigned long)iocb;
2183                                                                        2014 
2184         if (unlikely(get_user(key, &iocb->aio_key)))                   2015         if (unlikely(get_user(key, &iocb->aio_key)))
2185                 return -EFAULT;                                        2016                 return -EFAULT;
2186         if (unlikely(key != KIOCB_KEY))                                2017         if (unlikely(key != KIOCB_KEY))
2187                 return -EINVAL;                                        2018                 return -EINVAL;
2188                                                                        2019 
2189         ctx = lookup_ioctx(ctx_id);                                    2020         ctx = lookup_ioctx(ctx_id);
2190         if (unlikely(!ctx))                                            2021         if (unlikely(!ctx))
2191                 return -EINVAL;                                        2022                 return -EINVAL;
2192                                                                        2023 
2193         spin_lock_irq(&ctx->ctx_lock);                                 2024         spin_lock_irq(&ctx->ctx_lock);
2194         /* TODO: use a hash or array, this sucks. */                   2025         /* TODO: use a hash or array, this sucks. */
2195         list_for_each_entry(kiocb, &ctx->active_reqs, ki_list) {       2026         list_for_each_entry(kiocb, &ctx->active_reqs, ki_list) {
2196                 if (kiocb->ki_res.obj == obj) {                        2027                 if (kiocb->ki_res.obj == obj) {
2197                         ret = kiocb->ki_cancel(&kiocb->rw);            2028                         ret = kiocb->ki_cancel(&kiocb->rw);
2198                         list_del_init(&kiocb->ki_list);                2029                         list_del_init(&kiocb->ki_list);
2199                         break;                                         2030                         break;
2200                 }                                                      2031                 }
2201         }                                                              2032         }
2202         spin_unlock_irq(&ctx->ctx_lock);                               2033         spin_unlock_irq(&ctx->ctx_lock);
2203                                                                        2034 
2204         if (!ret) {                                                    2035         if (!ret) {
2205                 /*                                                     2036                 /*
2206                  * The result argument is no longer used - the io_event is 2037                  * The result argument is no longer used - the io_event is
2207                  * always delivered via the ring buffer. -EINPROGRESS indicates 2038                  * always delivered via the ring buffer. -EINPROGRESS indicates
2208                  * cancellation is in progress:                        2039                  * cancellation is in progress:
2209                  */                                                    2040                  */
2210                 ret = -EINPROGRESS;                                    2041                 ret = -EINPROGRESS;
2211         }                                                              2042         }
2212                                                                        2043 
2213         percpu_ref_put(&ctx->users);                                   2044         percpu_ref_put(&ctx->users);
2214                                                                        2045 
2215         return ret;                                                    2046         return ret;
2216 }                                                                      2047 }
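
Note that the docblock above is stale in one respect that the code itself calls out: the result argument is no longer filled in, a successful cancellation surfaces to userspace as -EINPROGRESS, and the completion event still arrives through the ring. A hedged sketch of the caller side (try_cancel is an invented helper name):

    #include <errno.h>
    #include <linux/aio_abi.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    /* Sketch: returns 0 if the cancellation was accepted, -errno otherwise. */
    static int try_cancel(aio_context_t ctx, struct iocb *cb)
    {
            struct io_event ev;     /* required by the ABI, but no longer filled in */

            if (syscall(SYS_io_cancel, ctx, cb, &ev) == 0)
                    return 0;
            if (errno == EINPROGRESS)
                    return 0;       /* cancelled; the event arrives via the ring */
            return -errno;          /* e.g. EINVAL: iocb not found on active_reqs */
    }
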
2217                                                  2048 
2218 static long do_io_getevents(aio_context_t ctx_id,                      2049 static long do_io_getevents(aio_context_t ctx_id,
2219                 long min_nr,                                           2050                 long min_nr,
2220                 long nr,                                               2051                 long nr,
2221                 struct io_event __user *events,                        2052                 struct io_event __user *events,
2222                 struct timespec64 *ts)                                 2053                 struct timespec64 *ts)
2223 {                                                                      2054 {
2224         ktime_t until = ts ? timespec64_to_ktime(*ts) : KTIME_MAX;     2055         ktime_t until = ts ? timespec64_to_ktime(*ts) : KTIME_MAX;
2225         struct kioctx *ioctx = lookup_ioctx(ctx_id);                   2056         struct kioctx *ioctx = lookup_ioctx(ctx_id);
2226         long ret = -EINVAL;                                            2057         long ret = -EINVAL;
2227                                                                        2058 
2228         if (likely(ioctx)) {                                           2059         if (likely(ioctx)) {
2229                 if (likely(min_nr <= nr && min_nr >= 0))               2060                 if (likely(min_nr <= nr && min_nr >= 0))
2230                         ret = read_events(ioctx, min_nr, nr, events, until); 2061                         ret = read_events(ioctx, min_nr, nr, events, until);
2231                 percpu_ref_put(&ioctx->users);                         2062                 percpu_ref_put(&ioctx->users);
2232         }                                                              2063         }
2233                                                                        2064 
2234         return ret;                                                    2065         return ret;
2235 }                                                                      2066 }
2236                                                  2067 
2237 /* io_getevents:                                                       2068 /* io_getevents:
2238  *      Attempts to read at least min_nr events and up to nr events from 2069  *      Attempts to read at least min_nr events and up to nr events from
2239  *      the completion queue for the aio_context specified by ctx_id. If 2070  *      the completion queue for the aio_context specified by ctx_id. If
2240  *      it succeeds, the number of read events is returned. May fail with 2071  *      it succeeds, the number of read events is returned. May fail with
2241  *      -EINVAL if ctx_id is invalid, if min_nr is out of range, if nr is 2072  *      -EINVAL if ctx_id is invalid, if min_nr is out of range, if nr is
2242  *      out of range, if timeout is out of range.  May fail with -EFAULT 2073  *      out of range, if timeout is out of range.  May fail with -EFAULT
2243  *      if any of the memory specified is invalid.  May return 0 or     2074  *      if any of the memory specified is invalid.  May return 0 or
2244  *      < min_nr if the timeout specified by timeout has elapsed        2075  *      < min_nr if the timeout specified by timeout has elapsed
2245  *      before sufficient events are available, where timeout == NULL   2076  *      before sufficient events are available, where timeout == NULL
2246  *      specifies an infinite timeout. Note that the timeout pointed to by 2077  *      specifies an infinite timeout. Note that the timeout pointed to by
2247  *      timeout is relative.  Will fail with -ENOSYS if not implemented. 2078  *      timeout is relative.  Will fail with -ENOSYS if not implemented.
2248  */                                                                    2079  */
2249 #ifdef CONFIG_64BIT                                                    2080 #ifdef CONFIG_64BIT
2250                                                                        2081 
2251 SYSCALL_DEFINE5(io_getevents, aio_context_t, ctx_id,                   2082 SYSCALL_DEFINE5(io_getevents, aio_context_t, ctx_id,
2252                 long, min_nr,                                          2083                 long, min_nr,
2253                 long, nr,                                              2084                 long, nr,
2254                 struct io_event __user *, events,                      2085                 struct io_event __user *, events,
2255                 struct __kernel_timespec __user *, timeout)            2086                 struct __kernel_timespec __user *, timeout)
2256 {                                                                      2087 {
2257         struct timespec64       ts;                                    2088         struct timespec64       ts;
2258         int                     ret;                                   2089         int                     ret;
2259                                                                        2090 
2260         if (timeout && unlikely(get_timespec64(&ts, timeout)))         2091         if (timeout && unlikely(get_timespec64(&ts, timeout)))
2261                 return -EFAULT;                                        2092                 return -EFAULT;
2262                                                                        2093 
2263         ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &ts : NULL); 2094         ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &ts : NULL);
2264         if (!ret && signal_pending(current))                           2095         if (!ret && signal_pending(current))
2265                 ret = -EINTR;                                          2096                 ret = -EINTR;
2266         return ret;                                                    2097         return ret;
2267 }                                                                      2098 }
2268                                                  2099 
2269 #endif                                           2100 #endif
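
The timeout handed to io_getevents is relative, and NULL means wait indefinitely (do_io_getevents above maps it to KTIME_MAX). A small userspace sketch, assuming a 64-bit platform where the C library's struct timespec matches __kernel_timespec; reap_events is an invented name:

    #include <linux/aio_abi.h>
    #include <sys/syscall.h>
    #include <time.h>
    #include <unistd.h>

    /* Sketch: wait up to 500 ms for at least one completion. */
    static long reap_events(aio_context_t ctx, struct io_event *evs, long max)
    {
            struct timespec ts = { .tv_sec = 0, .tv_nsec = 500000000L };

            /* relative timeout; passing NULL instead would block indefinitely */
            return syscall(SYS_io_getevents, ctx, 1, max, evs, &ts);
    }

On success each returned io_event carries back the aio_data cookie and iocb pointer (data and obj) plus the result codes res and res2.
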
2270                                                  2101 
2271 struct __aio_sigset {                            2102 struct __aio_sigset {
2272         const sigset_t __user   *sigmask;        2103         const sigset_t __user   *sigmask;
2273         size_t          sigsetsize;              2104         size_t          sigsetsize;
2274 };                                               2105 };
2275                                                  2106 
2276 SYSCALL_DEFINE6(io_pgetevents,                                         2107 SYSCALL_DEFINE6(io_pgetevents,
2277                 aio_context_t, ctx_id,                                 2108                 aio_context_t, ctx_id,
2278                 long, min_nr,                                          2109                 long, min_nr,
2279                 long, nr,                                              2110                 long, nr,
2280                 struct io_event __user *, events,                      2111                 struct io_event __user *, events,
2281                 struct __kernel_timespec __user *, timeout,            2112                 struct __kernel_timespec __user *, timeout,
2282                 const struct __aio_sigset __user *, usig)              2113                 const struct __aio_sigset __user *, usig)
2283 {                                                                      2114 {
2284         struct __aio_sigset     ksig = { NULL, };                      2115         struct __aio_sigset     ksig = { NULL, };
2285         struct timespec64       ts;                                    2116         struct timespec64       ts;
2286         bool interrupted;                                              2117         bool interrupted;
2287         int ret;                                                       2118         int ret;
2288                                                                        2119 
2289         if (timeout && unlikely(get_timespec64(&ts, timeout)))         2120         if (timeout && unlikely(get_timespec64(&ts, timeout)))
2290                 return -EFAULT;                                        2121                 return -EFAULT;
2291                                                                        2122 
2292         if (usig && copy_from_user(&ksig, usig, sizeof(ksig)))         2123         if (usig && copy_from_user(&ksig, usig, sizeof(ksig)))
2293                 return -EFAULT;                                        2124                 return -EFAULT;
2294                                                                        2125 
2295         ret = set_user_sigmask(ksig.sigmask, ksig.sigsetsize);         2126         ret = set_user_sigmask(ksig.sigmask, ksig.sigsetsize);
2296         if (ret)                                                       2127         if (ret)
2297                 return ret;                                            2128                 return ret;
2298                                                                        2129 
2299         ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &ts : NULL); 2130         ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &ts : NULL);
2300                                                                        2131 
2301         interrupted = signal_pending(current);                         2132         interrupted = signal_pending(current);
2302         restore_saved_sigmask_unless(interrupted);                     2133         restore_saved_sigmask_unless(interrupted);
2303         if (interrupted && !ret)                                       2134         if (interrupted && !ret)
2304                 ret = -ERESTARTNOHAND;                                 2135                 ret = -ERESTARTNOHAND;
2305                                                                        2136 
2306         return ret;                                                    2137         return ret;
2307 }                                                                      2138 }
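
io_pgetevents is the pselect-style variant: the extra usig argument points at a __aio_sigset as defined above, and the kernel installs that signal mask for the duration of the wait, restoring it afterwards (restore_saved_sigmask_unless above). -ERESTARTNOHAND becomes either a transparent restart or EINTR once a handler has run. A hedged sketch, assuming x86-64 where the kernel's sigset_t is 8 bytes; reap_with_sigmask and the userspace aio_sigset mirror are invented here:

    #include <linux/aio_abi.h>
    #include <signal.h>
    #include <stddef.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    struct aio_sigset {             /* userspace mirror of __aio_sigset above */
            const sigset_t *sigmask;
            size_t sigsetsize;
    };

    /* Sketch: wait for a completion with every signal except SIGINT blocked. */
    static long reap_with_sigmask(aio_context_t ctx, struct io_event *evs, long max)
    {
            sigset_t mask;
            struct aio_sigset usig = { &mask, 8 };  /* 8 == kernel sigset size here */

            sigfillset(&mask);
            sigdelset(&mask, SIGINT);       /* only SIGINT may interrupt the wait */

            return syscall(SYS_io_pgetevents, ctx, 1, max, evs, NULL, &usig);
    }
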
2308                                                  2139 
2309 #if defined(CONFIG_COMPAT_32BIT_TIME) && !defined(CONFIG_64BIT)        2140 #if defined(CONFIG_COMPAT_32BIT_TIME) && !defined(CONFIG_64BIT)
2310                                                                        2141 
2311 SYSCALL_DEFINE6(io_pgetevents_time32,                                  2142 SYSCALL_DEFINE6(io_pgetevents_time32,
2312                 aio_context_t, ctx_id,                                 2143                 aio_context_t, ctx_id,
2313                 long, min_nr,                                          2144                 long, min_nr,
2314                 long, nr,                                              2145                 long, nr,
2315                 struct io_event __user *, events,                      2146                 struct io_event __user *, events,
2316                 struct old_timespec32 __user *, timeout,               2147                 struct old_timespec32 __user *, timeout,
2317                 const struct __aio_sigset __user *, usig)              2148                 const struct __aio_sigset __user *, usig)
2318 {                                                                      2149 {
2319         struct __aio_sigset     ksig = { NULL, };                      2150         struct __aio_sigset     ksig = { NULL, };
2320         struct timespec64       ts;                                    2151         struct timespec64       ts;
2321         bool interrupted;                                              2152         bool interrupted;
2322         int ret;                                                       2153         int ret;
2323                                                                        2154 
2324         if (timeout && unlikely(get_old_timespec32(&ts, timeout)))     2155         if (timeout && unlikely(get_old_timespec32(&ts, timeout)))
2325                 return -EFAULT;                                        2156                 return -EFAULT;
2326                                                                        2157 
2327         if (usig && copy_from_user(&ksig, usig, sizeof(ksig)))         2158         if (usig && copy_from_user(&ksig, usig, sizeof(ksig)))
2328                 return -EFAULT;                                        2159                 return -EFAULT;
2329                                                                        2160 
2330                                                                        2161 
2331         ret = set_user_sigmask(ksig.sigmask, ksig.sigsetsize);         2162         ret = set_user_sigmask(ksig.sigmask, ksig.sigsetsize);
2332         if (ret)                                                       2163         if (ret)
2333                 return ret;                                            2164                 return ret;
2334                                                                        2165 
2335         ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &ts : NULL); 2166         ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &ts : NULL);
2336                                                                        2167 
2337         interrupted = signal_pending(current);                         2168         interrupted = signal_pending(current);
2338         restore_saved_sigmask_unless(interrupted);                     2169         restore_saved_sigmask_unless(interrupted);
2339         if (interrupted && !ret)                                       2170         if (interrupted && !ret)
2340                 ret = -ERESTARTNOHAND;                                 2171                 ret = -ERESTARTNOHAND;
2341                                                                        2172 
2342         return ret;                                                    2173         return ret;
2343 }                                                                      2174 }
2344                                                  2175 
2345 #endif                                           2176 #endif
2346                                                  2177 
2347 #if defined(CONFIG_COMPAT_32BIT_TIME)            2178 #if defined(CONFIG_COMPAT_32BIT_TIME)
2348                                                  2179 
2349 SYSCALL_DEFINE5(io_getevents_time32, __u32, ctx_id,                    2180 SYSCALL_DEFINE5(io_getevents_time32, __u32, ctx_id,
2350                 __s32, min_nr,                                         2181                 __s32, min_nr,
2351                 __s32, nr,                                             2182                 __s32, nr,
2352                 struct io_event __user *, events,                      2183                 struct io_event __user *, events,
2353                 struct old_timespec32 __user *, timeout)               2184                 struct old_timespec32 __user *, timeout)
2354 {                                                                      2185 {
2355         struct timespec64 t;                                           2186         struct timespec64 t;
2356         int ret;                                                       2187         int ret;
2357                                                                        2188 
2358         if (timeout && get_old_timespec32(&t, timeout))                2189         if (timeout && get_old_timespec32(&t, timeout))
2359                 return -EFAULT;                                        2190                 return -EFAULT;
2360                                                                        2191 
2361         ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &t : NULL); 2192         ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &t : NULL);
2362         if (!ret && signal_pending(current))                           2193         if (!ret && signal_pending(current))
2363                 ret = -EINTR;                                          2194                 ret = -EINTR;
2364         return ret;                                                    2195         return ret;
2365 }                                                                      2196 }
2366                                                  2197 
2367 #endif                                           2198 #endif
2368                                                  2199 
2369 #ifdef CONFIG_COMPAT                             2200 #ifdef CONFIG_COMPAT
2370                                                  2201 
2371 struct __compat_aio_sigset {                     2202 struct __compat_aio_sigset {
2372         compat_uptr_t           sigmask;         2203         compat_uptr_t           sigmask;
2373         compat_size_t           sigsetsize;      2204         compat_size_t           sigsetsize;
2374 };                                               2205 };
2375                                                  2206 
2376 #if defined(CONFIG_COMPAT_32BIT_TIME)            2207 #if defined(CONFIG_COMPAT_32BIT_TIME)
2377                                                  2208 
2378 COMPAT_SYSCALL_DEFINE6(io_pgetevents,                                  2209 COMPAT_SYSCALL_DEFINE6(io_pgetevents,
2379                 compat_aio_context_t, ctx_id,                          2210                 compat_aio_context_t, ctx_id,
2380                 compat_long_t, min_nr,                                 2211                 compat_long_t, min_nr,
2381                 compat_long_t, nr,                                     2212                 compat_long_t, nr,
2382                 struct io_event __user *, events,                      2213                 struct io_event __user *, events,
2383                 struct old_timespec32 __user *, timeout,               2214                 struct old_timespec32 __user *, timeout,
2384                 const struct __compat_aio_sigset __user *, usig)       2215                 const struct __compat_aio_sigset __user *, usig)
2385 {                                                                      2216 {
2386         struct __compat_aio_sigset ksig = { 0, };                      2217         struct __compat_aio_sigset ksig = { 0, };
2387         struct timespec64 t;                                           2218         struct timespec64 t;
2388         bool interrupted;                                              2219         bool interrupted;
2389         int ret;                                                       2220         int ret;
2390                                                                        2221 
2391         if (timeout && get_old_timespec32(&t, timeout))                2222         if (timeout && get_old_timespec32(&t, timeout))
2392                 return -EFAULT;                                        2223                 return -EFAULT;
2393                                                                        2224 
2394         if (usig && copy_from_user(&ksig, usig, sizeof(ksig)))         2225         if (usig && copy_from_user(&ksig, usig, sizeof(ksig)))
2395                 return -EFAULT;                                        2226                 return -EFAULT;
2396                                                                        2227 
2397         ret = set_compat_user_sigmask(compat_ptr(ksig.sigmask), ksig.sigsetsize); 2228         ret = set_compat_user_sigmask(compat_ptr(ksig.sigmask), ksig.sigsetsize);
2398         if (ret)                                                       2229         if (ret)
2399                 return ret;                                            2230                 return ret;
2400                                                                        2231 
2401         ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &t : NULL); 2232         ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &t : NULL);
2402                                                                        2233 
2403         interrupted = signal_pending(current);                         2234         interrupted = signal_pending(current);
2404         restore_saved_sigmask_unless(interrupted);                     2235         restore_saved_sigmask_unless(interrupted);
2405         if (interrupted && !ret)                                       2236         if (interrupted && !ret)
2406                 ret = -ERESTARTNOHAND;                                 2237                 ret = -ERESTARTNOHAND;
2407                                                                        2238 
2408         return ret;                                                    2239         return ret;
2409 }                                                                      2240 }
2410                                                  2241 
2411 #endif                                           2242 #endif
2412                                                  2243 
2413 COMPAT_SYSCALL_DEFINE6(io_pgetevents_time64,                           2244 COMPAT_SYSCALL_DEFINE6(io_pgetevents_time64,
2414                 compat_aio_context_t, ctx_id,                          2245                 compat_aio_context_t, ctx_id,
2415                 compat_long_t, min_nr,                                 2246                 compat_long_t, min_nr,
2416                 compat_long_t, nr,                                     2247                 compat_long_t, nr,
2417                 struct io_event __user *, events,                      2248                 struct io_event __user *, events,
2418                 struct __kernel_timespec __user *, timeout,            2249                 struct __kernel_timespec __user *, timeout,
2419                 const struct __compat_aio_sigset __user *, usig)       2250                 const struct __compat_aio_sigset __user *, usig)
2420 {                                                                      2251 {
2421         struct __compat_aio_sigset ksig = { 0, };                      2252         struct __compat_aio_sigset ksig = { 0, };
2422         struct timespec64 t;                                           2253         struct timespec64 t;
2423         bool interrupted;                                              2254         bool interrupted;
2424         int ret;                                                       2255         int ret;
2425                                                                        2256 
2426         if (timeout && get_timespec64(&t, timeout))                    2257         if (timeout && get_timespec64(&t, timeout))
2427                 return -EFAULT;                                        2258                 return -EFAULT;
2428                                                                        2259 
2429         if (usig && copy_from_user(&ksig, usig, sizeof(ksig)))         2260         if (usig && copy_from_user(&ksig, usig, sizeof(ksig)))
2430                 return -EFAULT;                                        2261                 return -EFAULT;
2431                                                                        2262 
2432         ret = set_compat_user_sigmask(compat_ptr(ksig.sigmask), ksig.sigsetsize); 2263         ret = set_compat_user_sigmask(compat_ptr(ksig.sigmask), ksig.sigsetsize);
2433         if (ret)                                                       2264         if (ret)
2434                 return ret;                                            2265                 return ret;
2435                                                                        2266 
2436         ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &t : NULL); 2267         ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &t : NULL);
2437                                                                        2268 
2438         interrupted = signal_pending(current);                         2269         interrupted = signal_pending(current);
2439         restore_saved_sigmask_unless(interrupted);                     2270         restore_saved_sigmask_unless(interrupted);
2440         if (interrupted && !ret)                                       2271         if (interrupted && !ret)
2441                 ret = -ERESTARTNOHAND;                                 2272                 ret = -ERESTARTNOHAND;
2442                                                  2273 
2443         return ret;                              2274         return ret;
2444 }                                                2275 }
2445 #endif                                           2276 #endif
2446                                                  2277 