// SPDX-License-Identifier: GPL-2.0-only
/*
 *  fs/eventfd.c
 *
 *  Copyright (C) 2007  Davide Libenzi <davidel@xmailserver.org>
 *
 */

#include <linux/file.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/sched/signal.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/anon_inodes.h>
#include <linux/syscalls.h>
#include <linux/export.h>
#include <linux/kref.h>
#include <linux/eventfd.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/idr.h>
#include <linux/uio.h>

static DEFINE_IDA(eventfd_ida);

struct eventfd_ctx {
	struct kref kref;
	wait_queue_head_t wqh;
	/*
	 * Every time that a write(2) is performed on an eventfd, the
	 * value of the __u64 being written is added to "count" and a
	 * wakeup is performed on "wqh". If the EFD_SEMAPHORE flag was not
	 * specified, a read(2) will return the "count" value to userspace
	 * and will reset "count" to zero. The kernel-side eventfd_signal()
	 * also adds to the "count" counter and issues a wakeup.
	 */
	__u64 count;
	unsigned int flags;
	int id;
};

/**
 * eventfd_signal_mask - Increment the event counter
 * @ctx: [in] Pointer to the eventfd context.
 * @mask: [in] poll mask
 *
 * This function is supposed to be called by the kernel in paths that do not
 * allow sleeping. In this function we allow the counter to reach the
 * ULLONG_MAX value, and we signal this as an overflow condition by returning
 * an EPOLLERR to poll(2).
 */
void eventfd_signal_mask(struct eventfd_ctx *ctx, __poll_t mask)
{
	unsigned long flags;

	/*
	 * Deadlock or stack overflow issues can happen if we recurse here
	 * through waitqueue wakeup handlers. If the caller uses potentially
	 * nested waitqueues with custom wakeup handlers, then it should
	 * check eventfd_signal_allowed() before calling this function. If
	 * it returns false, the eventfd_signal() call should be deferred to a
	 * safe context.
	 */
	if (WARN_ON_ONCE(current->in_eventfd))
		return;

	spin_lock_irqsave(&ctx->wqh.lock, flags);
	current->in_eventfd = 1;
	if (ctx->count < ULLONG_MAX)
		ctx->count++;
	if (waitqueue_active(&ctx->wqh))
		wake_up_locked_poll(&ctx->wqh, EPOLLIN | mask);
	current->in_eventfd = 0;
	spin_unlock_irqrestore(&ctx->wqh.lock, flags);
}
EXPORT_SYMBOL_GPL(eventfd_signal_mask);
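
/*
 * Editor's illustration, not part of fs/eventfd.c: a hedged sketch of how
 * a caller on a potentially recursive wakeup path is expected to honour
 * the comment above. eventfd_signal_allowed(), eventfd_signal() and
 * schedule_work() are real kernel APIs; struct my_source and its work
 * item (which would signal src->trigger from process context) are
 * hypothetical.
 */
#if 0	/* illustrative sketch only */
struct my_source {
	struct eventfd_ctx *trigger;
	struct work_struct signal_work;	/* signals "trigger" when it runs */
};

static void my_source_notify(struct my_source *src)
{
	if (eventfd_signal_allowed())
		eventfd_signal(src->trigger);		/* no eventfd recursion */
	else
		schedule_work(&src->signal_work);	/* defer to a safe context */
}
#endif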

static void eventfd_free_ctx(struct eventfd_ctx *ctx)
{
	if (ctx->id >= 0)
		ida_free(&eventfd_ida, ctx->id);
	kfree(ctx);
}

static void eventfd_free(struct kref *kref)
{
	struct eventfd_ctx *ctx = container_of(kref, struct eventfd_ctx, kref);

	eventfd_free_ctx(ctx);
}

/**
 * eventfd_ctx_put - Releases a reference to the internal eventfd context.
 * @ctx: [in] Pointer to eventfd context.
 *
 * The eventfd context reference must have been previously acquired either
 * with eventfd_ctx_fdget() or eventfd_ctx_fileget().
 */
void eventfd_ctx_put(struct eventfd_ctx *ctx)
{
	kref_put(&ctx->kref, eventfd_free);
}
EXPORT_SYMBOL_GPL(eventfd_ctx_put);

static int eventfd_release(struct inode *inode, struct file *file)
{
	struct eventfd_ctx *ctx = file->private_data;

	wake_up_poll(&ctx->wqh, EPOLLHUP);
	eventfd_ctx_put(ctx);
	return 0;
}

static __poll_t eventfd_poll(struct file *file, poll_table *wait)
{
	struct eventfd_ctx *ctx = file->private_data;
	__poll_t events = 0;
	u64 count;

	poll_wait(file, &ctx->wqh, wait);

	/*
	 * All writes to ctx->count occur within ctx->wqh.lock.  This read
	 * can be done outside ctx->wqh.lock because we know that poll_wait
	 * takes that lock (through add_wait_queue) if our caller will sleep.
	 *
	 * The read _can_ therefore seep into add_wait_queue's critical
	 * section, but cannot move above it!  add_wait_queue's spin_lock acts
	 * as an acquire barrier and ensures that the read be ordered properly
	 * against the writes.  The following CAN happen and is safe:
	 *
	 *     poll                               write
	 *     -----------------                  ------------
	 *     lock ctx->wqh.lock (in poll_wait)
	 *     count = ctx->count
	 *     __add_wait_queue
	 *     unlock ctx->wqh.lock
	 *                                        lock ctx->wqh.lock
	 *                                        ctx->count += n
	 *                                        if (waitqueue_active)
	 *                                          wake_up_locked_poll
	 *                                        unlock ctx->wqh.lock
	 *     eventfd_poll returns 0
	 *
	 * but the following, which would miss a wakeup, cannot happen:
	 *
	 *     poll                               write
	 *     -----------------                  ------------
	 *     count = ctx->count (INVALID!)
	 *                                        lock ctx->wqh.lock
	 *                                        ctx->count += n
	 *                                        **waitqueue_active is false**
	 *                                        **no wake_up_locked_poll!**
	 *                                        unlock ctx->wqh.lock
	 *     lock ctx->wqh.lock (in poll_wait)
	 *     __add_wait_queue
	 *     unlock ctx->wqh.lock
	 *     eventfd_poll returns 0
	 */
	count = READ_ONCE(ctx->count);

	if (count > 0)
		events |= EPOLLIN;
	if (count == ULLONG_MAX)
		events |= EPOLLERR;
	if (ULLONG_MAX - 1 > count)
		events |= EPOLLOUT;

	return events;
}

void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt)
{
	lockdep_assert_held(&ctx->wqh.lock);

	*cnt = ((ctx->flags & EFD_SEMAPHORE) && ctx->count) ? 1 : ctx->count;
	ctx->count -= *cnt;
}
EXPORT_SYMBOL_GPL(eventfd_ctx_do_read);
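
/*
 * Editor's note -- a worked example of the two read flavours implemented
 * above. Assume ctx->count == 3:
 *
 *   - default mode:   read(2) returns 3 and resets the counter to 0;
 *   - EFD_SEMAPHORE:  read(2) returns 1 and leaves the counter at 2, so
 *                     three reads succeed before a fourth blocks (or
 *                     fails with EAGAIN under O_NONBLOCK).
 */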

/**
 * eventfd_ctx_remove_wait_queue - Read the current counter and removes wait queue.
 * @ctx: [in] Pointer to eventfd context.
 * @wait: [in] Wait queue entry to be removed.
 * @cnt: [out] Pointer to the 64-bit counter value.
 *
 * Returns %0 if successful, or the following error code:
 *
 * -EAGAIN      : The operation would have blocked.
 *
 * This is used to atomically remove a wait queue entry from the eventfd wait
 * queue head, and read/reset the counter value.
 */
int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_entry_t *wait,
				  __u64 *cnt)
{
	unsigned long flags;

	spin_lock_irqsave(&ctx->wqh.lock, flags);
	eventfd_ctx_do_read(ctx, cnt);
	__remove_wait_queue(&ctx->wqh, wait);
	if (*cnt != 0 && waitqueue_active(&ctx->wqh))
		wake_up_locked_poll(&ctx->wqh, EPOLLOUT);
	spin_unlock_irqrestore(&ctx->wqh.lock, flags);

	return *cnt != 0 ? 0 : -EAGAIN;
}
EXPORT_SYMBOL_GPL(eventfd_ctx_remove_wait_queue);

static ssize_t eventfd_read(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct eventfd_ctx *ctx = file->private_data;
	__u64 ucnt = 0;

	if (iov_iter_count(to) < sizeof(ucnt))
		return -EINVAL;
	spin_lock_irq(&ctx->wqh.lock);
	if (!ctx->count) {
		if ((file->f_flags & O_NONBLOCK) ||
		    (iocb->ki_flags & IOCB_NOWAIT)) {
			spin_unlock_irq(&ctx->wqh.lock);
			return -EAGAIN;
		}

		if (wait_event_interruptible_locked_irq(ctx->wqh, ctx->count)) {
			spin_unlock_irq(&ctx->wqh.lock);
			return -ERESTARTSYS;
		}
	}
	eventfd_ctx_do_read(ctx, &ucnt);
	current->in_eventfd = 1;
	if (waitqueue_active(&ctx->wqh))
		wake_up_locked_poll(&ctx->wqh, EPOLLOUT);
	current->in_eventfd = 0;
	spin_unlock_irq(&ctx->wqh.lock);
	if (unlikely(copy_to_iter(&ucnt, sizeof(ucnt), to) != sizeof(ucnt)))
		return -EFAULT;

	return sizeof(ucnt);
}

static ssize_t eventfd_write(struct file *file, const char __user *buf, size_t count,
			     loff_t *ppos)
{
	struct eventfd_ctx *ctx = file->private_data;
	ssize_t res;
	__u64 ucnt;

	if (count != sizeof(ucnt))
		return -EINVAL;
	if (copy_from_user(&ucnt, buf, sizeof(ucnt)))
		return -EFAULT;
	if (ucnt == ULLONG_MAX)
		return -EINVAL;
	spin_lock_irq(&ctx->wqh.lock);
	res = -EAGAIN;
	if (ULLONG_MAX - ctx->count > ucnt)
		res = sizeof(ucnt);
	else if (!(file->f_flags & O_NONBLOCK)) {
		res = wait_event_interruptible_locked_irq(ctx->wqh,
				ULLONG_MAX - ctx->count > ucnt);
		if (!res)
			res = sizeof(ucnt);
	}
	if (likely(res > 0)) {
		ctx->count += ucnt;
		current->in_eventfd = 1;
		if (waitqueue_active(&ctx->wqh))
			wake_up_locked_poll(&ctx->wqh, EPOLLIN);
		current->in_eventfd = 0;
	}
	spin_unlock_irq(&ctx->wqh.lock);

	return res;
}
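
/*
 * Editor's illustration, not part of fs/eventfd.c: the write-side bounds
 * enforced above, as seen from userspace. "efd" is assumed to be an
 * eventfd opened with EFD_NONBLOCK and an empty counter; error handling
 * is omitted.
 */
#if 0	/* illustrative sketch only */
	uint64_t v;

	v = ULLONG_MAX;
	write(efd, &v, sizeof(v));	/* fails with EINVAL: ULLONG_MAX is reserved */
	v = ULLONG_MAX - 1;
	write(efd, &v, sizeof(v));	/* succeeds: fills the counter completely */
	v = 1;
	write(efd, &v, sizeof(v));	/* fails with EAGAIN (would overflow); a
					 * blocking eventfd would sleep here instead */
#endif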

#ifdef CONFIG_PROC_FS
static void eventfd_show_fdinfo(struct seq_file *m, struct file *f)
{
	struct eventfd_ctx *ctx = f->private_data;
	__u64 cnt;

	spin_lock_irq(&ctx->wqh.lock);
	cnt = ctx->count;
	spin_unlock_irq(&ctx->wqh.lock);

	seq_printf(m,
		   "eventfd-count: %16llx\n"
		   "eventfd-id: %d\n"
		   "eventfd-semaphore: %d\n",
		   cnt,
		   ctx->id,
		   !!(ctx->flags & EFD_SEMAPHORE));
}
#endif

static const struct file_operations eventfd_fops = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo	= eventfd_show_fdinfo,
#endif
	.release	= eventfd_release,
	.poll		= eventfd_poll,
	.read_iter	= eventfd_read,
	.write		= eventfd_write,
	.llseek		= noop_llseek,
};

/**
 * eventfd_fget - Acquire a reference to an eventfd file descriptor.
 * @fd: [in] Eventfd file descriptor.
 *
 * Returns a pointer to the eventfd file structure in case of success, or the
 * following error pointer:
 *
 * -EBADF    : Invalid @fd file descriptor.
 * -EINVAL   : The @fd file descriptor is not an eventfd file.
 */
struct file *eventfd_fget(int fd)
{
	struct file *file;

	file = fget(fd);
	if (!file)
		return ERR_PTR(-EBADF);
	if (file->f_op != &eventfd_fops) {
		fput(file);
		return ERR_PTR(-EINVAL);
	}

	return file;
}
EXPORT_SYMBOL_GPL(eventfd_fget);
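
/*
 * Editor's note: with the fdinfo handler above, /proc/<pid>/fdinfo/<fd>
 * for an eventfd includes entries like the following (values are
 * illustrative; eventfd-count is printed as a 16-wide hex field):
 *
 *	eventfd-count:                3
 *	eventfd-id: 4
 *	eventfd-semaphore: 1
 */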

/**
 * eventfd_ctx_fdget - Acquires a reference to the internal eventfd context.
 * @fd: [in] Eventfd file descriptor.
 *
 * Returns a pointer to the internal eventfd context, otherwise the error
 * pointers returned by the following functions:
 *
 * eventfd_fget
 */
struct eventfd_ctx *eventfd_ctx_fdget(int fd)
{
	struct eventfd_ctx *ctx;
	struct fd f = fdget(fd);
	if (!f.file)
		return ERR_PTR(-EBADF);
	ctx = eventfd_ctx_fileget(f.file);
	fdput(f);
	return ctx;
}
EXPORT_SYMBOL_GPL(eventfd_ctx_fdget);

/**
 * eventfd_ctx_fileget - Acquires a reference to the internal eventfd context.
 * @file: [in] Eventfd file pointer.
 *
 * Returns a pointer to the internal eventfd context, otherwise the error
 * pointer:
 *
 * -EINVAL   : The @file pointer is not an eventfd file.
 */
struct eventfd_ctx *eventfd_ctx_fileget(struct file *file)
{
	struct eventfd_ctx *ctx;

	if (file->f_op != &eventfd_fops)
		return ERR_PTR(-EINVAL);

	ctx = file->private_data;
	kref_get(&ctx->kref);
	return ctx;
}
EXPORT_SYMBOL_GPL(eventfd_ctx_fileget);
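
/*
 * Editor's illustration, not part of fs/eventfd.c: the typical consumer
 * pattern for the two helpers above -- resolve a user-supplied fd to a
 * context once, signal it from a completion path, and drop the reference
 * on teardown. struct my_dev and its "notify" member are hypothetical.
 */
#if 0	/* illustrative sketch only */
static int my_dev_set_notify_fd(struct my_dev *dev, int fd)
{
	struct eventfd_ctx *ctx;

	ctx = eventfd_ctx_fdget(fd);	/* takes its own reference, so the
					 * caller may close the fd afterwards */
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);
	dev->notify = ctx;
	return 0;
}

static void my_dev_complete(struct my_dev *dev)
{
	eventfd_signal(dev->notify);	/* bumps the count, wakes poll/read */
}

static void my_dev_teardown(struct my_dev *dev)
{
	eventfd_ctx_put(dev->notify);	/* balances eventfd_ctx_fdget() */
}
#endif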

static int do_eventfd(unsigned int count, int flags)
{
	struct eventfd_ctx *ctx;
	struct file *file;
	int fd;

	/* Check the EFD_* constants for consistency.  */
	BUILD_BUG_ON(EFD_CLOEXEC != O_CLOEXEC);
	BUILD_BUG_ON(EFD_NONBLOCK != O_NONBLOCK);
	BUILD_BUG_ON(EFD_SEMAPHORE != (1 << 0));

	if (flags & ~EFD_FLAGS_SET)
		return -EINVAL;

	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	kref_init(&ctx->kref);
	init_waitqueue_head(&ctx->wqh);
	ctx->count = count;
	ctx->flags = flags;
	ctx->id = ida_alloc(&eventfd_ida, GFP_KERNEL);

	flags &= EFD_SHARED_FCNTL_FLAGS;
	flags |= O_RDWR;
	fd = get_unused_fd_flags(flags);
	if (fd < 0)
		goto err;

	file = anon_inode_getfile("[eventfd]", &eventfd_fops, ctx, flags);
	if (IS_ERR(file)) {
		put_unused_fd(fd);
		fd = PTR_ERR(file);
		goto err;
	}

	file->f_mode |= FMODE_NOWAIT;
	fd_install(fd, file);
	return fd;
err:
	eventfd_free_ctx(ctx);
	return fd;
}

SYSCALL_DEFINE2(eventfd2, unsigned int, count, int, flags)
{
	return do_eventfd(count, flags);
}

SYSCALL_DEFINE1(eventfd, unsigned int, count)
{
	return do_eventfd(count, 0);
}
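
/*
 * Editor's appendix, not part of fs/eventfd.c: a minimal standalone
 * userspace program exercising the syscalls defined above, relying only
 * on documented eventfd(2) behaviour. Build with e.g.:
 * cc -o eventfd-demo eventfd-demo.c
 */
#if 0	/* illustrative userspace code */
#include <sys/eventfd.h>
#include <poll.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	uint64_t v;
	struct pollfd pfd;
	int efd = eventfd(0, EFD_CLOEXEC);	/* counter starts at 0 */

	if (efd < 0) {
		perror("eventfd");
		return 1;
	}

	v = 2;
	write(efd, &v, sizeof(v));		/* count = 2 */
	v = 3;
	write(efd, &v, sizeof(v));		/* count = 5 */

	pfd.fd = efd;
	pfd.events = POLLIN;
	poll(&pfd, 1, 0);			/* reports POLLIN: count > 0 */

	read(efd, &v, sizeof(v));		/* default mode: v = 5, count = 0 */
	printf("drained %llu\n", (unsigned long long)v);

	close(efd);
	return 0;
}
#endif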