// SPDX-License-Identifier: GPL-2.0-only
/*
 *  fs/eventfd.c
 *
 *  Copyright (C) 2007  Davide Libenzi <davidel@xmailserver.org>
 *
 */

#include <linux/file.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/sched/signal.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/anon_inodes.h>
#include <linux/syscalls.h>
#include <linux/export.h>
#include <linux/kref.h>
#include <linux/eventfd.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/idr.h>
#include <linux/uio.h>

static DEFINE_IDA(eventfd_ida);

struct eventfd_ctx {
	struct kref kref;
	wait_queue_head_t wqh;
	/*
	 * Every time that a write(2) is performed on an eventfd, the
	 * value of the __u64 being written is added to "count" and a
	 * wakeup is performed on "wqh". If the EFD_SEMAPHORE flag was not
	 * specified, a read(2) will return the "count" value to userspace,
	 * and will reset "count" to zero. The kernel-side eventfd_signal()
	 * also adds to the "count" counter and issues a wakeup.
	 */
	__u64 count;
	unsigned int flags;
	int id;
};

/**
 * eventfd_signal_mask - Increment the event counter
 * @ctx: [in] Pointer to the eventfd context.
 * @mask: [in] poll mask
 *
 * This function is supposed to be called by the kernel in paths that do not
 * allow sleeping. In this function we allow the counter to reach the ULLONG_MAX
 * value, and we signal this as an overflow condition by returning an EPOLLERR
 * to poll(2).
 */
void eventfd_signal_mask(struct eventfd_ctx *ctx, __poll_t mask)
{
	unsigned long flags;

	/*
	 * Deadlock or stack overflow issues can happen if we recurse here
	 * through waitqueue wakeup handlers. If the caller uses potentially
	 * nested waitqueues with custom wakeup handlers, then it should
	 * check eventfd_signal_allowed() before calling this function. If
	 * it returns false, the eventfd_signal() call should be deferred to a
	 * safe context.
	 */
	if (WARN_ON_ONCE(current->in_eventfd))
		return;

	spin_lock_irqsave(&ctx->wqh.lock, flags);
	current->in_eventfd = 1;
	if (ctx->count < ULLONG_MAX)
		ctx->count++;
	if (waitqueue_active(&ctx->wqh))
		wake_up_locked_poll(&ctx->wqh, EPOLLIN | mask);
	current->in_eventfd = 0;
	spin_unlock_irqrestore(&ctx->wqh.lock, flags);
}
EXPORT_SYMBOL_GPL(eventfd_signal_mask);
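
/*
 * A minimal illustrative sketch (not part of the original file): a
 * kernel path that may run from a nested waitqueue wakeup handler can
 * follow the rule described above by checking eventfd_signal_allowed()
 * and deferring the signal when it returns false.  "my_dev" and its
 * work item are hypothetical names used only for this example;
 * eventfd_signal() is the <linux/eventfd.h> wrapper around
 * eventfd_signal_mask().
 *
 *	static void my_dev_notify(struct my_dev *dev)
 *	{
 *		if (eventfd_signal_allowed())
 *			eventfd_signal(dev->trigger);
 *		else
 *			schedule_work(&dev->notify_work); // signal from the work handler instead
 *	}
 */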

static void eventfd_free_ctx(struct eventfd_ctx *ctx)
{
	if (ctx->id >= 0)
		ida_free(&eventfd_ida, ctx->id);
	kfree(ctx);
}

static void eventfd_free(struct kref *kref)
{
	struct eventfd_ctx *ctx = container_of(kref, struct eventfd_ctx, kref);

	eventfd_free_ctx(ctx);
}

/**
 * eventfd_ctx_put - Releases a reference to the internal eventfd context.
 * @ctx: [in] Pointer to eventfd context.
 *
 * The eventfd context reference must have been previously acquired either
 * with eventfd_ctx_fdget() or eventfd_ctx_fileget().
 */
void eventfd_ctx_put(struct eventfd_ctx *ctx)
{
	kref_put(&ctx->kref, eventfd_free);
}
EXPORT_SYMBOL_GPL(eventfd_ctx_put);

static int eventfd_release(struct inode *inode, struct file *file)
{
	struct eventfd_ctx *ctx = file->private_data;

	wake_up_poll(&ctx->wqh, EPOLLHUP);
	eventfd_ctx_put(ctx);
	return 0;
}

static __poll_t eventfd_poll(struct file *file, poll_table *wait)
{
	struct eventfd_ctx *ctx = file->private_data;
	__poll_t events = 0;
	u64 count;

	poll_wait(file, &ctx->wqh, wait);

	/*
	 * All writes to ctx->count occur within ctx->wqh.lock.  This read
	 * can be done outside ctx->wqh.lock because we know that poll_wait
	 * takes that lock (through add_wait_queue) if our caller will sleep.
	 *
	 * The read _can_ therefore seep into add_wait_queue's critical
	 * section, but cannot move above it!  add_wait_queue's spin_lock acts
	 * as an acquire barrier and ensures that the read be ordered properly
	 * against the writes.  The following CAN happen and is safe:
	 *
	 *     poll                               write
	 *     -----------------                  ------------
	 *     lock ctx->wqh.lock (in poll_wait)
	 *     count = ctx->count
	 *     __add_wait_queue
	 *     unlock ctx->wqh.lock
	 *                                        lock ctx->wqh.lock
	 *                                        ctx->count += n
	 *                                        if (waitqueue_active)
	 *                                          wake_up_locked_poll
	 *                                        unlock ctx->wqh.lock
	 *     eventfd_poll returns 0
	 *
	 * but the following, which would miss a wakeup, cannot happen:
	 *
	 *     poll                               write
	 *     -----------------                  ------------
	 *     count = ctx->count (INVALID!)
	 *                                        lock ctx->wqh.lock
	 *                                        ctx->count += n
	 *                                        **waitqueue is empty**
	 *                                        **no wake_up_locked_poll!**
	 *                                        unlock ctx->wqh.lock
	 *     lock ctx->wqh.lock (in poll_wait)
	 *     __add_wait_queue
	 *     unlock ctx->wqh.lock
	 *     eventfd_poll returns 0
	 */
	count = READ_ONCE(ctx->count);

	if (count > 0)
		events |= EPOLLIN;
	if (count == ULLONG_MAX)
		events |= EPOLLERR;
	if (ULLONG_MAX - 1 > count)
		events |= EPOLLOUT;

	return events;
}
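
/*
 * An illustrative sketch (not part of the original file) of what the
 * mask computed above means to userspace: with the counter at zero only
 * EPOLLOUT is reported, a non-zero counter adds EPOLLIN, and a counter
 * pinned at ULLONG_MAX reports EPOLLERR to flag overflow.
 *
 *	struct pollfd pfd = { .fd = efd, .events = POLLIN };
 *
 *	if (poll(&pfd, 1, -1) == 1 && (pfd.revents & POLLIN)) {
 *		uint64_t v;
 *		read(efd, &v, sizeof(v));	// drain (or decrement) the counter
 *	}
 */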

void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt)
{
	lockdep_assert_held(&ctx->wqh.lock);

	*cnt = ((ctx->flags & EFD_SEMAPHORE) && ctx->count) ? 1 : ctx->count;
	ctx->count -= *cnt;
}
EXPORT_SYMBOL_GPL(eventfd_ctx_do_read);

/**
 * eventfd_ctx_remove_wait_queue - Reads the current counter and removes the wait queue.
 * @ctx: [in] Pointer to eventfd context.
 * @wait: [in] Wait queue to be removed.
 * @cnt: [out] Pointer to the 64-bit counter value.
 *
 * Returns %0 if successful, or the following error code:
 *
 * -EAGAIN      : The operation would have blocked.
 *
 * This is used to atomically remove a wait queue entry from the eventfd wait
 * queue head, and read/reset the counter value.
 */
int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_entry_t *wait,
				  __u64 *cnt)
{
	unsigned long flags;

	spin_lock_irqsave(&ctx->wqh.lock, flags);
	eventfd_ctx_do_read(ctx, cnt);
	__remove_wait_queue(&ctx->wqh, wait);
	if (*cnt != 0 && waitqueue_active(&ctx->wqh))
		wake_up_locked_poll(&ctx->wqh, EPOLLOUT);
	spin_unlock_irqrestore(&ctx->wqh.lock, flags);

	return *cnt != 0 ? 0 : -EAGAIN;
}
EXPORT_SYMBOL_GPL(eventfd_ctx_remove_wait_queue);

static ssize_t eventfd_read(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct eventfd_ctx *ctx = file->private_data;
	__u64 ucnt = 0;

	if (iov_iter_count(to) < sizeof(ucnt))
		return -EINVAL;
	spin_lock_irq(&ctx->wqh.lock);
	if (!ctx->count) {
		if ((file->f_flags & O_NONBLOCK) ||
		    (iocb->ki_flags & IOCB_NOWAIT)) {
			spin_unlock_irq(&ctx->wqh.lock);
			return -EAGAIN;
		}

		if (wait_event_interruptible_locked_irq(ctx->wqh, ctx->count)) {
			spin_unlock_irq(&ctx->wqh.lock);
			return -ERESTARTSYS;
		}
	}
	eventfd_ctx_do_read(ctx, &ucnt);
	current->in_eventfd = 1;
	if (waitqueue_active(&ctx->wqh))
		wake_up_locked_poll(&ctx->wqh, EPOLLOUT);
	current->in_eventfd = 0;
	spin_unlock_irq(&ctx->wqh.lock);
	if (unlikely(copy_to_iter(&ucnt, sizeof(ucnt), to) != sizeof(ucnt)))
		return -EFAULT;

	return sizeof(ucnt);
}
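
/*
 * An illustrative sketch (not part of the original file) of the two
 * read modes implemented by eventfd_ctx_do_read() above.  Assuming the
 * counter currently holds 3:
 *
 *	uint64_t v;
 *	read(efd, &v, sizeof(v));
 *	// default mode:        v == 3, counter reset to 0
 *	// EFD_SEMAPHORE mode:  v == 1, counter decremented to 2
 *
 * With the counter at zero the read blocks, or fails with EAGAIN for
 * O_NONBLOCK/IOCB_NOWAIT callers, exactly as coded above.
 */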

static ssize_t eventfd_write(struct file *file, const char __user *buf, size_t count,
			     loff_t *ppos)
{
	struct eventfd_ctx *ctx = file->private_data;
	ssize_t res;
	__u64 ucnt;

	if (count != sizeof(ucnt))
		return -EINVAL;
	if (copy_from_user(&ucnt, buf, sizeof(ucnt)))
		return -EFAULT;
	if (ucnt == ULLONG_MAX)
		return -EINVAL;
	spin_lock_irq(&ctx->wqh.lock);
	res = -EAGAIN;
	if (ULLONG_MAX - ctx->count > ucnt)
		res = sizeof(ucnt);
	else if (!(file->f_flags & O_NONBLOCK)) {
		res = wait_event_interruptible_locked_irq(ctx->wqh,
				ULLONG_MAX - ctx->count > ucnt);
		if (!res)
			res = sizeof(ucnt);
	}
	if (likely(res > 0)) {
		ctx->count += ucnt;
		current->in_eventfd = 1;
		if (waitqueue_active(&ctx->wqh))
			wake_up_locked_poll(&ctx->wqh, EPOLLIN);
		current->in_eventfd = 0;
	}
	spin_unlock_irq(&ctx->wqh.lock);

	return res;
}
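
/*
 * An illustrative sketch (not part of the original file) of the write
 * contract enforced above: a write adds its 8-byte value to the
 * counter, the value ULLONG_MAX itself is rejected, and a write that
 * would push the counter past ULLONG_MAX - 1 blocks (or fails with
 * EAGAIN under O_NONBLOCK).
 *
 *	uint64_t one = 1;
 *	write(efd, &one, sizeof(one));	// counter += 1, wakes readers
 *
 *	uint64_t bad = ULLONG_MAX;
 *	write(efd, &bad, sizeof(bad));	// fails with EINVAL
 */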

#ifdef CONFIG_PROC_FS
static void eventfd_show_fdinfo(struct seq_file *m, struct file *f)
{
	struct eventfd_ctx *ctx = f->private_data;
	__u64 cnt;

	spin_lock_irq(&ctx->wqh.lock);
	cnt = ctx->count;
	spin_unlock_irq(&ctx->wqh.lock);

	seq_printf(m,
		   "eventfd-count: %16llx\n"
		   "eventfd-id: %d\n"
		   "eventfd-semaphore: %d\n",
		   cnt,
		   ctx->id,
		   !!(ctx->flags & EFD_SEMAPHORE));
}
#endif

static const struct file_operations eventfd_fops = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo	= eventfd_show_fdinfo,
#endif
	.release	= eventfd_release,
	.poll		= eventfd_poll,
	.read_iter	= eventfd_read,
	.write		= eventfd_write,
	.llseek		= noop_llseek,
};

/**
 * eventfd_fget - Acquire a reference of an eventfd file descriptor.
 * @fd: [in] Eventfd file descriptor.
 *
 * Returns a pointer to the eventfd file structure in case of success, or the
 * following error pointer:
 *
 * -EBADF    : Invalid @fd file descriptor.
 * -EINVAL   : The @fd file descriptor is not an eventfd file.
 */
struct file *eventfd_fget(int fd)
{
	struct file *file;

	file = fget(fd);
	if (!file)
		return ERR_PTR(-EBADF);
	if (file->f_op != &eventfd_fops) {
		fput(file);
		return ERR_PTR(-EINVAL);
	}

	return file;
}
EXPORT_SYMBOL_GPL(eventfd_fget);

/**
 * eventfd_ctx_fdget - Acquires a reference to the internal eventfd context.
 * @fd: [in] Eventfd file descriptor.
 *
 * Returns a pointer to the internal eventfd context, otherwise the error
 * pointers returned by the following functions:
 *
 * eventfd_fget
 */
struct eventfd_ctx *eventfd_ctx_fdget(int fd)
{
	struct eventfd_ctx *ctx;
	struct fd f = fdget(fd);
	if (!f.file)
		return ERR_PTR(-EBADF);
	ctx = eventfd_ctx_fileget(f.file);
	fdput(f);
	return ctx;
}
EXPORT_SYMBOL_GPL(eventfd_ctx_fdget);
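
/*
 * A minimal illustrative sketch (not part of the original file) of the
 * usual in-kernel consumer pattern built on eventfd_ctx_fdget(): a
 * driver receives an eventfd file descriptor from userspace (here via
 * a hypothetical ioctl handler), keeps the context reference, signals
 * it on completion, and releases it with eventfd_ctx_put() on teardown.
 * "my_dev" and "trigger" are assumed names for this example only.
 *
 *	static int my_dev_set_trigger(struct my_dev *dev, int fd)
 *	{
 *		struct eventfd_ctx *ctx = eventfd_ctx_fdget(fd);
 *
 *		if (IS_ERR(ctx))
 *			return PTR_ERR(ctx);
 *		dev->trigger = ctx;	// later: eventfd_signal(dev->trigger);
 *		return 0;		// teardown: eventfd_ctx_put(dev->trigger);
 *	}
 */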

/**
 * eventfd_ctx_fileget - Acquires a reference to the internal eventfd context.
 * @file: [in] Eventfd file pointer.
 *
 * Returns a pointer to the internal eventfd context, otherwise the error
 * pointer:
 *
 * -EINVAL   : The @file pointer is not an eventfd file.
 */
struct eventfd_ctx *eventfd_ctx_fileget(struct file *file)
{
	struct eventfd_ctx *ctx;

	if (file->f_op != &eventfd_fops)
		return ERR_PTR(-EINVAL);

	ctx = file->private_data;
	kref_get(&ctx->kref);
	return ctx;
}
EXPORT_SYMBOL_GPL(eventfd_ctx_fileget);

static int do_eventfd(unsigned int count, int flags)
{
	struct eventfd_ctx *ctx;
	struct file *file;
	int fd;

	/* Check the EFD_* constants for consistency.  */
	BUILD_BUG_ON(EFD_CLOEXEC != O_CLOEXEC);
	BUILD_BUG_ON(EFD_NONBLOCK != O_NONBLOCK);
	BUILD_BUG_ON(EFD_SEMAPHORE != (1 << 0));

	if (flags & ~EFD_FLAGS_SET)
		return -EINVAL;

	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	kref_init(&ctx->kref);
	init_waitqueue_head(&ctx->wqh);
	ctx->count = count;
	ctx->flags = flags;
	ctx->id = ida_alloc(&eventfd_ida, GFP_KERNEL);

	flags &= EFD_SHARED_FCNTL_FLAGS;
	flags |= O_RDWR;
	fd = get_unused_fd_flags(flags);
	if (fd < 0)
		goto err;

	file = anon_inode_getfile("[eventfd]", &eventfd_fops, ctx, flags);
	if (IS_ERR(file)) {
		put_unused_fd(fd);
		fd = PTR_ERR(file);
		goto err;
	}

	file->f_mode |= FMODE_NOWAIT;
	fd_install(fd, file);
	return fd;
err:
	eventfd_free_ctx(ctx);
	return fd;
}
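
/*
 * An illustrative sketch (not part of the original file) of creating
 * an eventfd from userspace with the flags validated above.  The
 * BUILD_BUG_ONs guarantee that EFD_CLOEXEC and EFD_NONBLOCK map
 * directly onto O_CLOEXEC and O_NONBLOCK:
 *
 *	int efd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
 *	if (efd < 0)
 *		perror("eventfd");
 */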

SYSCALL_DEFINE2(eventfd2, unsigned int, count, int, flags)
{
	return do_eventfd(count, flags);
}

SYSCALL_DEFINE1(eventfd, unsigned int, count)
{
	return do_eventfd(count, 0);
}
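
/*
 * An end-to-end illustrative sketch (not part of the original file) of
 * the syscalls defined above, exercised from userspace:
 *
 *	#include <sys/eventfd.h>
 *	#include <stdint.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		uint64_t v = 5, out;
 *		int efd = eventfd(0, 0);
 *
 *		if (efd < 0)
 *			return 1;
 *		write(efd, &v, sizeof(v));	// counter = 5
 *		read(efd, &out, sizeof(out));	// out == 5, counter reset to 0
 *		close(efd);
 *		return 0;
 *	}
 */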