
TOMOYO Linux Cross Reference
Linux/fs/eventpoll.c


Diff markup

Differences between /fs/eventpoll.c (Version linux-6.11.5) and /fs/eventpoll.c (Version linux-2.4.37.11). fs/eventpoll.c does not exist in linux-2.4.37.11, so the old side of the diff is empty; what follows is effectively the full linux-6.11.5 source.


// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  fs/eventpoll.c (Efficient event retrieval implementation)
 *  Copyright (C) 2001,...,2009  Davide Libenzi
 *
 *  Davide Libenzi <davidel@xmailserver.org>
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/string.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/spinlock.h>
#include <linux/syscalls.h>
#include <linux/rbtree.h>
#include <linux/wait.h>
#include <linux/eventpoll.h>
#include <linux/mount.h>
#include <linux/bitops.h>
#include <linux/mutex.h>
#include <linux/anon_inodes.h>
#include <linux/device.h>
#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/mman.h>
#include <linux/atomic.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/compat.h>
#include <linux/rculist.h>
#include <linux/capability.h>
#include <net/busy_poll.h>

/*
 * LOCKING:
 * There are three level of locking required by epoll :
 *
 * 1) epnested_mutex (mutex)
 * 2) ep->mtx (mutex)
 * 3) ep->lock (rwlock)
 *
 * The acquire order is the one listed above, from 1 to 3.
 * We need a rwlock (ep->lock) because we manipulate objects
 * from inside the poll callback, that might be triggered from
 * a wake_up() that in turn might be called from IRQ context.
 * So we can't sleep inside the poll callback and hence we need
 * a spinlock. During the event transfer loop (from kernel to
 * user space) we could end up sleeping due a copy_to_user(), so
 * we need a lock that will allow us to sleep. This lock is a
 * mutex (ep->mtx). It is acquired during the event transfer loop,
 * during epoll_ctl(EPOLL_CTL_DEL) and during eventpoll_release_file().
 * The epnested_mutex is acquired when inserting an epoll fd onto another
 * epoll fd. We do this so that we walk the epoll tree and ensure that this
 * insertion does not create a cycle of epoll file descriptors, which
 * could lead to deadlock. We need a global mutex to prevent two
 * simultaneous inserts (A into B and B into A) from racing and
 * constructing a cycle without either insert observing that it is
 * going to.
 * It is necessary to acquire multiple "ep->mtx"es at once in the
 * case when one epoll fd is added to another. In this case, we
 * always acquire the locks in the order of nesting (i.e. after
 * epoll_ctl(e1, EPOLL_CTL_ADD, e2), e1->mtx will always be acquired
 * before e2->mtx). Since we disallow cycles of epoll file
 * descriptors, this ensures that the mutexes are well-ordered. In
 * order to communicate this nesting to lockdep, when walking a tree
 * of epoll file descriptors, we use the current nesting level as
 * the lockdep subkey.
 * It is possible to drop the "ep->mtx" and to use the global
 * mutex "epnested_mutex" (together with "ep->lock") to have it working,
 * but having "ep->mtx" will make the interface more scalable.
 * Events that require holding "epnested_mutex" are very rare, while for
 * normal operations the epoll private "ep->mtx" will guarantee
 * a better scalability.
 */
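
The cycle ban described above is visible from user space: once epoll fd "a" watches epoll fd "b", adding "a" back into "b" fails with ELOOP. A minimal sketch (hypothetical demo, error handling trimmed):

    #include <stdio.h>
    #include <sys/epoll.h>

    int main(void)
    {
            int a = epoll_create1(0);
            int b = epoll_create1(0);
            struct epoll_event ev = { .events = EPOLLIN };

            /* a watches b: allowed, the tree walk finds no cycle */
            ev.data.fd = b;
            if (epoll_ctl(a, EPOLL_CTL_ADD, b, &ev))
                    perror("add b to a");

            /* b watches a: would close a cycle, rejected with ELOOP */
            ev.data.fd = a;
            if (epoll_ctl(b, EPOLL_CTL_ADD, a, &ev))
                    perror("add a to b");
            return 0;
    }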

/* Epoll private bits inside the event mask */
#define EP_PRIVATE_BITS (EPOLLWAKEUP | EPOLLONESHOT | EPOLLET | EPOLLEXCLUSIVE)

#define EPOLLINOUT_BITS (EPOLLIN | EPOLLOUT)

#define EPOLLEXCLUSIVE_OK_BITS (EPOLLINOUT_BITS | EPOLLERR | EPOLLHUP | \
                                EPOLLWAKEUP | EPOLLET | EPOLLEXCLUSIVE)

/* Maximum number of nesting allowed inside epoll sets */
#define EP_MAX_NESTS 4

#define EP_MAX_EVENTS (INT_MAX / sizeof(struct epoll_event))

#define EP_UNACTIVE_PTR ((void *) -1L)

#define EP_ITEM_COST (sizeof(struct epitem) + sizeof(struct eppoll_entry))
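
For scale: on x86 the packed struct epoll_event is 12 bytes, so EP_MAX_EVENTS works out to INT_MAX / 12, roughly 178 million entries; epoll_wait() rejects any maxevents argument outside the range (0, EP_MAX_EVENTS] with -EINVAL.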

struct epoll_filefd {
        struct file *file;
        int fd;
} __packed;

/* Wait structure used by the poll hooks */
struct eppoll_entry {
        /* List header used to link this structure to the "struct epitem" */
        struct eppoll_entry *next;

        /* The "base" pointer is set to the container "struct epitem" */
        struct epitem *base;

        /*
         * Wait queue item that will be linked to the target file wait
         * queue head.
         */
        wait_queue_entry_t wait;

        /* The wait queue head that linked the "wait" wait queue item */
        wait_queue_head_t *whead;
};

/*
 * Each file descriptor added to the eventpoll interface will
 * have an entry of this type linked to the "rbr" RB tree.
 * Avoid increasing the size of this struct, there can be many thousands
 * of these on a server and we do not want this to take another cache line.
 */
struct epitem {
        union {
                /* RB tree node links this structure to the eventpoll RB tree */
                struct rb_node rbn;
                /* Used to free the struct epitem */
                struct rcu_head rcu;
        };

        /* List header used to link this structure to the eventpoll ready list */
        struct list_head rdllink;

        /*
         * Works together "struct eventpoll"->ovflist in keeping the
         * single linked chain of items.
         */
        struct epitem *next;

        /* The file descriptor information this item refers to */
        struct epoll_filefd ffd;

        /*
         * Protected by file->f_lock, true for to-be-released epitem already
         * removed from the "struct file" items list; together with
         * eventpoll->refcount orchestrates "struct eventpoll" disposal
         */
        bool dying;

        /* List containing poll wait queues */
        struct eppoll_entry *pwqlist;

        /* The "container" of this item */
        struct eventpoll *ep;

        /* List header used to link this item to the "struct file" items list */
        struct hlist_node fllink;

        /* wakeup_source used when EPOLLWAKEUP is set */
        struct wakeup_source __rcu *ws;

        /* The structure that describe the interested events and the source fd */
        struct epoll_event event;
};

/*
 * This structure is stored inside the "private_data" member of the file
 * structure and represents the main data structure for the eventpoll
 * interface.
 */
struct eventpoll {
        /*
         * This mutex is used to ensure that files are not removed
         * while epoll is using them. This is held during the event
         * collection loop, the file cleanup path, the epoll file exit
         * code and the ctl operations.
         */
        struct mutex mtx;

        /* Wait queue used by sys_epoll_wait() */
        wait_queue_head_t wq;

        /* Wait queue used by file->poll() */
        wait_queue_head_t poll_wait;

        /* List of ready file descriptors */
        struct list_head rdllist;

        /* Lock which protects rdllist and ovflist */
        rwlock_t lock;

        /* RB tree root used to store monitored fd structs */
        struct rb_root_cached rbr;

        /*
         * This is a single linked list that chains all the "struct epitem" that
         * happened while transferring ready events to userspace w/out
         * holding ->lock.
         */
        struct epitem *ovflist;

        /* wakeup_source used when ep_send_events is running */
        struct wakeup_source *ws;

        /* The user that created the eventpoll descriptor */
        struct user_struct *user;

        struct file *file;

        /* used to optimize loop detection check */
        u64 gen;
        struct hlist_head refs;

        /*
         * usage count, used together with epitem->dying to
         * orchestrate the disposal of this struct
         */
        refcount_t refcount;

#ifdef CONFIG_NET_RX_BUSY_POLL
        /* used to track busy poll napi_id */
        unsigned int napi_id;
        /* busy poll timeout */
        u32 busy_poll_usecs;
        /* busy poll packet budget */
        u16 busy_poll_budget;
        bool prefer_busy_poll;
#endif

#ifdef CONFIG_DEBUG_LOCK_ALLOC
        /* tracks wakeup nests for lockdep validation */
        u8 nests;
#endif
};

/* Wrapper struct used by poll queueing */
struct ep_pqueue {
        poll_table pt;
        struct epitem *epi;
};

250 /*                                                
251  * Configuration options available inside /pro    
252  */                                               
253 /* Maximum number of epoll watched descriptors    
254 static long max_user_watches __read_mostly;       
255                                                   
256 /* Used for cycles detection */                   
257 static DEFINE_MUTEX(epnested_mutex);              
258                                                   
259 static u64 loop_check_gen = 0;                    
260                                                   
261 /* Used to check for epoll file descriptor inc    
262 static struct eventpoll *inserting_into;          
263                                                   
264 /* Slab cache used to allocate "struct epitem"    
265 static struct kmem_cache *epi_cache __ro_after    
266                                                   
267 /* Slab cache used to allocate "struct eppoll_    
268 static struct kmem_cache *pwq_cache __ro_after    
269                                                   
270 /*                                                
271  * List of files with newly added links, where    
272  * of emanating paths. Protected by the epnest    
273  */                                               
274 struct epitems_head {                             
275         struct hlist_head epitems;                
276         struct epitems_head *next;                
277 };                                                
278 static struct epitems_head *tfile_check_list =    
279                                                   
280 static struct kmem_cache *ephead_cache __ro_af    
281                                                   
282 static inline void free_ephead(struct epitems_    
283 {                                                 
284         if (head)                                 
285                 kmem_cache_free(ephead_cache,     
286 }                                                 
287                                                   
288 static void list_file(struct file *file)          
289 {                                                 
290         struct epitems_head *head;                
291                                                   
292         head = container_of(file->f_ep, struct    
293         if (!head->next) {                        
294                 head->next = tfile_check_list;    
295                 tfile_check_list = head;          
296         }                                         
297 }                                                 
298                                                   
299 static void unlist_file(struct epitems_head *h    
300 {                                                 
301         struct epitems_head *to_free = head;      
302         struct hlist_node *p = rcu_dereference    
303         if (p) {                                  
304                 struct epitem *epi= container_    
305                 spin_lock(&epi->ffd.file->f_lo    
306                 if (!hlist_empty(&head->epitem    
307                         to_free = NULL;           
308                 head->next = NULL;                
309                 spin_unlock(&epi->ffd.file->f_    
310         }                                         
311         free_ephead(to_free);                     
312 }                                                 
313                                                   
314 #ifdef CONFIG_SYSCTL                              
315                                                   
316 #include <linux/sysctl.h>                         
317                                                   
318 static long long_zero;                            
319 static long long_max = LONG_MAX;                  
320                                                   
321 static struct ctl_table epoll_table[] = {         
322         {                                         
323                 .procname       = "max_user_wa    
324                 .data           = &max_user_wa    
325                 .maxlen         = sizeof(max_u    
326                 .mode           = 0644,           
327                 .proc_handler   = proc_doulong    
328                 .extra1         = &long_zero,     
329                 .extra2         = &long_max,      
330         },                                        
331 };                                                
332                                                   
333 static void __init epoll_sysctls_init(void)       
334 {                                                 
335         register_sysctl("fs/epoll", epoll_tabl    
336 }                                                 
337 #else                                             
338 #define epoll_sysctls_init() do { } while (0)     
339 #endif /* CONFIG_SYSCTL */                        
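
When CONFIG_SYSCTL is enabled, the single entry above surfaces as /proc/sys/fs/epoll/max_user_watches (mode 0644, so world-readable and root-writable). A quick sketch that queries it from user space:

    #include <stdio.h>

    int main(void)
    {
            long watches;
            FILE *f = fopen("/proc/sys/fs/epoll/max_user_watches", "r");

            if (f && fscanf(f, "%ld", &watches) == 1)
                    printf("max_user_watches: %ld\n", watches);
            if (f)
                    fclose(f);
            return 0;
    }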

static const struct file_operations eventpoll_fops;

static inline int is_file_epoll(struct file *f)
{
        return f->f_op == &eventpoll_fops;
}

/* Setup the structure that is used as key for the RB tree */
static inline void ep_set_ffd(struct epoll_filefd *ffd,
                              struct file *file, int fd)
{
        ffd->file = file;
        ffd->fd = fd;
}

/* Compare RB tree keys */
static inline int ep_cmp_ffd(struct epoll_filefd *p1,
                             struct epoll_filefd *p2)
{
        return (p1->file > p2->file ? +1:
                (p1->file < p2->file ? -1 : p1->fd - p2->fd));
}

/* Tells us if the item is currently linked */
static inline int ep_is_linked(struct epitem *epi)
{
        return !list_empty(&epi->rdllink);
}

static inline struct eppoll_entry *ep_pwq_from_wait(wait_queue_entry_t *p)
{
        return container_of(p, struct eppoll_entry, wait);
}

/* Get the "struct epitem" from a wait queue pointer */
static inline struct epitem *ep_item_from_wait(wait_queue_entry_t *p)
{
        return container_of(p, struct eppoll_entry, wait)->base;
}

/**
 * ep_events_available - Checks if ready events might be available.
 *
 * @ep: Pointer to the eventpoll context.
 *
 * Return: a value different than %zero if ready events are available,
 *          or %zero otherwise.
 */
static inline int ep_events_available(struct eventpoll *ep)
{
        return !list_empty_careful(&ep->rdllist) ||
                READ_ONCE(ep->ovflist) != EP_UNACTIVE_PTR;
}

#ifdef CONFIG_NET_RX_BUSY_POLL
/**
 * busy_loop_ep_timeout - check if busy poll has timed out. The timeout value
 * from the epoll instance ep is preferred, but if it is not set fallback to
 * the system-wide global via busy_loop_timeout.
 *
 * @start_time: The start time used to compute the remaining time until timeout.
 * @ep: Pointer to the eventpoll context.
 *
 * Return: true if the timeout has expired, false otherwise.
 */
static bool busy_loop_ep_timeout(unsigned long start_time,
                                 struct eventpoll *ep)
{
        unsigned long bp_usec = READ_ONCE(ep->busy_poll_usecs);

        if (bp_usec) {
                unsigned long end_time = start_time + bp_usec;
                unsigned long now = busy_loop_current_time();

                return time_after(now, end_time);
        } else {
                return busy_loop_timeout(start_time);
        }
}

static bool ep_busy_loop_on(struct eventpoll *ep)
{
        return !!READ_ONCE(ep->busy_poll_usecs) || net_busy_loop_on();
}

static bool ep_busy_loop_end(void *p, unsigned long start_time)
{
        struct eventpoll *ep = p;

        return ep_events_available(ep) || busy_loop_ep_timeout(start_time, ep);
}

/*
 * Busy poll if globally on and supporting sockets found && no events,
 * busy loop will return if need_resched or ep_events_available.
 *
 * we must do our busy polling with irqs enabled
 */
static bool ep_busy_loop(struct eventpoll *ep, int nonblock)
{
        unsigned int napi_id = READ_ONCE(ep->napi_id);
        u16 budget = READ_ONCE(ep->busy_poll_budget);
        bool prefer_busy_poll = READ_ONCE(ep->prefer_busy_poll);

        if (!budget)
                budget = BUSY_POLL_BUDGET;

        if (napi_id >= MIN_NAPI_ID && ep_busy_loop_on(ep)) {
                napi_busy_loop(napi_id, nonblock ? NULL : ep_busy_loop_end,
                               ep, prefer_busy_poll, budget);
                if (ep_events_available(ep))
                        return true;
                /*
                 * Busy poll timed out.  Drop NAPI ID for now, we can add
                 * it back in when we have moved a socket with a valid NAPI
                 * ID onto the ready list.
                 */
                ep->napi_id = 0;
                return false;
        }
        return false;
}

/*
 * Set epoll busy poll NAPI ID from sk.
 */
static inline void ep_set_busy_poll_napi_id(struct epitem *epi)
{
        struct eventpoll *ep = epi->ep;
        unsigned int napi_id;
        struct socket *sock;
        struct sock *sk;

        if (!ep_busy_loop_on(ep))
                return;

        sock = sock_from_file(epi->ffd.file);
        if (!sock)
                return;

        sk = sock->sk;
        if (!sk)
                return;

        napi_id = READ_ONCE(sk->sk_napi_id);

        /* Non-NAPI IDs can be rejected
         *      or
         * Nothing to do if we already have this ID
         */
        if (napi_id < MIN_NAPI_ID || napi_id == ep->napi_id)
                return;

        /* record NAPI ID for use in next busy poll */
        ep->napi_id = napi_id;
}

static long ep_eventpoll_bp_ioctl(struct file *file, unsigned int cmd,
                                  unsigned long arg)
{
        struct eventpoll *ep = file->private_data;
        void __user *uarg = (void __user *)arg;
        struct epoll_params epoll_params;

        switch (cmd) {
        case EPIOCSPARAMS:
                if (copy_from_user(&epoll_params, uarg, sizeof(epoll_params)))
                        return -EFAULT;

                /* pad byte must be zero */
                if (epoll_params.__pad)
                        return -EINVAL;

                if (epoll_params.busy_poll_usecs > S32_MAX)
                        return -EINVAL;

                if (epoll_params.prefer_busy_poll > 1)
                        return -EINVAL;

                if (epoll_params.busy_poll_budget > NAPI_POLL_WEIGHT &&
                    !capable(CAP_NET_ADMIN))
                        return -EPERM;

                WRITE_ONCE(ep->busy_poll_usecs, epoll_params.busy_poll_usecs);
                WRITE_ONCE(ep->busy_poll_budget, epoll_params.busy_poll_budget);
                WRITE_ONCE(ep->prefer_busy_poll, epoll_params.prefer_busy_poll);
                return 0;
        case EPIOCGPARAMS:
                memset(&epoll_params, 0, sizeof(epoll_params));
                epoll_params.busy_poll_usecs = READ_ONCE(ep->busy_poll_usecs);
                epoll_params.busy_poll_budget = READ_ONCE(ep->busy_poll_budget);
                epoll_params.prefer_busy_poll = READ_ONCE(ep->prefer_busy_poll);
                if (copy_to_user(uarg, &epoll_params, sizeof(epoll_params)))
                        return -EFAULT;
                return 0;
        default:
                return -ENOIOCTLCMD;
        }
}

#else

static inline bool ep_busy_loop(struct eventpoll *ep, int nonblock)
{
        return false;
}

static inline void ep_set_busy_poll_napi_id(struct epitem *epi)
{
}

static long ep_eventpoll_bp_ioctl(struct file *file, unsigned int cmd,
                                  unsigned long arg)
{
        return -EOPNOTSUPP;
}

#endif /* CONFIG_NET_RX_BUSY_POLL */
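
From user space these two commands are reached via ioctl() on an epoll fd, using struct epoll_params from the 6.9+ UAPI. A hedged sketch, assuming headers new enough to declare EPIOCSPARAMS/EPIOCGPARAMS (the UAPI <linux/eventpoll.h>; recent glibc mirrors them through <sys/epoll.h>):

    #include <stdio.h>
    #include <sys/epoll.h>
    #include <sys/ioctl.h>

    int main(void)
    {
            int epfd = epoll_create1(0);
            struct epoll_params p = {
                    .busy_poll_usecs  = 64, /* per-instance timeout, must be <= S32_MAX */
                    .busy_poll_budget = 8,  /* > NAPI_POLL_WEIGHT needs CAP_NET_ADMIN */
                    .prefer_busy_poll = 1,
            };

            if (ioctl(epfd, EPIOCSPARAMS, &p))
                    perror("EPIOCSPARAMS"); /* EOPNOTSUPP without CONFIG_NET_RX_BUSY_POLL */
            if (ioctl(epfd, EPIOCGPARAMS, &p) == 0)
                    printf("usecs=%u budget=%u prefer=%u\n",
                           p.busy_poll_usecs, p.busy_poll_budget, p.prefer_busy_poll);
            return 0;
    }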

/*
 * As described in commit 0ccf831cb ("lockdep: annotate epoll"),
 * the use of wait queues used by epoll is done in a very controlled
 * manner. Wake ups can nest inside each other, but are never done
 * with the same locking. For example:
 *
 *   dfd = socket(...);
 *   efd1 = epoll_create();
 *   efd2 = epoll_create();
 *   epoll_ctl(efd1, EPOLL_CTL_ADD, dfd, ...);
 *   epoll_ctl(efd2, EPOLL_CTL_ADD, efd1, ...);
 *
 * When a packet arrives to the device underneath "dfd", the net code will
 * issue a wake_up() on its poll wake list. Epoll (efd1) has installed a
 * callback wakeup entry on that queue, and the wake_up() performed by the
 * "dfd" net code will end up in ep_poll_callback(). At this point epoll
 * (efd1) notices that it may have some event ready, so it needs to wake up
 * the waiters on its poll wait list (efd2). So it calls ep_poll_safewake()
 * that ends up in another wake_up(), after having checked the recursion
 * constraints: no more than EP_MAX_NESTS nested wake-ups, to avoid
 * stack blasting.
 *
 * When CONFIG_DEBUG_LOCK_ALLOC is enabled, make sure lockdep can handle
 * this special case of epoll.
 */
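
The scenario in the comment compiles almost as-is; here is a self-contained version of it (epoll_create1() standing in for the older epoll_create() spelling):

    #include <sys/epoll.h>
    #include <sys/socket.h>

    int main(void)
    {
            int dfd = socket(AF_INET, SOCK_DGRAM, 0);
            int efd1 = epoll_create1(0);
            int efd2 = epoll_create1(0);
            struct epoll_event ev = { .events = EPOLLIN };

            ev.data.fd = dfd;
            epoll_ctl(efd1, EPOLL_CTL_ADD, dfd, &ev);  /* efd1 watches the socket */
            ev.data.fd = efd1;
            epoll_ctl(efd2, EPOLL_CTL_ADD, efd1, &ev); /* efd2 watches efd1 */

            /*
             * A packet landing on dfd now triggers ep_poll_callback() for efd1,
             * which calls ep_poll_safewake() to wake efd2's waiters: two nested
             * wake_up()s, exactly what the lockdep annotation below covers.
             */
            return 0;
    }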
#ifdef CONFIG_DEBUG_LOCK_ALLOC

static void ep_poll_safewake(struct eventpoll *ep, struct epitem *epi,
                             unsigned pollflags)
{
        struct eventpoll *ep_src;
        unsigned long flags;
        u8 nests = 0;

        /*
         * To set the subclass or nesting level for spin_lock_irqsave_nested()
         * it might be natural to create a per-cpu nest count. However, since
         * we can recurse on ep->poll_wait.lock, and a non-raw spinlock can
         * schedule() in the -rt kernel, the per-cpu variables are no longer
         * protected. Thus, we are introducing a per eventpoll nest field.
         * If we are not being called from ep_poll_callback(), epi is NULL and
         * we are at the first level of nesting, 0. Otherwise, we are being
         * called from ep_poll_callback() and if a previous wakeup source is
         * not an epoll file itself, we are at depth 1 since the wakeup source
         * is depth 0. If the wakeup source is a previous epoll file in the
         * wakeup chain then we use its nests value and record ours as
         * nests + 1. The previous epoll file nests value is stable since it's
         * already holding its own poll_wait.lock.
         */
        if (epi) {
                if (is_file_epoll(epi->ffd.file)) {
                        ep_src = epi->ffd.file->private_data;
                        nests = ep_src->nests;
                } else {
                        nests = 1;
                }
        }
        spin_lock_irqsave_nested(&ep->poll_wait.lock, flags, nests);
        ep->nests = nests + 1;
        wake_up_locked_poll(&ep->poll_wait, EPOLLIN | pollflags);
        ep->nests = 0;
        spin_unlock_irqrestore(&ep->poll_wait.lock, flags);
}

#else

static void ep_poll_safewake(struct eventpoll *ep, struct epitem *epi,
                             __poll_t pollflags)
{
        wake_up_poll(&ep->poll_wait, EPOLLIN | pollflags);
}

#endif

static void ep_remove_wait_queue(struct eppoll_entry *pwq)
{
        wait_queue_head_t *whead;

        rcu_read_lock();
        /*
         * If it is cleared by POLLFREE, it should be rcu-safe.
         * If we read NULL we need a barrier paired with
         * smp_store_release() in ep_poll_callback(), otherwise
         * we rely on whead->lock.
         */
        whead = smp_load_acquire(&pwq->whead);
        if (whead)
                remove_wait_queue(whead, &pwq->wait);
        rcu_read_unlock();
}

/*
 * This function unregisters poll callbacks from the associated file
 * descriptor.  Must be called with "mtx" held.
 */
static void ep_unregister_pollwait(struct eventpoll *ep, struct epitem *epi)
{
        struct eppoll_entry **p = &epi->pwqlist;
        struct eppoll_entry *pwq;

        while ((pwq = *p) != NULL) {
                *p = pwq->next;
                ep_remove_wait_queue(pwq);
                kmem_cache_free(pwq_cache, pwq);
        }
}

/* call only when ep->mtx is held */
static inline struct wakeup_source *ep_wakeup_source(struct epitem *epi)
{
        return rcu_dereference_check(epi->ws, lockdep_is_held(&epi->ep->mtx));
}

/* call only when ep->mtx is held */
static inline void ep_pm_stay_awake(struct epitem *epi)
{
        struct wakeup_source *ws = ep_wakeup_source(epi);

        if (ws)
                __pm_stay_awake(ws);
}

static inline bool ep_has_wakeup_source(struct epitem *epi)
{
        return rcu_access_pointer(epi->ws) ? true : false;
}

/* call when ep->mtx cannot be held (ep_poll_callback) */
static inline void ep_pm_stay_awake_rcu(struct epitem *epi)
{
        struct wakeup_source *ws;

        rcu_read_lock();
        ws = rcu_dereference(epi->ws);
        if (ws)
                __pm_stay_awake(ws);
        rcu_read_unlock();
}

/*
 * ep->mutex needs to be held because we could be hit by
 * eventpoll_release_file() and epoll_ctl().
 */
static void ep_start_scan(struct eventpoll *ep, struct list_head *txlist)
{
        /*
         * Steal the ready list, and re-init the original one to the
         * empty list. Also, set ep->ovflist to NULL so that events
         * happening while looping w/out locks, are not lost. We cannot
         * have the poll callback to queue directly on ep->rdllist,
         * because we want the "sproc" callback to be able to do it
         * in a lockless way.
         */
        lockdep_assert_irqs_enabled();
        write_lock_irq(&ep->lock);
        list_splice_init(&ep->rdllist, txlist);
        WRITE_ONCE(ep->ovflist, NULL);
        write_unlock_irq(&ep->lock);
}

static void ep_done_scan(struct eventpoll *ep,
                         struct list_head *txlist)
{
        struct epitem *epi, *nepi;

        write_lock_irq(&ep->lock);
        /*
         * During the time we spent inside the "sproc" callback, some
         * other events might have been queued by the poll callback.
         * We re-insert them inside the main ready-list here.
         */
        for (nepi = READ_ONCE(ep->ovflist); (epi = nepi) != NULL;
             nepi = epi->next, epi->next = EP_UNACTIVE_PTR) {
                /*
                 * We need to check if the item is already in the list.
                 * During the "sproc" callback execution time, items are
                 * queued into ->ovflist but the "txlist" might already
                 * contain them, and the list_splice() below takes care of them.
                 */
                if (!ep_is_linked(epi)) {
                        /*
                         * ->ovflist is LIFO, so we have to reverse it in order
                         * to keep in FIFO.
                         */
                        list_add(&epi->rdllink, &ep->rdllist);
                        ep_pm_stay_awake(epi);
                }
        }
        /*
         * We need to set back ep->ovflist to EP_UNACTIVE_PTR, so that after
         * releasing the lock, events will be queued in the normal way inside
         * ep->rdllist.
         */
        WRITE_ONCE(ep->ovflist, EP_UNACTIVE_PTR);

        /*
         * Quickly re-inject items left on "txlist".
         */
        list_splice(txlist, &ep->rdllist);
        __pm_relax(ep->ws);

        if (!list_empty(&ep->rdllist)) {
                if (waitqueue_active(&ep->wq))
                        wake_up(&ep->wq);
        }

        write_unlock_irq(&ep->lock);
}

static void ep_get(struct eventpoll *ep)
{
        refcount_inc(&ep->refcount);
}

/*
 * Returns true if the event poll can be disposed
 */
static bool ep_refcount_dec_and_test(struct eventpoll *ep)
{
        if (!refcount_dec_and_test(&ep->refcount))
                return false;

        WARN_ON_ONCE(!RB_EMPTY_ROOT(&ep->rbr.rb_root));
        return true;
}

static void ep_free(struct eventpoll *ep)
{
        mutex_destroy(&ep->mtx);
        free_uid(ep->user);
        wakeup_source_unregister(ep->ws);
        kfree(ep);
}

/*
 * Removes a "struct epitem" from the eventpoll RB tree and deallocates
 * all the associated resources. Must be called with "mtx" held.
 * If the dying flag is set, do the removal only if force is true.
 * This prevents ep_clear_and_put() from dropping all the ep references
 * while running concurrently with eventpoll_release_file().
 * Returns true if the eventpoll can be disposed.
 */
static bool __ep_remove(struct eventpoll *ep, struct epitem *epi, bool force)
{
        struct file *file = epi->ffd.file;
        struct epitems_head *to_free;
        struct hlist_head *head;

        lockdep_assert_irqs_enabled();

        /*
         * Removes poll wait queue hooks.
         */
        ep_unregister_pollwait(ep, epi);

        /* Remove the current item from the list of epoll hooks */
        spin_lock(&file->f_lock);
        if (epi->dying && !force) {
                spin_unlock(&file->f_lock);
                return false;
        }

        to_free = NULL;
        head = file->f_ep;
        if (head->first == &epi->fllink && !epi->fllink.next) {
                file->f_ep = NULL;
                if (!is_file_epoll(file)) {
                        struct epitems_head *v;

                        v = container_of(head, struct epitems_head, epitems);
                        if (!smp_load_acquire(&v->next))
                                to_free = v;
                }
        }
        hlist_del_rcu(&epi->fllink);
        spin_unlock(&file->f_lock);
        free_ephead(to_free);

        rb_erase_cached(&epi->rbn, &ep->rbr);

        write_lock_irq(&ep->lock);
        if (ep_is_linked(epi))
                list_del_init(&epi->rdllink);
        write_unlock_irq(&ep->lock);

        wakeup_source_unregister(ep_wakeup_source(epi));
        /*
         * At this point it is safe to free the eventpoll item. Use the union
         * field epi->rcu, since we are trying to minimize the size of
         * 'struct epitem'. The 'rbn' field is no longer in use. Protected by
         * ep->mtx. The rcu read side, reverse_path_check_proc(), does not make
         * use of the rbn field.
         */
        kfree_rcu(epi, rcu);

        percpu_counter_dec(&ep->user->epoll_watches);
        return ep_refcount_dec_and_test(ep);
}

/*
 * ep_remove variant for callers owning an additional reference to the ep
 */
static void ep_remove_safe(struct eventpoll *ep, struct epitem *epi)
{
        WARN_ON_ONCE(__ep_remove(ep, epi, false));
}

static void ep_clear_and_put(struct eventpoll *ep)
{
        struct rb_node *rbp, *next;
        struct epitem *epi;
        bool dispose;

        /* We need to release all tasks waiting for this file */
        if (waitqueue_active(&ep->poll_wait))
                ep_poll_safewake(ep, NULL, 0);

        mutex_lock(&ep->mtx);

        /*
         * Walks through the whole tree by unregistering poll callbacks.
         */
        for (rbp = rb_first_cached(&ep->rbr); rbp; rbp = rb_next(rbp)) {
                epi = rb_entry(rbp, struct epitem, rbn);

                ep_unregister_pollwait(ep, epi);
                cond_resched();
        }

        /*
         * Walks through the whole tree and try to free each "struct epitem".
         * Note that ep_remove_safe() will not remove the epitem in case of a
         * racing eventpoll_release_file(); the latter will do the removal.
         * At this point we are sure no poll callbacks will be lingering around.
         * Since we still own a reference to the eventpoll struct, the loop can't
         * dispose it.
         */
        for (rbp = rb_first_cached(&ep->rbr); rbp; rbp = next) {
                next = rb_next(rbp);
                epi = rb_entry(rbp, struct epitem, rbn);
                ep_remove_safe(ep, epi);
                cond_resched();
        }

        dispose = ep_refcount_dec_and_test(ep);
        mutex_unlock(&ep->mtx);

        if (dispose)
                ep_free(ep);
}

static long ep_eventpoll_ioctl(struct file *file, unsigned int cmd,
                               unsigned long arg)
{
        int ret;

        if (!is_file_epoll(file))
                return -EINVAL;

        switch (cmd) {
        case EPIOCSPARAMS:
        case EPIOCGPARAMS:
                ret = ep_eventpoll_bp_ioctl(file, cmd, arg);
                break;
        default:
                ret = -EINVAL;
                break;
        }

        return ret;
}

static int ep_eventpoll_release(struct inode *inode, struct file *file)
{
        struct eventpoll *ep = file->private_data;

        if (ep)
                ep_clear_and_put(ep);

        return 0;
}
941                                                   
942 static __poll_t ep_item_poll(const struct epit    
943                                                   
944 static __poll_t __ep_eventpoll_poll(struct fil    
945 {                                                 
946         struct eventpoll *ep = file->private_d    
947         LIST_HEAD(txlist);                        
948         struct epitem *epi, *tmp;                 
949         poll_table pt;                            
950         __poll_t res = 0;                         
951                                                   
952         init_poll_funcptr(&pt, NULL);             
953                                                   
954         /* Insert inside our poll wait queue *    
955         poll_wait(file, &ep->poll_wait, wait);    
956                                                   
957         /*                                        
958          * Proceed to find out if wanted event    
959          * the ready list.                        
960          */                                       
961         mutex_lock_nested(&ep->mtx, depth);       
962         ep_start_scan(ep, &txlist);               
963         list_for_each_entry_safe(epi, tmp, &tx    
964                 if (ep_item_poll(epi, &pt, dep    
965                         res = EPOLLIN | EPOLLR    
966                         break;                    
967                 } else {                          
968                         /*                        
969                          * Item has been dropp    
970                          * callback, but it's     
971                          * caller requested ev    
972                          */                       
973                         __pm_relax(ep_wakeup_s    
974                         list_del_init(&epi->rd    
975                 }                                 
976         }                                         
977         ep_done_scan(ep, &txlist);                
978         mutex_unlock(&ep->mtx);                   
979         return res;                               
980 }                                                 
981                                                   
982 /*                                                
983  * The ffd.file pointer may be in the process     
984  * being closed, but we may not have finished     
985  *                                                
986  * Normally, even with the atomic_long_inc_not    
987  * been free'd and then gotten re-allocated to    
988  * files are not RCU-delayed, they are SLAB_TY    
989  *                                                
990  * But for epoll, users hold the ep->mtx mutex    
991  * the process of being free'd will block in e    
992  * and thus the underlying file allocation wil    
993  * file re-use cannot happen.                     
994  *                                                
995  * For the same reason we can avoid a rcu_read    
996  * operation - 'ffd.file' cannot go away even     
997  * reached zero (but we must still not call ou    
998  * etc).                                          
999  */                                               
1000 static struct file *epi_fget(const struct epi    
1001 {                                                
1002         struct file *file;                       
1003                                                  
1004         file = epi->ffd.file;                    
1005         if (!atomic_long_inc_not_zero(&file->    
1006                 file = NULL;                     
1007         return file;                             
1008 }                                                
1009                                                  
1010 /*                                               
1011  * Differs from ep_eventpoll_poll() in that internal callers already hold
1012  * the ep->mtx so we need to start from depth=1, such that mutex_lock_nested()
1013  * is correctly annotated.                       
1014  */                                              
1015 static __poll_t ep_item_poll(const struct epitem *epi, poll_table *pt,
1016                                  int depth)      
1017 {                                                
1018         struct file *file = epi_fget(epi);       
1019         __poll_t res;                            
1020                                                  
1021         /*                                       
1022          * We could return EPOLLERR | EPOLLHUP or something, but let's
1023          * treat this more as "file doesn't exist, poll didn't happen".
1024          */                                      
1025         if (!file)                               
1026                 return 0;                        
1027                                                  
1028         pt->_key = epi->event.events;            
1029         if (!is_file_epoll(file))                
1030                 res = vfs_poll(file, pt);        
1031         else                                     
1032                 res = __ep_eventpoll_poll(file, pt, depth);
1033         fput(file);                              
1034         return res & epi->event.events;          
1035 }                                                
1036                                                  
1037 static __poll_t ep_eventpoll_poll(struct file *file, poll_table *wait)
1038 {                                                
1039         return __ep_eventpoll_poll(file, wait, 0);
1040 }                                                
1041                                                  
1042 #ifdef CONFIG_PROC_FS                            
1043 static void ep_show_fdinfo(struct seq_file *m, struct file *f)
1044 {
1045         struct eventpoll *ep = f->private_data;
1046         struct rb_node *rbp;
1047
1048         mutex_lock(&ep->mtx);
1049         for (rbp = rb_first_cached(&ep->rbr); rbp; rbp = rb_next(rbp)) {
1050                 struct epitem *epi = rb_entry(rbp, struct epitem, rbn);
1051                 struct inode *inode = file_inode(epi->ffd.file);
1052
1053                 seq_printf(m, "tfd: %8d events: %8x data: %16llx "
1054                            " pos:%lli ino:%lx sdev:%x\n",
1055                            epi->ffd.fd, epi->event.events,
1056                            (long long)epi->event.data,
1057                            (long long)epi->ffd.file->f_pos,
1058                            inode->i_ino, inode->i_sb->s_dev);
1059                 if (seq_has_overflowed(m))       
1060                         break;                   
1061         }                                        
1062         mutex_unlock(&ep->mtx);                  
1063 }                                                
1064 #endif                                           
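/* The seq_printf() above is what appears, once per watched fd, in
 * /proc/<pid>/fdinfo/<epfd>. A quick userspace peek (a sketch; assumes fd 3
 * happens to be an epoll fd and procfs is mounted):
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		FILE *f = fopen("/proc/self/fdinfo/3", "r");
 *		char line[256];
 *
 *		if (!f)
 *			return 1;
 *		while (fgets(line, sizeof(line), f))
 *			fputs(line, stdout);	// e.g. "tfd:   4 events: ..."
 *		fclose(f);
 *		return 0;
 *	}
 */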
1065                                                  
1066 /* File callbacks that implement the eventpoll file behaviour */
1067 static const struct file_operations eventpoll_fops = {
1068 #ifdef CONFIG_PROC_FS                            
1069         .show_fdinfo    = ep_show_fdinfo,        
1070 #endif                                           
1071         .release        = ep_eventpoll_release,
1072         .poll           = ep_eventpoll_poll,     
1073         .llseek         = noop_llseek,           
1074         .unlocked_ioctl = ep_eventpoll_ioctl,    
1075         .compat_ioctl   = compat_ptr_ioctl,      
1076 };                                               
1077                                                  
1078 /*                                               
1079  * This is called from eventpoll_release() to unlink files from the eventpoll
1080  * interface. We need this facility to correctly clean up files that are
1081  * closed without being removed from the eventpoll interface.
1082  */                                              
1083 void eventpoll_release_file(struct file *file)
1084 {                                                
1085         struct eventpoll *ep;                    
1086         struct epitem *epi;                      
1087         bool dispose;                            
1088                                                  
1089         /*                                       
1090          * Use the 'dying' flag to prevent a concurrent ep_clear_and_put() from
1091          * touching the epitems list before eventpoll_release_file() can access
1092          * the ep->mtx.                          
1093          */                                      
1094 again:                                           
1095         spin_lock(&file->f_lock);                
1096         if (file->f_ep && file->f_ep->first) {
1097                 epi = hlist_entry(file->f_ep->first, struct epitem, fllink);
1098                 epi->dying = true;               
1099                 spin_unlock(&file->f_lock);      
1100                                                  
1101                 /*                               
1102                  * ep access is safe as we still own a reference to the ep
1103                  * struct                        
1104                  */                              
1105                 ep = epi->ep;                    
1106                 mutex_lock(&ep->mtx);            
1107                 dispose = __ep_remove(ep, epi, true);
1108                 mutex_unlock(&ep->mtx);          
1109                                                  
1110                 if (dispose)                     
1111                         ep_free(ep);             
1112                 goto again;                      
1113         }                                        
1114         spin_unlock(&file->f_lock);              
1115 }                                                
1116                                                  
1117 static int ep_alloc(struct eventpoll **pep)      
1118 {                                                
1119         struct eventpoll *ep;                    
1120                                                  
1121         ep = kzalloc(sizeof(*ep), GFP_KERNEL);
1122         if (unlikely(!ep))                       
1123                 return -ENOMEM;                  
1124                                                  
1125         mutex_init(&ep->mtx);                    
1126         rwlock_init(&ep->lock);                  
1127         init_waitqueue_head(&ep->wq);            
1128         init_waitqueue_head(&ep->poll_wait);     
1129         INIT_LIST_HEAD(&ep->rdllist);            
1130         ep->rbr = RB_ROOT_CACHED;                
1131         ep->ovflist = EP_UNACTIVE_PTR;           
1132         ep->user = get_current_user();           
1133         refcount_set(&ep->refcount, 1);          
1134                                                  
1135         *pep = ep;                               
1136                                                  
1137         return 0;                                
1138 }                                                
1139                                                  
1140 /*                                               
1141  * Search the file inside the eventpoll tree. The RB tree operations
1142  * are protected by the "mtx" mutex, and ep_find() must be called with
1143  * "mtx" held.                                   
1144  */                                              
1145 static struct epitem *ep_find(struct eventpoll *ep, struct file *file, int fd)
1146 {                                                
1147         int kcmp;                                
1148         struct rb_node *rbp;                     
1149         struct epitem *epi, *epir = NULL;        
1150         struct epoll_filefd ffd;                 
1151                                                  
1152         ep_set_ffd(&ffd, file, fd);              
1153         for (rbp = ep->rbr.rb_root.rb_node; rbp; ) {
1154                 epi = rb_entry(rbp, struct epitem, rbn);
1155                 kcmp = ep_cmp_ffd(&ffd, &epi->ffd);
1156                 if (kcmp > 0)                    
1157                         rbp = rbp->rb_right;     
1158                 else if (kcmp < 0)               
1159                         rbp = rbp->rb_left;      
1160                 else {                           
1161                         epir = epi;              
1162                         break;                   
1163                 }                                
1164         }                                        
1165                                                  
1166         return epir;                             
1167 }                                                
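/* The walk above is a plain binary search; the ordering comes from
 * ep_cmp_ffd() (defined earlier in this file), which compares the file
 * pointer first and the fd number second, so the same file registered under
 * two different descriptors yields two distinct epitems. Paraphrasing that
 * helper:
 *
 *	static inline int ep_cmp_ffd(struct epoll_filefd *p1,
 *				     struct epoll_filefd *p2)
 *	{
 *		return (p1->file > p2->file ? +1 :
 *			(p1->file < p2->file ? -1 : p1->fd - p2->fd));
 *	}
 */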
1168                                                  
1169 #ifdef CONFIG_KCMP                               
1170 static struct epitem *ep_find_tfd(struct eventpoll *ep, int tfd, unsigned long toff)
1171 {                                                
1172         struct rb_node *rbp;                     
1173         struct epitem *epi;                      
1174                                                  
1175         for (rbp = rb_first_cached(&ep->rbr); rbp; rbp = rb_next(rbp)) {
1176                 epi = rb_entry(rbp, struct epitem, rbn);
1177                 if (epi->ffd.fd == tfd) {        
1178                         if (toff == 0)           
1179                                 return epi;      
1180                         else                     
1181                                 toff--;          
1182                 }                                
1183                 cond_resched();                  
1184         }                                        
1185                                                  
1186         return NULL;                             
1187 }                                                
1188                                                  
1189 struct file *get_epoll_tfile_raw_ptr(struct file *file, int tfd,
1190                                      unsigned long toff)
1191 {                                                
1192         struct file *file_raw;                   
1193         struct eventpoll *ep;                    
1194         struct epitem *epi;                      
1195                                                  
1196         if (!is_file_epoll(file))                
1197                 return ERR_PTR(-EINVAL);         
1198                                                  
1199         ep = file->private_data;                 
1200                                                  
1201         mutex_lock(&ep->mtx);                    
1202         epi = ep_find_tfd(ep, tfd, toff);        
1203         if (epi)                                 
1204                 file_raw = epi->ffd.file;        
1205         else                                     
1206                 file_raw = ERR_PTR(-ENOENT);     
1207         mutex_unlock(&ep->mtx);                  
1208                                                  
1209         return file_raw;                         
1210 }                                                
1211 #endif /* CONFIG_KCMP */                         
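/* get_epoll_tfile_raw_ptr() backs the KCMP_EPOLL_TFD mode of kcmp(2), used
 * by checkpoint/restore tools to test whether a target fd is registered in a
 * given epoll instance. A sketch of the userspace side (raw syscall; there
 * is no glibc wrapper, and CONFIG_KCMP must be enabled):
 *
 *	#include <assert.h>
 *	#include <linux/kcmp.h>
 *	#include <sys/epoll.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		int epfd = epoll_create1(0);
 *		int pfd[2];
 *		struct epoll_event ev = { .events = EPOLLIN };
 *		struct kcmp_epoll_slot slot;
 *
 *		assert(epfd >= 0 && pipe(pfd) == 0);
 *		assert(epoll_ctl(epfd, EPOLL_CTL_ADD, pfd[0], &ev) == 0);
 *		slot.efd = epfd;	// epoll instance to search
 *		slot.tfd = pfd[0];	// target fd number, as in ep_find_tfd()
 *		slot.toff = 0;		// Nth occurrence of that number
 *		// 0 means pfd[0] and the slot refer to the same struct file.
 *		assert(syscall(SYS_kcmp, getpid(), getpid(), KCMP_EPOLL_TFD,
 *			       pfd[0], (unsigned long)&slot) == 0);
 *		return 0;
 *	}
 */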
1212                                                  
1213 /*                                               
1214  * Adds a new entry to the tail of the list in a lockless way, i.e.
1215  * multiple CPUs are allowed to call this function concurrently.
1216  *
1217  * Beware: it is necessary to prevent any other modifications of the
1218  *         existing list until all changes are completed, in other words
1219  *         concurrent list_add_tail_lockless() calls should be protected
1220  *         with a read lock, where write lock acts as a barrier which
1221  *         makes sure all list_add_tail_lockless() calls are fully
1222  *         completed.
1223  *
1224  *        Also an element can be locklessly added to the list only in one
1225  *        direction i.e. either to the tail or to the head, otherwise
1226  *        concurrent access will corrupt the list.
1227  *
1228  * Return: %false if element has been already added to the list, %true
1229  * otherwise.
1230  */                                              
1231 static inline bool list_add_tail_lockless(struct list_head *new,
1232                                           struct list_head *head)
1233 {                                                
1234         struct list_head *prev;                  
1235                                                  
1236         /*                                       
1237          * This is simple 'new->next = head' operation, but cmpxchg()
1238          * is used in order to detect that same element has been just
1239          * added to the list from another CPU: the winner observes
1240          * new->next == new.                     
1241          */                                      
1242         if (!try_cmpxchg(&new->next, &new, head))
1243                 return false;                    
1244                                                  
1245         /*                                       
1246          * Initially ->next of a new element must be updated with the head
1247          * (we are inserting to the tail) and only then pointers are atomically
1248          * exchanged.  XCHG guarantees memory ordering, thus ->next should be
1249          * updated before pointers are actually swapped and pointers are
1250          * swapped before prev->next is updated.
1251          */                                      
1252                                                  
1253         prev = xchg(&head->prev, new);           
1254                                                  
1255         /*                                       
1256          * It is safe to modify prev->next and new->prev, because a new element
1257          * is added only to the tail and new->next is updated before XCHG.
1258          */                                      
1259                                                  
1260         prev->next = new;                        
1261         new->prev = prev;                        
1262                                                  
1263         return true;                             
1264 }                                                
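/* To see why the xchg() publication is safe, consider CPU0 appending A and
 * CPU1 appending B to a list whose current tail is T -- one legal
 * interleaving (an illustrative trace, not code):
 *
 *	CPU0: cmpxchg(&A->next, A, head)	A->next = head
 *	CPU1: cmpxchg(&B->next, B, head)	B->next = head
 *	CPU0: prev0 = xchg(&head->prev, A)	prev0 = T
 *	CPU1: prev1 = xchg(&head->prev, B)	prev1 = A
 *	CPU0: T->next = A; A->prev = T
 *	CPU1: A->next = B; B->prev = A
 *
 * Each CPU receives a unique predecessor from the xchg(), so the forward
 * links T -> A -> B -> head are each written exactly once; only head->prev
 * is contended, and xchg() makes that hand-off atomic.
 */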
1265                                                  
1266 /*                                               
1267  * Chains a new epi entry to the tail of the ep->ovflist in a lockless way,
1268  * i.e. multiple CPUs are allowed to call this function concurrently.
1269  *                                               
1270  * Return: %false if epi element has been already chained, %true otherwise.
1271  */                                              
1272 static inline bool chain_epi_lockless(struct epitem *epi)
1273 {                                                
1274         struct eventpoll *ep = epi->ep;          
1275                                                  
1276         /* Fast preliminary check */             
1277         if (epi->next != EP_UNACTIVE_PTR)        
1278                 return false;                    
1279                                                  
1280         /* Check that the same epi has not been just chained from another CPU */
1281         if (cmpxchg(&epi->next, EP_UNACTIVE_PTR, NULL) != EP_UNACTIVE_PTR)
1282                 return false;                    
1283                                                  
1284         /* Atomically exchange tail */           
1285         epi->next = xchg(&ep->ovflist, epi);     
1286                                                  
1287         return true;                             
1288 }                                                
1289                                                  
1290 /*                                               
1291  * This is the callback that is passed to the wait queue wakeup
1292  * mechanism. It is called by the stored file descriptors when they
1293  * have events to report.
1294  *
1295  * This callback takes a read lock in order not to contend with concurrent
1296  * events from another file descriptor, thus all modifications to ->rdllist
1297  * or ->ovflist are lockless.  Read lock is paired with the write lock from
1298  * ep_start/done_scan(), which stops all list modifications and guarantees
1299  * that lists state is seen correctly.
1300  *
1301  * Another thing worth mentioning is that ep_poll_callback() can be called
1302  * concurrently for the same @epi from different CPUs if the poll table was
1303  * inited with several wait queue entries.  Plural wakeup from different
1304  * CPUs of a single wait queue is serialized by wq.lock, but the case when
1305  * multiple wait queues are used should be detected accordingly.  This is
1306  * detected using the cmpxchg() operation.
1307  */                                              
1308 static int ep_poll_callback(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
1309 {                                                
1310         int pwake = 0;                           
1311         struct epitem *epi = ep_item_from_wait(wait);
1312         struct eventpoll *ep = epi->ep;          
1313         __poll_t pollflags = key_to_poll(key);
1314         unsigned long flags;                     
1315         int ewake = 0;                           
1316                                                  
1317         read_lock_irqsave(&ep->lock, flags);     
1318                                                  
1319         ep_set_busy_poll_napi_id(epi);           
1320                                                  
1321         /*                                       
1322          * If the event mask does not contain any poll(2) event, we consider the
1323          * descriptor to be disabled. This condition is likely the effect of the
1324          * EPOLLONESHOT bit that disables the descriptor when an event is received,
1325          * until the next EPOLL_CTL_MOD will be issued.
1326          */                                      
1327         if (!(epi->event.events & ~EP_PRIVATE_BITS))
1328                 goto out_unlock;                 
1329                                                  
1330         /*                                       
1331          * Check the events coming with the callback. At this stage, not
1332          * every device reports the events in the "key" parameter of the
1333          * callback. We need to be able to handle both cases here, hence the
1334          * test for "key" != NULL before the event match test.
1335          */                                      
1336         if (pollflags && !(pollflags & epi->event.events))
1337                 goto out_unlock;                 
1338                                                  
1339         /*                                       
1340          * If we are transferring events to userspace, we can hold no locks
1341          * (because we're accessing user memory, and because of linux f_op->poll()
1342          * semantics). All the events that happen during that period of time are
1343          * chained in ep->ovflist and requeued later on.
1344          */                                      
1345         if (READ_ONCE(ep->ovflist) != EP_UNACTIVE_PTR) {
1346                 if (chain_epi_lockless(epi))
1347                         ep_pm_stay_awake_rcu(epi);
1348         } else if (!ep_is_linked(epi)) {
1349                 /* In the usual case, add event to ready list. */
1350                 if (list_add_tail_lockless(&epi->rdllink, &ep->rdllist))
1351                         ep_pm_stay_awake_rcu(epi);
1352         }                                        
1353                                                  
1354         /*                                       
1355          * Wake up (if active) both the eventpoll wait list and the ->poll()
1356          * wait list.                            
1357          */                                      
1358         if (waitqueue_active(&ep->wq)) {         
1359                 if ((epi->event.events & EPOLLEXCLUSIVE) &&
1360                                         !(pollflags & POLLFREE)) {
1361                         switch (pollflags & EPOLLINOUT_BITS) {
1362                         case EPOLLIN:
1363                                 if (epi->event.events & EPOLLIN)
1364                                         ewake = 1;
1365                                 break;
1366                         case EPOLLOUT:
1367                                 if (epi->event.events & EPOLLOUT)
1368                                         ewake = 1;
1369                                 break;
1369                                 break;           
1370                         case 0:                  
1371                                 ewake = 1;       
1372                                 break;           
1373                         }                        
1374                 }                                
1375                 wake_up(&ep->wq);                
1376         }                                        
1377         if (waitqueue_active(&ep->poll_wait))    
1378                 pwake++;                         
1379                                                  
1380 out_unlock:                                      
1381         read_unlock_irqrestore(&ep->lock, flags);
1382                                                  
1383         /* We have to call this outside the lock */
1384         if (pwake)
1385                 ep_poll_safewake(ep, epi, pollflags & EPOLL_URING_WAKE);
1386                                                  
1387         if (!(epi->event.events & EPOLLEXCLUSIVE))
1388                 ewake = 1;                       
1389                                                  
1390         if (pollflags & POLLFREE) {              
1391                 /*                               
1392                  * If we race with ep_remove_wait_queue() it can miss
1393                  * ->whead = NULL and do another remove_wait_queue() after
1394                  * us, so we can't use __remove_wait_queue().
1395                  */
1396                 list_del_init(&wait->entry);
1397                 /*
1398                  * ->whead != NULL protects us from the race with
1399                  * ep_clear_and_put() or ep_remove(), ep_remove_wait_queue()
1400                  * takes whead->lock held by the caller. Once we nullify it,
1401                  * nothing protects ep/epi or even wait.
1402                  */
1403                 smp_store_release(&ep_pwq_from_wait(wait)->whead, NULL);
1404         }                                        
1405                                                  
1406         return ewake;                            
1407 }                                                
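/* The ewake logic above implements EPOLLEXCLUSIVE: several epoll instances
 * can attach exclusive wait entries to one file so a single event wakes only
 * one of them (thundering-herd relief). A userspace sketch:
 *
 *	#include <assert.h>
 *	#include <sys/epoll.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		int ep_a = epoll_create1(0), ep_b = epoll_create1(0);
 *		int pfd[2];
 *		struct epoll_event ev = { .events = EPOLLIN | EPOLLEXCLUSIVE };
 *
 *		assert(ep_a >= 0 && ep_b >= 0 && pipe(pfd) == 0);
 *		assert(epoll_ctl(ep_a, EPOLL_CTL_ADD, pfd[0], &ev) == 0);
 *		assert(epoll_ctl(ep_b, EPOLL_CTL_ADD, pfd[0], &ev) == 0);
 *		// One write is only guaranteed to wake one of ep_a/ep_b,
 *		// per the switch on EPOLLINOUT_BITS above.
 *		assert(write(pfd[1], "x", 1) == 1);
 *		return 0;
 *	}
 */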
1408                                                  
1409 /*                                               
1410  * This is the callback that is used to add our wait queue to the
1411  * target file wakeup lists.                     
1412  */                                              
1413 static void ep_ptable_queue_proc(struct file *file, wait_queue_head_t *whead,
1414                                  poll_table *pt)
1415 {
1416         struct ep_pqueue *epq = container_of(pt, struct ep_pqueue, pt);
1417         struct epitem *epi = epq->epi;           
1418         struct eppoll_entry *pwq;                
1419                                                  
1420         if (unlikely(!epi))     // an earlier allocation has failed
1421                 return;                          
1422                                                  
1423         pwq = kmem_cache_alloc(pwq_cache, GFP_KERNEL);
1424         if (unlikely(!pwq)) {                    
1425                 epq->epi = NULL;                 
1426                 return;                          
1427         }                                        
1428                                                  
1429         init_waitqueue_func_entry(&pwq->wait, ep_poll_callback);
1430         pwq->whead = whead;                      
1431         pwq->base = epi;                         
1432         if (epi->event.events & EPOLLEXCLUSIVE)
1433                 add_wait_queue_exclusive(whead, &pwq->wait);
1434         else
1435                 add_wait_queue(whead, &pwq->wait);
1436         pwq->next = epi->pwqlist;                
1437         epi->pwqlist = pwq;                      
1438 }                                                
1439                                                  
1440 static void ep_rbtree_insert(struct eventpoll *ep, struct epitem *epi)
1441 {                                                
1442         int kcmp;                                
1443         struct rb_node **p = &ep->rbr.rb_root.rb_node, *parent = NULL;
1444         struct epitem *epic;                     
1445         bool leftmost = true;                    
1446                                                  
1447         while (*p) {                             
1448                 parent = *p;                     
1449                 epic = rb_entry(parent, struct epitem, rbn);
1450                 kcmp = ep_cmp_ffd(&epi->ffd, &epic->ffd);
1451                 if (kcmp > 0) {
1452                         p = &parent->rb_right;
1453                         leftmost = false;        
1454                 } else                           
1455                         p = &parent->rb_left;    
1456         }                                        
1457         rb_link_node(&epi->rbn, parent, p);      
1458         rb_insert_color_cached(&epi->rbn, &ep->rbr, leftmost);
1459 }                                                
1460                                                  
1461                                                  
1462                                                  
1463 #define PATH_ARR_SIZE 5                          
1464 /*                                               
1465  * These are the numbers of paths of length 1 to 5 that we are allowing to
1466  * emanate from a single file of interest. For example, we allow 1000 paths
1467  * of length 1 to emanate from each file of interest. This essentially
1468  * represents the potential wakeup paths, which need to be limited in order
1469  * to avoid massive uncontrolled wakeup storms. The common use case should be
1470  * a single ep which is connected to n file sources. In this case each file
1471  * source has 1 path of length 1. Thus, the numbers below should be more than
1472  * sufficient. These path limits are enforced during an EPOLL_CTL_ADD
1473  * operation, since a modify and delete can't add additional paths.
1474  */                                              
1475 static const int path_limits[PATH_ARR_SIZE] = { 1000, 500, 100, 50, 10 };
1476 static int path_count[PATH_ARR_SIZE];            
1477                                                  
1478 static int path_count_inc(int nests)             
1479 {                                                
1480         /* Allow an arbitrary number of depth 1 paths */
1481         if (nests == 0)                          
1482                 return 0;                        
1483                                                  
1484         if (++path_count[nests] > path_limits[nests])
1485                 return -1;                       
1486         return 0;                                
1487 }                                                
1488                                                  
1489 static void path_count_init(void)                
1490 {                                                
1491         int i;                                   
1492                                                  
1493         for (i = 0; i < PATH_ARR_SIZE; i++)      
1494                 path_count[i] = 0;               
1495 }                                                
1496                                                  
1497 static int reverse_path_check_proc(struct hlist_head *refs, int depth)
1498 {                                                
1499         int error = 0;                           
1500         struct epitem *epi;                      
1501                                                  
1502         if (depth > EP_MAX_NESTS) /* too deep nesting */
1503                 return -1;                       
1504                                                  
1505         /* CTL_DEL can remove links here, but that can't increase our count */
1506         hlist_for_each_entry_rcu(epi, refs, fllink) {
1507                 struct hlist_head *refs = &epi->ep->refs;
1508                 if (hlist_empty(refs))
1509                         error = path_count_inc(depth);
1510                 else
1511                         error = reverse_path_check_proc(refs, depth + 1);
1512                 if (error != 0)                  
1513                         break;                   
1514         }                                        
1515         return error;                            
1516 }                                                
1517                                                  
1518 /**                                              
1519  * reverse_path_check - The tfile_check_list is a list of epitems_head, which
1520  *                      have links that are proposed to be newly added. We
1521  *                      need to make sure that those added links don't add too
1522  *                      many paths such that we will spend all our time waking up
1523  *                      eventpoll objects.       
1524  *                                               
1525  * Return: %zero if the proposed links don't create too many paths,
1526  *          %-1 otherwise.                       
1527  */                                              
1528 static int reverse_path_check(void)              
1529 {                                                
1530         struct epitems_head *p;                  
1531                                                  
1532         for (p = tfile_check_list; p != EP_UNACTIVE_PTR; p = p->next) {
1533                 int error;                       
1534                 path_count_init();               
1535                 rcu_read_lock();                 
1536                 error = reverse_path_check_proc(&p->epitems, 0);
1537                 rcu_read_unlock();               
1538                 if (error)                       
1539                         return error;            
1540         }                                        
1541         return 0;                                
1542 }                                                
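/* A toy model of the reverse-path walk (hypothetical types, not kernel
 * code): every watch is an edge "file -> watching eventpoll", and an
 * eventpoll is itself a file that other eventpolls may watch. Counting
 * complete paths per depth, against a limits table like path_limits[]
 * above, mirrors reverse_path_check_proc():
 *
 *	#define MAX_DEPTH 5
 *	struct node { int nwatchers; struct node *watchers[8]; };
 *	static const int limits[MAX_DEPTH + 1] = { 0, 1000, 500, 100, 50, 10 };
 *	static int npaths[MAX_DEPTH + 1];
 *
 *	static int count_paths(const struct node *n, int depth)
 *	{
 *		if (depth > MAX_DEPTH)
 *			return -1;			// too deep nesting
 *		for (int i = 0; i < n->nwatchers; i++) {
 *			const struct node *w = n->watchers[i];
 *			if (w->nwatchers == 0) {	// a path ends here
 *				if (depth && ++npaths[depth] > limits[depth])
 *					return -1;	// would allow a storm
 *			} else if (count_paths(w, depth + 1)) {
 *				return -1;
 *			}
 *		}
 *		return 0;
 *	}
 */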
1543                                                  
1544 static int ep_create_wakeup_source(struct epitem *epi)
1545 {                                                
1546         struct name_snapshot n;                  
1547         struct wakeup_source *ws;                
1548                                                  
1549         if (!epi->ep->ws) {                      
1550                 epi->ep->ws = wakeup_source_register(NULL, "eventpoll");
1551                 if (!epi->ep->ws)                
1552                         return -ENOMEM;          
1553         }                                        
1554                                                  
1555         take_dentry_name_snapshot(&n, epi->ffd.file->f_path.dentry);
1556         ws = wakeup_source_register(NULL, n.name.name);
1557         release_dentry_name_snapshot(&n);        
1558                                                  
1559         if (!ws)                                 
1560                 return -ENOMEM;                  
1561         rcu_assign_pointer(epi->ws, ws);         
1562                                                  
1563         return 0;                                
1564 }                                                
1565                                                  
1566 /* rare code path, only used when EPOLL_CTL_MOD removes a wakeup source */
1567 static noinline void ep_destroy_wakeup_source(struct epitem *epi)
1568 {
1569         struct wakeup_source *ws = ep_wakeup_source(epi);
1570                                                  
1571         RCU_INIT_POINTER(epi->ws, NULL);         
1572                                                  
1573         /*                                       
1574          * wait for ep_pm_stay_awake_rcu to finish, synchronize_rcu is
1575          * used internally by wakeup_source_remove, too (called by
1576          * wakeup_source_unregister), so we cannot use call_rcu
1577          */                                      
1578         synchronize_rcu();                       
1579         wakeup_source_unregister(ws);            
1580 }                                                
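/* These wakeup sources back the EPOLLWAKEUP flag: they hold the system out
 * of autosuspend from the moment an event is queued until it is consumed.
 * A minimal sketch (the kernel silently strips the flag unless the caller
 * has CAP_BLOCK_SUSPEND):
 *
 *	#include <assert.h>
 *	#include <sys/epoll.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		int epfd = epoll_create1(0);
 *		int pfd[2];
 *		struct epoll_event ev = { .events = EPOLLIN | EPOLLWAKEUP };
 *
 *		assert(epfd >= 0 && pipe(pfd) == 0);
 *		assert(epoll_ctl(epfd, EPOLL_CTL_ADD, pfd[0], &ev) == 0);
 *		// Dropping EPOLLWAKEUP again takes the rare
 *		// ep_destroy_wakeup_source() path above.
 *		ev.events = EPOLLIN;
 *		assert(epoll_ctl(epfd, EPOLL_CTL_MOD, pfd[0], &ev) == 0);
 *		return 0;
 *	}
 */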
1581                                                  
1582 static int attach_epitem(struct file *file, struct epitem *epi)
1583 {                                                
1584         struct epitems_head *to_free = NULL;     
1585         struct hlist_head *head = NULL;          
1586         struct eventpoll *ep = NULL;             
1587                                                  
1588         if (is_file_epoll(file))                 
1589                 ep = file->private_data;         
1590                                                  
1591         if (ep) {                                
1592                 head = &ep->refs;                
1593         } else if (!READ_ONCE(file->f_ep)) {     
1594 allocate:                                        
1595                 to_free = kmem_cache_zalloc(ephead_cache, GFP_KERNEL);
1596                 if (!to_free)                    
1597                         return -ENOMEM;          
1598                 head = &to_free->epitems;        
1599         }                                        
1600         spin_lock(&file->f_lock);                
1601         if (!file->f_ep) {                       
1602                 if (unlikely(!head)) {           
1603                         spin_unlock(&file->f_lock);
1604                         goto allocate;           
1605                 }                                
1606                 file->f_ep = head;               
1607                 to_free = NULL;                  
1608         }                                        
1609         hlist_add_head_rcu(&epi->fllink, file->f_ep);
1610         spin_unlock(&file->f_lock);              
1611         free_ephead(to_free);                    
1612         return 0;                                
1613 }                                                
1614                                                  
1615 /*                                               
1616  * Must be called with "mtx" held.               
1617  */                                              
1618 static int ep_insert(struct eventpoll *ep, const struct epoll_event *event,
1619                      struct file *tfile, int fd, int full_check)
1620 {                                                
1621         int error, pwake = 0;                    
1622         __poll_t revents;                        
1623         struct epitem *epi;                      
1624         struct ep_pqueue epq;                    
1625         struct eventpoll *tep = NULL;            
1626                                                  
1627         if (is_file_epoll(tfile))                
1628                 tep = tfile->private_data;       
1629                                                  
1630         lockdep_assert_irqs_enabled();           
1631                                                  
1632         if (unlikely(percpu_counter_compare(&ep->user->epoll_watches,
1633                                             max_user_watches) >= 0))
1634                 return -ENOSPC;
1635         percpu_counter_inc(&ep->user->epoll_watches);
1636                                                  
1637         if (!(epi = kmem_cache_zalloc(epi_cache, GFP_KERNEL))) {
1638                 percpu_counter_dec(&ep->user->epoll_watches);
1639                 return -ENOMEM;                  
1640         }                                        
1641                                                  
1642         /* Item initialization follows here ... */
1643         INIT_LIST_HEAD(&epi->rdllink);           
1644         epi->ep = ep;                            
1645         ep_set_ffd(&epi->ffd, tfile, fd);        
1646         epi->event = *event;                     
1647         epi->next = EP_UNACTIVE_PTR;             
1648                                                  
1649         if (tep)                                 
1650                 mutex_lock_nested(&tep->mtx, 1);
1651         /* Add the current item to the list of active epoll hooks for this file */
1652         if (unlikely(attach_epitem(tfile, epi) < 0)) {
1653                 if (tep)
1654                         mutex_unlock(&tep->mtx);
1655                 kmem_cache_free(epi_cache, epi);
1656                 percpu_counter_dec(&ep->user->epoll_watches);
1657                 return -ENOMEM;                  
1658         }                                        
1659                                                  
1660         if (full_check && !tep)                  
1661                 list_file(tfile);                
1662                                                  
1663         /*                                       
1664          * Add the current item to the RB tree. All RB tree operations are
1665          * protected by "mtx", and ep_insert() is called with "mtx" held.
1666          */                                      
1667         ep_rbtree_insert(ep, epi);               
1668         if (tep)                                 
1669                 mutex_unlock(&tep->mtx);         
1670                                                  
1671         /*                                       
1672          * ep_remove_safe() calls in the later error paths can't lead to
1673          * ep_free() as the ep file itself still holds an ep reference.
1674          */                                      
1675         ep_get(ep);                              
1676                                                  
1677         /* now check if we've created too many backpaths */
1678         if (unlikely(full_check && reverse_path_check())) {
1679                 ep_remove_safe(ep, epi);         
1680                 return -EINVAL;                  
1681         }                                        
1682                                                  
1683         if (epi->event.events & EPOLLWAKEUP) {
1684                 error = ep_create_wakeup_source(epi);
1685                 if (error) {
1686                         ep_remove_safe(ep, epi);
1687                         return error;            
1688                 }                                
1689         }                                        
1690                                                  
1691         /* Initialize the poll table using the queue callback */
1692         epq.epi = epi;
1693         init_poll_funcptr(&epq.pt, ep_ptable_queue_proc);
1694                                                  
1695         /*                                       
1696          * Attach the item to the poll hooks and get current event bits.
1697          * We can safely use the file* here because its usage count has
1698          * been increased by the caller of this function. Note that after
1699          * this operation completes, the poll callback can start hitting
1700          * the new item.
1701          */                                      
1702         revents = ep_item_poll(epi, &epq.pt, 1);
1703                                                  
1704         /*                                       
1705          * We have to check if something went wrong during the poll wait queue
1706          * install process. Namely an allocation for a wait queue failed due
1707          * to high memory pressure.
1708          */                                      
1709         if (unlikely(!epq.epi)) {                
1710                 ep_remove_safe(ep, epi);         
1711                 return -ENOMEM;                  
1712         }                                        
1713                                                  
1714         /* We have to drop the new item inside our item list to keep it locked */
1715         write_lock_irq(&ep->lock);               
1716                                                  
1717         /* record NAPI ID of new item if present */
1718         ep_set_busy_poll_napi_id(epi);           
1719                                                  
1720         /* If the file is already "ready" we drop it inside the ready list */
1721         if (revents && !ep_is_linked(epi)) {
1722                 list_add_tail(&epi->rdllink, &ep->rdllist);
1723                 ep_pm_stay_awake(epi);           
1724                                                  
1725                 /* Notify waiting tasks that events are available */
1726                 if (waitqueue_active(&ep->wq))
1727                         wake_up(&ep->wq);
1728                 if (waitqueue_active(&ep->poll_wait))
1729                         pwake++;                 
1730         }                                        
1731                                                  
1732         write_unlock_irq(&ep->lock);             
1733                                                  
1734         /* We have to call this outside the lock */
1735         if (pwake)
1736                 ep_poll_safewake(ep, NULL, 0);
1737                                                  
1738         return 0;                                
1739 }                                                
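/* Every EPOLL_CTL_ADD lands in ep_insert(), and its error returns surface
 * directly to userspace: -ENOSPC when the per-user max_user_watches budget
 * is spent, -ENOMEM on allocation failure, -EINVAL from the reverse-path
 * check. A sketch of the caller-facing side:
 *
 *	#include <stdio.h>
 *	#include <sys/epoll.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		int epfd = epoll_create1(0);
 *		int pfd[2];
 *		struct epoll_event ev = { .events = EPOLLIN };
 *
 *		if (epfd < 0 || pipe(pfd) < 0)
 *			return 1;
 *		ev.data.fd = pfd[0];
 *		if (epoll_ctl(epfd, EPOLL_CTL_ADD, pfd[0], &ev) < 0) {
 *			// ENOSPC here means the percpu_counter_compare()
 *			// check at the top of ep_insert() fired.
 *			perror("EPOLL_CTL_ADD");
 *			return 1;
 *		}
 *		return 0;
 *	}
 */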
1740                                                  
1741 /*                                               
1742  * Modify the interest event mask by dropping an event if the new mask
1743  * has a match in the current file status. Must be called with "mtx" held.
1744  */                                              
1745 static int ep_modify(struct eventpoll *ep, struct epitem *epi,
1746                      const struct epoll_event *event)
1747 {                                                
1748         int pwake = 0;                           
1749         poll_table pt;                           
1750                                                  
1751         lockdep_assert_irqs_enabled();           
1752                                                  
1753         init_poll_funcptr(&pt, NULL);            
1754                                                  
1755         /*                                       
1756          * Set the new event interest mask before calling f_op->poll();
1757          * otherwise we might miss an event that happens between the
1758          * f_op->poll() call and the new event set registering.
1759          */                                      
1760         epi->event.events = event->events; /* need barrier below */
1761         epi->event.data = event->data; /* protected by mtx */
1762         if (epi->event.events & EPOLLWAKEUP) {
1763                 if (!ep_has_wakeup_source(epi))
1764                         ep_create_wakeup_source(epi);
1765         } else if (ep_has_wakeup_source(epi)) {
1766                 ep_destroy_wakeup_source(epi);
1767         }                                        
1768                                                  
1769         /*                                       
1770          * The following barrier has two effects:
1771          *
1772          * 1) Flush epi changes above to other CPUs.  This ensures
1773          *    we do not miss events from ep_poll_callback if an
1774          *    event occurs immediately after we call f_op->poll().
1775          *    We need this because we did not take ep->lock while
1776          *    changing epi above (but ep_poll_callback does take
1777          *    ep->lock).
1778          *
1779          * 2) We also need to ensure we do not miss _past_ events
1780          *    when calling f_op->poll().  This barrier also
1781          *    pairs with the barrier in wq_has_sleeper (see
1782          *    comments for wq_has_sleeper).
1783          *
1784          * This barrier will now guarantee ep_poll_callback or f_op->poll
1785          * (or both) will notice the readiness of an item.
1786          */                                      
1787         smp_mb();                                
1788                                                  
1789         /*                                       
1790          * Get current event bits. We can safely use the file* here because
1791          * its usage count has been increased by the caller of this function.
1792          * If the item is "hot" and it is not registered inside the ready
1793          * list, push it inside.
1794          */                                      
1795         if (ep_item_poll(epi, &pt, 1)) {         
1796                 write_lock_irq(&ep->lock);       
1797                 if (!ep_is_linked(epi)) {        
1798                         list_add_tail(&epi->rdllink, &ep->rdllist);
1799                         ep_pm_stay_awake(epi);
1800
1801                         /* Notify waiting tasks that events are available */
1802                         if (waitqueue_active(&ep->wq))
1803                                 wake_up(&ep->wq);
1804                         if (waitqueue_active(&ep->poll_wait))
1805                                 pwake++;         
1806                 }                                
1807                 write_unlock_irq(&ep->lock);     
1808         }                                        
1809                                                  
1810         /* We have to call this outside the lock */
1811         if (pwake)
1812                 ep_poll_safewake(ep, NULL, 0);
1813                                                  
1814         return 0;                                
1815 }                                                
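/* ep_modify() is also the re-arm path for EPOLLONESHOT: after delivery,
 * ep_send_events() masks the events down to EP_PRIVATE_BITS, and the next
 * EPOLL_CTL_MOD restores them and re-polls the file. Sketch:
 *
 *	#include <assert.h>
 *	#include <sys/epoll.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		int epfd = epoll_create1(0);
 *		int pfd[2];
 *		struct epoll_event ev = { .events = EPOLLIN | EPOLLONESHOT };
 *
 *		assert(epfd >= 0 && pipe(pfd) == 0);
 *		assert(epoll_ctl(epfd, EPOLL_CTL_ADD, pfd[0], &ev) == 0);
 *		assert(write(pfd[1], "x", 1) == 1);
 *		assert(epoll_wait(epfd, &ev, 1, -1) == 1);	// delivered once
 *		assert(epoll_wait(epfd, &ev, 1, 0) == 0);	// disarmed
 *		ev.events = EPOLLIN | EPOLLONESHOT;		// re-arm
 *		assert(epoll_ctl(epfd, EPOLL_CTL_MOD, pfd[0], &ev) == 0);
 *		assert(epoll_wait(epfd, &ev, 1, 0) == 1);	// pipe still readable
 *		return 0;
 *	}
 */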
1816                                                  
1817 static int ep_send_events(struct eventpoll *ep,
1818                           struct epoll_event __user *events, int maxevents)
1819 {                                                
1820         struct epitem *epi, *tmp;                
1821         LIST_HEAD(txlist);                       
1822         poll_table pt;                           
1823         int res = 0;                             
1824                                                  
1825         /*                                       
1826          * Always short-circuit for fatal signals to allow threads to make a
1827          * timely exit without the chance of finding more events available and
1828          * fetching repeatedly.
1829          */                                      
1830         if (fatal_signal_pending(current))       
1831                 return -EINTR;                   
1832                                                  
1833         init_poll_funcptr(&pt, NULL);            
1834                                                  
1835         mutex_lock(&ep->mtx);                    
1836         ep_start_scan(ep, &txlist);              
1837                                                  
1838         /*                                       
1839          * We can loop without lock because we are passed a task private list.
1840          * Items cannot vanish during the loop because we are holding ep->mtx.
1841          */                                      
1842         list_for_each_entry_safe(epi, tmp, &txlist, rdllink) {
1843                 struct wakeup_source *ws;        
1844                 __poll_t revents;                
1845                                                  
1846                 if (res >= maxevents)            
1847                         break;                   
1848                                                  
1849                 /*                               
1850                  * Activate ep->ws before deactivating epi->ws to prevent
1851                  * triggering auto-suspend here (in case we reactivate epi->ws
1852                  * below).
1853                  *
1854                  * This could be rearranged to delay the deactivation of epi->ws
1855                  * instead, but then epi->ws would temporarily be out of sync
1856                  * with ep_is_linked().          
1857                  */                              
1858                 ws = ep_wakeup_source(epi);      
1859                 if (ws) {                        
1860                         if (ws->active)          
1861                                 __pm_stay_awake(ep->ws);
1862                         __pm_relax(ws);          
1863                 }                                
1864                                                  
1865                 list_del_init(&epi->rdllink);    
1866                                                  
1867                 /*                               
1868                  * If the event mask intersects the caller-requested one,
1869                  * deliver the event to userspace. Again, we are holding ep->mtx,
1870                  * so no operations coming from userspace can change the item.
1871                  */
1872                 revents = ep_item_poll(epi, &pt, 1);
1873                 if (!revents)                    
1874                         continue;                
1875                                                  
1876                 events = epoll_put_uevent(revents, epi->event.data, events);
1877                 if (!events) {
1878                         list_add(&epi->rdllink, &txlist);
1879                         ep_pm_stay_awake(epi);
1880                         if (!res)
1881                                 res = -EFAULT;
1882                         break;                   
1883                 }                                
1884                 res++;                           
1885                 if (epi->event.events & EPOLLONESHOT)
1886                         epi->event.events &= EP_PRIVATE_BITS;
1887                 else if (!(epi->event.events & EPOLLET)) {
1888                         /*
1889                          * If this file has been added with Level
1890                          * Trigger mode, we need to insert back inside
1891                          * the ready list, so that the next call to
1892                          * epoll_wait() will check again the events
1893                          * availability. At this point, no one can insert
1894                          * into ep->rdllist besides us. The epoll_ctl()
1895                          * callers are locked out by
1896                          * ep_send_events() holding "mtx" and the
1897                          * poll callback will queue them in ep->ovflist.
1898                          */
1899                         list_add_tail(&epi->rdllink, &ep->rdllist);
1900                         ep_pm_stay_awake(epi);
1901                 }                                
1902         }                                        
1903         ep_done_scan(ep, &txlist);               
1904         mutex_unlock(&ep->mtx);                  
1905                                                  
1906         return res;                              
1907 }                                                
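/* The level-triggered requeue at the bottom of the loop above is what makes
 * the same readiness reappear on every epoll_wait() until it is drained,
 * while EPOLLET reports it once per edge. Sketch:
 *
 *	#include <assert.h>
 *	#include <sys/epoll.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		int epfd = epoll_create1(0);
 *		int pfd[2];
 *		struct epoll_event ev = { .events = EPOLLIN };	// LT
 *
 *		assert(epfd >= 0 && pipe(pfd) == 0);
 *		assert(epoll_ctl(epfd, EPOLL_CTL_ADD, pfd[0], &ev) == 0);
 *		assert(write(pfd[1], "x", 1) == 1);
 *		assert(epoll_wait(epfd, &ev, 1, 0) == 1);	// requeued...
 *		assert(epoll_wait(epfd, &ev, 1, 0) == 1);	// ...and again
 *		ev.events = EPOLLIN | EPOLLET;			// switch to ET
 *		assert(epoll_ctl(epfd, EPOLL_CTL_MOD, pfd[0], &ev) == 0);
 *		assert(epoll_wait(epfd, &ev, 1, 0) == 1);	// MOD re-polled
 *		assert(epoll_wait(epfd, &ev, 1, 0) == 0);	// no new edge
 *		return 0;
 *	}
 */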
1908                                                  
1909 static struct timespec64 *ep_timeout_to_timespec(struct timespec64 *to, long ms)
1910 {                                                
1911         struct timespec64 now;                   
1912                                                  
1913         if (ms < 0)                              
1914                 return NULL;                     
1915                                                  
1916         if (!ms) {                               
1917                 to->tv_sec = 0;                  
1918                 to->tv_nsec = 0;                 
1919                 return to;                       
1920         }                                        
1921                                                  
1922         to->tv_sec = ms / MSEC_PER_SEC;          
1923         to->tv_nsec = NSEC_PER_MSEC * (ms % MSEC_PER_SEC);
1924                                                  
1925         ktime_get_ts64(&now);                    
1926         *to = timespec64_add_safe(now, *to);     
1927         return to;                               
1928 }                                                
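/* Worked instance of the conversion: 1234 ms splits into 1 s + 234000000 ns
 * and is then anchored to "now", producing the absolute expiry later fed to
 * schedule_hrtimeout_range(). The same arithmetic in userspace types (a
 * sketch):
 *
 *	#include <assert.h>
 *	#include <time.h>
 *
 *	int main(void)
 *	{
 *		long ms = 1234;
 *		struct timespec now, to = {
 *			.tv_sec  = ms / 1000,			// 1 s
 *			.tv_nsec = (ms % 1000) * 1000000L,	// 234 ms
 *		};
 *
 *		assert(to.tv_sec == 1 && to.tv_nsec == 234000000L);
 *		assert(clock_gettime(CLOCK_MONOTONIC, &now) == 0);
 *		to.tv_sec += now.tv_sec;		// interval -> deadline
 *		to.tv_nsec += now.tv_nsec;
 *		if (to.tv_nsec >= 1000000000L) {	// carry the nanoseconds
 *			to.tv_sec++;
 *			to.tv_nsec -= 1000000000L;
 *		}
 *		return 0;
 *	}
 */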
1929                                                  
1930 /*                                               
1931  * autoremove_wake_function, but remove even on failure to wake up, because we
1932  * know that default_wake_function/ttwu will only fail if the thread is already
1933  * woken, and in that case the ep_poll loop will remove the entry anyway, not
1934  * try to reuse it.                              
1935  */                                              
1936 static int ep_autoremove_wake_function(struct wait_queue_entry *wq_entry,
1937                                        unsigned int mode, int sync, void *key)
1938 {
1939         int ret = default_wake_function(wq_entry, mode, sync, key);
1940                                                  
1941         /*                                       
1942          * Pairs with list_empty_careful in ep_poll, and ensures future loop
1943          * iterations see the cause of this wakeup.
1944          */
1945         list_del_init_careful(&wq_entry->entry);
1946         return ret;                              
1947 }                                                
1948                                                  
1949 /**                                              
1950  * ep_poll - Retrieves ready events, and delivers them to the caller-supplied
1951  *           event buffer.
1952  *
1953  * @ep: Pointer to the eventpoll context.
1954  * @events: Pointer to the userspace buffer where the ready events should be
1955  *          stored.
1956  * @maxevents: Size (in terms of number of events) of the caller event buffer.
1957  * @timeout: Maximum timeout for the ready events fetch operation, in
1958  *           timespec. If the timeout is zero, the function will not block,
1959  *           while if the @timeout ptr is NULL, the function will block
1960  *           until at least one event has been retrieved (or an error
1961  *           occurred).
1962  *
1963  * Return: the number of ready events which have been fetched, or an
1964  *          error code, in case of error.
1965  */                                              
1966 static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
1967                    int maxevents, struct timespec64 *timeout)
1968 {                                                
1969         int res, eavail, timed_out = 0;          
1970         u64 slack = 0;                           
1971         wait_queue_entry_t wait;                 
1972         ktime_t expires, *to = NULL;             
1973                                                  
1974         lockdep_assert_irqs_enabled();           
1975                                                  
1976         if (timeout && (timeout->tv_sec | timeout->tv_nsec)) {
1977                 slack = select_estimate_accuracy(timeout);
1978                 to = &expires;
1979                 *to = timespec64_to_ktime(*timeout);
1980         } else if (timeout) {                    
1981                 /*                               
1982                  * Avoid the unnecessary trip to the wait queue loop, if the
1983                  * caller specified a non blocking operation.
1984                  */                              
1985                 timed_out = 1;                   
1986         }                                        
1987                                                  
1988         /*                                       
1989          * This call is racy: We may or may not see events that are being added
1990          * to the ready list under the lock (e.g., in IRQ callbacks). For cases
1991          * with a non-zero timeout, this thread will check the ready list under
1992          * lock and will add to the wait queue. For cases with a zero
1993          * timeout, the user by definition should not care and will have to
1994          * recheck again.
1995          */                                      
1996         eavail = ep_events_available(ep);        
1997                                                  
1998         while (1) {                              
1999                 if (eavail) {                    
2000                         /*                       
2001                          * Try to transfer events to user space. In case we get
2002                          * 0 events and there's still timeout left over, we go
2003                          * trying again in search of more luck.
2004                          */
2005                         res = ep_send_events(ep, events, maxevents);
2006                         if (res)                 
2007                                 return res;      
2008                 }                                
2009                                                  
2010                 if (timed_out)                   
2011                         return 0;                
2012                                                  
2013                 eavail = ep_busy_loop(ep, timed_out);
2014                 if (eavail)                      
2015                         continue;                
2016                                                  
2017                 if (signal_pending(current))     
2018                         return -EINTR;           
2019                                                  
2020                 /*                               
2021                  * Internally init_wait() uses autoremove_wake_function(),
2022                  * thus the wait entry is removed from the wait queue on each
2023                  * wakeup. Why is that important? In case of several waiters
2024                  * each new wakeup will hit the next waiter, giving it the
2025                  * chance to harvest new events. Otherwise a wakeup can be
2026                  * lost. This is also good performance-wise, because on a
2027                  * normal wakeup path there is no need to call
2028                  * __remove_wait_queue() explicitly, thus ep->lock is not
2029                  * taken, which would stall event delivery.
2030                  *
2031                  * In fact, we now use an even more aggressive function that
2032                  * unconditionally removes, because we don't reuse the wait
2033                  * entry between loop iterations. This lets us also avoid the
2034                  * performance issue if a process is killed, causing all of its
2035                  * threads to wake up without being removed normally.
2036                  */                              
2037                 init_wait(&wait);                
2038                 wait.func = ep_autoremove_wake_function;
2039                                                  
2040                 write_lock_irq(&ep->lock);       
2041                 /*                               
2042                  * Barrierless variant, waitqueue_active() is called under
2043                  * the same lock on the wakeup ep_poll_callback() side, so it
2044                  * is safe to avoid an explicit barrier.
2045                  */                              
2046                 __set_current_state(TASK_INTE    
2047                                                  
2048                 /*                               
2049                  * Do the final check under t    
2050                  * plays with two lists (->rd    
2051                  * is always a race when both    
2052                  * period of time although ev    
2053                  * important.                    
2054                  */                              
2055                 eavail = ep_events_available(    
2056                 if (!eavail)                     
2057                         __add_wait_queue_excl    
2058                                                  
2059                 write_unlock_irq(&ep->lock);     
2060                                                  
2061                 if (!eavail)                     
2062                         timed_out = !schedule    
2063                                                  
2064                 __set_current_state(TASK_RUNN    
2065                                                  
2066                 /*                               
2067                  * We were woken up, thus go     
2068                  * If timed out and still on     
2069                  * carefully under lock, belo    
2070                  */                              
2071                 eavail = 1;                      
2072                                                  
2073                 if (!list_empty_careful(&wait    
2074                         write_lock_irq(&ep->l    
2075                         /*                       
2076                          * If the thread time    
2077                          * it means that the     
2078                          * timeout expired be    
2079                          * Thus, when wait.en    
2080                          * events.               
2081                          */                      
2082                         if (timed_out)           
2083                                 eavail = list    
2084                         __remove_wait_queue(&    
2085                         write_unlock_irq(&ep-    
2086                 }                                
2087         }                                        
2088 }                                                
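
/*
 * Editor's sketch (not part of eventpoll.c): the init_wait() discussion
 * above refers to the generic wait-queue idiom from <linux/wait.h>. For
 * comparison, the classic pattern looks roughly like this; "wq" and
 * "condition" are placeholders, not names from this file. ep_poll()
 * departs from it by installing ep_autoremove_wake_function, which
 * dequeues the entry unconditionally rather than only on a successful
 * wakeup.
 */
#include <linux/wait.h>
#include <linux/sched.h>

static void wait_for_condition_sketch(wait_queue_head_t *wq, bool (*condition)(void))
{
        DEFINE_WAIT(wait);              /* uses autoremove_wake_function() */

        for (;;) {
                prepare_to_wait(wq, &wait, TASK_INTERRUPTIBLE);
                if (condition())        /* re-check after queueing: no lost wakeups */
                        break;
                schedule();             /* waker dequeues us via autoremove */
        }
        finish_wait(wq, &wait);         /* idempotent removal + TASK_RUNNING */
}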
2089                                                  
2090 /**
2091  * ep_loop_check_proc - verify that adding an epoll file inside another
2092  *                      epoll structure does not violate the constraints, in
2093  *                      terms of closed loops, or too deep chains (which can
2094  *                      result in excessive stack usage).
2095  *
2096  * @ep: the &struct eventpoll to be currently checked.
2097  * @depth: Current depth of the path being checked.
2098  *
2099  * Return: %zero if adding the epoll file inside the current epoll
2100  *          structure @ep does not violate the constraints, or %-1 otherwise.
2101  */
2102 static int ep_loop_check_proc(struct eventpoll *ep, int depth)
2103 {
2104         int error = 0;
2105         struct rb_node *rbp;
2106         struct epitem *epi;
2107 
2108         mutex_lock_nested(&ep->mtx, depth + 1);
2109         ep->gen = loop_check_gen;
2110         for (rbp = rb_first_cached(&ep->rbr); rbp; rbp = rb_next(rbp)) {
2111                 epi = rb_entry(rbp, struct epitem, rbn);
2112                 if (unlikely(is_file_epoll(epi->ffd.file))) {
2113                         struct eventpoll *ep_tovisit;
2114                         ep_tovisit = epi->ffd.file->private_data;
2115                         if (ep_tovisit->gen == loop_check_gen)
2116                                 continue;
2117                         if (ep_tovisit == inserting_into || depth > EP_MAX_NESTS)
2118                                 error = -1;
2119                         else
2120                                 error = ep_loop_check_proc(ep_tovisit, depth + 1);
2121                         if (error != 0)
2122                                 break;
2123                 } else {
2124                         /*
2125                          * If we've reached a file that is not associated with
2126                          * an ep, then we need to check if the newly added
2127                          * links are going to add too many wakeup paths. We do
2128                          * this by adding it to the tfile_check_list, if it's
2129                          * not already there, and calling reverse_path_check()
2130                          * during ep_insert().
2131                          */
2132                         list_file(epi->ffd.file);
2133                 }
2134         }
2135         mutex_unlock(&ep->mtx);
2136 
2137         return error;
2138 }
2139                                                  
2140 /**
2141  * ep_loop_check - Performs a check to verify that adding an epoll file (@to)
2142  *                 into another epoll file (represented by @ep) does not create
2143  *                 closed loops or too deep chains.
2144  *
2145  * @ep: Pointer to the epoll we are inserting into.
2146  * @to: Pointer to the epoll to be inserted.
2147  *
2148  * Return: %zero if adding the epoll @to inside the epoll @ep
2149  * does not violate the constraints, or %-1 otherwise.
2150  */
2151 static int ep_loop_check(struct eventpoll *ep, struct eventpoll *to)
2152 {
2153         inserting_into = ep;
2154         return ep_loop_check_proc(to, 0);
2155 }
2156 
2157 static void clear_tfile_check_list(void)
2158 {
2159         rcu_read_lock();
2160         while (tfile_check_list != EP_UNACTIVE_PTR) {
2161                 struct epitems_head *head = tfile_check_list;
2162                 tfile_check_list = head->next;
2163                 unlist_file(head);
2164         }
2165         rcu_read_unlock();
2166 }
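
/*
 * Editor's sketch (userspace, not part of eventpoll.c): what the loop
 * check above rejects. Nesting one epoll fd in another is allowed; a
 * cycle fails at EPOLL_CTL_ADD time with -ELOOP. Error handling is
 * elided for brevity.
 */
#include <sys/epoll.h>
#include <assert.h>
#include <errno.h>

int main(void)
{
        int ep1 = epoll_create1(0);
        int ep2 = epoll_create1(0);
        struct epoll_event ev = { .events = EPOLLIN };

        ev.data.fd = ep2;
        assert(epoll_ctl(ep1, EPOLL_CTL_ADD, ep2, &ev) == 0);  /* ep1 watches ep2 */
        ev.data.fd = ep1;
        /* Closing the cycle would let wakeups chase their own tail. */
        assert(epoll_ctl(ep2, EPOLL_CTL_ADD, ep1, &ev) == -1 && errno == ELOOP);
        return 0;
}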
2167                                                  
2168 /*
2169  * Open an eventpoll file descriptor.
2170  */
2171 static int do_epoll_create(int flags)
2172 {
2173         int error, fd;
2174         struct eventpoll *ep = NULL;
2175         struct file *file;
2176 
2177         /* Check the EPOLL_* constant for consistency.  */
2178         BUILD_BUG_ON(EPOLL_CLOEXEC != O_CLOEXEC);
2179 
2180         if (flags & ~EPOLL_CLOEXEC)
2181                 return -EINVAL;
2182         /*
2183          * Create the internal data structure ("struct eventpoll").
2184          */
2185         error = ep_alloc(&ep);
2186         if (error < 0)
2187                 return error;
2188         /*
2189          * Creates all the items needed to set up an eventpoll file. That is,
2190          * a file structure and a free file descriptor.
2191          */
2192         fd = get_unused_fd_flags(O_RDWR | (flags & O_CLOEXEC));
2193         if (fd < 0) {
2194                 error = fd;
2195                 goto out_free_ep;
2196         }
2197         file = anon_inode_getfile("[eventpoll]", &eventpoll_fops, ep,
2198                                  O_RDWR | (flags & O_CLOEXEC));
2199         if (IS_ERR(file)) {
2200                 error = PTR_ERR(file);
2201                 goto out_free_fd;
2202         }
2203 #ifdef CONFIG_NET_RX_BUSY_POLL
2204         ep->busy_poll_usecs = 0;
2205         ep->busy_poll_budget = 0;
2206         ep->prefer_busy_poll = false;
2207 #endif
2208         ep->file = file;
2209         fd_install(fd, file);
2210         return fd;
2211 
2212 out_free_fd:
2213         put_unused_fd(fd);
2214 out_free_ep:
2215         ep_clear_and_put(ep);
2216         return error;
2217 }
2218                                                  
2219 SYSCALL_DEFINE1(epoll_create1, int, flags)       
2220 {                                                
2221         return do_epoll_create(flags);           
2222 }                                                
2223                                                  
2224 SYSCALL_DEFINE1(epoll_create, int, size)         
2225 {                                                
2226         if (size <= 0)                           
2227                 return -EINVAL;                  
2228                                                  
2229         return do_epoll_create(0);               
2230 }                                                
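
/*
 * Editor's sketch (userspace): both entry points above funnel into
 * do_epoll_create(). EPOLL_CLOEXEC is the only accepted flag, and the
 * legacy size argument is only checked for being positive.
 */
#include <sys/epoll.h>

int make_epoll_fd(void)
{
        int epfd = epoll_create1(EPOLL_CLOEXEC);        /* any other flag: EINVAL */

        /* Equivalent legacy form; the "64" hint is ignored beyond size > 0: */
        /* epfd = epoll_create(64); */
        return epfd;    /* -1 with errno set on failure */
}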
2231                                                  
2232 #ifdef CONFIG_PM_SLEEP                           
2233 static inline void ep_take_care_of_epollwakeup(struct epoll_event *epev)
2234 {
2235         if ((epev->events & EPOLLWAKEUP) && !capable(CAP_BLOCK_SUSPEND))
2236                 epev->events &= ~EPOLLWAKEUP;
2237 }
2238 #else
2239 static inline void ep_take_care_of_epollwakeup(struct epoll_event *epev)
2240 {
2241         epev->events &= ~EPOLLWAKEUP;
2242 }
2243 #endif                                           
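
/*
 * Editor's sketch (userspace): EPOLLWAKEUP is filtered, not rejected.
 * Without CAP_BLOCK_SUSPEND (or with CONFIG_PM_SLEEP disabled) the bit
 * is silently cleared by the helper above, so a caller cannot detect
 * the downgrade from the return value alone.
 */
#include <sys/epoll.h>

int add_wakeup_source(int epfd, int fd)
{
        struct epoll_event ev = {
                .events = EPOLLIN | EPOLLWAKEUP,        /* wakeup source iff capable */
                .data.fd = fd,
        };

        return epoll_ctl(epfd, EPOLL_CTL_ADD, fd, &ev);
}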
2244                                                  
2245 static inline int epoll_mutex_lock(struct mutex *mutex, int depth,
2246                                    bool nonblock)
2247 {
2248         if (!nonblock) {
2249                 mutex_lock_nested(mutex, depth);
2250                 return 0;
2251         }
2252         if (mutex_trylock(mutex))
2253                 return 0;
2254         return -EAGAIN;
2255 }
2256                                                  
2257 int do_epoll_ctl(int epfd, int op, int fd, struct epoll_event *epds,
2258                  bool nonblock)
2259 {
2260         int error;
2261         int full_check = 0;
2262         struct fd f, tf;
2263         struct eventpoll *ep;
2264         struct epitem *epi;
2265         struct eventpoll *tep = NULL;
2266 
2267         error = -EBADF;
2268         f = fdget(epfd);
2269         if (!f.file)
2270                 goto error_return;
2271 
2272         /* Get the "struct file *" for the target file */
2273         tf = fdget(fd);
2274         if (!tf.file)
2275                 goto error_fput;
2276 
2277         /* The target file descriptor must support poll */
2278         error = -EPERM;
2279         if (!file_can_poll(tf.file))
2280                 goto error_tgt_fput;
2281 
2282         /* Check if EPOLLWAKEUP is allowed */
2283         if (ep_op_has_event(op))
2284                 ep_take_care_of_epollwakeup(epds);
2285 
2286         /*
2287          * We have to check that the file structure underneath the file descriptor
2288          * the user passed to us _is_ an eventpoll file. And also we do not permit
2289          * adding an epoll file descriptor inside itself.
2290          */
2291         error = -EINVAL;
2292         if (f.file == tf.file || !is_file_epoll(f.file))
2293                 goto error_tgt_fput;
2294 
2295         /*
2296          * epoll adds to the wakeup queue at EPOLL_CTL_ADD time only,
2297          * so EPOLLEXCLUSIVE is not allowed for an EPOLL_CTL_MOD operation.
2298          * Also, we do not currently support nested exclusive wakeups.
2299          */
2300         if (ep_op_has_event(op) && (epds->events & EPOLLEXCLUSIVE)) {
2301                 if (op == EPOLL_CTL_MOD)
2302                         goto error_tgt_fput;
2303                 if (op == EPOLL_CTL_ADD && (is_file_epoll(tf.file) ||
2304                                 (epds->events & ~EPOLLEXCLUSIVE_OK_BITS)))
2305                         goto error_tgt_fput;
2306         }
2307 
2308         /*
2309          * At this point it is safe to assume that the "private_data" contains
2310          * our own data structure.
2311          */
2312         ep = f.file->private_data;
2313 
2314         /*
2315          * When we insert an epoll file descriptor inside another epoll file
2316          * descriptor, there is the chance of creating closed loops, which are
2317          * better handled here than in more critical paths. While we are
2318          * checking for loops we also determine the list of files reachable
2319          * and hang them on the tfile_check_list, so we can check that we
2320          * haven't created too many possible wakeup paths.
2321          *
2322          * We do not need to take the global 'epnested_mutex' on EPOLL_CTL_ADD when
2323          * the epoll file descriptor is attaching directly to a wakeup source,
2324          * unless the epoll file descriptor is nested. The purpose of taking the
2325          * 'epnested_mutex' on add is to prevent complex topologies such as loops and
2326          * deep wakeup paths from forming in parallel through multiple
2327          * EPOLL_CTL_ADD operations.
2328          */
2329         error = epoll_mutex_lock(&ep->mtx, 0, nonblock);
2330         if (error)
2331                 goto error_tgt_fput;
2332         if (op == EPOLL_CTL_ADD) {
2333                 if (READ_ONCE(f.file->f_ep) || ep->gen == loop_check_gen ||
2334                     is_file_epoll(tf.file)) {
2335                         mutex_unlock(&ep->mtx);
2336                         error = epoll_mutex_lock(&epnested_mutex, 0, nonblock);
2337                         if (error)
2338                                 goto error_tgt_fput;
2339                         loop_check_gen++;
2340                         full_check = 1;
2341                         if (is_file_epoll(tf.file)) {
2342                                 tep = tf.file->private_data;
2343                                 error = -ELOOP;
2344                                 if (ep_loop_check(ep, tep) != 0)
2345                                         goto error_tgt_fput;
2346                         }
2347                         error = epoll_mutex_lock(&ep->mtx, 0, nonblock);
2348                         if (error)
2349                                 goto error_tgt_fput;
2350                 }
2351         }
2352 
2353         /*
2354          * Try to look up the file inside our RB tree. Since we grabbed "mtx"
2355          * above, we can be sure to be able to use the item looked up by
2356          * ep_find() till we release the mutex.
2357          */
2358         epi = ep_find(ep, tf.file, fd);
2359 
2360         error = -EINVAL;
2361         switch (op) {
2362         case EPOLL_CTL_ADD:
2363                 if (!epi) {
2364                         epds->events |= EPOLLERR | EPOLLHUP;
2365                         error = ep_insert(ep, epds, tf.file, fd, full_check);
2366                 } else
2367                         error = -EEXIST;
2368                 break;
2369         case EPOLL_CTL_DEL:
2370                 if (epi) {
2371                         /*
2372                          * The eventpoll itself is still alive: the refcount
2373                          * can't go to zero here.
2374                          */
2375                         ep_remove_safe(ep, epi);
2376                         error = 0;
2377                 } else {
2378                         error = -ENOENT;
2379                 }
2380                 break;
2381         case EPOLL_CTL_MOD:
2382                 if (epi) {
2383                         if (!(epi->event.events & EPOLLEXCLUSIVE)) {
2384                                 epds->events |= EPOLLERR | EPOLLHUP;
2385                                 error = ep_modify(ep, epi, epds);
2386                         }
2387                 } else
2388                         error = -ENOENT;
2389                 break;
2390         }
2391         mutex_unlock(&ep->mtx);
2392 
2393 error_tgt_fput:
2394         if (full_check) {
2395                 clear_tfile_check_list();
2396                 loop_check_gen++;
2397                 mutex_unlock(&epnested_mutex);
2398         }
2399 
2400         fdput(tf);
2401 error_fput:
2402         fdput(f);
2403 error_return:
2404 
2405         return error;
2406 }
2407                                                  
2408 /*
2409  * The following function implements the controller interface for
2410  * the eventpoll file that enables the insertion/removal/change of
2411  * file descriptors inside the interest set.
2412  */
2413 SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
2414                 struct epoll_event __user *, event)
2415 {
2416         struct epoll_event epds;
2417 
2418         if (ep_op_has_event(op) &&
2419             copy_from_user(&epds, event, sizeof(struct epoll_event)))
2420                 return -EFAULT;
2421 
2422         return do_epoll_ctl(epfd, op, fd, &epds, false);
2423 }
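
/*
 * Editor's sketch (userspace): the three controller operations handled
 * by do_epoll_ctl() above. EPOLLERR and EPOLLHUP are OR-ed in by the
 * kernel on ADD/MOD, so they need not be requested explicitly, and the
 * event argument of EPOLL_CTL_DEL is ignored.
 */
#include <sys/epoll.h>
#include <stddef.h>

int watch_then_unwatch(int epfd, int fd)
{
        struct epoll_event ev = { .events = EPOLLIN, .data.fd = fd };

        if (epoll_ctl(epfd, EPOLL_CTL_ADD, fd, &ev))    /* errno == EEXIST if present */
                return -1;
        ev.events = EPOLLIN | EPOLLOUT;
        if (epoll_ctl(epfd, EPOLL_CTL_MOD, fd, &ev))    /* errno == ENOENT if absent */
                return -1;
        return epoll_ctl(epfd, EPOLL_CTL_DEL, fd, NULL);
}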
2424                                                  
2425 /*
2426  * Implement the event wait interface for the eventpoll file. It is the kernel
2427  * part of the user space epoll_wait(2).
2428  */
2429 static int do_epoll_wait(int epfd, struct epoll_event __user *events,
2430                          int maxevents, struct timespec64 *to)
2431 {
2432         int error;
2433         struct fd f;
2434         struct eventpoll *ep;
2435 
2436         /* The maximum number of events must be greater than zero */
2437         if (maxevents <= 0 || maxevents > EP_MAX_EVENTS)
2438                 return -EINVAL;
2439 
2440         /* Verify that the area passed by the user is writable */
2441         if (!access_ok(events, maxevents * sizeof(struct epoll_event)))
2442                 return -EFAULT;
2443 
2444         /* Get the "struct file *" for the eventpoll file */
2445         f = fdget(epfd);
2446         if (!f.file)
2447                 return -EBADF;
2448 
2449         /*
2450          * We have to check that the file structure underneath the fd
2451          * the user passed to us _is_ an eventpoll file.
2452          */
2453         error = -EINVAL;
2454         if (!is_file_epoll(f.file))
2455                 goto error_fput;
2456 
2457         /*
2458          * At this point it is safe to assume that the "private_data" contains
2459          * our own data structure.
2460          */
2461         ep = f.file->private_data;
2462 
2463         /* Time to fish for events ... */
2464         error = ep_poll(ep, events, maxevents, to);
2465 
2466 error_fput:
2467         fdput(f);
2468         return error;
2469 }
2470                                                  
2471 SYSCALL_DEFINE4(epoll_wait, int, epfd, struct epoll_event __user *, events,
2472                 int, maxevents, int, timeout)
2473 {
2474         struct timespec64 to;
2475 
2476         return do_epoll_wait(epfd, events, maxevents,
2477                              ep_timeout_to_timespec(&to, timeout));
2478 }
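
/*
 * Editor's sketch (userspace): the contract ep_poll() implements. A
 * return of 0 means the timeout expired; -1 with errno == EINTR means
 * a signal arrived first. handle_fd() is a hypothetical callback.
 */
#include <sys/epoll.h>
#include <errno.h>

void event_loop(int epfd, void (*handle_fd)(int fd, unsigned int events))
{
        struct epoll_event evs[64];

        for (;;) {
                int n = epoll_wait(epfd, evs, 64, 1000 /* ms */);

                if (n < 0 && errno == EINTR)
                        continue;       /* interrupted: just retry */
                if (n < 0)
                        break;          /* EBADF/EFAULT/EINVAL: give up */
                for (int i = 0; i < n; i++)
                        handle_fd(evs[i].data.fd, evs[i].events);
        }
}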
2479                                                  
2480 /*
2481  * Implement the event wait interface for the eventpoll file. It is the kernel
2482  * part of the user space epoll_pwait(2).
2483  */
2484 static int do_epoll_pwait(int epfd, struct epoll_event __user *events,
2485                           int maxevents, struct timespec64 *to,
2486                           const sigset_t __user *sigmask, size_t sigsetsize)
2487 {
2488         int error;
2489 
2490         /*
2491          * If the caller wants a certain signal mask to be set during the wait,
2492          * we apply it here.
2493          */
2494         error = set_user_sigmask(sigmask, sigsetsize);
2495         if (error)
2496                 return error;
2497 
2498         error = do_epoll_wait(epfd, events, maxevents, to);
2499 
2500         restore_saved_sigmask_unless(error == -EINTR);
2501 
2502         return error;
2503 }
2504 
2505 SYSCALL_DEFINE6(epoll_pwait, int, epfd, struct epoll_event __user *, events,
2506                 int, maxevents, int, timeout, const sigset_t __user *, sigmask,
2507                 size_t, sigsetsize)
2508 {
2509         struct timespec64 to;
2510 
2511         return do_epoll_pwait(epfd, events, maxevents,
2512                               ep_timeout_to_timespec(&to, timeout),
2513                               sigmask, sigsetsize);
2514 }
2515                                                  
2516 SYSCALL_DEFINE6(epoll_pwait2, int, epfd, struct epoll_event __user *, events,
2517                 int, maxevents, const struct __kernel_timespec __user *, timeout,
2518                 const sigset_t __user *, sigmask, size_t, sigsetsize)
2519 {
2520         struct timespec64 ts, *to = NULL;
2521 
2522         if (timeout) {
2523                 if (get_timespec64(&ts, timeout))
2524                         return -EFAULT;
2525                 to = &ts;
2526                 if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec))
2527                         return -EINVAL;
2528         }
2529 
2530         return do_epoll_pwait(epfd, events, maxevents, to,
2531                               sigmask, sigsetsize);
2532 }
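
/*
 * Editor's sketch (userspace): epoll_pwait2() takes a nanosecond
 * struct timespec (NULL blocks indefinitely) and installs sigmask
 * atomically for the duration of the wait, like pselect(2). The
 * wrapper needs a reasonably recent glibc, or a raw syscall.
 */
#include <sys/epoll.h>
#include <signal.h>
#include <time.h>

int wait_blocking_signals(int epfd, struct epoll_event *evs, int maxevents)
{
        struct timespec ts = { .tv_sec = 0, .tv_nsec = 250000000 };     /* 250 ms */
        sigset_t mask;

        sigfillset(&mask);
        sigdelset(&mask, SIGTERM);      /* only SIGTERM may interrupt the wait */
        return epoll_pwait2(epfd, evs, maxevents, &ts, &mask);
}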
2533                                                  
2534 #ifdef CONFIG_COMPAT
2535 static int do_compat_epoll_pwait(int epfd, struct epoll_event __user *events,
2536                                  int maxevents, struct timespec64 *timeout,
2537                                  const compat_sigset_t __user *sigmask,
2538                                  compat_size_t sigsetsize)
2539 {
2540         long err;
2541 
2542         /*
2543          * If the caller wants a certain signal mask to be set during the wait,
2544          * we apply it here.
2545          */
2546         err = set_compat_user_sigmask(sigmask, sigsetsize);
2547         if (err)
2548                 return err;
2549 
2550         err = do_epoll_wait(epfd, events, maxevents, timeout);
2551 
2552         restore_saved_sigmask_unless(err == -EINTR);
2553 
2554         return err;
2555 }
2556 
2557 COMPAT_SYSCALL_DEFINE6(epoll_pwait, int, epfd,
2558                        struct epoll_event __user *, events,
2559                        int, maxevents, int, timeout,
2560                        const compat_sigset_t __user *, sigmask,
2561                        compat_size_t, sigsetsize)
2562 {
2563         struct timespec64 to;
2564 
2565         return do_compat_epoll_pwait(epfd, events, maxevents,
2566                                      ep_timeout_to_timespec(&to, timeout),
2567                                      sigmask, sigsetsize);
2568 }
2569 
2570 COMPAT_SYSCALL_DEFINE6(epoll_pwait2, int, epfd,
2571                        struct epoll_event __user *, events,
2572                        int, maxevents,
2573                        const struct __kernel_timespec __user *, timeout,
2574                        const compat_sigset_t __user *, sigmask,
2575                        compat_size_t, sigsetsize)
2576 {
2577         struct timespec64 ts, *to = NULL;
2578 
2579         if (timeout) {
2580                 if (get_timespec64(&ts, timeout))
2581                         return -EFAULT;
2582                 to = &ts;
2583                 if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec))
2584                         return -EINVAL;
2585         }
2586 
2587         return do_compat_epoll_pwait(epfd, events, maxevents, to,
2588                                      sigmask, sigsetsize);
2589 }
2590 
2591 #endif
2592                                                  
2593 static int __init eventpoll_init(void)
2594 {
2595         struct sysinfo si;
2596 
2597         si_meminfo(&si);
2598         /*
2599          * Allows the top 4% of low memory to be allocated for epoll watches (per user).
2600          */
2601         max_user_watches = (((si.totalram - si.totalhigh) / 25) << PAGE_SHIFT) /
2602                 EP_ITEM_COST;
2603         BUG_ON(max_user_watches < 0);
2604 
2605         /*
2606          * We can have many thousands of epitems, so prevent this from
2607          * using an extra cache line on 64-bit (and smaller) CPUs.
2608          */
2609         BUILD_BUG_ON(sizeof(void *) <= 8 && sizeof(struct epitem) > 128);
2610 
2611         /* Allocates slab cache used to allocate "struct epitem" items */
2612         epi_cache = kmem_cache_create("eventpoll_epi", sizeof(struct epitem),
2613                         0, SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT, NULL);
2614 
2615         /* Allocates slab cache used to allocate "struct eppoll_entry" */
2616         pwq_cache = kmem_cache_create("eventpoll_pwq",
2617                 sizeof(struct eppoll_entry), 0, SLAB_PANIC|SLAB_ACCOUNT, NULL);
2618         epoll_sysctls_init();
2619 
2620         ephead_cache = kmem_cache_create("ep_head",
2621                 sizeof(struct epitems_head), 0, SLAB_PANIC|SLAB_ACCOUNT, NULL);
2622 
2623         return 0;
2624 }
2625 fs_initcall(eventpoll_init);                     
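
/*
 * Editor's sketch (userspace): the 4%-of-low-memory budget computed
 * above is visible (and tunable) as a sysctl, so the per-user watch
 * limit can be inspected at run time.
 */
#include <stdio.h>

long max_user_watches_now(void)
{
        long limit = -1;
        FILE *f = fopen("/proc/sys/fs/epoll/max_user_watches", "r");

        if (f) {
                if (fscanf(f, "%ld", &limit) != 1)
                        limit = -1;
                fclose(f);
        }
        return limit;
}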
2626                                                  
