
TOMOYO Linux Cross Reference
Linux/kernel/irq_work.c


Source: /kernel/irq_work.c (Version linux-6.11.5)


// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra
 *
 * Provides a framework for enqueueing and running callbacks from hardirq
 * context. The enqueueing is NMI-safe.
 */

#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/irq_work.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/irqflags.h>
#include <linux/sched.h>
#include <linux/tick.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <linux/smpboot.h>
#include <asm/processor.h>
#include <linux/kasan.h>

#include <trace/events/ipi.h>

static DEFINE_PER_CPU(struct llist_head, raised_list);
static DEFINE_PER_CPU(struct llist_head, lazy_list);
static DEFINE_PER_CPU(struct task_struct *, irq_workd);

static void wake_irq_workd(void)
{
        struct task_struct *tsk = __this_cpu_read(irq_workd);

        if (!llist_empty(this_cpu_ptr(&lazy_list)) && tsk)
                wake_up_process(tsk);
}

#ifdef CONFIG_SMP
static void irq_work_wake(struct irq_work *entry)
{
        wake_irq_workd();
}

static DEFINE_PER_CPU(struct irq_work, irq_work_wakeup) =
        IRQ_WORK_INIT_HARD(irq_work_wake);
#endif

static int irq_workd_should_run(unsigned int cpu)
{
        return !llist_empty(this_cpu_ptr(&lazy_list));
}

/*
 * Claim the entry so that no one else will poke at it.
 */
static bool irq_work_claim(struct irq_work *work)
{
        int oflags;

        oflags = atomic_fetch_or(IRQ_WORK_CLAIMED | CSD_TYPE_IRQ_WORK, &work->node.a_flags);
        /*
         * If the work is already pending, no need to raise the IPI.
         * The pairing smp_mb() in irq_work_single() makes sure
         * everything we did before is visible.
         */
        if (oflags & IRQ_WORK_PENDING)
                return false;
        return true;
}

void __weak arch_irq_work_raise(void)
{
        /*
         * Lame architectures will get the timer tick callback
         */
}

static __always_inline void irq_work_raise(struct irq_work *work)
{
        if (trace_ipi_send_cpu_enabled() && arch_irq_work_has_interrupt())
                trace_ipi_send_cpu(smp_processor_id(), _RET_IP_, work->func);

        arch_irq_work_raise();
}

/* Enqueue on current CPU, work must already be claimed and preempt disabled */
static void __irq_work_queue_local(struct irq_work *work)
{
        struct llist_head *list;
        bool rt_lazy_work = false;
        bool lazy_work = false;
        int work_flags;

        work_flags = atomic_read(&work->node.a_flags);
        if (work_flags & IRQ_WORK_LAZY)
                lazy_work = true;
        else if (IS_ENABLED(CONFIG_PREEMPT_RT) &&
                 !(work_flags & IRQ_WORK_HARD_IRQ))
                rt_lazy_work = true;

        if (lazy_work || rt_lazy_work)
                list = this_cpu_ptr(&lazy_list);
        else
                list = this_cpu_ptr(&raised_list);

        if (!llist_add(&work->node.llist, list))
                return;

        /* If the work is "lazy", handle it from next tick if any */
        if (!lazy_work || tick_nohz_tick_stopped())
                irq_work_raise(work);
}

/* Enqueue the irq work @work on the current CPU */
bool irq_work_queue(struct irq_work *work)
{
        /* Only queue if not already pending */
        if (!irq_work_claim(work))
                return false;

        /* Queue the entry and raise the IPI if needed. */
        preempt_disable();
        __irq_work_queue_local(work);
        preempt_enable();

        return true;
}
EXPORT_SYMBOL_GPL(irq_work_queue);

/*
 * Enqueue the irq_work @work on @cpu unless it's already pending
 * somewhere.
 *
 * Can be re-enqueued while the callback is still in progress.
 */
bool irq_work_queue_on(struct irq_work *work, int cpu)
{
#ifndef CONFIG_SMP
        return irq_work_queue(work);

#else /* CONFIG_SMP: */
        /* All work should have been flushed before going offline */
        WARN_ON_ONCE(cpu_is_offline(cpu));

        /* Only queue if not already pending */
        if (!irq_work_claim(work))
                return false;

        kasan_record_aux_stack_noalloc(work);

        preempt_disable();
        if (cpu != smp_processor_id()) {
                /* Arch remote IPI send/receive backend aren't NMI safe */
                WARN_ON_ONCE(in_nmi());

                /*
                 * On PREEMPT_RT the items which are not marked as
                 * IRQ_WORK_HARD_IRQ are added to the lazy list and a HARD work
                 * item is used on the remote CPU to wake the thread.
                 */
                if (IS_ENABLED(CONFIG_PREEMPT_RT) &&
                    !(atomic_read(&work->node.a_flags) & IRQ_WORK_HARD_IRQ)) {

                        if (!llist_add(&work->node.llist, &per_cpu(lazy_list, cpu)))
                                goto out;

                        work = &per_cpu(irq_work_wakeup, cpu);
                        if (!irq_work_claim(work))
                                goto out;
                }

                __smp_call_single_queue(cpu, &work->node.llist);
        } else {
                __irq_work_queue_local(work);
        }
out:
        preempt_enable();

        return true;
#endif /* CONFIG_SMP */
}

bool irq_work_needs_cpu(void)
{
        struct llist_head *raised, *lazy;

        raised = this_cpu_ptr(&raised_list);
        lazy = this_cpu_ptr(&lazy_list);

        if (llist_empty(raised) || arch_irq_work_has_interrupt())
                if (llist_empty(lazy))
                        return false;

        /* All work should have been flushed before going offline */
        WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));

        return true;
}

void irq_work_single(void *arg)
{
        struct irq_work *work = arg;
        int flags;

        /*
         * Clear the PENDING bit, after this point the @work can be re-used.
         * The PENDING bit acts as a lock, and we own it, so we can clear it
         * without atomic ops.
         */
        flags = atomic_read(&work->node.a_flags);
        flags &= ~IRQ_WORK_PENDING;
        atomic_set(&work->node.a_flags, flags);

        /*
         * See irq_work_claim().
         */
        smp_mb();

        lockdep_irq_work_enter(flags);
        work->func(work);
        lockdep_irq_work_exit(flags);

        /*
         * Clear the BUSY bit, if set, and return to the free state if no-one
         * else claimed it meanwhile.
         */
        (void)atomic_cmpxchg(&work->node.a_flags, flags, flags & ~IRQ_WORK_BUSY);

        if ((IS_ENABLED(CONFIG_PREEMPT_RT) && !irq_work_is_hard(work)) ||
            !arch_irq_work_has_interrupt())
                rcuwait_wake_up(&work->irqwait);
}

static void irq_work_run_list(struct llist_head *list)
{
        struct irq_work *work, *tmp;
        struct llist_node *llnode;

        /*
         * On PREEMPT_RT IRQ-work which is not marked as HARD will be processed
         * in a per-CPU thread in preemptible context. Only the items which are
         * marked as IRQ_WORK_HARD_IRQ will be processed in hardirq context.
         */
        BUG_ON(!irqs_disabled() && !IS_ENABLED(CONFIG_PREEMPT_RT));

        if (llist_empty(list))
                return;

        llnode = llist_del_all(list);
        llist_for_each_entry_safe(work, tmp, llnode, node.llist)
                irq_work_single(work);
}

/*
 * hotplug calls this through:
 *  hotplug_cfd() -> flush_smp_call_function_queue()
 */
void irq_work_run(void)
{
        irq_work_run_list(this_cpu_ptr(&raised_list));
        if (!IS_ENABLED(CONFIG_PREEMPT_RT))
                irq_work_run_list(this_cpu_ptr(&lazy_list));
        else
                wake_irq_workd();
}
EXPORT_SYMBOL_GPL(irq_work_run);

void irq_work_tick(void)
{
        struct llist_head *raised = this_cpu_ptr(&raised_list);

        if (!llist_empty(raised) && !arch_irq_work_has_interrupt())
                irq_work_run_list(raised);

        if (!IS_ENABLED(CONFIG_PREEMPT_RT))
                irq_work_run_list(this_cpu_ptr(&lazy_list));
        else
                wake_irq_workd();
}

/*
 * Synchronize against the irq_work @entry, ensures the entry is not
 * currently in use.
 */
void irq_work_sync(struct irq_work *work)
{
        lockdep_assert_irqs_enabled();
        might_sleep();

        if ((IS_ENABLED(CONFIG_PREEMPT_RT) && !irq_work_is_hard(work)) ||
            !arch_irq_work_has_interrupt()) {
                rcuwait_wait_event(&work->irqwait, !irq_work_is_busy(work),
                                   TASK_UNINTERRUPTIBLE);
                return;
        }

        while (irq_work_is_busy(work))
                cpu_relax();
}
EXPORT_SYMBOL_GPL(irq_work_sync);

static void run_irq_workd(unsigned int cpu)
{
        irq_work_run_list(this_cpu_ptr(&lazy_list));
}

static void irq_workd_setup(unsigned int cpu)
{
        sched_set_fifo_low(current);
}

static struct smp_hotplug_thread irqwork_threads = {
        .store                  = &irq_workd,
        .setup                  = irq_workd_setup,
        .thread_should_run      = irq_workd_should_run,
        .thread_fn              = run_irq_workd,
        .thread_comm            = "irq_work/%u",
};

static __init int irq_work_init_threads(void)
{
        if (IS_ENABLED(CONFIG_PREEMPT_RT))
                BUG_ON(smpboot_register_percpu_thread(&irqwork_threads));
        return 0;
}
early_initcall(irq_work_init_threads);
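The sketch below illustrates how the interface exported by this file (irq_work_queue() and irq_work_sync(), plus the IRQ_WORK_INIT() initializer from include/linux/irq_work.h) is typically consumed by a caller that must defer work out of NMI or other atomic context. The names example_irq_work_fn, example_work, example_trigger and example_teardown are hypothetical and not part of this file; this is a minimal usage sketch, not code from the kernel tree.

        /* Hypothetical usage sketch for the irq_work API defined above. */
        #include <linux/irq_work.h>
        #include <linux/printk.h>

        /* Runs later in hardirq context (or in the irq_work/%u thread on PREEMPT_RT). */
        static void example_irq_work_fn(struct irq_work *work)
        {
                pr_info("deferred irq_work callback ran\n");
        }

        /* Statically initialized work item; IRQ_WORK_INIT() is from <linux/irq_work.h>. */
        static struct irq_work example_work = IRQ_WORK_INIT(example_irq_work_fn);

        /* Safe to call from NMI/hardirq context: enqueueing is NMI-safe. */
        static void example_trigger(void)
        {
                /* irq_work_queue() returns false if the work was already pending. */
                if (!irq_work_queue(&example_work))
                        pr_debug("example_work was already queued\n");
        }

        /* Before freeing the work item, wait for any in-flight callback to finish. */
        static void example_teardown(void)
        {
                irq_work_sync(&example_work);
        }

The same work item could instead be directed at another CPU with irq_work_queue_on(&example_work, cpu), subject to the offline-CPU and NMI restrictions checked in that function.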
