
TOMOYO Linux Cross Reference
Linux/kernel/irq_work.c

Diff markup

Differences between /kernel/irq_work.c (Version linux-6.11.5) and /kernel/irq_work.c (Version linux-5.3.18)


--- kernel/irq_work.c    (linux-5.3.18)
+++ kernel/irq_work.c    (linux-6.11.5)
 // SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra
  *
  * Provides a framework for enqueueing and running callbacks from hardirq
  * context. The enqueueing is NMI-safe.
  */
 
 #include <linux/bug.h>
 #include <linux/kernel.h>
 #include <linux/export.h>
 #include <linux/irq_work.h>
 #include <linux/percpu.h>
 #include <linux/hardirq.h>
 #include <linux/irqflags.h>
 #include <linux/sched.h>
 #include <linux/tick.h>
 #include <linux/cpu.h>
 #include <linux/notifier.h>
 #include <linux/smp.h>
+#include <linux/smpboot.h>
 #include <asm/processor.h>
+#include <linux/kasan.h>
 
+#include <trace/events/ipi.h>
 
 static DEFINE_PER_CPU(struct llist_head, raised_list);
 static DEFINE_PER_CPU(struct llist_head, lazy_list);
+static DEFINE_PER_CPU(struct task_struct *, irq_workd);
+
+static void wake_irq_workd(void)
+{
+        struct task_struct *tsk = __this_cpu_read(irq_workd);
+
+        if (!llist_empty(this_cpu_ptr(&lazy_list)) && tsk)
+                wake_up_process(tsk);
+}
+
+#ifdef CONFIG_SMP
+static void irq_work_wake(struct irq_work *entry)
+{
+        wake_irq_workd();
+}
+
+static DEFINE_PER_CPU(struct irq_work, irq_work_wakeup) =
+        IRQ_WORK_INIT_HARD(irq_work_wake);
+#endif
+
+static int irq_workd_should_run(unsigned int cpu)
+{
+        return !llist_empty(this_cpu_ptr(&lazy_list));
+}
 
 /*
  * Claim the entry so that no one else will poke at it.
  */
 static bool irq_work_claim(struct irq_work *work)
 {
-        unsigned long flags, oflags, nflags;
+        int oflags;
 
+        oflags = atomic_fetch_or(IRQ_WORK_CLAIMED | CSD_TYPE_IRQ_WORK, &work->node.a_flags);
         /*
-         * Start with our best wish as a premise but only trust any
-         * flag value after cmpxchg() result.
+         * If the work is already pending, no need to raise the IPI.
+         * The pairing smp_mb() in irq_work_single() makes sure
+         * everything we did before is visible.
          */
-        flags = work->flags & ~IRQ_WORK_PENDING;
-        for (;;) {
-                nflags = flags | IRQ_WORK_CLAIMED;
-                oflags = cmpxchg(&work->flags, flags, nflags);
-                if (oflags == flags)
-                        break;
-                if (oflags & IRQ_WORK_PENDING)
-                        return false;
-                flags = oflags;
-                cpu_relax();
-        }
-
+        if (oflags & IRQ_WORK_PENDING)
+                return false;
         return true;
 }
 
 void __weak arch_irq_work_raise(void)
 {
         /*
          * Lame architectures will get the timer tick callback
          */
 }
 
+static __always_inline void irq_work_raise(struct irq_work *work)
+{
+        if (trace_ipi_send_cpu_enabled() && arch_irq_work_has_interrupt())
+                trace_ipi_send_cpu(smp_processor_id(), _RET_IP_, work->func);
+
+        arch_irq_work_raise();
+}
+
 /* Enqueue on current CPU, work must already be claimed and preempt disabled */
 static void __irq_work_queue_local(struct irq_work *work)
 {
+        struct llist_head *list;
+        bool rt_lazy_work = false;
+        bool lazy_work = false;
+        int work_flags;
+
+        work_flags = atomic_read(&work->node.a_flags);
+        if (work_flags & IRQ_WORK_LAZY)
+                lazy_work = true;
+        else if (IS_ENABLED(CONFIG_PREEMPT_RT) &&
+                 !(work_flags & IRQ_WORK_HARD_IRQ))
+                rt_lazy_work = true;
+
+        if (lazy_work || rt_lazy_work)
+                list = this_cpu_ptr(&lazy_list);
+        else
+                list = this_cpu_ptr(&raised_list);
+
+        if (!llist_add(&work->node.llist, list))
+                return;
+
         /* If the work is "lazy", handle it from next tick if any */
-        if (work->flags & IRQ_WORK_LAZY) {
-                if (llist_add(&work->llnode, this_cpu_ptr(&lazy_list)) &&
-                    tick_nohz_tick_stopped())
-                        arch_irq_work_raise();
-        } else {
-                if (llist_add(&work->llnode, this_cpu_ptr(&raised_list)))
-                        arch_irq_work_raise();
-        }
+        if (!lazy_work || tick_nohz_tick_stopped())
+                irq_work_raise(work);
 }
 
 /* Enqueue the irq work @work on the current CPU */
 bool irq_work_queue(struct irq_work *work)
 {
         /* Only queue if not already pending */
         if (!irq_work_claim(work))
                 return false;
 
         /* Queue the entry and raise the IPI if needed. */
         preempt_disable();
         __irq_work_queue_local(work);
         preempt_enable();
 
         return true;
 }
 EXPORT_SYMBOL_GPL(irq_work_queue);
 
 /*
  * Enqueue the irq_work @work on @cpu unless it's already pending
  * somewhere.
  *
  * Can be re-enqueued while the callback is still in progress.
  */
 bool irq_work_queue_on(struct irq_work *work, int cpu)
 {
 #ifndef CONFIG_SMP
         return irq_work_queue(work);
 
 #else /* CONFIG_SMP: */
         /* All work should have been flushed before going offline */
         WARN_ON_ONCE(cpu_is_offline(cpu));
 
         /* Only queue if not already pending */
         if (!irq_work_claim(work))
                 return false;
 
+        kasan_record_aux_stack_noalloc(work);
+
         preempt_disable();
         if (cpu != smp_processor_id()) {
                 /* Arch remote IPI send/receive backend aren't NMI safe */
                 WARN_ON_ONCE(in_nmi());
-                if (llist_add(&work->llnode, &per_cpu(raised_list, cpu)))
-                        arch_send_call_function_single_ipi(cpu);
+
+                /*
+                 * On PREEMPT_RT the items which are not marked as
+                 * IRQ_WORK_HARD_IRQ are added to the lazy list and a HARD work
+                 * item is used on the remote CPU to wake the thread.
+                 */
+                if (IS_ENABLED(CONFIG_PREEMPT_RT) &&
+                    !(atomic_read(&work->node.a_flags) & IRQ_WORK_HARD_IRQ)) {
+
+                        if (!llist_add(&work->node.llist, &per_cpu(lazy_list, cpu)))
+                                goto out;
+
+                        work = &per_cpu(irq_work_wakeup, cpu);
+                        if (!irq_work_claim(work))
+                                goto out;
+                }
+
+                __smp_call_single_queue(cpu, &work->node.llist);
         } else {
                 __irq_work_queue_local(work);
         }
+out:
         preempt_enable();
 
         return true;
 #endif /* CONFIG_SMP */
 }
 
-
 bool irq_work_needs_cpu(void)
 {
         struct llist_head *raised, *lazy;
 
         raised = this_cpu_ptr(&raised_list);
         lazy = this_cpu_ptr(&lazy_list);
 
         if (llist_empty(raised) || arch_irq_work_has_interrupt())
                 if (llist_empty(lazy))
                         return false;
 
         /* All work should have been flushed before going offline */
         WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));
 
         return true;
 }
 
+void irq_work_single(void *arg)
+{
+        struct irq_work *work = arg;
+        int flags;
+
+        /*
+         * Clear the PENDING bit, after this point the @work can be re-used.
+         * The PENDING bit acts as a lock, and we own it, so we can clear it
+         * without atomic ops.
+         */
+        flags = atomic_read(&work->node.a_flags);
+        flags &= ~IRQ_WORK_PENDING;
+        atomic_set(&work->node.a_flags, flags);
+
+        /*
+         * See irq_work_claim().
+         */
+        smp_mb();
+
+        lockdep_irq_work_enter(flags);
+        work->func(work);
+        lockdep_irq_work_exit(flags);
+
+        /*
+         * Clear the BUSY bit, if set, and return to the free state if no-one
+         * else claimed it meanwhile.
+         */
+        (void)atomic_cmpxchg(&work->node.a_flags, flags, flags & ~IRQ_WORK_BUSY);
+
+        if ((IS_ENABLED(CONFIG_PREEMPT_RT) && !irq_work_is_hard(work)) ||
+            !arch_irq_work_has_interrupt())
+                rcuwait_wake_up(&work->irqwait);
+}
+
 static void irq_work_run_list(struct llist_head *list)
 {
         struct irq_work *work, *tmp;
         struct llist_node *llnode;
-        unsigned long flags;
 
-        BUG_ON(!irqs_disabled());
+        /*
+         * On PREEMPT_RT IRQ-work which is not marked as HARD will be processed
+         * in a per-CPU thread in preemptible context. Only the items which are
+         * marked as IRQ_WORK_HARD_IRQ will be processed in hardirq context.
+         */
+        BUG_ON(!irqs_disabled() && !IS_ENABLED(CONFIG_PREEMPT_RT));
 
         if (llist_empty(list))
                 return;
 
         llnode = llist_del_all(list);
-        llist_for_each_entry_safe(work, tmp, llnode, llnode) {
-                /*
-                 * Clear the PENDING bit, after this point the @work
-                 * can be re-used.
-                 * Make it immediately visible so that other CPUs trying
-                 * to claim that work don't rely on us to handle their data
-                 * while we are in the middle of the func.
-                 */
-                flags = work->flags & ~IRQ_WORK_PENDING;
-                xchg(&work->flags, flags);
-
-                work->func(work);
-                /*
-                 * Clear the BUSY bit and return to the free state if
-                 * no-one else claimed it meanwhile.
-                 */
-                (void)cmpxchg(&work->flags, flags, flags & ~IRQ_WORK_BUSY);
-        }
+        llist_for_each_entry_safe(work, tmp, llnode, node.llist)
+                irq_work_single(work);
 }
 
 /*
  * hotplug calls this through:
  *  hotplug_cfd() -> flush_smp_call_function_queue()
  */
 void irq_work_run(void)
 {
         irq_work_run_list(this_cpu_ptr(&raised_list));
-        irq_work_run_list(this_cpu_ptr(&lazy_list));
+        if (!IS_ENABLED(CONFIG_PREEMPT_RT))
+                irq_work_run_list(this_cpu_ptr(&lazy_list));
+        else
+                wake_irq_workd();
 }
 EXPORT_SYMBOL_GPL(irq_work_run);
 
 void irq_work_tick(void)
 {
         struct llist_head *raised = this_cpu_ptr(&raised_list);
 
         if (!llist_empty(raised) && !arch_irq_work_has_interrupt())
                 irq_work_run_list(raised);
-        irq_work_run_list(this_cpu_ptr(&lazy_list));
+
+        if (!IS_ENABLED(CONFIG_PREEMPT_RT))
+                irq_work_run_list(this_cpu_ptr(&lazy_list));
+        else
+                wake_irq_workd();
 }
 
 /*
  * Synchronize against the irq_work @entry, ensures the entry is not
  * currently in use.
  */
 void irq_work_sync(struct irq_work *work)
 {
         lockdep_assert_irqs_enabled();
+        might_sleep();
 
-        while (work->flags & IRQ_WORK_BUSY)
+        if ((IS_ENABLED(CONFIG_PREEMPT_RT) && !irq_work_is_hard(work)) ||
+            !arch_irq_work_has_interrupt()) {
+                rcuwait_wait_event(&work->irqwait, !irq_work_is_busy(work),
+                                   TASK_UNINTERRUPTIBLE);
+                return;
+        }
+
+        while (irq_work_is_busy(work))
                 cpu_relax();
 }
 EXPORT_SYMBOL_GPL(irq_work_sync);
+
+static void run_irq_workd(unsigned int cpu)
+{
+        irq_work_run_list(this_cpu_ptr(&lazy_list));
+}
+
+static void irq_workd_setup(unsigned int cpu)
+{
+        sched_set_fifo_low(current);
+}
+
+static struct smp_hotplug_thread irqwork_threads = {
+        .store                  = &irq_workd,
+        .setup                  = irq_workd_setup,
+        .thread_should_run      = irq_workd_should_run,
+        .thread_fn              = run_irq_workd,
+        .thread_comm            = "irq_work/%u",
+};
+
+static __init int irq_work_init_threads(void)
+{
+        if (IS_ENABLED(CONFIG_PREEMPT_RT))
+                BUG_ON(smpboot_register_percpu_thread(&irqwork_threads));
+        return 0;
+}
+early_initcall(irq_work_init_threads);
 
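For orientation, here is a minimal sketch of how a caller typically drives the irq_work API that both versions of this file implement. It is illustrative only: the names my_irq_work_func, my_work, example_raise and example_teardown are hypothetical, while DEFINE_IRQ_WORK(), irq_work_queue() and irq_work_sync() come from <linux/irq_work.h> as found in the newer tree (older trees such as linux-5.3.18 would initialize the item with init_irq_work() instead).

#include <linux/irq_work.h>
#include <linux/printk.h>

/* Callback: by default it runs when the raised_list is drained from the
 * IPI or the timer tick; on PREEMPT_RT in the newer version, non-HARD
 * items run from the per-CPU irq_work/%u thread instead. */
static void my_irq_work_func(struct irq_work *work)
{
        pr_info("irq_work callback ran\n");
}

/* Statically initialized work item; claiming is flag-based, so the same
 * item can only be queued again once its PENDING bit has been cleared. */
static DEFINE_IRQ_WORK(my_work, my_irq_work_func);

static void example_raise(void)
{
        /* NMI-safe enqueue on the current CPU; returns false if the item
         * was already pending (irq_work_claim() saw IRQ_WORK_PENDING). */
        if (!irq_work_queue(&my_work))
                pr_debug("my_work was already pending\n");
}

static void example_teardown(void)
{
        /* Wait until the callback is no longer BUSY before freeing or
         * reusing the item; must be called with IRQs enabled. */
        irq_work_sync(&my_work);
}

irq_work_queue_on() follows the same claim-then-enqueue pattern but targets a specific CPU, which is why the code above warns when it is called from NMI context for a remote CPU.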
