
TOMOYO Linux Cross Reference
Linux/kernel/irq_work.c


Diff markup

Differences between /kernel/irq_work.c (Version linux-6.11.5) and /kernel/irq_work.c (Version linux-4.14.336)


  1 // SPDX-License-Identifier: GPL-2.0-only       << 
  2 /*                                                  1 /*
  3  * Copyright (C) 2010 Red Hat, Inc., Peter Zij      2  * Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra
  4  *                                                  3  *
  5  * Provides a framework for enqueueing and run      4  * Provides a framework for enqueueing and running callbacks from hardirq
  6  * context. The enqueueing is NMI-safe.             5  * context. The enqueueing is NMI-safe.
  7  */                                                 6  */
  8                                                     7 
  9 #include <linux/bug.h>                              8 #include <linux/bug.h>
 10 #include <linux/kernel.h>                           9 #include <linux/kernel.h>
 11 #include <linux/export.h>                          10 #include <linux/export.h>
 12 #include <linux/irq_work.h>                        11 #include <linux/irq_work.h>
 13 #include <linux/percpu.h>                          12 #include <linux/percpu.h>
 14 #include <linux/hardirq.h>                         13 #include <linux/hardirq.h>
 15 #include <linux/irqflags.h>                        14 #include <linux/irqflags.h>
 16 #include <linux/sched.h>                           15 #include <linux/sched.h>
 17 #include <linux/tick.h>                            16 #include <linux/tick.h>
 18 #include <linux/cpu.h>                             17 #include <linux/cpu.h>
 19 #include <linux/notifier.h>                        18 #include <linux/notifier.h>
 20 #include <linux/smp.h>                             19 #include <linux/smp.h>
 21 #include <linux/smpboot.h>                     << 
 22 #include <asm/processor.h>                         20 #include <asm/processor.h>
 23 #include <linux/kasan.h>                       << 
 24                                                    21 
 25 #include <trace/events/ipi.h>                  << 
 26                                                    22 
 27 static DEFINE_PER_CPU(struct llist_head, raise     23 static DEFINE_PER_CPU(struct llist_head, raised_list);
 28 static DEFINE_PER_CPU(struct llist_head, lazy_     24 static DEFINE_PER_CPU(struct llist_head, lazy_list);
 29 static DEFINE_PER_CPU(struct task_struct *, irq_workd);        << 
 30                                                << 
 31 static void wake_irq_workd(void)               << 
 32 {                                              << 
 33         struct task_struct *tsk = __this_cpu_read(irq_workd);  << 
 34                                                << 
 35         if (!llist_empty(this_cpu_ptr(&lazy_list)) && tsk)     << 
 36                 wake_up_process(tsk);          << 
 37 }                                              << 
 38                                                << 
 39 #ifdef CONFIG_SMP                              << 
 40 static void irq_work_wake(struct irq_work *entry)              << 
 41 {                                              << 
 42         wake_irq_workd();                      << 
 43 }                                              << 
 44                                                << 
 45 static DEFINE_PER_CPU(struct irq_work, irq_work_wakeup) =      << 
 46         IRQ_WORK_INIT_HARD(irq_work_wake);     << 
 47 #endif                                         << 
 48                                                << 
 49 static int irq_workd_should_run(unsigned int cpu)              << 
 50 {                                              << 
 51         return !llist_empty(this_cpu_ptr(&lazy_list));         << 
 52 }                                              << 
 53                                                    25 
 54 /*                                                 26 /*
 55  * Claim the entry so that no one else will po     27  * Claim the entry so that no one else will poke at it.
 56  */                                                28  */
 57 static bool irq_work_claim(struct irq_work *wo     29 static bool irq_work_claim(struct irq_work *work)
 58 {                                                  30 {
 59         int oflags;                            !!  31         unsigned long flags, oflags, nflags;
 60                                                    32 
 61         oflags = atomic_fetch_or(IRQ_WORK_CLAIMED | CSD_TYPE_IRQ_WORK, &work->node.a_flags);  << 
 62         /*                                         33         /*
 63          * If the work is already pending, no need to raise the IPI.   !!  34          * Start with our best wish as a premise but only trust any
 64          * The pairing smp_mb() in irq_work_single() makes sure        !!  35          * flag value after cmpxchg() result.
 65          * everything we did before is visible.    << 
 66          */                                        36          */
 67         if (oflags & IRQ_WORK_PENDING)         !!  37         flags = work->flags & ~IRQ_WORK_PENDING;
 68                 return false;                  !!  38         for (;;) {
                                                   >>  39                 nflags = flags | IRQ_WORK_FLAGS;
                                                   >>  40                 oflags = cmpxchg(&work->flags, flags, nflags);
                                                   >>  41                 if (oflags == flags)
                                                   >>  42                         break;
                                                   >>  43                 if (oflags & IRQ_WORK_PENDING)
                                                   >>  44                         return false;
                                                   >>  45                 flags = oflags;
                                                   >>  46                 cpu_relax();
                                                   >>  47         }
                                                   >>  48 
 69         return true;                               49         return true;
 70 }                                                  50 }
 71                                                    51 
 72 void __weak arch_irq_work_raise(void)              52 void __weak arch_irq_work_raise(void)
 73 {                                                  53 {
 74         /*                                         54         /*
 75          * Lame architectures will get the tim     55          * Lame architectures will get the timer tick callback
 76          */                                        56          */
 77 }                                                  57 }
 78                                                    58 
 79 static __always_inline void irq_work_raise(struct irq_work *work)      !!  59 #ifdef CONFIG_SMP
 80 {                                              << 
 81         if (trace_ipi_send_cpu_enabled() && arch_irq_work_has_interrupt())     << 
 82                 trace_ipi_send_cpu(smp_processor_id(), _RET_IP_, work->func);  << 
 83                                                << 
 84         arch_irq_work_raise();                 << 
 85 }                                              << 
 86                                                << 
 87 /* Enqueue on current CPU, work must already be claimed and preempt disabled */        << 
 88 static void __irq_work_queue_local(struct irq_work *work)      << 
 89 {                                              << 
 90         struct llist_head *list;               << 
 91         bool rt_lazy_work = false;             << 
 92         bool lazy_work = false;                << 
 93         int work_flags;                        << 
 94                                                << 
 95         work_flags = atomic_read(&work->node.a_flags);         << 
 96         if (work_flags & IRQ_WORK_LAZY)        << 
 97                 lazy_work = true;              << 
 98         else if (IS_ENABLED(CONFIG_PREEMPT_RT) &&              << 
 99                  !(work_flags & IRQ_WORK_HARD_IRQ))            << 
100                 rt_lazy_work = true;           << 
101                                                << 
102         if (lazy_work || rt_lazy_work)         << 
103                 list = this_cpu_ptr(&lazy_list);               << 
104         else                                   << 
105                 list = this_cpu_ptr(&raised_list);             << 
106                                                << 
107         if (!llist_add(&work->node.llist, list))               << 
108                 return;                        << 
109                                                << 
110         /* If the work is "lazy", handle it from next tick if any */   << 
111         if (!lazy_work || tick_nohz_tick_stopped())            << 
112                 irq_work_raise(work);          << 
113 }                                              << 
114                                                << 
115 /* Enqueue the irq work @work on the current CPU */            << 
116 bool irq_work_queue(struct irq_work *work)     << 
117 {                                              << 
118         /* Only queue if not already pending */                << 
119         if (!irq_work_claim(work))             << 
120                 return false;                  << 
121                                                << 
122         /* Queue the entry and raise the IPI if needed. */     << 
123         preempt_disable();                     << 
124         __irq_work_queue_local(work);          << 
125         preempt_enable();                      << 
126                                                << 
127         return true;                           << 
128 }                                              << 
129 EXPORT_SYMBOL_GPL(irq_work_queue);             << 
130                                                << 
131 /*                                                 60 /*
132  * Enqueue the irq_work @work on @cpu unless i     61  * Enqueue the irq_work @work on @cpu unless it's already pending
133  * somewhere.                                      62  * somewhere.
134  *                                                 63  *
135  * Can be re-enqueued while the callback is st     64  * Can be re-enqueued while the callback is still in progress.
136  */                                                65  */
137 bool irq_work_queue_on(struct irq_work *work,      66 bool irq_work_queue_on(struct irq_work *work, int cpu)
138 {                                                  67 {
139 #ifndef CONFIG_SMP                             << 
140         return irq_work_queue(work);           << 
141                                                << 
142 #else /* CONFIG_SMP: */                        << 
143         /* All work should have been flushed b     68         /* All work should have been flushed before going offline */
144         WARN_ON_ONCE(cpu_is_offline(cpu));         69         WARN_ON_ONCE(cpu_is_offline(cpu));
145                                                    70 
                                                   >>  71         /* Arch remote IPI send/receive backend aren't NMI safe */
                                                   >>  72         WARN_ON_ONCE(in_nmi());
                                                   >>  73 
146         /* Only queue if not already pending *     74         /* Only queue if not already pending */
147         if (!irq_work_claim(work))                 75         if (!irq_work_claim(work))
148                 return false;                      76                 return false;
149                                                    77 
150         kasan_record_aux_stack_noalloc(work);  !!  78         if (llist_add(&work->llnode, &per_cpu(raised_list, cpu)))
                                                   >>  79                 arch_send_call_function_single_ipi(cpu);
151                                                    80 
152         preempt_disable();                     !!  81         return true;
153         if (cpu != smp_processor_id()) {       !!  82 }
154                 /* Arch remote IPI send/receive backend aren't NMI safe */     !!  83 EXPORT_SYMBOL_GPL(irq_work_queue_on);
155                 WARN_ON_ONCE(in_nmi());        !!  84 #endif
156                                                << 
157                 /*                             << 
158                  * On PREEMPT_RT the items which are not marked as             << 
159                  * IRQ_WORK_HARD_IRQ are added to the lazy list and a HARD work        << 
160                  * item is used on the remote CPU to wake the thread.          << 
161                  */                            << 
162                 if (IS_ENABLED(CONFIG_PREEMPT_RT) &&           << 
163                     !(atomic_read(&work->node.a_flags) & IRQ_WORK_HARD_IRQ)) { << 
164                                                    85 
165                         if (!llist_add(&work->node.llist, &per_cpu(lazy_list, cpu)))  !!  86 /* Enqueue the irq work @work on the current CPU */
166                                 goto out;      !!  87 bool irq_work_queue(struct irq_work *work)
                                                   >>  88 {
                                                   >>  89         /* Only queue if not already pending */
                                                   >>  90         if (!irq_work_claim(work))
                                                   >>  91                 return false;
167                                                    92 
168                         work = &per_cpu(irq_work_wakeup, cpu);         !!  93         /* Queue the entry and raise the IPI if needed. */
169                         if (!irq_work_claim(work))                     !!  94         preempt_disable();
170                                 goto out;      << 
171                 }                              << 
172                                                    95 
173                 __smp_call_single_queue(cpu, &work->node.llist);       !!  96         /* If the work is "lazy", handle it from next tick if any */
                                                   >>  97         if (work->flags & IRQ_WORK_LAZY) {
                                                   >>  98                 if (llist_add(&work->llnode, this_cpu_ptr(&lazy_list)) &&
                                                   >>  99                     tick_nohz_tick_stopped())
                                                   >> 100                         arch_irq_work_raise();
174         } else {                                  101         } else {
175                 __irq_work_queue_local(work);  !! 102                 if (llist_add(&work->llnode, this_cpu_ptr(&raised_list)))
                                                   >> 103                         arch_irq_work_raise();
176         }                                         104         }
177 out:                                           !! 105 
178         preempt_enable();                         106         preempt_enable();
179                                                   107 
180         return true;                              108         return true;
181 #endif /* CONFIG_SMP */                        << 
182 }                                                 109 }
                                                   >> 110 EXPORT_SYMBOL_GPL(irq_work_queue);
183                                                   111 
184 bool irq_work_needs_cpu(void)                     112 bool irq_work_needs_cpu(void)
185 {                                                 113 {
186         struct llist_head *raised, *lazy;         114         struct llist_head *raised, *lazy;
187                                                   115 
188         raised = this_cpu_ptr(&raised_list);      116         raised = this_cpu_ptr(&raised_list);
189         lazy = this_cpu_ptr(&lazy_list);          117         lazy = this_cpu_ptr(&lazy_list);
190                                                   118 
191         if (llist_empty(raised) || arch_irq_wo    119         if (llist_empty(raised) || arch_irq_work_has_interrupt())
192                 if (llist_empty(lazy))            120                 if (llist_empty(lazy))
193                         return false;             121                         return false;
194                                                   122 
195         /* All work should have been flushed b    123         /* All work should have been flushed before going offline */
196         WARN_ON_ONCE(cpu_is_offline(smp_proces    124         WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));
197                                                   125 
198         return true;                              126         return true;
199 }                                                 127 }
200                                                   128 
201 void irq_work_single(void *arg)                << 
202 {                                              << 
203         struct irq_work *work = arg;           << 
204         int flags;                             << 
205                                                << 
206         /*                                     << 
207          * Clear the PENDING bit, after this point the @work can be re-used.   << 
208          * The PENDING bit acts as a lock, and we own it, so we can clear it   << 
209          * without atomic ops.                 << 
210          */                                    << 
211         flags = atomic_read(&work->node.a_flags);              << 
212         flags &= ~IRQ_WORK_PENDING;            << 
213         atomic_set(&work->node.a_flags, flags);                << 
214                                                << 
215         /*                                     << 
216          * See irq_work_claim().               << 
217          */                                    << 
218         smp_mb();                              << 
219                                                << 
220         lockdep_irq_work_enter(flags);         << 
221         work->func(work);                      << 
222         lockdep_irq_work_exit(flags);          << 
223                                                << 
224         /*                                     << 
225          * Clear the BUSY bit, if set, and return to the free state if no-one  << 
226          * else claimed it meanwhile.          << 
227          */                                    << 
228         (void)atomic_cmpxchg(&work->node.a_flags, flags, flags & ~IRQ_WORK_BUSY);      << 
229                                                << 
230         if ((IS_ENABLED(CONFIG_PREEMPT_RT) && !(flags & IRQ_WORK_HARD_IRQ)) || << 
231             !arch_irq_work_has_interrupt())    << 
232                 rcuwait_wake_up(&work->irqwait);               << 
233 }                                              << 
234                                                << 
235 static void irq_work_run_list(struct llist_hea    129 static void irq_work_run_list(struct llist_head *list)
236 {                                                 130 {
237         struct irq_work *work, *tmp;           !! 131         unsigned long flags;
                                                   >> 132         struct irq_work *work;
238         struct llist_node *llnode;                133         struct llist_node *llnode;
239                                                   134 
240         /*                                     !! 135         BUG_ON(!irqs_disabled());
241          * On PREEMPT_RT IRQ-work which is not marked as HARD will be processed        << 
242          * in a per-CPU thread in preemptible context. Only the items which are        << 
243          * marked as IRQ_WORK_HARD_IRQ will be processed in hardirq context.   << 
244          */                                    << 
245         BUG_ON(!irqs_disabled() && !IS_ENABLED(CONFIG_PREEMPT_RT));     << 
246                                                   136 
247         if (llist_empty(list))                    137         if (llist_empty(list))
248                 return;                           138                 return;
249                                                   139 
250         llnode = llist_del_all(list);             140         llnode = llist_del_all(list);
251         llist_for_each_entry_safe(work, tmp, llnode, node.llist)       !! 141         while (llnode != NULL) {
252                 irq_work_single(work);         !! 142                 work = llist_entry(llnode, struct irq_work, llnode);
                                                   >> 143 
                                                   >> 144                 llnode = llist_next(llnode);
                                                   >> 145 
                                                   >> 146                 /*
                                                   >> 147                  * Clear the PENDING bit, after this point the @work
                                                   >> 148                  * can be re-used.
                                                   >> 149                  * Make it immediately visible so that other CPUs trying
                                                   >> 150                  * to claim that work don't rely on us to handle their data
                                                   >> 151                  * while we are in the middle of the func.
                                                   >> 152                  */
                                                   >> 153                 flags = work->flags & ~IRQ_WORK_PENDING;
                                                   >> 154                 xchg(&work->flags, flags);
                                                   >> 155 
                                                   >> 156                 work->func(work);
                                                   >> 157                 /*
                                                   >> 158                  * Clear the BUSY bit and return to the free state if
                                                   >> 159                  * no-one else claimed it meanwhile.
                                                   >> 160                  */
                                                   >> 161                 (void)cmpxchg(&work->flags, flags, flags & ~IRQ_WORK_BUSY);
                                                   >> 162         }
253 }                                                 163 }
254                                                   164 
255 /*                                                165 /*
256  * hotplug calls this through:                    166  * hotplug calls this through:
257  *  hotplug_cfd() -> flush_smp_call_function_q    167  *  hotplug_cfd() -> flush_smp_call_function_queue()
258  */                                               168  */
259 void irq_work_run(void)                           169 void irq_work_run(void)
260 {                                                 170 {
261         irq_work_run_list(this_cpu_ptr(&raised    171         irq_work_run_list(this_cpu_ptr(&raised_list));
262         if (!IS_ENABLED(CONFIG_PREEMPT_RT))    !! 172         irq_work_run_list(this_cpu_ptr(&lazy_list));
263                 irq_work_run_list(this_cpu_ptr(&lazy_list));   << 
264         else                                   << 
265                 wake_irq_workd();              << 
266 }                                                 173 }
267 EXPORT_SYMBOL_GPL(irq_work_run);                  174 EXPORT_SYMBOL_GPL(irq_work_run);
268                                                   175 
269 void irq_work_tick(void)                          176 void irq_work_tick(void)
270 {                                                 177 {
271         struct llist_head *raised = this_cpu_p    178         struct llist_head *raised = this_cpu_ptr(&raised_list);
272                                                   179 
273         if (!llist_empty(raised) && !arch_irq_    180         if (!llist_empty(raised) && !arch_irq_work_has_interrupt())
274                 irq_work_run_list(raised);        181                 irq_work_run_list(raised);
275                                                !! 182         irq_work_run_list(this_cpu_ptr(&lazy_list));
276         if (!IS_ENABLED(CONFIG_PREEMPT_RT))    << 
277                 irq_work_run_list(this_cpu_ptr(&lazy_list));   << 
278         else                                   << 
279                 wake_irq_workd();              << 
280 }                                                 183 }
281                                                   184 
282 /*                                                185 /*
283  * Synchronize against the irq_work @entry, en    186  * Synchronize against the irq_work @entry, ensures the entry is not
284  * currently in use.                              187  * currently in use.
285  */                                               188  */
286 void irq_work_sync(struct irq_work *work)         189 void irq_work_sync(struct irq_work *work)
287 {                                                 190 {
288         lockdep_assert_irqs_enabled();         !! 191         WARN_ON_ONCE(irqs_disabled());
289         might_sleep();                         << 
290                                                << 
291         if ((IS_ENABLED(CONFIG_PREEMPT_RT) && !irq_work_is_hard(work)) ||      << 
292             !arch_irq_work_has_interrupt()) {  << 
293                 rcuwait_wait_event(&work->irqwait, !irq_work_is_busy(work),    << 
294                                    TASK_UNINTERRUPTIBLE);      << 
295                 return;                        << 
296         }                                      << 
297                                                   192 
298         while (irq_work_is_busy(work))         !! 193         while (work->flags & IRQ_WORK_BUSY)
299                 cpu_relax();                      194                 cpu_relax();
300 }                                                 195 }
301 EXPORT_SYMBOL_GPL(irq_work_sync);                 196 EXPORT_SYMBOL_GPL(irq_work_sync);
302                                                << 
303 static void run_irq_workd(unsigned int cpu)    << 
304 {                                              << 
305         irq_work_run_list(this_cpu_ptr(&lazy_list));           << 
306 }                                              << 
307                                                << 
308 static void irq_workd_setup(unsigned int cpu)  << 
309 {                                              << 
310         sched_set_fifo_low(current);           << 
311 }                                              << 
312                                                << 
313 static struct smp_hotplug_thread irqwork_threads = {           << 
314         .store                  = &irq_workd,  << 
315         .setup                  = irq_workd_setup,             << 
316         .thread_should_run      = irq_workd_should_run,        << 
317         .thread_fn              = run_irq_workd,               << 
318         .thread_comm            = "irq_work/%u",               << 
319 };                                             << 
320                                                << 
321 static __init int irq_work_init_threads(void)  << 
322 {                                              << 
323         if (IS_ENABLED(CONFIG_PREEMPT_RT))     << 
324                 BUG_ON(smpboot_register_percpu_thread(&irqwork_threads));      << 
325         return 0;                              << 
326 }                                              << 
327 early_initcall(irq_work_init_threads);         << 
328                                                   197 
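
For context, here is a minimal usage sketch of the newer irq_work API shown on the left-hand (linux-6.11.5) side of the diff. It is not part of either file; the names my_irq_work_fn, my_work, example_raise and example_teardown are hypothetical, and the sketch assumes only the interface visible above and in <linux/irq_work.h> (IRQ_WORK_INIT(), irq_work_queue(), irq_work_sync()).

#include <linux/irq_work.h>
#include <linux/printk.h>

/*
 * Hypothetical callback. It is invoked from hard-IRQ context on the CPU
 * that raised the work (or from the per-CPU irq_work/%u kthread for lazy
 * work on PREEMPT_RT, as set up by irq_work_init_threads() above).
 */
static void my_irq_work_fn(struct irq_work *work)
{
	pr_info("irq_work callback ran\n");
}

static struct irq_work my_work = IRQ_WORK_INIT(my_irq_work_fn);

/* NMI-safe: claims and queues on the local CPU; a no-op if already pending. */
static void example_raise(void)
{
	irq_work_queue(&my_work);
}

/*
 * Teardown must run in sleepable, IRQs-enabled context: irq_work_sync()
 * waits until the callback is no longer busy before @my_work may be freed.
 */
static void example_teardown(void)
{
	irq_work_sync(&my_work);
}

Compared with the linux-4.14.336 side, the caller-visible flow is the same; what changed is the claiming scheme (a single atomic_fetch_or() on work->node.a_flags instead of the cmpxchg() loop on work->flags) and the PREEMPT_RT handling, where non-HARD work is deferred to the per-CPU irq_work thread rather than run from the tick or IPI path.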
