// SPDX-License-Identifier: GPL-2.0-only
/*
 *	linux/kernel/softirq.c
 *
 *	Copyright (C) 1992 Linus Torvalds
 *
 *	Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/export.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/local_lock.h>
#include <linux/mm.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/rcupdate.h>
#include <linux/ftrace.h>
#include <linux/smp.h>
#include <linux/smpboot.h>
#include <linux/tick.h>
#include <linux/irq.h>
#include <linux/wait_bit.h>
#include <linux/workqueue.h>

#include <asm/softirq_stack.h>

#define CREATE_TRACE_POINTS
#include <trace/events/irq.h>

/*
   - No shared variables, all the data are CPU local.
   - If a softirq needs serialization, let it serialize itself
     by its own spinlocks.
   - Even if softirq is serialized, only local cpu is marked for
     execution. Hence, we get something sort of weak cpu binding.
     Though it is still not clear whether it will result in better
     locality or not.

   Examples:
   - NET RX softirq. It is multithreaded and does not require
     any global serialization.
   - NET TX softirq. It kicks software netdevice queues, hence
     it is logically serialized per device, but this serialization
     is invisible to common code.
   - Tasklets: serialized wrt itself.
 */

#ifndef __ARCH_IRQ_STAT
DEFINE_PER_CPU_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);
#endif

static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;

DEFINE_PER_CPU(struct task_struct *, ksoftirqd);

const char * const softirq_to_name[NR_SOFTIRQS] = {
	"HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "IRQ_POLL",
	"TASKLET", "SCHED", "HRTIMER", "RCU"
};

/*
 * We cannot loop indefinitely here to avoid userspace starvation,
 * but we also don't want to introduce a worst case 1/HZ latency
 * to the pending events, so let the scheduler balance
 * the softirq load for us.
 */
static void wakeup_softirqd(void)
{
	/* Interrupts are disabled: no need to stop preemption */
	struct task_struct *tsk = __this_cpu_read(ksoftirqd);

	if (tsk)
		wake_up_process(tsk);
}

#ifdef CONFIG_TRACE_IRQFLAGS
DEFINE_PER_CPU(int, hardirqs_enabled);
DEFINE_PER_CPU(int, hardirq_context);
EXPORT_PER_CPU_SYMBOL_GPL(hardirqs_enabled);
EXPORT_PER_CPU_SYMBOL_GPL(hardirq_context);
#endif

/*
 * SOFTIRQ_OFFSET usage:
 *
 * On !RT kernels 'count' is the preempt counter, on RT kernels this applies
 * to a per CPU counter and to task::softirqs_disabled_cnt.
 *
 * - count is changed by SOFTIRQ_OFFSET on entering or leaving softirq
 *   processing.
 *
 * - count is changed by SOFTIRQ_DISABLE_OFFSET (= 2 * SOFTIRQ_OFFSET)
 *   on local_bh_disable or local_bh_enable.
 *
 * This lets us distinguish between whether we are currently processing
 * softirq and whether we just have bh disabled.
 */
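
/*
 * For example, with the values from <linux/preempt.h> on a !RT kernel:
 * SOFTIRQ_OFFSET is 1 << SOFTIRQ_SHIFT (0x100) and SOFTIRQ_DISABLE_OFFSET
 * is 0x200. A task which has called local_bh_disable() once shows
 * softirq_count() == 0x200, while a task actually executing softirq
 * handlers additionally carries SOFTIRQ_OFFSET, which is the bit that
 * in_serving_softirq() tests. A plain BH disabled section is therefore
 * never mistaken for handler execution.
 */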
#ifdef CONFIG_PREEMPT_RT

/*
 * RT accounts for BH disabled sections in task::softirqs_disabled_cnt and
 * also in per CPU softirq_ctrl::cnt. This is necessary to allow tasks in a
 * softirq disabled section to be preempted.
 *
 * The per task counter is used for softirq_count(), in_softirq() and
 * in_serving_softirqs() because these counts are only valid when the task
 * holding softirq_ctrl::lock is running.
 *
 * The per CPU counter prevents pointless wakeups of ksoftirqd in case that
 * the task which is in a softirq disabled section is preempted or blocks.
 */
struct softirq_ctrl {
	local_lock_t	lock;
	int		cnt;
};

static DEFINE_PER_CPU(struct softirq_ctrl, softirq_ctrl) = {
	.lock	= INIT_LOCAL_LOCK(softirq_ctrl.lock),
};

/**
 * local_bh_blocked() - Check for idle whether BH processing is blocked
 *
 * Returns false if the per CPU softirq::cnt is 0 otherwise true.
 *
 * This is invoked from the idle task to guard against false positive
 * softirq pending warnings, which would happen when the task which holds
 * softirq_ctrl::lock was the only running task on the CPU and blocks on
 * some other lock.
 */
bool local_bh_blocked(void)
{
	return __this_cpu_read(softirq_ctrl.cnt) != 0;
}

void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
{
	unsigned long flags;
	int newcnt;

	WARN_ON_ONCE(in_hardirq());

	/* First entry of a task into a BH disabled section? */
	if (!current->softirq_disable_cnt) {
		if (preemptible()) {
			local_lock(&softirq_ctrl.lock);
			/* Required to meet the RCU bottomhalf requirements. */
			rcu_read_lock();
		} else {
			DEBUG_LOCKS_WARN_ON(this_cpu_read(softirq_ctrl.cnt));
		}
	}

	/*
	 * Track the per CPU softirq disabled state. On RT this is per CPU
	 * state to allow preemption of bottom half disabled sections.
	 */
	newcnt = __this_cpu_add_return(softirq_ctrl.cnt, cnt);
	/*
	 * Reflect the result in the task state to prevent recursion on the
	 * local lock and to make softirq_count() & al work.
	 */
	current->softirq_disable_cnt = newcnt;

	if (IS_ENABLED(CONFIG_TRACE_IRQFLAGS) && newcnt == cnt) {
		raw_local_irq_save(flags);
		lockdep_softirqs_off(ip);
		raw_local_irq_restore(flags);
	}
}
EXPORT_SYMBOL(__local_bh_disable_ip);

static void __local_bh_enable(unsigned int cnt, bool unlock)
{
	unsigned long flags;
	int newcnt;

	DEBUG_LOCKS_WARN_ON(current->softirq_disable_cnt !=
			    this_cpu_read(softirq_ctrl.cnt));

	if (IS_ENABLED(CONFIG_TRACE_IRQFLAGS) && softirq_count() == cnt) {
		raw_local_irq_save(flags);
		lockdep_softirqs_on(_RET_IP_);
		raw_local_irq_restore(flags);
	}

	newcnt = __this_cpu_sub_return(softirq_ctrl.cnt, cnt);
	current->softirq_disable_cnt = newcnt;

	if (!newcnt && unlock) {
		rcu_read_unlock();
		local_unlock(&softirq_ctrl.lock);
	}
}

void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
{
	bool preempt_on = preemptible();
	unsigned long flags;
	u32 pending;
	int curcnt;

	WARN_ON_ONCE(in_hardirq());
	lockdep_assert_irqs_enabled();

	local_irq_save(flags);
	curcnt = __this_cpu_read(softirq_ctrl.cnt);

	/*
	 * If this is not reenabling soft interrupts, no point in trying to
	 * run pending ones.
	 */
	if (curcnt != cnt)
		goto out;

	pending = local_softirq_pending();
	if (!pending)
		goto out;

	/*
	 * If this was called from non preemptible context, wake up the
	 * softirq daemon.
	 */
	if (!preempt_on) {
		wakeup_softirqd();
		goto out;
	}

	/*
	 * Adjust softirq count to SOFTIRQ_OFFSET which makes
	 * in_serving_softirq() become true.
	 */
	cnt = SOFTIRQ_OFFSET;
	__local_bh_enable(cnt, false);
	__do_softirq();

out:
	__local_bh_enable(cnt, preempt_on);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(__local_bh_enable_ip);

/*
 * Invoked from ksoftirqd_run() outside of the interrupt disabled section
 * to acquire the per CPU local lock for reentrancy protection.
 */
static inline void ksoftirqd_run_begin(void)
{
	__local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
	local_irq_disable();
}

/* Counterpart to ksoftirqd_run_begin() */
static inline void ksoftirqd_run_end(void)
{
	__local_bh_enable(SOFTIRQ_OFFSET, true);
	WARN_ON_ONCE(in_interrupt());
	local_irq_enable();
}

static inline void softirq_handle_begin(void) { }
static inline void softirq_handle_end(void) { }

static inline bool should_wake_ksoftirqd(void)
{
	return !this_cpu_read(softirq_ctrl.cnt);
}

static inline void invoke_softirq(void)
{
	if (should_wake_ksoftirqd())
		wakeup_softirqd();
}

/*
 * flush_smp_call_function_queue() can raise a soft interrupt in a function
 * call. On RT kernels this is undesired and the only known functionality
 * in the block layer which does this is disabled on RT. If soft interrupts
 * get raised which haven't been raised before the flush, warn so it can be
 * investigated.
 */
void do_softirq_post_smp_call_flush(unsigned int was_pending)
{
	if (WARN_ON_ONCE(was_pending != local_softirq_pending()))
		invoke_softirq();
}

#else /* CONFIG_PREEMPT_RT */

/*
 * This one is for softirq.c-internal use, where hardirqs are disabled
 * legitimately:
 */
#ifdef CONFIG_TRACE_IRQFLAGS
void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
{
	unsigned long flags;

	WARN_ON_ONCE(in_hardirq());

	raw_local_irq_save(flags);
	/*
	 * The preempt tracer hooks into preempt_count_add and will break
	 * lockdep because it calls back into lockdep after SOFTIRQ_OFFSET
	 * is set and before current->softirq_enabled is cleared.
	 * We must manually increment preempt_count here and manually
	 * call the trace_preempt_off later.
	 */
	__preempt_count_add(cnt);
	/*
	 * Were softirqs turned off above:
	 */
	if (softirq_count() == (cnt & SOFTIRQ_MASK))
		lockdep_softirqs_off(ip);
	raw_local_irq_restore(flags);

	if (preempt_count() == cnt) {
#ifdef CONFIG_DEBUG_PREEMPT
		current->preempt_disable_ip = get_lock_parent_ip();
#endif
		trace_preempt_off(CALLER_ADDR0, get_lock_parent_ip());
	}
}
EXPORT_SYMBOL(__local_bh_disable_ip);
#endif /* CONFIG_TRACE_IRQFLAGS */

static void __local_bh_enable(unsigned int cnt)
{
	lockdep_assert_irqs_disabled();

	if (preempt_count() == cnt)
		trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());

	if (softirq_count() == (cnt & SOFTIRQ_MASK))
		lockdep_softirqs_on(_RET_IP_);

	__preempt_count_sub(cnt);
}

/*
 * Special-case - softirqs can safely be enabled by __do_softirq(),
 * without processing still-pending softirqs:
 */
void _local_bh_enable(void)
{
	WARN_ON_ONCE(in_hardirq());
	__local_bh_enable(SOFTIRQ_DISABLE_OFFSET);
}
EXPORT_SYMBOL(_local_bh_enable);

void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
{
	WARN_ON_ONCE(in_hardirq());
	lockdep_assert_irqs_enabled();
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_disable();
#endif
	/*
	 * Are softirqs going to be turned on now:
	 */
	if (softirq_count() == SOFTIRQ_DISABLE_OFFSET)
		lockdep_softirqs_on(ip);
	/*
	 * Keep preemption disabled until we are done with
	 * softirq processing:
	 */
	__preempt_count_sub(cnt - 1);

	if (unlikely(!in_interrupt() && local_softirq_pending())) {
		/*
		 * Run softirq if any pending. And do it in its own stack
		 * as we may be calling this deep in a task call stack already.
		 */
		do_softirq();
	}

	preempt_count_dec();
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_enable();
#endif
	preempt_check_resched();
}
EXPORT_SYMBOL(__local_bh_enable_ip);
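
/*
 * Note: the local_bh_disable()/local_bh_enable() helpers in
 * <linux/bottom_half.h> are thin wrappers around the *_ip() variants
 * above, passing SOFTIRQ_DISABLE_OFFSET and the caller's instruction
 * pointer. An illustrative use in driver code, protecting data that is
 * also touched from softirq context on this CPU:
 *
 *	local_bh_disable();
 *	... update data shared with a softirq/tasklet handler ...
 *	local_bh_enable();	// may run pending softirqs right here
 */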

static inline void softirq_handle_begin(void)
{
	__local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
}

static inline void softirq_handle_end(void)
{
	__local_bh_enable(SOFTIRQ_OFFSET);
	WARN_ON_ONCE(in_interrupt());
}

static inline void ksoftirqd_run_begin(void)
{
	local_irq_disable();
}

static inline void ksoftirqd_run_end(void)
{
	local_irq_enable();
}

static inline bool should_wake_ksoftirqd(void)
{
	return true;
}

static inline void invoke_softirq(void)
{
	if (!force_irqthreads() || !__this_cpu_read(ksoftirqd)) {
#ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
		/*
		 * We can safely execute softirq on the current stack if
		 * it is the irq stack, because it should be near empty
		 * at this stage.
		 */
		__do_softirq();
#else
		/*
		 * Otherwise, irq_exit() is called on the task stack that can
		 * be potentially deep already. So call softirq in its own stack
		 * to prevent any overrun.
		 */
		do_softirq_own_stack();
#endif
	} else {
		wakeup_softirqd();
	}
}

asmlinkage __visible void do_softirq(void)
{
	__u32 pending;
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	pending = local_softirq_pending();

	if (pending)
		do_softirq_own_stack();

	local_irq_restore(flags);
}

#endif /* !CONFIG_PREEMPT_RT */

/*
 * We restart softirq processing for at most MAX_SOFTIRQ_RESTART times,
 * but break the loop if need_resched() is set or after 2 ms.
 * The MAX_SOFTIRQ_TIME provides a nice upper bound in most cases, but in
 * certain cases, such as stop_machine(), jiffies may cease to
 * increment and so we need the MAX_SOFTIRQ_RESTART limit as
 * well to make sure we eventually return from this method.
 *
 * These limits have been established via experimentation.
 * The two things to balance are latency and fairness -
 * we want to handle softirqs as soon as possible, but they
 * should not be able to lock up the box.
 */
#define MAX_SOFTIRQ_TIME  msecs_to_jiffies(2)
#define MAX_SOFTIRQ_RESTART 10

#ifdef CONFIG_TRACE_IRQFLAGS
/*
 * When we run softirqs from irq_exit() and thus on the hardirq stack we need
 * to keep the lockdep irq context tracking as tight as possible in order to
 * not mis-qualify lock contexts and miss possible deadlocks.
 */

static inline bool lockdep_softirq_start(void)
{
	bool in_hardirq = false;

	if (lockdep_hardirq_context()) {
		in_hardirq = true;
		lockdep_hardirq_exit();
	}

	lockdep_softirq_enter();

	return in_hardirq;
}

static inline void lockdep_softirq_end(bool in_hardirq)
{
	lockdep_softirq_exit();

	if (in_hardirq)
		lockdep_hardirq_enter();
}
#else
static inline bool lockdep_softirq_start(void) { return false; }
static inline void lockdep_softirq_end(bool in_hardirq) { }
#endif
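
/*
 * handle_softirqs() below is the core processing loop, shared by
 * __do_softirq() and the ksoftirqd thread (run_ksoftirqd()). @ksirqd is
 * true only in the ksoftirqd case; on !RT kernels that path also reports
 * an RCU quiescent state via rcu_softirq_qs() after each pass over the
 * pending vector.
 */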
static void handle_softirqs(bool ksirqd)
{
	unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
	unsigned long old_flags = current->flags;
	int max_restart = MAX_SOFTIRQ_RESTART;
	struct softirq_action *h;
	bool in_hardirq;
	__u32 pending;
	int softirq_bit;

	/*
	 * Mask out PF_MEMALLOC as the current task context is borrowed for the
	 * softirq. A softirq handled, such as network RX, might set PF_MEMALLOC
	 * again if the socket is related to swapping.
	 */
	current->flags &= ~PF_MEMALLOC;

	pending = local_softirq_pending();

	softirq_handle_begin();
	in_hardirq = lockdep_softirq_start();
	account_softirq_enter(current);

restart:
	/* Reset the pending bitmask before enabling irqs */
	set_softirq_pending(0);

	local_irq_enable();

	h = softirq_vec;

	while ((softirq_bit = ffs(pending))) {
		unsigned int vec_nr;
		int prev_count;

		h += softirq_bit - 1;

		vec_nr = h - softirq_vec;
		prev_count = preempt_count();

		kstat_incr_softirqs_this_cpu(vec_nr);

		trace_softirq_entry(vec_nr);
		h->action();
		trace_softirq_exit(vec_nr);
		if (unlikely(prev_count != preempt_count())) {
			pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
			       vec_nr, softirq_to_name[vec_nr], h->action,
			       prev_count, preempt_count());
			preempt_count_set(prev_count);
		}
		h++;
		pending >>= softirq_bit;
	}

	if (!IS_ENABLED(CONFIG_PREEMPT_RT) && ksirqd)
		rcu_softirq_qs();

	local_irq_disable();

	pending = local_softirq_pending();
	if (pending) {
		if (time_before(jiffies, end) && !need_resched() &&
		    --max_restart)
			goto restart;

		wakeup_softirqd();
	}

	account_softirq_exit(current);
	lockdep_softirq_end(in_hardirq);
	softirq_handle_end();
	current_restore_flags(old_flags, PF_MEMALLOC);
}

asmlinkage __visible void __softirq_entry __do_softirq(void)
{
	handle_softirqs(false);
}

/**
 * irq_enter_rcu - Enter an interrupt context with RCU watching
 */
void irq_enter_rcu(void)
{
	__irq_enter_raw();

	if (tick_nohz_full_cpu(smp_processor_id()) ||
	    (is_idle_task(current) && (irq_count() == HARDIRQ_OFFSET)))
		tick_irq_enter();

	account_hardirq_enter(current);
}

/**
 * irq_enter - Enter an interrupt context including RCU update
 */
void irq_enter(void)
{
	ct_irq_enter();
	irq_enter_rcu();
}

static inline void tick_irq_exit(void)
{
#ifdef CONFIG_NO_HZ_COMMON
	int cpu = smp_processor_id();

	/* Make sure that timer wheel updates are propagated */
	if ((sched_core_idle_cpu(cpu) && !need_resched()) || tick_nohz_full_cpu(cpu)) {
		if (!in_hardirq())
			tick_nohz_irq_exit();
	}
#endif
}

static inline void __irq_exit_rcu(void)
{
#ifndef __ARCH_IRQ_EXIT_IRQS_DISABLED
	local_irq_disable();
#else
	lockdep_assert_irqs_disabled();
#endif
	account_hardirq_exit(current);
	preempt_count_sub(HARDIRQ_OFFSET);
	if (!in_interrupt() && local_softirq_pending())
		invoke_softirq();

	tick_irq_exit();
}

/**
 * irq_exit_rcu() - Exit an interrupt context without updating RCU
 *
 * Also processes softirqs if needed and possible.
 */
void irq_exit_rcu(void)
{
	__irq_exit_rcu();
	/* must be last! */
	lockdep_hardirq_exit();
}

/**
 * irq_exit - Exit an interrupt context, update RCU and lockdep
 *
 * Also processes softirqs if needed and possible.
 */
void irq_exit(void)
{
	__irq_exit_rcu();
	ct_irq_exit();
	/* must be last! */
	lockdep_hardirq_exit();
}

/*
 * This function must run with irqs disabled!
 */
inline void raise_softirq_irqoff(unsigned int nr)
{
	__raise_softirq_irqoff(nr);

	/*
	 * If we're in an interrupt or softirq, we're done
	 * (this also catches softirq-disabled code). We will
	 * actually run the softirq once we return from
	 * the irq or softirq.
	 *
	 * Otherwise we wake up ksoftirqd to make sure we
	 * schedule the softirq soon.
	 */
	if (!in_interrupt() && should_wake_ksoftirqd())
		wakeup_softirqd();
}

void raise_softirq(unsigned int nr)
{
	unsigned long flags;

	local_irq_save(flags);
	raise_softirq_irqoff(nr);
	local_irq_restore(flags);
}

void __raise_softirq_irqoff(unsigned int nr)
{
	lockdep_assert_irqs_disabled();
	trace_softirq_raise(nr);
	or_softirq_pending(1UL << nr);
}

void open_softirq(int nr, void (*action)(void))
{
	softirq_vec[nr].action = action;
}
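
/*
 * Usage sketch: softirq vectors are statically enumerated in
 * <linux/interrupt.h> and registered once during boot, after which the
 * vector is raised, typically from hard interrupt context. For example,
 * softirq_init() below does
 *
 *	open_softirq(TASKLET_SOFTIRQ, tasklet_action);
 *
 * and __tasklet_schedule_common() later marks that vector pending with
 * raise_softirq_irqoff(TASKLET_SOFTIRQ).
 */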

/*
 * Tasklets
 */
struct tasklet_head {
	struct tasklet_struct *head;
	struct tasklet_struct **tail;
};

static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);

static void __tasklet_schedule_common(struct tasklet_struct *t,
				      struct tasklet_head __percpu *headp,
				      unsigned int softirq_nr)
{
	struct tasklet_head *head;
	unsigned long flags;

	local_irq_save(flags);
	head = this_cpu_ptr(headp);
	t->next = NULL;
	*head->tail = t;
	head->tail = &(t->next);
	raise_softirq_irqoff(softirq_nr);
	local_irq_restore(flags);
}

void __tasklet_schedule(struct tasklet_struct *t)
{
	__tasklet_schedule_common(t, &tasklet_vec,
				  TASKLET_SOFTIRQ);
}
EXPORT_SYMBOL(__tasklet_schedule);

void __tasklet_hi_schedule(struct tasklet_struct *t)
{
	__tasklet_schedule_common(t, &tasklet_hi_vec,
				  HI_SOFTIRQ);
}
EXPORT_SYMBOL(__tasklet_hi_schedule);

static bool tasklet_clear_sched(struct tasklet_struct *t)
{
	if (test_and_clear_bit(TASKLET_STATE_SCHED, &t->state)) {
		wake_up_var(&t->state);
		return true;
	}

	WARN_ONCE(1, "tasklet SCHED state not set: %s %pS\n",
		  t->use_callback ? "callback" : "func",
		  t->use_callback ? (void *)t->callback : (void *)t->func);

	return false;
}

static void tasklet_action_common(struct tasklet_head *tl_head,
				  unsigned int softirq_nr)
{
	struct tasklet_struct *list;

	local_irq_disable();
	list = tl_head->head;
	tl_head->head = NULL;
	tl_head->tail = &tl_head->head;
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
				if (tasklet_clear_sched(t)) {
					if (t->use_callback) {
						trace_tasklet_entry(t, t->callback);
						t->callback(t);
						trace_tasklet_exit(t, t->callback);
					} else {
						trace_tasklet_entry(t, t->func);
						t->func(t->data);
						trace_tasklet_exit(t, t->func);
					}
				}
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		local_irq_disable();
		t->next = NULL;
		*tl_head->tail = t;
		tl_head->tail = &t->next;
		__raise_softirq_irqoff(softirq_nr);
		local_irq_enable();
	}
}

static __latent_entropy void tasklet_action(void)
{
	workqueue_softirq_action(false);
	tasklet_action_common(this_cpu_ptr(&tasklet_vec), TASKLET_SOFTIRQ);
}

static __latent_entropy void tasklet_hi_action(void)
{
	workqueue_softirq_action(true);
	tasklet_action_common(this_cpu_ptr(&tasklet_hi_vec), HI_SOFTIRQ);
}

void tasklet_setup(struct tasklet_struct *t,
		   void (*callback)(struct tasklet_struct *))
{
	t->next = NULL;
	t->state = 0;
	atomic_set(&t->count, 0);
	t->callback = callback;
	t->use_callback = true;
	t->data = 0;
}
EXPORT_SYMBOL(tasklet_setup);

void tasklet_init(struct tasklet_struct *t,
		  void (*func)(unsigned long), unsigned long data)
{
	t->next = NULL;
	t->state = 0;
	atomic_set(&t->count, 0);
	t->func = func;
	t->use_callback = false;
	t->data = data;
}
EXPORT_SYMBOL(tasklet_init);
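
/*
 * Illustrative driver-side sketch (hypothetical names, modern callback API;
 * tasklet_schedule() and from_tasklet() live in <linux/interrupt.h>):
 *
 *	static void my_tasklet_fn(struct tasklet_struct *t)
 *	{
 *		struct my_dev *dev = from_tasklet(dev, t, tasklet);
 *		// bottom half work for dev
 *	}
 *
 *	tasklet_setup(&dev->tasklet, my_tasklet_fn);
 *	...
 *	tasklet_schedule(&dev->tasklet);	// e.g. from the IRQ handler
 *
 * tasklet_schedule() test-and-sets TASKLET_STATE_SCHED and then calls
 * __tasklet_schedule() above.
 */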

#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
/*
 * Do not use in new code. Waiting for tasklets from atomic contexts is
 * error prone and should be avoided.
 */
void tasklet_unlock_spin_wait(struct tasklet_struct *t)
{
	while (test_bit(TASKLET_STATE_RUN, &(t)->state)) {
		if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
			/*
			 * Prevent a live lock when current preempted soft
			 * interrupt processing or prevents ksoftirqd from
			 * running. If the tasklet runs on a different CPU
			 * then this has no effect other than doing the BH
			 * disable/enable dance for nothing.
			 */
			local_bh_disable();
			local_bh_enable();
		} else {
			cpu_relax();
		}
	}
}
EXPORT_SYMBOL(tasklet_unlock_spin_wait);
#endif

void tasklet_kill(struct tasklet_struct *t)
{
	if (in_interrupt())
		pr_notice("Attempt to kill tasklet from interrupt\n");

	while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		wait_var_event(&t->state, !test_bit(TASKLET_STATE_SCHED, &t->state));

	tasklet_unlock_wait(t);
	tasklet_clear_sched(t);
}
EXPORT_SYMBOL(tasklet_kill);

#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
void tasklet_unlock(struct tasklet_struct *t)
{
	smp_mb__before_atomic();
	clear_bit(TASKLET_STATE_RUN, &t->state);
	smp_mb__after_atomic();
	wake_up_var(&t->state);
}
EXPORT_SYMBOL_GPL(tasklet_unlock);

void tasklet_unlock_wait(struct tasklet_struct *t)
{
	wait_var_event(&t->state, !test_bit(TASKLET_STATE_RUN, &t->state));
}
EXPORT_SYMBOL_GPL(tasklet_unlock_wait);
#endif

void __init softirq_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		per_cpu(tasklet_vec, cpu).tail =
			&per_cpu(tasklet_vec, cpu).head;
		per_cpu(tasklet_hi_vec, cpu).tail =
			&per_cpu(tasklet_hi_vec, cpu).head;
	}

	open_softirq(TASKLET_SOFTIRQ, tasklet_action);
	open_softirq(HI_SOFTIRQ, tasklet_hi_action);
}

static int ksoftirqd_should_run(unsigned int cpu)
{
	return local_softirq_pending();
}

static void run_ksoftirqd(unsigned int cpu)
{
	ksoftirqd_run_begin();
	if (local_softirq_pending()) {
		/*
		 * We can safely run softirq on inline stack, as we are not deep
		 * in the task stack here.
		 */
		handle_softirqs(true);
		ksoftirqd_run_end();
		cond_resched();
		return;
	}
	ksoftirqd_run_end();
}

#ifdef CONFIG_HOTPLUG_CPU
static int takeover_tasklets(unsigned int cpu)
{
	workqueue_softirq_dead(cpu);

	/* CPU is dead, so no lock needed. */
	local_irq_disable();

	/* Find end, append list for that CPU. */
	if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
		*__this_cpu_read(tasklet_vec.tail) = per_cpu(tasklet_vec, cpu).head;
		__this_cpu_write(tasklet_vec.tail, per_cpu(tasklet_vec, cpu).tail);
		per_cpu(tasklet_vec, cpu).head = NULL;
		per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
	}
	raise_softirq_irqoff(TASKLET_SOFTIRQ);

	if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
		*__this_cpu_read(tasklet_hi_vec.tail) = per_cpu(tasklet_hi_vec, cpu).head;
		__this_cpu_write(tasklet_hi_vec.tail, per_cpu(tasklet_hi_vec, cpu).tail);
		per_cpu(tasklet_hi_vec, cpu).head = NULL;
		per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
	}
	raise_softirq_irqoff(HI_SOFTIRQ);

	local_irq_enable();
	return 0;
}
#else
#define takeover_tasklets	NULL
#endif /* CONFIG_HOTPLUG_CPU */

static struct smp_hotplug_thread softirq_threads = {
	.store			= &ksoftirqd,
	.thread_should_run	= ksoftirqd_should_run,
	.thread_fn		= run_ksoftirqd,
	.thread_comm		= "ksoftirqd/%u",
};

static __init int spawn_ksoftirqd(void)
{
	cpuhp_setup_state_nocalls(CPUHP_SOFTIRQ_DEAD, "softirq:dead", NULL,
				  takeover_tasklets);
	BUG_ON(smpboot_register_percpu_thread(&softirq_threads));

	return 0;
}
early_initcall(spawn_ksoftirqd);

/*
 * [ These __weak aliases are kept in a separate compilation unit, so that
 *   GCC does not inline them incorrectly. ]
 */

int __init __weak early_irq_init(void)
{
	return 0;
}

int __init __weak arch_probe_nr_irqs(void)
{
	return NR_IRQS_LEGACY;
}

int __init __weak arch_early_irq_init(void)
{
	return 0;
}

unsigned int __weak arch_dynirq_lower_bound(unsigned int from)
{
	return from;
}