// SPDX-License-Identifier: GPL-2.0-only
/*
 *	linux/kernel/softirq.c
 *
 *	Copyright (C) 1992 Linus Torvalds
 *
 *	Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/export.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/local_lock.h>
#include <linux/mm.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/rcupdate.h>
#include <linux/ftrace.h>
#include <linux/smp.h>
#include <linux/smpboot.h>
#include <linux/tick.h>
#include <linux/irq.h>
#include <linux/wait_bit.h>
#include <linux/workqueue.h>

#include <asm/softirq_stack.h>

#define CREATE_TRACE_POINTS
#include <trace/events/irq.h>

/*
   - No shared variables, all the data are CPU local.
   - If a softirq needs serialization, let it serialize itself
     by its own spinlocks.
   - Even if softirq is serialized, only local cpu is marked for
     execution. Hence, we get something sort of weak cpu binding.
     Though it is still not clear whether it will result in better
     locality or not.

   Examples:
   - NET RX softirq. It is multithreaded and does not require
     any global serialization.
   - NET TX softirq. It kicks software netdevice queues, hence
     it is logically serialized per device, but this serialization
     is invisible to common code.
   - Tasklets: serialized wrt itself.
 */
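/*
 * Example (illustrative, condensed from net/core/dev.c): a subsystem wires
 * up its vector once at boot and raises it from the hot path:
 *
 *	open_softirq(NET_TX_SOFTIRQ, net_tx_action);
 *	...
 *	raise_softirq(NET_TX_SOFTIRQ);
 *
 * The handler then runs from softirq context on the raising CPU, which is
 * the "weak cpu binding" described above.
 */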
#ifndef __ARCH_IRQ_STAT
DEFINE_PER_CPU_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);
#endif

static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;

DEFINE_PER_CPU(struct task_struct *, ksoftirqd);

const char * const softirq_to_name[NR_SOFTIRQS] = {
	"HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "IRQ_POLL",
	"TASKLET", "SCHED", "HRTIMER", "RCU"
};

/*
 * we cannot loop indefinitely here to avoid userspace starvation,
 * but we also don't want to introduce a worst case 1/HZ latency
 * to the pending events, so let the scheduler balance
 * the softirq load for us.
 */
static void wakeup_softirqd(void)
{
	/* Interrupts are disabled: no need to stop preemption */
	struct task_struct *tsk = __this_cpu_read(ksoftirqd);

	if (tsk)
		wake_up_process(tsk);
}

#ifdef CONFIG_TRACE_IRQFLAGS
DEFINE_PER_CPU(int, hardirqs_enabled);
DEFINE_PER_CPU(int, hardirq_context);
EXPORT_PER_CPU_SYMBOL_GPL(hardirqs_enabled);
EXPORT_PER_CPU_SYMBOL_GPL(hardirq_context);
#endif

/*
 * SOFTIRQ_OFFSET usage:
 *
 * On !RT kernels 'count' is the preempt counter, on RT kernels this applies
 * to a per CPU counter and to task::softirqs_disabled_cnt.
 *
 * - count is changed by SOFTIRQ_OFFSET on entering or leaving softirq
 *   processing.
 *
 * - count is changed by SOFTIRQ_DISABLE_OFFSET (= 2 * SOFTIRQ_OFFSET)
 *   on local_bh_disable or local_bh_enable.
 *
 * This lets us distinguish between whether we are currently processing
 * softirq and whether we just have bh disabled.
 */
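/*
 * A worked example of the accounting above on a !RT kernel (a sketch,
 * using the standard offsets SOFTIRQ_OFFSET == 0x100 and
 * SOFTIRQ_DISABLE_OFFSET == 0x200 from <linux/preempt.h>):
 *
 *	local_bh_disable();	// softirq_count() == 0x200, BH disabled
 *	...
 *	local_bh_enable();	// softirq_count() == 0x000 again
 *
 * whereas softirq processing itself only adds SOFTIRQ_OFFSET (0x100), so
 * in_serving_softirq() can be told apart from a plain BH disabled section.
 */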
#ifdef CONFIG_PREEMPT_RT

/*
 * RT accounts for BH disabled sections in task::softirqs_disabled_cnt and
 * also in per CPU softirq_ctrl::cnt. This is necessary to allow tasks in a
 * softirq disabled section to be preempted.
 *
 * The per task counter is used for softirq_count(), in_softirq() and
 * in_serving_softirqs() because these counts are only valid when the task
 * holding softirq_ctrl::lock is running.
 *
 * The per CPU counter prevents pointless wakeups of ksoftirqd in case that
 * the task which is in a softirq disabled section is preempted or blocks.
 */
struct softirq_ctrl {
	local_lock_t	lock;
	int		cnt;
};

static DEFINE_PER_CPU(struct softirq_ctrl, softirq_ctrl) = {
	.lock	= INIT_LOCAL_LOCK(softirq_ctrl.lock),
};

/**
 * local_bh_blocked() - Check for idle whether BH processing is blocked
 *
 * Returns false if the per CPU softirq::cnt is 0 otherwise true.
 *
 * This is invoked from the idle task to guard against false positive
 * softirq pending warnings, which would happen if the task which holds
 * softirq_ctrl::lock was the only running task on the CPU and blocks on
 * some other lock.
 */
bool local_bh_blocked(void)
{
	return __this_cpu_read(softirq_ctrl.cnt) != 0;
}

void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
{
	unsigned long flags;
	int newcnt;

	WARN_ON_ONCE(in_hardirq());

	/* First entry of a task into a BH disabled section? */
	if (!current->softirq_disable_cnt) {
		if (preemptible()) {
			local_lock(&softirq_ctrl.lock);
			/* Required to meet the RCU bottomhalf requirements. */
			rcu_read_lock();
		} else {
			DEBUG_LOCKS_WARN_ON(this_cpu_read(softirq_ctrl.cnt));
		}
	}

	/*
	 * Track the per CPU softirq disabled state. On RT this is per CPU
	 * state to allow preemption of bottom half disabled sections.
	 */
	newcnt = __this_cpu_add_return(softirq_ctrl.cnt, cnt);
	/*
	 * Reflect the result in the task state to prevent recursion on the
	 * local lock and to make softirq_count() et al. work.
	 */
	current->softirq_disable_cnt = newcnt;

	if (IS_ENABLED(CONFIG_TRACE_IRQFLAGS) && newcnt == cnt) {
		raw_local_irq_save(flags);
		lockdep_softirqs_off(ip);
		raw_local_irq_restore(flags);
	}
}
EXPORT_SYMBOL(__local_bh_disable_ip);

static void __local_bh_enable(unsigned int cnt, bool unlock)
{
	unsigned long flags;
	int newcnt;

	DEBUG_LOCKS_WARN_ON(current->softirq_disable_cnt !=
			    this_cpu_read(softirq_ctrl.cnt));

	if (IS_ENABLED(CONFIG_TRACE_IRQFLAGS) && softirq_count() == cnt) {
		raw_local_irq_save(flags);
		lockdep_softirqs_on(_RET_IP_);
		raw_local_irq_restore(flags);
	}

	newcnt = __this_cpu_sub_return(softirq_ctrl.cnt, cnt);
	current->softirq_disable_cnt = newcnt;

	if (!newcnt && unlock) {
		rcu_read_unlock();
		local_unlock(&softirq_ctrl.lock);
	}
}

void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
{
	bool preempt_on = preemptible();
	unsigned long flags;
	u32 pending;
	int curcnt;

	WARN_ON_ONCE(in_hardirq());
	lockdep_assert_irqs_enabled();

	local_irq_save(flags);
	curcnt = __this_cpu_read(softirq_ctrl.cnt);

	/*
	 * If this is not reenabling soft interrupts, no point in trying to
	 * run pending ones.
	 */
	if (curcnt != cnt)
		goto out;

	pending = local_softirq_pending();
	if (!pending)
		goto out;

	/*
	 * If this was called from non preemptible context, wake up the
	 * softirq daemon.
	 */
	if (!preempt_on) {
		wakeup_softirqd();
		goto out;
	}

	/*
	 * Adjust softirq count to SOFTIRQ_OFFSET which makes
	 * in_serving_softirq() become true.
	 */
	cnt = SOFTIRQ_OFFSET;
	__local_bh_enable(cnt, false);
	__do_softirq();

out:
	__local_bh_enable(cnt, preempt_on);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(__local_bh_enable_ip);
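/*
 * Net effect on RT (an illustrative sketch, not literal code): a bottom
 * half disabled section is preemptible and is serialized by the per CPU
 * local lock taken above:
 *
 *	local_bh_disable();	// may take softirq_ctrl.lock + rcu_read_lock()
 *	...			// preemptible, softirqs stay off this CPU
 *	local_bh_enable();	// may run pending softirqs, then unlock
 */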
/*
 * Invoked from ksoftirqd_run() outside of the interrupt disabled section
 * to acquire the per CPU local lock for reentrancy protection.
 */
static inline void ksoftirqd_run_begin(void)
{
	__local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
	local_irq_disable();
}

/* Counterpart to ksoftirqd_run_begin() */
static inline void ksoftirqd_run_end(void)
{
	__local_bh_enable(SOFTIRQ_OFFSET, true);
	WARN_ON_ONCE(in_interrupt());
	local_irq_enable();
}

static inline void softirq_handle_begin(void) { }
static inline void softirq_handle_end(void) { }

static inline bool should_wake_ksoftirqd(void)
{
	return !this_cpu_read(softirq_ctrl.cnt);
}

static inline void invoke_softirq(void)
{
	if (should_wake_ksoftirqd())
		wakeup_softirqd();
}

/*
 * flush_smp_call_function_queue() can raise a soft interrupt in a function
 * call. On RT kernels this is undesired and the only known functionality
 * in the block layer which does this is disabled on RT. If soft interrupts
 * get raised which haven't been raised before the flush, warn so it can be
 * investigated.
 */
void do_softirq_post_smp_call_flush(unsigned int was_pending)
{
	if (WARN_ON_ONCE(was_pending != local_softirq_pending()))
		invoke_softirq();
}

#else /* CONFIG_PREEMPT_RT */

/*
 * This one is for softirq.c-internal use, where hardirqs are disabled
 * legitimately:
 */
#ifdef CONFIG_TRACE_IRQFLAGS
void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
{
	unsigned long flags;

	WARN_ON_ONCE(in_hardirq());

	raw_local_irq_save(flags);
	/*
	 * The preempt tracer hooks into preempt_count_add and will break
	 * lockdep because it calls back into lockdep after SOFTIRQ_OFFSET
	 * is set and before current->softirq_enabled is cleared.
	 * We must manually increment preempt_count here and manually
	 * call the trace_preempt_off later.
	 */
	__preempt_count_add(cnt);
	/*
	 * Were softirqs turned off above:
	 */
	if (softirq_count() == (cnt & SOFTIRQ_MASK))
		lockdep_softirqs_off(ip);
	raw_local_irq_restore(flags);

	if (preempt_count() == cnt) {
#ifdef CONFIG_DEBUG_PREEMPT
		current->preempt_disable_ip = get_lock_parent_ip();
#endif
		trace_preempt_off(CALLER_ADDR0, get_lock_parent_ip());
	}
}
EXPORT_SYMBOL(__local_bh_disable_ip);
#endif /* CONFIG_TRACE_IRQFLAGS */

static void __local_bh_enable(unsigned int cnt)
{
	lockdep_assert_irqs_disabled();

	if (preempt_count() == cnt)
		trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());

	if (softirq_count() == (cnt & SOFTIRQ_MASK))
		lockdep_softirqs_on(_RET_IP_);

	__preempt_count_sub(cnt);
}

/*
 * Special-case - softirqs can safely be enabled by __do_softirq(),
 * without processing still-pending softirqs:
 */
void _local_bh_enable(void)
{
	WARN_ON_ONCE(in_hardirq());
	__local_bh_enable(SOFTIRQ_DISABLE_OFFSET);
}
EXPORT_SYMBOL(_local_bh_enable);
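/*
 * Callers outside of this file pair the checked variant below with
 * local_bh_disable(), e.g. (an illustrative sketch):
 *
 *	local_bh_disable();
 *	...			// touch softirq-shared per-CPU state
 *	local_bh_enable();	// may process pending softirqs right here
 */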
void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
{
	WARN_ON_ONCE(in_hardirq());
	lockdep_assert_irqs_enabled();
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_disable();
#endif
	/*
	 * Are softirqs going to be turned on now:
	 */
	if (softirq_count() == SOFTIRQ_DISABLE_OFFSET)
		lockdep_softirqs_on(ip);
	/*
	 * Keep preemption disabled until we are done with
	 * softirq processing:
	 */
	__preempt_count_sub(cnt - 1);

	if (unlikely(!in_interrupt() && local_softirq_pending())) {
		/*
		 * Run softirq if any pending. And do it in its own stack
		 * as we may be calling this deep in a task call stack already.
		 */
		do_softirq();
	}

	preempt_count_dec();
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_enable();
#endif
	preempt_check_resched();
}
EXPORT_SYMBOL(__local_bh_enable_ip);

static inline void softirq_handle_begin(void)
{
	__local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
}

static inline void softirq_handle_end(void)
{
	__local_bh_enable(SOFTIRQ_OFFSET);
	WARN_ON_ONCE(in_interrupt());
}

static inline void ksoftirqd_run_begin(void)
{
	local_irq_disable();
}

static inline void ksoftirqd_run_end(void)
{
	local_irq_enable();
}

static inline bool should_wake_ksoftirqd(void)
{
	return true;
}

static inline void invoke_softirq(void)
{
	if (!force_irqthreads() || !__this_cpu_read(ksoftirqd)) {
#ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
		/*
		 * We can safely execute softirq on the current stack if
		 * it is the irq stack, because it should be near empty
		 * at this stage.
		 */
		__do_softirq();
#else
		/*
		 * Otherwise, irq_exit() is called on the task stack that can
		 * be potentially deep already. So call softirq in its own stack
		 * to prevent any overrun.
		 */
		do_softirq_own_stack();
#endif
	} else {
		wakeup_softirqd();
	}
}

asmlinkage __visible void do_softirq(void)
{
	__u32 pending;
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	pending = local_softirq_pending();

	if (pending)
		do_softirq_own_stack();

	local_irq_restore(flags);
}

#endif /* !CONFIG_PREEMPT_RT */

/*
 * We restart softirq processing for at most MAX_SOFTIRQ_RESTART times,
 * but break the loop if need_resched() is set or after 2 ms.
 * The MAX_SOFTIRQ_TIME provides a nice upper bound in most cases, but in
 * certain cases, such as stop_machine(), jiffies may cease to
 * increment and so we need the MAX_SOFTIRQ_RESTART limit as
 * well to make sure we eventually return from this method.
 *
 * These limits have been established via experimentation.
 * The two things to balance are latency against fairness -
 * we want to handle softirqs as soon as possible, but they
 * should not be able to lock up the box.
 */
#define MAX_SOFTIRQ_TIME  msecs_to_jiffies(2)
#define MAX_SOFTIRQ_RESTART 10
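/*
 * Worked numbers for the bounds above (illustrative): with HZ=1000,
 * MAX_SOFTIRQ_TIME evaluates to 2 jiffies, with HZ=250 it rounds up to a
 * single jiffy; independently, a softirq storm is capped at
 * MAX_SOFTIRQ_RESTART (10) passes before the rest is punted to ksoftirqd.
 */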
#ifdef CONFIG_TRACE_IRQFLAGS
/*
 * When we run softirqs from irq_exit() and thus on the hardirq stack we need
 * to keep the lockdep irq context tracking as tight as possible in order to
 * not mis-qualify lock contexts and miss possible deadlocks.
 */

static inline bool lockdep_softirq_start(void)
{
	bool in_hardirq = false;

	if (lockdep_hardirq_context()) {
		in_hardirq = true;
		lockdep_hardirq_exit();
	}

	lockdep_softirq_enter();

	return in_hardirq;
}

static inline void lockdep_softirq_end(bool in_hardirq)
{
	lockdep_softirq_exit();

	if (in_hardirq)
		lockdep_hardirq_enter();
}
#else
static inline bool lockdep_softirq_start(void) { return false; }
static inline void lockdep_softirq_end(bool in_hardirq) { }
#endif
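/*
 * The loop below scans the pending bitmask lowest bit first via ffs().
 * Worked example (illustrative): pending == 0x0a (TIMER | NET_RX) makes
 * ffs() return 2, so h advances by one to the TIMER vector and runs it;
 * then pending >>= 2 leaves 0x02 and the next ffs() lands on NET_RX.
 */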
static void handle_softirqs(bool ksirqd)
{
	unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
	unsigned long old_flags = current->flags;
	int max_restart = MAX_SOFTIRQ_RESTART;
	struct softirq_action *h;
	bool in_hardirq;
	__u32 pending;
	int softirq_bit;

	/*
	 * Mask out PF_MEMALLOC as the current task context is borrowed for the
	 * softirq. A handled softirq, such as network RX, might set PF_MEMALLOC
	 * again if the socket is related to swapping.
	 */
	current->flags &= ~PF_MEMALLOC;

	pending = local_softirq_pending();

	softirq_handle_begin();
	in_hardirq = lockdep_softirq_start();
	account_softirq_enter(current);

restart:
	/* Reset the pending bitmask before enabling irqs */
	set_softirq_pending(0);

	local_irq_enable();

	h = softirq_vec;

	while ((softirq_bit = ffs(pending))) {
		unsigned int vec_nr;
		int prev_count;

		h += softirq_bit - 1;

		vec_nr = h - softirq_vec;
		prev_count = preempt_count();

		kstat_incr_softirqs_this_cpu(vec_nr);

		trace_softirq_entry(vec_nr);
		h->action();
		trace_softirq_exit(vec_nr);
		if (unlikely(prev_count != preempt_count())) {
			pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
			       vec_nr, softirq_to_name[vec_nr], h->action,
			       prev_count, preempt_count());
			preempt_count_set(prev_count);
		}
		h++;
		pending >>= softirq_bit;
	}

	if (!IS_ENABLED(CONFIG_PREEMPT_RT) && ksirqd)
		rcu_softirq_qs();

	local_irq_disable();

	pending = local_softirq_pending();
	if (pending) {
		if (time_before(jiffies, end) && !need_resched() &&
		    --max_restart)
			goto restart;

		wakeup_softirqd();
	}

	account_softirq_exit(current);
	lockdep_softirq_end(in_hardirq);
	softirq_handle_end();
	current_restore_flags(old_flags, PF_MEMALLOC);
}

asmlinkage __visible void __softirq_entry __do_softirq(void)
{
	handle_softirqs(false);
}
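/*
 * How __do_softirq() is normally reached (a sketch of the call graph, not
 * literal code):
 *
 *	irq_enter();
 *	generic_handle_irq(...);	// handler may raise_softirq()
 *	irq_exit();
 *		-> invoke_softirq()
 *			-> __do_softirq() / do_softirq_own_stack(),
 *			   or wakeup_softirqd() with forced irq threading
 */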
/**
 * irq_enter_rcu - Enter an interrupt context
 */
void irq_enter_rcu(void)
{
	__irq_enter_raw();

	if (tick_nohz_full_cpu(smp_processor_id()) ||
	    (is_idle_task(current) && (irq_count() == HARDIRQ_OFFSET)))
		tick_irq_enter();

	account_hardirq_enter(current);
}

/**
 * irq_enter - Enter an interrupt context including RCU update
 */
void irq_enter(void)
{
	ct_irq_enter();
	irq_enter_rcu();
}

static inline void tick_irq_exit(void)
{
#ifdef CONFIG_NO_HZ_COMMON
	int cpu = smp_processor_id();

	/* Make sure that timer wheel updates are propagated */
	if ((sched_core_idle_cpu(cpu) && !need_resched()) || tick_nohz_full_cpu(cpu)) {
		if (!in_hardirq())
			tick_nohz_irq_exit();
	}
#endif
}

static inline void __irq_exit_rcu(void)
{
#ifndef __ARCH_IRQ_EXIT_IRQS_DISABLED
	local_irq_disable();
#else
	lockdep_assert_irqs_disabled();
#endif
	account_hardirq_exit(current);
	preempt_count_sub(HARDIRQ_OFFSET);
	if (!in_interrupt() && local_softirq_pending())
		invoke_softirq();

	tick_irq_exit();
}

/**
 * irq_exit_rcu() - Exit an interrupt context without updating RCU
 *
 * Also processes softirqs if needed and possible.
 */
void irq_exit_rcu(void)
{
	__irq_exit_rcu();
	/* must be last! */
	lockdep_hardirq_exit();
}

/**
 * irq_exit - Exit an interrupt context, update RCU and lockdep
 *
 * Also processes softirqs if needed and possible.
 */
void irq_exit(void)
{
	__irq_exit_rcu();
	ct_irq_exit();
	/* must be last! */
	lockdep_hardirq_exit();
}
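/*
 * The two raising interfaces below, side by side (illustrative): callers
 * that already run with interrupts disabled, e.g. a hardirq handler, can
 * use the _irqoff variant directly, everybody else the wrapper:
 *
 *	raise_softirq_irqoff(NET_RX_SOFTIRQ);	// irqs known to be off
 *	raise_softirq(NET_RX_SOFTIRQ);		// saves/restores irq state
 */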
/*
 * This function must run with irqs disabled!
 */
inline void raise_softirq_irqoff(unsigned int nr)
{
	__raise_softirq_irqoff(nr);

	/*
	 * If we're in an interrupt or softirq, we're done
	 * (this also catches softirq-disabled code). We will
	 * actually run the softirq once we return from
	 * the irq or softirq.
	 *
	 * Otherwise we wake up ksoftirqd to make sure we
	 * schedule the softirq soon.
	 */
	if (!in_interrupt() && should_wake_ksoftirqd())
		wakeup_softirqd();
}

void raise_softirq(unsigned int nr)
{
	unsigned long flags;

	local_irq_save(flags);
	raise_softirq_irqoff(nr);
	local_irq_restore(flags);
}

void __raise_softirq_irqoff(unsigned int nr)
{
	lockdep_assert_irqs_disabled();
	trace_softirq_raise(nr);
	or_softirq_pending(1UL << nr);
}

void open_softirq(int nr, void (*action)(void))
{
	softirq_vec[nr].action = action;
}

/*
 * Tasklets
 */
struct tasklet_head {
	struct tasklet_struct *head;
	struct tasklet_struct **tail;
};

static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);

static void __tasklet_schedule_common(struct tasklet_struct *t,
				      struct tasklet_head __percpu *headp,
				      unsigned int softirq_nr)
{
	struct tasklet_head *head;
	unsigned long flags;

	local_irq_save(flags);
	head = this_cpu_ptr(headp);
	t->next = NULL;
	*head->tail = t;
	head->tail = &(t->next);
	raise_softirq_irqoff(softirq_nr);
	local_irq_restore(flags);
}
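/*
 * List invariant used above (illustrative): 'tail' always points at the
 * slot for the next append, so:
 *
 *	empty:		head == NULL,			tail == &head
 *	one entry:	head == t1,			tail == &t1->next
 *	two entries:	head == t1, t1->next == t2,	tail == &t2->next
 */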
void __tasklet_schedule(struct tasklet_struct *t)
{
	__tasklet_schedule_common(t, &tasklet_vec,
				  TASKLET_SOFTIRQ);
}
EXPORT_SYMBOL(__tasklet_schedule);

void __tasklet_hi_schedule(struct tasklet_struct *t)
{
	__tasklet_schedule_common(t, &tasklet_hi_vec,
				  HI_SOFTIRQ);
}
EXPORT_SYMBOL(__tasklet_hi_schedule);

static bool tasklet_clear_sched(struct tasklet_struct *t)
{
	if (test_and_clear_bit(TASKLET_STATE_SCHED, &t->state)) {
		wake_up_var(&t->state);
		return true;
	}

	WARN_ONCE(1, "tasklet SCHED state not set: %s %pS\n",
		  t->use_callback ? "callback" : "func",
		  t->use_callback ? (void *)t->callback : (void *)t->func);

	return false;
}

static void tasklet_action_common(struct tasklet_head *tl_head,
				  unsigned int softirq_nr)
{
	struct tasklet_struct *list;

	local_irq_disable();
	list = tl_head->head;
	tl_head->head = NULL;
	tl_head->tail = &tl_head->head;
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
				if (tasklet_clear_sched(t)) {
					if (t->use_callback) {
						trace_tasklet_entry(t, t->callback);
						t->callback(t);
						trace_tasklet_exit(t, t->callback);
					} else {
						trace_tasklet_entry(t, t->func);
						t->func(t->data);
						trace_tasklet_exit(t, t->func);
					}
				}
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		local_irq_disable();
		t->next = NULL;
		*tl_head->tail = t;
		tl_head->tail = &t->next;
		__raise_softirq_irqoff(softirq_nr);
		local_irq_enable();
	}
}

static __latent_entropy void tasklet_action(void)
{
	workqueue_softirq_action(false);
	tasklet_action_common(this_cpu_ptr(&tasklet_vec), TASKLET_SOFTIRQ);
}

static __latent_entropy void tasklet_hi_action(void)
{
	workqueue_softirq_action(true);
	tasklet_action_common(this_cpu_ptr(&tasklet_hi_vec), HI_SOFTIRQ);
}
void tasklet_setup(struct tasklet_struct *t,
		   void (*callback)(struct tasklet_struct *))
{
	t->next = NULL;
	t->state = 0;
	atomic_set(&t->count, 0);
	t->callback = callback;
	t->use_callback = true;
	t->data = 0;
}
EXPORT_SYMBOL(tasklet_setup);

void tasklet_init(struct tasklet_struct *t,
		  void (*func)(unsigned long), unsigned long data)
{
	t->next = NULL;
	t->state = 0;
	atomic_set(&t->count, 0);
	t->func = func;
	t->use_callback = false;
	t->data = data;
}
EXPORT_SYMBOL(tasklet_init);
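/*
 * Putting the tasklet API together (an illustrative driver-side sketch;
 * 'struct my_dev' and the handler are hypothetical):
 *
 *	static void my_dev_tasklet_fn(struct tasklet_struct *t)
 *	{
 *		struct my_dev *dev = container_of(t, struct my_dev, tasklet);
 *		// bottom half work for dev
 *	}
 *
 *	tasklet_setup(&dev->tasklet, my_dev_tasklet_fn);
 *	tasklet_schedule(&dev->tasklet);	// e.g. from the irq handler
 *	tasklet_kill(&dev->tasklet);		// on teardown, may sleep
 */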
#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
/*
 * Do not use in new code. Waiting for tasklets from atomic contexts is
 * error prone and should be avoided.
 */
void tasklet_unlock_spin_wait(struct tasklet_struct *t)
{
	while (test_bit(TASKLET_STATE_RUN, &(t)->state)) {
		if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
			/*
			 * Prevent a live lock when current preempted soft
			 * interrupt processing or prevents ksoftirqd from
			 * running. If the tasklet runs on a different CPU
			 * then this has no effect other than doing the BH
			 * disable/enable dance for nothing.
			 */
			local_bh_disable();
			local_bh_enable();
		} else {
			cpu_relax();
		}
	}
}
EXPORT_SYMBOL(tasklet_unlock_spin_wait);
#endif

void tasklet_kill(struct tasklet_struct *t)
{
	if (in_interrupt())
		pr_notice("Attempt to kill tasklet from interrupt\n");

	while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		wait_var_event(&t->state, !test_bit(TASKLET_STATE_SCHED, &t->state));

	tasklet_unlock_wait(t);
	tasklet_clear_sched(t);
}
EXPORT_SYMBOL(tasklet_kill);

#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
void tasklet_unlock(struct tasklet_struct *t)
{
	smp_mb__before_atomic();
	clear_bit(TASKLET_STATE_RUN, &t->state);
	smp_mb__after_atomic();
	wake_up_var(&t->state);
}
EXPORT_SYMBOL_GPL(tasklet_unlock);

void tasklet_unlock_wait(struct tasklet_struct *t)
{
	wait_var_event(&t->state, !test_bit(TASKLET_STATE_RUN, &t->state));
}
EXPORT_SYMBOL_GPL(tasklet_unlock_wait);
#endif

void __init softirq_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		per_cpu(tasklet_vec, cpu).tail =
			&per_cpu(tasklet_vec, cpu).head;
		per_cpu(tasklet_hi_vec, cpu).tail =
			&per_cpu(tasklet_hi_vec, cpu).head;
	}

	open_softirq(TASKLET_SOFTIRQ, tasklet_action);
	open_softirq(HI_SOFTIRQ, tasklet_hi_action);
}

static int ksoftirqd_should_run(unsigned int cpu)
{
	return local_softirq_pending();
}

static void run_ksoftirqd(unsigned int cpu)
{
	ksoftirqd_run_begin();
	if (local_softirq_pending()) {
		/*
		 * We can safely run softirq on the inline stack, as we are
		 * not deep in the task stack here.
		 */
		handle_softirqs(true);
		ksoftirqd_run_end();
		cond_resched();
		return;
	}
	ksoftirqd_run_end();
}
#ifdef CONFIG_HOTPLUG_CPU
static int takeover_tasklets(unsigned int cpu)
{
	workqueue_softirq_dead(cpu);

	/* CPU is dead, so no lock needed. */
	local_irq_disable();

	/* Find end, append list for that CPU. */
	if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
		*__this_cpu_read(tasklet_vec.tail) = per_cpu(tasklet_vec, cpu).head;
		__this_cpu_write(tasklet_vec.tail, per_cpu(tasklet_vec, cpu).tail);
		per_cpu(tasklet_vec, cpu).head = NULL;
		per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
	}
	raise_softirq_irqoff(TASKLET_SOFTIRQ);

	if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
		*__this_cpu_read(tasklet_hi_vec.tail) = per_cpu(tasklet_hi_vec, cpu).head;
		__this_cpu_write(tasklet_hi_vec.tail, per_cpu(tasklet_hi_vec, cpu).tail);
		per_cpu(tasklet_hi_vec, cpu).head = NULL;
		per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
	}
	raise_softirq_irqoff(HI_SOFTIRQ);

	local_irq_enable();
	return 0;
}
#else
#define takeover_tasklets	NULL
#endif /* CONFIG_HOTPLUG_CPU */

static struct smp_hotplug_thread softirq_threads = {
	.store			= &ksoftirqd,
	.thread_should_run	= ksoftirqd_should_run,
	.thread_fn		= run_ksoftirqd,
	.thread_comm		= "ksoftirqd/%u",
};

static __init int spawn_ksoftirqd(void)
{
	cpuhp_setup_state_nocalls(CPUHP_SOFTIRQ_DEAD, "softirq:dead", NULL,
				  takeover_tasklets);
	BUG_ON(smpboot_register_percpu_thread(&softirq_threads));

	return 0;
}
early_initcall(spawn_ksoftirqd);

/*
 * [ These __weak aliases are kept in a separate compilation unit, so that
 *   GCC does not inline them incorrectly. ]
 */

int __init __weak early_irq_init(void)
{
	return 0;
}

int __init __weak arch_probe_nr_irqs(void)
{
	return NR_IRQS_LEGACY;
}

int __init __weak arch_early_irq_init(void)
{
	return 0;
}

unsigned int __weak arch_dynirq_lower_bound(unsigned int from)
{
	return from;
}