// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Derived from arch/i386/kernel/irq.c
 *   Copyright (C) 1992 Linus Torvalds
 * Adapted from arch/i386 by Gary Thomas
 *   Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Updated and modified by Cort Dougan <cort@fsmlabs.com>
 *   Copyright (C) 1996-2001 Cort Dougan
 * Adapted for Power Macintosh by Paul Mackerras
 *   Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au)
 *
 * This file contains the code used by various IRQ handling routines:
 * asking for different IRQs should be done through these routines
 * instead of just grabbing them. Thus setups with different IRQ numbers
 * shouldn't result in any weird surprises, and installing new handlers
 * should be easier.
 */

#undef DEBUG

#include <linux/export.h>
#include <linux/threads.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <linux/profile.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/radix-tree.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/debugfs.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/vmalloc.h>
#include <linux/pgtable.h>
#include <linux/static_call.h>

#include <linux/uaccess.h>
#include <asm/interrupt.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/cache.h>
#include <asm/ptrace.h>
#include <asm/machdep.h>
#include <asm/udbg.h>
#include <asm/smp.h>
#include <asm/hw_irq.h>
#include <asm/softirq_stack.h>
#include <asm/ppc_asm.h>

#include <asm/paca.h>
#include <asm/firmware.h>
#include <asm/lv1call.h>
#include <asm/dbell.h>
#include <asm/trace.h>
#include <asm/cpu_has_feature.h>

int distribute_irqs = 1;

static inline void next_interrupt(struct pt_regs *regs)
{
	if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) {
		WARN_ON(!(local_paca->irq_happened & PACA_IRQ_HARD_DIS));
		WARN_ON(irq_soft_mask_return() != IRQS_ALL_DISABLED);
	}

	/*
	 * We are responding to the next interrupt, so interrupt-off
	 * latencies should be reset here.
	 */
	lockdep_hardirq_exit();
	trace_hardirqs_on();
	trace_hardirqs_off();
	lockdep_hardirq_enter();
}

static inline bool irq_happened_test_and_clear(u8 irq)
{
	if (local_paca->irq_happened & irq) {
		local_paca->irq_happened &= ~irq;
		return true;
	}
	return false;
}
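
/*
 * Illustrative sketch of the lazy soft-masking flow the helpers above
 * support (comment only, not built; the "device raises..." step is a
 * description, not code). A maskable interrupt arriving while
 * soft-masked is latched as a PACA_IRQ_* bit in paca->irq_happened by
 * the low-level entry code, then consumed on re-enable:
 *
 *	local_irq_disable();	// sets irq_soft_mask, leaves MSR[EE] on
 *	// ...device raises an external interrupt: entry code latches
 *	// PACA_IRQ_EE, hard disables (MSR[EE]=0), and returns...
 *	local_irq_enable();	// arch_local_irq_restore(IRQS_ENABLED)
 *				// finds irq_happened != 0 and replays
 */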

static __no_kcsan void __replay_soft_interrupts(void)
{
	struct pt_regs regs;

	/*
	 * We use local_paca rather than get_paca() to avoid all the
	 * debug_smp_processor_id() business in this low level function.
	 */

	if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) {
		WARN_ON_ONCE(mfmsr() & MSR_EE);
		WARN_ON(!(local_paca->irq_happened & PACA_IRQ_HARD_DIS));
		WARN_ON(local_paca->irq_happened & PACA_IRQ_REPLAYING);
	}

	/*
	 * PACA_IRQ_REPLAYING prevents interrupt handlers from enabling
	 * MSR[EE] to get PMIs, which can result in more IRQs becoming
	 * pending.
	 */
	local_paca->irq_happened |= PACA_IRQ_REPLAYING;

	ppc_save_regs(&regs);
	regs.softe = IRQS_ENABLED;
	regs.msr |= MSR_EE;

	/*
	 * Force the delivery of pending soft-disabled interrupts on PS3.
	 * Any HV call will have this side effect.
	 */
	if (firmware_has_feature(FW_FEATURE_PS3_LV1)) {
		u64 tmp, tmp2;
		lv1_get_version_info(&tmp, &tmp2);
	}

	/*
	 * Check if a Hypervisor Maintenance interrupt happened.
	 * This is a higher priority interrupt than the others, so
	 * replay it first.
	 */
	if (IS_ENABLED(CONFIG_PPC_BOOK3S) &&
	    irq_happened_test_and_clear(PACA_IRQ_HMI)) {
		regs.trap = INTERRUPT_HMI;
		handle_hmi_exception(&regs);
		next_interrupt(&regs);
	}

	if (irq_happened_test_and_clear(PACA_IRQ_DEC)) {
		regs.trap = INTERRUPT_DECREMENTER;
		timer_interrupt(&regs);
		next_interrupt(&regs);
	}

	if (irq_happened_test_and_clear(PACA_IRQ_EE)) {
		regs.trap = INTERRUPT_EXTERNAL;
		do_IRQ(&regs);
		next_interrupt(&regs);
	}

	if (IS_ENABLED(CONFIG_PPC_DOORBELL) &&
	    irq_happened_test_and_clear(PACA_IRQ_DBELL)) {
		regs.trap = INTERRUPT_DOORBELL;
		doorbell_exception(&regs);
		next_interrupt(&regs);
	}

	/* Book3E does not support soft-masking PMI interrupts */
	if (IS_ENABLED(CONFIG_PPC_BOOK3S) &&
	    irq_happened_test_and_clear(PACA_IRQ_PMI)) {
		regs.trap = INTERRUPT_PERFMON;
		performance_monitor_exception(&regs);
		next_interrupt(&regs);
	}

	local_paca->irq_happened &= ~PACA_IRQ_REPLAYING;
}

__no_kcsan void replay_soft_interrupts(void)
{
	irq_enter(); /* See comment in arch_local_irq_restore */
	__replay_soft_interrupts();
	irq_exit();
}

#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_PPC_KUAP)
static inline __no_kcsan void replay_soft_interrupts_irqrestore(void)
{
	unsigned long kuap_state = get_kuap();

	/*
	 * Check if anything calls local_irq_enable/restore() when KUAP is
	 * disabled (user access enabled). We handle that case here by saving
	 * and re-locking AMR but we shouldn't get here in the first place,
	 * hence the warning.
	 */
	kuap_assert_locked();

	if (kuap_state != AMR_KUAP_BLOCKED)
		set_kuap(AMR_KUAP_BLOCKED);

	__replay_soft_interrupts();

	if (kuap_state != AMR_KUAP_BLOCKED)
		set_kuap(kuap_state);
}
#else
#define replay_soft_interrupts_irqrestore() __replay_soft_interrupts()
#endif
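
/*
 * For reference, an illustrative (and deliberately non-atomic) C
 * equivalent of the restart-table asm sequence in
 * arch_local_irq_restore() below. The real sequence must be atomic
 * with respect to soft-masked interrupts, which is what the restart
 * table provides; this sketch has exactly the race the in-function
 * comment describes:
 *
 *	if (local_paca->irq_happened)
 *		goto happened;
 *	// <-- a soft-masked interrupt latched here would be left
 *	//     pending with the mask about to be cleared
 *	local_paca->irq_soft_mask = IRQS_ENABLED;
 */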

notrace __no_kcsan void arch_local_irq_restore(unsigned long mask)
{
	unsigned char irq_happened;

	/* Write the new soft-enabled value if it is a disable */
	if (mask) {
		irq_soft_mask_set(mask);
		return;
	}

	if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) {
		WARN_ON_ONCE(in_nmi());
		WARN_ON_ONCE(in_hardirq());
		WARN_ON_ONCE(local_paca->irq_happened & PACA_IRQ_REPLAYING);
	}

again:
	/*
	 * After the stb, interrupts are unmasked and there are no interrupts
	 * pending replay. The restart sequence makes this atomic with
	 * respect to soft-masked interrupts. If this was just a simple code
	 * sequence, a soft-masked interrupt could become pending right after
	 * the comparison and before the stb.
	 *
	 * This allows interrupts to be unmasked without hard disabling, and
	 * also without new hard interrupts coming in ahead of pending ones.
	 */
	asm goto(
"1:					\n"
"		lbz	9,%0(13)	\n"
"		cmpwi	9,0		\n"
"		bne	%l[happened]	\n"
"		stb	9,%1(13)	\n"
"2:					\n"
	RESTART_TABLE(1b, 2b, 1b)
	: : "i" (offsetof(struct paca_struct, irq_happened)),
	    "i" (offsetof(struct paca_struct, irq_soft_mask))
	: "cr0", "r9"
	: happened);

	if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
		WARN_ON_ONCE(!(mfmsr() & MSR_EE));

	/*
	 * If we came here from the replay below, we might have a preempt
	 * pending (due to preempt_enable_no_resched()). Have to check now.
	 */
	preempt_check_resched();

	return;

happened:
	irq_happened = READ_ONCE(local_paca->irq_happened);
	if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
		WARN_ON_ONCE(!irq_happened);

	if (irq_happened == PACA_IRQ_HARD_DIS) {
		if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
			WARN_ON_ONCE(mfmsr() & MSR_EE);
		irq_soft_mask_set(IRQS_ENABLED);
		local_paca->irq_happened = 0;
		__hard_irq_enable();
		preempt_check_resched();
		return;
	}

	/* Have interrupts to replay, need to hard disable first */
	if (!(irq_happened & PACA_IRQ_HARD_DIS)) {
		if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) {
			if (!(mfmsr() & MSR_EE)) {
				/*
				 * An interrupt could have come in and cleared
				 * MSR[EE] and set IRQ_HARD_DIS, so check
				 * IRQ_HARD_DIS again and warn if it is still
				 * clear.
				 */
				irq_happened = READ_ONCE(local_paca->irq_happened);
				WARN_ON_ONCE(!(irq_happened & PACA_IRQ_HARD_DIS));
			}
		}
		__hard_irq_disable();
		local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
	} else {
		if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) {
			if (WARN_ON_ONCE(mfmsr() & MSR_EE))
				__hard_irq_disable();
		}
	}

	/*
	 * Disable preempt here, so that the below preempt_enable will
	 * perform resched if required (a replayed interrupt may set
	 * need_resched).
	 */
	preempt_disable();
	irq_soft_mask_set(IRQS_ALL_DISABLED);
	trace_hardirqs_off();

	/*
	 * Now enter interrupt context. The interrupt handlers themselves
	 * also call irq_enter/exit (which is okay, they can nest). But call
	 * it here now to hold off softirqs until the below irq_exit(). If
	 * we allowed replayed handlers to run softirqs, that enables irqs,
	 * which must replay interrupts, which recurses in here and makes
	 * things more complicated. The recursion is limited to 2, and it can
	 * be made to work, but it's complicated.
	 *
	 * local_bh_disable cannot be used here because interrupts taken in
	 * idle are not in the right context (RCU, tick, etc) to run softirqs,
	 * so irq_enter must be called.
	 */
	irq_enter();

	replay_soft_interrupts_irqrestore();

	irq_exit();

	if (unlikely(local_paca->irq_happened != PACA_IRQ_HARD_DIS)) {
		/*
		 * The softirq processing in irq_exit() may enable interrupts
		 * temporarily, which can result in MSR[EE] being enabled and
		 * more irqs becoming pending. Go around again if that happens.
		 */
		trace_hardirqs_on();
		preempt_enable_no_resched();
		goto again;
	}

	trace_hardirqs_on();
	irq_soft_mask_set(IRQS_ENABLED);
	local_paca->irq_happened = 0;
	__hard_irq_enable();
	preempt_enable();
}
EXPORT_SYMBOL(arch_local_irq_restore);
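
/*
 * Summary of arch_local_irq_restore()'s unmask paths above (purely
 * descriptive):
 *
 *	irq_happened == 0			fast path: unmask, done
 *	irq_happened == PACA_IRQ_HARD_DIS	nothing latched; clear the
 *						flag and hard enable
 *	anything else				hard disable, replay every
 *						latched interrupt, and loop
 *						if more arrived meanwhile
 */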

/*
 * This is a helper to use when about to go into idle low-power
 * when the latter has the side effect of re-enabling interrupts
 * (such as calling H_CEDE under pHyp).
 *
 * You call this function with interrupts soft-disabled (this is
 * already the case when ppc_md.power_save is called). The function
 * will return whether to enter power save or just return.
 *
 * In the former case, it will have generally sanitized the lazy irq
 * state, and in the latter case it will leave with interrupts hard
 * disabled and marked as such, so the local_irq_enable() call
 * in arch_cpu_idle() will properly re-enable everything.
 */
__cpuidle bool prep_irq_for_idle(void)
{
	/*
	 * First we need to hard disable to ensure no interrupt
	 * occurs before we effectively enter the low power state.
	 */
	__hard_irq_disable();
	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;

	/*
	 * If anything happened while we were soft-disabled,
	 * we return now and do not enter the low power state.
	 */
	if (lazy_irq_pending())
		return false;

	/*
	 * Mark interrupts as soft-enabled and clear the
	 * PACA_IRQ_HARD_DIS from the pending mask since we
	 * are about to hard enable as well as a side effect
	 * of entering the low power state.
	 */
	local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;
	irq_soft_mask_set(IRQS_ENABLED);

	/* Tell the caller to enter the low power state */
	return true;
}

#ifdef CONFIG_PPC_BOOK3S
/*
 * This is for idle sequences that return with IRQs off, but the
 * idle state itself wakes on interrupt. Tell the irq tracer that
 * IRQs are enabled for the duration of idle so it does not get long
 * off times. Must be paired with fini_irq_for_idle_irqsoff.
 */
bool prep_irq_for_idle_irqsoff(void)
{
	WARN_ON(!irqs_disabled());

	/*
	 * First we need to hard disable to ensure no interrupt
	 * occurs before we effectively enter the low power state.
	 */
	__hard_irq_disable();
	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;

	/*
	 * If anything happened while we were soft-disabled,
	 * we return now and do not enter the low power state.
	 */
	if (lazy_irq_pending())
		return false;

	/* Tell lockdep we are about to re-enable */
	trace_hardirqs_on();

	return true;
}

/*
 * Take the SRR1 wakeup reason, index into this table to find the
 * appropriate irq_happened bit.
 *
 * System reset exceptions taken in idle state also come through here,
 * but they are NMI interrupts so do not need to wait for IRQs to be
 * restored, and should be taken as early as practical. These are marked
 * with 0xff in the table. The Power ISA specifies 0100b as the system
 * reset interrupt reason.
 */
#define IRQ_SYSTEM_RESET	0xff

static const u8 srr1_to_lazyirq[0x10] = {
	0, 0, 0,
	PACA_IRQ_DBELL,
	IRQ_SYSTEM_RESET,
	PACA_IRQ_DBELL,
	PACA_IRQ_DEC,
	0,
	PACA_IRQ_EE,
	PACA_IRQ_EE,
	PACA_IRQ_HMI,
	0, 0, 0, 0, 0 };
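
/*
 * Worked example of the lookup (illustrative): a decrementer wakeup
 * reports SRR1[42:45] = 0b0110, so irq_set_pending_from_srr1() below
 * computes
 *
 *	idx = (srr1 & SRR1_WAKEMASK_P8) >> 18 = 0x6
 *	srr1_to_lazyirq[0x6] == PACA_IRQ_DEC
 *
 * and latches PACA_IRQ_DEC for replay once interrupts are re-enabled.
 */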

void replay_system_reset(void)
{
	struct pt_regs regs;

	ppc_save_regs(&regs);
	regs.trap = 0x100;
	get_paca()->in_nmi = 1;
	system_reset_exception(&regs);
	get_paca()->in_nmi = 0;
}
EXPORT_SYMBOL_GPL(replay_system_reset);

void irq_set_pending_from_srr1(unsigned long srr1)
{
	unsigned int idx = (srr1 & SRR1_WAKEMASK_P8) >> 18;
	u8 reason = srr1_to_lazyirq[idx];

	/*
	 * Take the system reset now, which is immediately after registers
	 * are restored from idle. It's an NMI, so interrupts need not be
	 * re-enabled before it is taken.
	 */
	if (unlikely(reason == IRQ_SYSTEM_RESET)) {
		replay_system_reset();
		return;
	}

	if (reason == PACA_IRQ_DBELL) {
		/*
		 * When a doorbell triggers a system reset wakeup, the message
		 * is not cleared, so if the doorbell interrupt is replayed
		 * and the IPI handled, the doorbell interrupt would still
		 * fire when EE is enabled.
		 *
		 * To avoid taking the superfluous doorbell interrupt,
		 * execute a msgclr here before the interrupt is replayed.
		 */
		ppc_msgclr(PPC_DBELL_MSGTYPE);
	}

	/*
	 * The 0 index (SRR1[42:45]=b0000) must always evaluate to 0,
	 * so this can be called unconditionally with the SRR1 wake
	 * reason as returned by the idle code, which uses 0 to mean no
	 * interrupt.
	 *
	 * If a future CPU was to designate this as an interrupt reason,
	 * then a new index for no interrupt must be assigned.
	 */
	local_paca->irq_happened |= reason;
}
#endif /* CONFIG_PPC_BOOK3S */

/*
 * Force a replay of the external interrupt handler on this CPU.
 */
void force_external_irq_replay(void)
{
	/*
	 * This must only be called with interrupts soft-disabled,
	 * the replay will happen when re-enabling.
	 */
	WARN_ON(!arch_irqs_disabled());

	/*
	 * Interrupts must always be hard disabled before irq_happened is
	 * modified (to prevent lost update in case of interrupt between
	 * load and store).
	 */
	__hard_irq_disable();
	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;

	/* Indicate in the PACA that we have an interrupt to replay */
	local_paca->irq_happened |= PACA_IRQ_EE;
}

static int __init setup_noirqdistrib(char *str)
{
	distribute_irqs = 0;
	return 1;
}

__setup("noirqdistrib", setup_noirqdistrib);
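
/*
 * Illustrative use of force_external_irq_replay() (sketch only; the
 * pending-check helper named here is hypothetical): a caller that
 * knows an external interrupt was consumed without being handled can
 * latch a replay so the handler runs at the next unmask.
 *
 *	unsigned long flags;
 *
 *	local_irq_save(flags);
 *	if (controller_irq_still_pending())	// hypothetical check
 *		force_external_irq_replay();
 *	local_irq_restore(flags);		// replay happens here
 */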