// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Support for the interrupt controllers found on Power Macintosh,
 * currently Apple's "Grand Central" interrupt controller in all
 * its incarnations. OpenPIC support used on newer machines is
 * in a separate file
 *
 * Copyright (C) 1997 Paul Mackerras (paulus@samba.org)
 * Copyright (C) 2005 Benjamin Herrenschmidt (benh@kernel.crashing.org)
 *                    IBM, Corp.
 */

#include <linux/stddef.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/syscore_ops.h>
#include <linux/adb.h>
#include <linux/minmax.h>
#include <linux/pmu.h>
#include <linux/irqdomain.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>

#include <asm/sections.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/pci-bridge.h>
#include <asm/time.h>
#include <asm/pmac_feature.h>
#include <asm/mpic.h>
#include <asm/xmon.h>

#include "pmac.h"

#ifdef CONFIG_PPC32
struct pmac_irq_hw {
        unsigned int event;
        unsigned int enable;
        unsigned int ack;
        unsigned int level;
};

/* Workaround flags for 32bit powermac machines */
unsigned int of_irq_workarounds;
struct device_node *of_irq_dflt_pic;

/* Default addresses */
static volatile struct pmac_irq_hw __iomem *pmac_irq_hw[4];

static int max_irqs;
static int max_real_irqs;

static DEFINE_RAW_SPINLOCK(pmac_pic_lock);

/* The max irq number this driver deals with is 128; see max_irqs */
static DECLARE_BITMAP(ppc_lost_interrupts, 128);
static DECLARE_BITMAP(ppc_cached_irq_mask, 128);
static int pmac_irq_cascade = -1;
static struct irq_domain *pmac_pic_host;

static void __pmac_retrigger(unsigned int irq_nr)
{
        if (irq_nr >= max_real_irqs && pmac_irq_cascade > 0) {
                __set_bit(irq_nr, ppc_lost_interrupts);
                irq_nr = pmac_irq_cascade;
                mb();
        }
        if (!__test_and_set_bit(irq_nr, ppc_lost_interrupts)) {
                atomic_inc(&ppc_n_lost_interrupts);
                set_dec(1);
        }
}

static void pmac_mask_and_ack_irq(struct irq_data *d)
{
        unsigned int src = irqd_to_hwirq(d);
        unsigned long bit = 1UL << (src & 0x1f);
        int i = src >> 5;
        unsigned long flags;

        raw_spin_lock_irqsave(&pmac_pic_lock, flags);
        __clear_bit(src, ppc_cached_irq_mask);
        if (__test_and_clear_bit(src, ppc_lost_interrupts))
                atomic_dec(&ppc_n_lost_interrupts);
        out_le32(&pmac_irq_hw[i]->enable, ppc_cached_irq_mask[i]);
        out_le32(&pmac_irq_hw[i]->ack, bit);
        do {
                /* make sure ack gets to controller before we enable
                   interrupts */
                mb();
        } while((in_le32(&pmac_irq_hw[i]->enable) & bit)
                != (ppc_cached_irq_mask[i] & bit));
        raw_spin_unlock_irqrestore(&pmac_pic_lock, flags);
}

static void pmac_ack_irq(struct irq_data *d)
{
        unsigned int src = irqd_to_hwirq(d);
        unsigned long bit = 1UL << (src & 0x1f);
        int i = src >> 5;
        unsigned long flags;

        raw_spin_lock_irqsave(&pmac_pic_lock, flags);
        if (__test_and_clear_bit(src, ppc_lost_interrupts))
                atomic_dec(&ppc_n_lost_interrupts);
        out_le32(&pmac_irq_hw[i]->ack, bit);
        (void)in_le32(&pmac_irq_hw[i]->ack);
        raw_spin_unlock_irqrestore(&pmac_pic_lock, flags);
}

static void __pmac_set_irq_mask(unsigned int irq_nr, int nokicklost)
{
        unsigned long bit = 1UL << (irq_nr & 0x1f);
        int i = irq_nr >> 5;

        if ((unsigned)irq_nr >= max_irqs)
                return;

        /* enable unmasked interrupts */
        out_le32(&pmac_irq_hw[i]->enable, ppc_cached_irq_mask[i]);

        do {
                /* make sure mask gets to controller before we
                   return to user */
                mb();
        } while((in_le32(&pmac_irq_hw[i]->enable) & bit)
                != (ppc_cached_irq_mask[i] & bit));

        /*
         * Unfortunately, setting the bit in the enable register
         * when the device interrupt is already on *doesn't* set
         * the bit in the flag register or request another interrupt.
         */
        if (bit & ppc_cached_irq_mask[i] & in_le32(&pmac_irq_hw[i]->level))
                __pmac_retrigger(irq_nr);
}

/* When an irq gets requested for the first client, if it's an
 * edge interrupt, we clear any previous one on the controller
 */
static unsigned int pmac_startup_irq(struct irq_data *d)
{
        unsigned long flags;
        unsigned int src = irqd_to_hwirq(d);
        unsigned long bit = 1UL << (src & 0x1f);
        int i = src >> 5;

        raw_spin_lock_irqsave(&pmac_pic_lock, flags);
        if (!irqd_is_level_type(d))
                out_le32(&pmac_irq_hw[i]->ack, bit);
        __set_bit(src, ppc_cached_irq_mask);
        __pmac_set_irq_mask(src, 0);
        raw_spin_unlock_irqrestore(&pmac_pic_lock, flags);

        return 0;
}

static void pmac_mask_irq(struct irq_data *d)
{
        unsigned long flags;
        unsigned int src = irqd_to_hwirq(d);

        raw_spin_lock_irqsave(&pmac_pic_lock, flags);
        __clear_bit(src, ppc_cached_irq_mask);
        __pmac_set_irq_mask(src, 1);
        raw_spin_unlock_irqrestore(&pmac_pic_lock, flags);
}

static void pmac_unmask_irq(struct irq_data *d)
{
        unsigned long flags;
        unsigned int src = irqd_to_hwirq(d);

        raw_spin_lock_irqsave(&pmac_pic_lock, flags);
        __set_bit(src, ppc_cached_irq_mask);
        __pmac_set_irq_mask(src, 0);
        raw_spin_unlock_irqrestore(&pmac_pic_lock, flags);
}

static int pmac_retrigger(struct irq_data *d)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&pmac_pic_lock, flags);
        __pmac_retrigger(irqd_to_hwirq(d));
        raw_spin_unlock_irqrestore(&pmac_pic_lock, flags);
        return 1;
}

static struct irq_chip pmac_pic = {
        .name = "PMAC-PIC",
        .irq_startup = pmac_startup_irq,
        .irq_mask = pmac_mask_irq,
        .irq_ack = pmac_ack_irq,
        .irq_mask_ack = pmac_mask_and_ack_irq,
        .irq_unmask = pmac_unmask_irq,
        .irq_retrigger = pmac_retrigger,
};

static irqreturn_t gatwick_action(int cpl, void *dev_id)
{
        unsigned long flags;
        int irq, bits;
        int rc = IRQ_NONE;

        raw_spin_lock_irqsave(&pmac_pic_lock, flags);
        for (irq = max_irqs; (irq -= 32) >= max_real_irqs; ) {
                int i = irq >> 5;
                bits = in_le32(&pmac_irq_hw[i]->event) | ppc_lost_interrupts[i];
                bits |= in_le32(&pmac_irq_hw[i]->level);
                bits &= ppc_cached_irq_mask[i];
                if (bits == 0)
                        continue;
                irq += __ilog2(bits);
                raw_spin_unlock_irqrestore(&pmac_pic_lock, flags);
                generic_handle_irq(irq);
                raw_spin_lock_irqsave(&pmac_pic_lock, flags);
                rc = IRQ_HANDLED;
        }
        raw_spin_unlock_irqrestore(&pmac_pic_lock, flags);
        return rc;
}

static unsigned int pmac_pic_get_irq(void)
{
        int irq;
        unsigned long bits = 0;
        unsigned long flags;

#ifdef CONFIG_PPC_PMAC32_PSURGE
        /* IPI's are a hack on the powersurge -- Cort */
        if (smp_processor_id() != 0) {
                return psurge_secondary_virq;
        }
#endif /* CONFIG_PPC_PMAC32_PSURGE */
        raw_spin_lock_irqsave(&pmac_pic_lock, flags);
        for (irq = max_real_irqs; (irq -= 32) >= 0; ) {
                int i = irq >> 5;
                bits = in_le32(&pmac_irq_hw[i]->event) | ppc_lost_interrupts[i];
                bits |= in_le32(&pmac_irq_hw[i]->level);
                bits &= ppc_cached_irq_mask[i];
                if (bits == 0)
                        continue;
                irq += __ilog2(bits);
                break;
        }
        raw_spin_unlock_irqrestore(&pmac_pic_lock, flags);
        if (unlikely(irq < 0))
                return 0;
        return irq_linear_revmap(pmac_pic_host, irq);
}

static int pmac_pic_host_match(struct irq_domain *h, struct device_node *node,
                               enum irq_domain_bus_token bus_token)
{
        /* We match all, we don't always have a node anyway */
        return 1;
}

static int pmac_pic_host_map(struct irq_domain *h, unsigned int virq,
                             irq_hw_number_t hw)
{
        if (hw >= max_irqs)
                return -EINVAL;

        /* Mark level interrupts, set delayed disable for edge ones and set
         * handlers
         */
        irq_set_status_flags(virq, IRQ_LEVEL);
        irq_set_chip_and_handler(virq, &pmac_pic, handle_level_irq);
        return 0;
}

static const struct irq_domain_ops pmac_pic_host_ops = {
        .match = pmac_pic_host_match,
        .map = pmac_pic_host_map,
        .xlate = irq_domain_xlate_onecell,
};

static void __init pmac_pic_probe_oldstyle(void)
{
        int i;
        struct device_node *master = NULL;
        struct device_node *slave = NULL;
        u8 __iomem *addr;
        struct resource r;

        /* Set our get_irq function */
        ppc_md.get_irq = pmac_pic_get_irq;

        /*
         * Find the interrupt controller type & node
         */

        if ((master = of_find_node_by_name(NULL, "gc")) != NULL) {
                max_irqs = max_real_irqs = 32;
        } else if ((master = of_find_node_by_name(NULL, "ohare")) != NULL) {
                max_irqs = max_real_irqs = 32;
                /* We might have a second cascaded ohare */
                slave = of_find_node_by_name(NULL, "pci106b,7");
                if (slave)
                        max_irqs = 64;
        } else if ((master = of_find_node_by_name(NULL, "mac-io")) != NULL) {
                max_irqs = max_real_irqs = 64;

                /* We might have a second cascaded heathrow */

                /* Compensate for of_node_put() in of_find_node_by_name() */
                of_node_get(master);
                slave = of_find_node_by_name(master, "mac-io");

                /* Check ordering of master & slave */
                if (of_device_is_compatible(master, "gatwick")) {
                        BUG_ON(slave == NULL);
                        swap(master, slave);
                }

                /* We found a slave */
                if (slave)
                        max_irqs = 128;
        }
        BUG_ON(master == NULL);

        /*
         * Allocate an irq host
         */
        pmac_pic_host = irq_domain_add_linear(master, max_irqs,
                                              &pmac_pic_host_ops, NULL);
        BUG_ON(pmac_pic_host == NULL);
        irq_set_default_host(pmac_pic_host);

        /* Get addresses of first controller if we have a node for it */
        BUG_ON(of_address_to_resource(master, 0, &r));

        /* Map interrupts of primary controller */
        addr = (u8 __iomem *) ioremap(r.start, 0x40);
        i = 0;
        pmac_irq_hw[i++] = (volatile struct pmac_irq_hw __iomem *)
                (addr + 0x20);
        if (max_real_irqs > 32)
                pmac_irq_hw[i++] = (volatile struct pmac_irq_hw __iomem *)
                        (addr + 0x10);
        of_node_put(master);

        printk(KERN_INFO "irq: Found primary Apple PIC %pOF for %d irqs\n",
               master, max_real_irqs);

        /* Map interrupts of cascaded controller */
        if (slave && !of_address_to_resource(slave, 0, &r)) {
                addr = (u8 __iomem *)ioremap(r.start, 0x40);
                pmac_irq_hw[i++] = (volatile struct pmac_irq_hw __iomem *)
                        (addr + 0x20);
                if (max_irqs > 64)
                        pmac_irq_hw[i++] =
                                (volatile struct pmac_irq_hw __iomem *)
                                (addr + 0x10);
                pmac_irq_cascade = irq_of_parse_and_map(slave, 0);

                printk(KERN_INFO "irq: Found slave Apple PIC %pOF for %d irqs"
                       " cascade: %d\n", slave,
                       max_irqs - max_real_irqs, pmac_irq_cascade);
        }
        of_node_put(slave);

        /* Disable all interrupts in all controllers */
        for (i = 0; i * 32 < max_irqs; ++i)
                out_le32(&pmac_irq_hw[i]->enable, 0);

        /* Hookup cascade irq */
        if (slave && pmac_irq_cascade) {
                if (request_irq(pmac_irq_cascade, gatwick_action,
                                IRQF_NO_THREAD, "cascade", NULL))
                        pr_err("Failed to register cascade interrupt\n");
        }

        printk(KERN_INFO "irq: System has %d possible interrupts\n", max_irqs);
#ifdef CONFIG_XMON
        i = irq_create_mapping(NULL, 20);
        if (request_irq(i, xmon_irq, IRQF_NO_THREAD, "NMI - XMON", NULL))
                pr_err("Failed to register NMI-XMON interrupt\n");
#endif
}

int of_irq_parse_oldworld(const struct device_node *device, int index,
                          struct of_phandle_args *out_irq)
{
        const u32 *ints = NULL;
        int intlen;

        /*
         * Old machines just have a list of interrupt numbers
         * and no interrupt-controller nodes. We also have dodgy
         * cases where the AAPL,interrupts property is completely
         * missing behind pci-pci bridges and we have to get it
         * from the parent (the bridge itself, as apple just wired
         * everything together on these)
         */
        while (device) {
                ints = of_get_property(device, "AAPL,interrupts", &intlen);
                if (ints != NULL)
                        break;
                device = device->parent;
                if (!of_node_is_type(device, "pci"))
                        break;
        }
        if (ints == NULL)
                return -EINVAL;
        intlen /= sizeof(u32);

        if (index >= intlen)
                return -EINVAL;

        out_irq->np = NULL;
        out_irq->args[0] = ints[index];
        out_irq->args_count = 1;

        return 0;
}
#endif /* CONFIG_PPC32 */

static void __init pmac_pic_setup_mpic_nmi(struct mpic *mpic)
{
#if defined(CONFIG_XMON) && defined(CONFIG_PPC32)
        struct device_node* pswitch;
        int nmi_irq;

        pswitch = of_find_node_by_name(NULL, "programmer-switch");
        if (pswitch) {
                nmi_irq = irq_of_parse_and_map(pswitch, 0);
                if (nmi_irq) {
                        mpic_irq_set_priority(nmi_irq, 9);
                        if (request_irq(nmi_irq, xmon_irq, IRQF_NO_THREAD,
                                        "NMI - XMON", NULL))
                                pr_err("Failed to register NMI-XMON interrupt\n");
                }
                of_node_put(pswitch);
        }
#endif /* defined(CONFIG_XMON) && defined(CONFIG_PPC32) */
}

static struct mpic * __init pmac_setup_one_mpic(struct device_node *np,
                                                int master)
{
        const char *name = master ? " MPIC 1 " : " MPIC 2 ";
        struct mpic *mpic;
        unsigned int flags = master ? 0 : MPIC_SECONDARY;

        pmac_call_feature(PMAC_FTR_ENABLE_MPIC, np, 0, 0);

        if (of_property_read_bool(np, "big-endian"))
                flags |= MPIC_BIG_ENDIAN;

        /* Primary Big Endian means HT interrupts. This is quite dodgy
         * but works until I find a better way
         */
        if (master && (flags & MPIC_BIG_ENDIAN))
                flags |= MPIC_U3_HT_IRQS;

        mpic = mpic_alloc(np, 0, flags, 0, 0, name);
        if (mpic == NULL)
                return NULL;

        mpic_init(mpic);

        return mpic;
}

static int __init pmac_pic_probe_mpic(void)
{
        struct mpic *mpic1, *mpic2;
        struct device_node *np, *master = NULL, *slave = NULL;

        /* We can have up to 2 MPICs cascaded */
        for_each_node_by_type(np, "open-pic") {
                if (master == NULL && !of_property_present(np, "interrupts"))
                        master = of_node_get(np);
                else if (slave == NULL)
                        slave = of_node_get(np);
                if (master && slave) {
                        of_node_put(np);
                        break;
                }
        }

        /* Check for bogus setups */
        if (master == NULL && slave != NULL) {
                master = slave;
                slave = NULL;
        }

        /* Not found, default to good old pmac pic */
        if (master == NULL)
                return -ENODEV;

        /* Set master handler */
        ppc_md.get_irq = mpic_get_irq;

        /* Setup master */
        mpic1 = pmac_setup_one_mpic(master, 1);
        BUG_ON(mpic1 == NULL);

        /* Install NMI if any */
        pmac_pic_setup_mpic_nmi(mpic1);

        of_node_put(master);

        /* Set up a cascaded controller, if present */
        if (slave) {
                mpic2 = pmac_setup_one_mpic(slave, 0);
                if (mpic2 == NULL)
                        printk(KERN_ERR "Failed to setup slave MPIC\n");
                of_node_put(slave);
        }

        return 0;
}


void __init pmac_pic_init(void)
{
        /* We configure the OF parsing based on our oldworld vs. newworld
         * platform type and whether we were booted by BootX.
         */
#ifdef CONFIG_PPC32
        if (!pmac_newworld)
                of_irq_workarounds |= OF_IMAP_OLDWORLD_MAC;
        if (of_property_read_bool(of_chosen, "linux,bootx"))
                of_irq_workarounds |= OF_IMAP_NO_PHANDLE;

        /* If we don't have phandles on a newworld, then try to locate a
         * default interrupt controller (happens when booting with BootX).
         * We do a first match here, hopefully, that only ever happens on
         * machines with one controller.
         */
        if (pmac_newworld && (of_irq_workarounds & OF_IMAP_NO_PHANDLE)) {
                struct device_node *np;

                for_each_node_with_property(np, "interrupt-controller") {
                        /* Skip /chosen/interrupt-controller */
                        if (of_node_name_eq(np, "chosen"))
                                continue;
                        /* It seems like at least one person wants
                         * to use BootX on a machine with an AppleKiwi
                         * controller which happens to pretend to be an
                         * interrupt controller too. */
                        if (of_node_name_eq(np, "AppleKiwi"))
                                continue;
                        /* I think we found one ! */
                        of_irq_dflt_pic = np;
                        break;
                }
        }
#endif /* CONFIG_PPC32 */

        /* We first try to detect Apple's new Core99 chipset, since mac-io
         * is quite different on those machines and contains an IBM MPIC2.
         */
        if (pmac_pic_probe_mpic() == 0)
                return;

#ifdef CONFIG_PPC32
        pmac_pic_probe_oldstyle();
#endif
}

#if defined(CONFIG_PM) && defined(CONFIG_PPC32)
/*
 * These procedures are used in implementing sleep on the powerbooks.
 * sleep_save_intrs() saves the states of all interrupt enables
 * and disables all interrupts except for the nominated one.
 * sleep_restore_intrs() restores the states of all interrupt enables.
 */
unsigned long sleep_save_mask[2];

/* This used to be passed by the PMU driver but that link got
 * broken with the new driver model. We use this tweak for now...
 * We really want to do things differently though...
 */
static int pmacpic_find_viaint(void)
{
        int viaint = -1;

#ifdef CONFIG_ADB_PMU
        struct device_node *np;

        if (pmu_get_model() != PMU_OHARE_BASED)
                goto not_found;
        np = of_find_node_by_name(NULL, "via-pmu");
        if (np == NULL)
                goto not_found;
        viaint = irq_of_parse_and_map(np, 0);
        of_node_put(np);

 not_found:
#endif /* CONFIG_ADB_PMU */
        return viaint;
}

static int pmacpic_suspend(void)
{
        int viaint = pmacpic_find_viaint();

        sleep_save_mask[0] = ppc_cached_irq_mask[0];
        sleep_save_mask[1] = ppc_cached_irq_mask[1];
        ppc_cached_irq_mask[0] = 0;
        ppc_cached_irq_mask[1] = 0;
        if (viaint > 0)
                set_bit(viaint, ppc_cached_irq_mask);
        out_le32(&pmac_irq_hw[0]->enable, ppc_cached_irq_mask[0]);
        if (max_real_irqs > 32)
                out_le32(&pmac_irq_hw[1]->enable, ppc_cached_irq_mask[1]);
        (void)in_le32(&pmac_irq_hw[0]->event);
        /* make sure mask gets to controller before we return to caller */
        mb();
        (void)in_le32(&pmac_irq_hw[0]->enable);

        return 0;
}

static void pmacpic_resume(void)
{
        int i;

        out_le32(&pmac_irq_hw[0]->enable, 0);
        if (max_real_irqs > 32)
                out_le32(&pmac_irq_hw[1]->enable, 0);
        mb();
        for (i = 0; i < max_real_irqs; ++i)
                if (test_bit(i, sleep_save_mask))
                        pmac_unmask_irq(irq_get_irq_data(i));
}

static struct syscore_ops pmacpic_syscore_ops = {
        .suspend = pmacpic_suspend,
        .resume = pmacpic_resume,
};

static int __init init_pmacpic_syscore(void)
{
        if (pmac_irq_hw[0])
                register_syscore_ops(&pmacpic_syscore_ops);
        return 0;
}

machine_subsys_initcall(powermac, init_pmacpic_syscore);

#endif /* CONFIG_PM && CONFIG_PPC32 */