// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <asm/traps.h>
#include <asm/smp.h>

void __init init_IRQ(void)
{
	irqchip_init();
#ifdef CONFIG_SMP
	setup_smp_ipi();
#endif
}

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Code to handle x86 style IRQs plus some generic interrupt stuff.
 *
 * Copyright (C) 1992 Linus Torvalds
 * Copyright (C) 1994 - 2000 Ralf Baechle
 */
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/kallsyms.h>

#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/uaccess.h>

/*
 * Controller mappings for all interrupt sources:
 */
irq_desc_t irq_desc[NR_IRQS] __cacheline_aligned = {
	[0 ... NR_IRQS-1] = {
		.handler = &no_irq_type,
		.lock = SPIN_LOCK_UNLOCKED
	}
};

static void register_irq_proc(unsigned int irq);

/*
 * Special irq handlers.
 */

irqreturn_t no_action(int cpl, void *dev_id, struct pt_regs *regs)
{ return IRQ_NONE; }

/*
 * Generic no controller code
 */

static void enable_none(unsigned int irq) { }
static unsigned int startup_none(unsigned int irq) { return 0; }
static void disable_none(unsigned int irq) { }
static void ack_none(unsigned int irq)
{
	/*
	 * 'what should we do if we get a hw irq event on an illegal vector?'
	 * Each architecture has to answer this itself; it doesn't deserve
	 * a generic callback, I think.
	 */
	printk("unexpected interrupt %d\n", irq);
}

/* startup is the same as "enable", shutdown is same as "disable" */
#define shutdown_none	disable_none
#define end_none	enable_none

struct hw_interrupt_type no_irq_type = {
	"none",
	startup_none,
	shutdown_none,
	enable_none,
	disable_none,
	ack_none,
	end_none
};

atomic_t irq_err_count;
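/*
 * Illustrative sketch, not part of the original file: what a real
 * architecture supplies in place of no_irq_type.  The controller
 * ("hypothetical-pic") and its single mask register are invented, but the
 * shape mirrors the mask/ack/end split the descriptors above stub out.
 */
#if 0	/* example only */
static volatile unsigned long hypothetical_pic_mask = ~0UL;	/* all masked */

static void hypothetical_pic_enable(unsigned int irq)
{
	hypothetical_pic_mask &= ~(1UL << irq);	/* unmask the line */
}

static void hypothetical_pic_disable(unsigned int irq)
{
	hypothetical_pic_mask |= 1UL << irq;	/* mask the line */
}

static unsigned int hypothetical_pic_startup(unsigned int irq)
{
	hypothetical_pic_enable(irq);
	return 0;	/* 0: no interrupt was already pending */
}

static void hypothetical_pic_ack(unsigned int irq)
{
	hypothetical_pic_disable(irq);	/* mask+ack: quiet until ->end() */
}

static void hypothetical_pic_end(unsigned int irq)
{
	/* ->end() must honour disables done while the handler ran */
	if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS)))
		hypothetical_pic_enable(irq);
}

static struct hw_interrupt_type hypothetical_pic_type = {
	"hypothetical-pic",
	hypothetical_pic_startup,
	hypothetical_pic_disable,	/* shutdown == disable here, too */
	hypothetical_pic_enable,
	hypothetical_pic_disable,
	hypothetical_pic_ack,
	hypothetical_pic_end
};
#endif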
/*
 * Generic, controller-independent functions:
 */

int show_interrupts(struct seq_file *p, void *v)
{
	int i, j;
	struct irqaction *action;
	unsigned long flags;

	seq_printf(p, "           ");
	for (j = 0; j < NR_CPUS; j++)
		if (cpu_online(j))
			seq_printf(p, "CPU%d       ", j);
	seq_putc(p, '\n');

	for (i = 0; i < NR_IRQS; i++) {
		spin_lock_irqsave(&irq_desc[i].lock, flags);
		action = irq_desc[i].action;
		if (!action)
			goto skip;
		seq_printf(p, "%3d: ", i);
#ifndef CONFIG_SMP
		seq_printf(p, "%10u ", kstat_irqs(i));
#else
		for (j = 0; j < NR_CPUS; j++)
			if (cpu_online(j))
				seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
#endif
		seq_printf(p, " %14s", irq_desc[i].handler->typename);
		seq_printf(p, "  %s", action->name);

		for (action = action->next; action; action = action->next)
			seq_printf(p, ", %s", action->name);

		seq_putc(p, '\n');
skip:
		spin_unlock_irqrestore(&irq_desc[i].lock, flags);
	}
	seq_putc(p, '\n');
	seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));

	return 0;
}

#ifdef CONFIG_SMP
inline void synchronize_irq(unsigned int irq)
{
	while (irq_desc[irq].status & IRQ_INPROGRESS)
		cpu_relax();
}
#endif
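/*
 * Illustrative sketch, not part of the original file: the classic use of
 * synchronize_irq() in a driver teardown path.  The flag makes the handler
 * bail out, and the wait guarantees no CPU is still inside the handler
 * before shared state is freed.  struct hypo_dev and its fields are
 * invented for this and the following sketches.
 */
#if 0	/* example only */
struct hypo_dev {
	int		irq;
	unsigned long	regs;		/* invented register window */
	volatile int	shutting_down;
	void		*rx_ring;
};

static void hypo_dev_stop(struct hypo_dev *dev)
{
	dev->shutting_down = 1;		/* handler checks this and returns */
	synchronize_irq(dev->irq);	/* wait out any running handler */
	kfree(dev->rx_ring);		/* now no handler can touch it */
	dev->rx_ring = NULL;
}
#endif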
/*
 * This should really return information about whether
 * we should do bottom half handling etc. Right now we
 * end up _always_ checking the bottom half, which is a
 * waste of time and is not what some drivers would
 * prefer.
 */
int handle_IRQ_event(unsigned int irq, struct pt_regs *regs, struct irqaction *action)
{
	int status = 1;	/* Force the "do bottom halves" bit */
	int retval = 0;

	if (!(action->flags & SA_INTERRUPT))
		local_irq_enable();

	do {
		status |= action->flags;
		retval |= action->handler(irq, action->dev_id, regs);
		action = action->next;
	} while (action);
	if (status & SA_SAMPLE_RANDOM)
		add_interrupt_randomness(irq);
	local_irq_disable();

	return retval;
}

static void __report_bad_irq(int irq, irq_desc_t *desc, irqreturn_t action_ret)
{
	struct irqaction *action;

	if (action_ret != IRQ_HANDLED && action_ret != IRQ_NONE) {
		printk(KERN_ERR "irq event %d: bogus return value %x\n",
				irq, action_ret);
	} else {
		printk(KERN_ERR "irq %d: nobody cared!\n", irq);
	}
	dump_stack();
	printk(KERN_ERR "handlers:\n");
	action = desc->action;
	do {
		printk(KERN_ERR "[<%p>]", action->handler);
		print_symbol(" (%s)", (unsigned long)action->handler);
		printk("\n");
		action = action->next;
	} while (action);
}

static void report_bad_irq(int irq, irq_desc_t *desc, irqreturn_t action_ret)
{
	static int count = 100;

	if (count) {
		count--;
		__report_bad_irq(irq, desc, action_ret);
	}
}

static int noirqdebug;

static int __init noirqdebug_setup(char *str)
{
	noirqdebug = 1;
	printk("IRQ lockup detection disabled\n");
	return 1;
}

__setup("noirqdebug", noirqdebug_setup);

/*
 * If 99,900 of the previous 100,000 interrupts have not been handled then
 * assume that the IRQ is stuck in some manner. Drop a diagnostic and try to
 * turn the IRQ off.
 *
 * (The other 100-of-100,000 interrupts may have been a correctly-functioning
 * device sharing an IRQ with the failing one)
 *
 * Called under desc->lock
 */
static void note_interrupt(int irq, irq_desc_t *desc, irqreturn_t action_ret)
{
	if (action_ret != IRQ_HANDLED) {
		desc->irqs_unhandled++;
		if (action_ret != IRQ_NONE)
			report_bad_irq(irq, desc, action_ret);
	}

	desc->irq_count++;
	if (desc->irq_count < 100000)
		return;

	desc->irq_count = 0;
	if (desc->irqs_unhandled > 99900) {
		/*
		 * The interrupt is stuck
		 */
		__report_bad_irq(irq, desc, action_ret);
		/*
		 * Now kill the IRQ
		 */
		printk(KERN_EMERG "Disabling IRQ #%d\n", irq);
		desc->status |= IRQ_DISABLED;
		desc->handler->disable(irq);
	}
	desc->irqs_unhandled = 0;
}
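/*
 * Illustrative sketch, reusing the invented hypo_dev from above: what the
 * accounting in note_interrupt() expects of a handler.  HYPO_STATUS and
 * HYPO_IRQ_PENDING are invented register details.
 */
#if 0	/* example only */
#define HYPO_STATUS		0x00	/* status register offset */
#define HYPO_IRQ_PENDING	0x01	/* "interrupt pending" bit */

static irqreturn_t hypo_irq_handler(int irq, void *dev_id, struct pt_regs *regs)
{
	struct hypo_dev *dev = dev_id;

	if (!(readl(dev->regs + HYPO_STATUS) & HYPO_IRQ_PENDING))
		return IRQ_NONE;	/* not ours: normal on a shared line */

	writel(HYPO_IRQ_PENDING, dev->regs + HYPO_STATUS);	/* ack device */
	return IRQ_HANDLED;	/* returning IRQ_NONE on every event would
				 * pass the 99,900/100,000 threshold above
				 * and get the line disabled as stuck */
}
#endif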
/*
 * Generic enable/disable code: this just calls
 * down into the PIC-specific version for the actual
 * hardware disable after having gotten the irq
 * controller lock.
 */

/**
 *	disable_irq_nosync - disable an irq without waiting
 *	@irq: Interrupt to disable
 *
 *	Disable the selected interrupt line.  Disables nest: the line stays
 *	off until every disable has been undone.  Unlike disable_irq(), this
 *	function does not ensure existing instances of the IRQ handler have
 *	completed before returning.
 *
 *	This function may be called from IRQ context.
 */
void inline disable_irq_nosync(unsigned int irq)
{
	irq_desc_t *desc = irq_desc + irq;
	unsigned long flags;

	spin_lock_irqsave(&desc->lock, flags);
	if (!desc->depth++) {
		desc->status |= IRQ_DISABLED;
		desc->handler->disable(irq);
	}
	spin_unlock_irqrestore(&desc->lock, flags);
}

/**
 *	disable_irq - disable an irq and wait for completion
 *	@irq: Interrupt to disable
 *
 *	Disable the selected interrupt line.  Disables nest: for two
 *	disables you need two enables.  This function waits for any pending
 *	IRQ handlers for this interrupt to complete before returning.  If
 *	you use this function while holding a resource the IRQ handler may
 *	need you will deadlock.
 *
 *	This function may be called - with care - from IRQ context.
 */
void disable_irq(unsigned int irq)
{
	disable_irq_nosync(irq);
	synchronize_irq(irq);
}

/**
 *	enable_irq - enable interrupt handling on an irq
 *	@irq: Interrupt to enable
 *
 *	Re-enables the processing of interrupts on this IRQ line
 *	providing no disable_irq calls are now in effect.
 *
 *	This function may be called from IRQ context.
 */
void enable_irq(unsigned int irq)
{
	irq_desc_t *desc = irq_desc + irq;
	unsigned long flags;

	spin_lock_irqsave(&desc->lock, flags);
	switch (desc->depth) {
	case 1: {
		unsigned int status = desc->status & ~IRQ_DISABLED;
		desc->status = status;
		if ((status & (IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) {
			desc->status = status | IRQ_REPLAY;
			hw_resend_irq(desc->handler, irq);
		}
		desc->handler->enable(irq);
		/* fall-through */
	}
	default:
		desc->depth--;
		break;
	case 0:
		printk("enable_irq(%u) unbalanced from %p\n", irq,
		       __builtin_return_address(0));
	}
	spin_unlock_irqrestore(&desc->lock, flags);
}
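/*
 * Illustrative sketch: disables nest, so every disable_irq() needs its own
 * enable_irq().  hypo_dev_load_parameters() is invented; note the caller
 * must not hold anything the handler might need (see the deadlock warning
 * above).
 */
#if 0	/* example only */
static void hypo_dev_reconfigure(struct hypo_dev *dev)
{
	disable_irq(dev->irq);	/* depth 0 -> 1: mask line, wait for handler */
	disable_irq(dev->irq);	/* depth 1 -> 2: no hardware change */

	hypo_dev_load_parameters(dev);

	enable_irq(dev->irq);	/* depth 2 -> 1: line stays masked */
	enable_irq(dev->irq);	/* depth 1 -> 0: line unmasked again */
}
#endif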
/*
 * do_IRQ handles all normal device IRQ's (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 */
asmlinkage unsigned int do_IRQ(int irq, struct pt_regs *regs)
{
	/*
	 * We ack quickly, we don't want the irq controller
	 * thinking we're snobs just because some other CPU has
	 * disabled global interrupts (we have already done the
	 * INT_ACK cycles, it's too late to try to pretend to the
	 * controller that we aren't taking the interrupt).
	 *
	 * 0 return value means that this irq is already being
	 * handled by some other CPU. (or is disabled)
	 */
	irq_desc_t *desc = irq_desc + irq;
	struct irqaction *action;
	unsigned int status;

	irq_enter();
	kstat_this_cpu.irqs[irq]++;
	spin_lock(&desc->lock);
	desc->handler->ack(irq);
	/*
	 * REPLAY is when Linux resends an IRQ that was dropped earlier.
	 * WAITING is used by probe to mark irqs that are being tested.
	 */
	status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
	status |= IRQ_PENDING; /* we _want_ to handle it */

	/*
	 * If the IRQ is disabled for whatever reason, we cannot
	 * use the action we have.
	 */
	action = NULL;
	if (likely(!(status & (IRQ_DISABLED | IRQ_INPROGRESS)))) {
		action = desc->action;
		status &= ~IRQ_PENDING; /* we commit to handling */
		status |= IRQ_INPROGRESS; /* we are handling it */
	}
	desc->status = status;

	/*
	 * If there is no IRQ handler or it was disabled, exit early.
	 * Since we set PENDING, if another processor is handling
	 * a different instance of this same irq, the other processor
	 * will take care of it.
	 */
	if (unlikely(!action))
		goto out;

	/*
	 * Edge triggered interrupts need to remember
	 * pending events.
	 * This applies to any hw interrupts that allow a second
	 * instance of the same irq to arrive while we are in do_IRQ
	 * or in the handler. But the code here only handles the _second_
	 * instance of the irq, not the third or fourth. So it is mostly
	 * useful for irq hardware that does not mask cleanly in an
	 * SMP environment.
	 */
	for (;;) {
		irqreturn_t action_ret;

		spin_unlock(&desc->lock);
		action_ret = handle_IRQ_event(irq, regs, action);
		spin_lock(&desc->lock);
		if (!noirqdebug)
			note_interrupt(irq, desc, action_ret);
		if (likely(!(desc->status & IRQ_PENDING)))
			break;
		desc->status &= ~IRQ_PENDING;
	}
	desc->status &= ~IRQ_INPROGRESS;

out:
	/*
	 * The ->end() handler has to deal with interrupts which got
	 * disabled while the handler was running.
	 */
	desc->handler->end(irq);
	spin_unlock(&desc->lock);

	irq_exit();

	return 1;
}

/**
 *	request_irq - allocate an interrupt line
 *	@irq: Interrupt line to allocate
 *	@handler: Function to be called when the IRQ occurs
 *	@irqflags: Interrupt type flags
 *	@devname: An ascii name for the claiming device
 *	@dev_id: A cookie passed back to the handler function
 *
 *	This call allocates interrupt resources and enables the
 *	interrupt line and IRQ handling. From the point this
 *	call is made your handler function may be invoked. Since
 *	your handler function must clear any interrupt the board
 *	raises, you must take care both to initialise your hardware
 *	and to set up the interrupt handler in the right order.
 *
 *	Dev_id must be globally unique. Normally the address of the
 *	device data structure is used as the cookie. Since the handler
 *	receives this value it makes sense to use it.
 *
 *	If your interrupt is shared you must pass a non NULL dev_id
 *	as this is required when freeing the interrupt.
 *
 *	Flags:
 *
 *	SA_SHIRQ		Interrupt is shared
 *
 *	SA_INTERRUPT		Disable local interrupts while processing
 *
 *	SA_SAMPLE_RANDOM	The interrupt can be used for entropy
 */
int request_irq(unsigned int irq,
		irqreturn_t (*handler)(int, void *, struct pt_regs *),
		unsigned long irqflags,
		const char *devname,
		void *dev_id)
{
	int retval;
	struct irqaction *action;

#if 1
	/*
	 * Sanity-check: shared interrupts should REALLY pass in
	 * a real dev-ID, otherwise we'll have trouble later trying
	 * to figure out which interrupt is which (messes up the
	 * interrupt freeing logic etc).
	 */
	if (irqflags & SA_SHIRQ) {
		if (!dev_id)
			printk("Bad boy: %s (at 0x%x) called us without a dev_id!\n",
			       devname, (&irq)[-1]);
	}
#endif

	if (irq >= NR_IRQS)
		return -EINVAL;
	if (!handler)
		return -EINVAL;

	action = (struct irqaction *)
			kmalloc(sizeof(struct irqaction), GFP_ATOMIC);
	if (!action)
		return -ENOMEM;

	action->handler = handler;
	action->flags = irqflags;
	action->mask = 0;
	action->name = devname;
	action->next = NULL;
	action->dev_id = dev_id;

	retval = setup_irq(irq, action);
	if (retval)
		kfree(action);
	return retval;
}

EXPORT_SYMBOL(request_irq);
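/*
 * Illustrative sketch: claiming a shared line with the flags documented
 * above, using the invented hypo_irq_handler from an earlier sketch.
 * hypo_dev_unmask_interrupts() is invented as well.
 */
#if 0	/* example only */
static int hypo_dev_attach(struct hypo_dev *dev)
{
	int err;

	/* SA_SHIRQ: sharing requires a unique, non-NULL dev_id cookie */
	err = request_irq(dev->irq, hypo_irq_handler,
			  SA_SHIRQ | SA_SAMPLE_RANDOM, "hypo-dev", dev);
	if (err)
		return err;	/* -EINVAL, -ENOMEM, or -EBUSY from setup_irq */

	hypo_dev_unmask_interrupts(dev);	/* only now let the hw fire */
	return 0;
}
#endif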
/**
 *	free_irq - free an interrupt
 *	@irq: Interrupt line to free
 *	@dev_id: Device identity to free
 *
 *	Remove an interrupt handler. The handler is removed and if the
 *	interrupt line is no longer in use by any driver it is disabled.
 *	On a shared IRQ the caller must ensure the interrupt is disabled
 *	on the card it drives before calling this function. The function
 *	does not return until any executing interrupts for this IRQ
 *	have completed.
 *
 *	This function must not be called from interrupt context.
 */
void free_irq(unsigned int irq, void *dev_id)
{
	irq_desc_t *desc;
	struct irqaction **p;
	unsigned long flags;

	if (irq >= NR_IRQS)
		return;

	desc = irq_desc + irq;
	spin_lock_irqsave(&desc->lock, flags);
	p = &desc->action;
	for (;;) {
		struct irqaction *action = *p;
		if (action) {
			struct irqaction **pp = p;
			p = &action->next;
			if (action->dev_id != dev_id)
				continue;

			/* Found it - now remove it from the list of entries */
			*pp = action->next;
			if (!desc->action) {
				desc->status |= IRQ_DISABLED;
				desc->handler->shutdown(irq);
			}
			spin_unlock_irqrestore(&desc->lock, flags);

			/* Wait to make sure it's not being used on another CPU */
			synchronize_irq(irq);
			kfree(action);
			return;
		}
		printk("Trying to free free IRQ%d\n", irq);
		spin_unlock_irqrestore(&desc->lock, flags);
		return;
	}
}

EXPORT_SYMBOL(free_irq);
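/*
 * Illustrative sketch, matching the attach example above: free_irq() takes
 * the same dev_id cookie, and on a shared line the card is quiesced first.
 * hypo_dev_mask_interrupts() is invented.
 */
#if 0	/* example only */
static void hypo_dev_detach(struct hypo_dev *dev)
{
	hypo_dev_mask_interrupts(dev);	/* stop our card interrupting */
	free_irq(dev->irq, dev);	/* also waits for running handlers */
}
#endif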
/*
 * IRQ autodetection code..
 *
 * This depends on the fact that any interrupt that
 * comes in on to an unassigned handler will get stuck
 * with "IRQ_WAITING" cleared and the interrupt
 * disabled.
 */

static DECLARE_MUTEX(probe_sem);

/**
 *	probe_irq_on - begin an interrupt autodetect
 *
 *	Commence probing for an interrupt. The interrupts are scanned
 *	and a mask of potential interrupt lines is returned.
 */
unsigned long probe_irq_on(void)
{
	unsigned int i;
	irq_desc_t *desc;
	unsigned long val;
	unsigned long delay;

	down(&probe_sem);
	/*
	 * something may have generated an irq long ago and we want to
	 * flush such a longstanding irq before considering it as spurious.
	 */
	for (i = NR_IRQS-1; i > 0; i--) {
		desc = irq_desc + i;

		spin_lock_irq(&desc->lock);
		if (!irq_desc[i].action)
			irq_desc[i].handler->startup(i);
		spin_unlock_irq(&desc->lock);
	}

	/* Wait for longstanding interrupts to trigger. */
	for (delay = jiffies + HZ/50; time_after(delay, jiffies); )
		/* about 20ms delay */ barrier();

	/*
	 * enable any unassigned irqs
	 * (we must startup again here because if a longstanding irq
	 * happened in the previous stage, it may have masked itself)
	 */
	for (i = NR_IRQS-1; i > 0; i--) {
		desc = irq_desc + i;

		spin_lock_irq(&desc->lock);
		if (!desc->action) {
			desc->status |= IRQ_AUTODETECT | IRQ_WAITING;
			if (desc->handler->startup(i))
				desc->status |= IRQ_PENDING;
		}
		spin_unlock_irq(&desc->lock);
	}

	/*
	 * Wait for spurious interrupts to trigger
	 */
	for (delay = jiffies + HZ/10; time_after(delay, jiffies); )
		/* about 100ms delay */ barrier();

	/*
	 * Now filter out any obviously spurious interrupts
	 */
	val = 0;
	for (i = 0; i < NR_IRQS; i++) {
		irq_desc_t *desc = irq_desc + i;
		unsigned int status;

		spin_lock_irq(&desc->lock);
		status = desc->status;

		if (status & IRQ_AUTODETECT) {
			/* It triggered already - consider it spurious. */
			if (!(status & IRQ_WAITING)) {
				desc->status = status & ~IRQ_AUTODETECT;
				desc->handler->shutdown(i);
			} else
				if (i < 32)
					val |= 1 << i;
		}
		spin_unlock_irq(&desc->lock);
	}

	return val;
}

EXPORT_SYMBOL(probe_irq_on);

/*
 * Return a mask of triggered interrupts (this
 * can handle only legacy ISA interrupts).
 */

/**
 *	probe_irq_mask - scan a bitmap of interrupt lines
 *	@val: mask of interrupts to consider
 *
 *	Scan the ISA bus interrupt lines and return a bitmap of
 *	active interrupts. The interrupt probe logic state is then
 *	returned to its previous value.
 *
 *	Note: we need to scan all the irq's even though we will
 *	only return ISA irq numbers - just so that we reset them
 *	all to a known state.
 */
unsigned int probe_irq_mask(unsigned long val)
{
	int i;
	unsigned int mask;

	mask = 0;
	for (i = 0; i < NR_IRQS; i++) {
		irq_desc_t *desc = irq_desc + i;
		unsigned int status;

		spin_lock_irq(&desc->lock);
		status = desc->status;

		if (status & IRQ_AUTODETECT) {
			if (i < 16 && !(status & IRQ_WAITING))
				mask |= 1 << i;

			desc->status = status & ~IRQ_AUTODETECT;
			desc->handler->shutdown(i);
		}
		spin_unlock_irq(&desc->lock);
	}
	up(&probe_sem);

	return mask & val;
}

/*
 * Return the one interrupt that triggered (this can
 * handle any interrupt source).
 */

/**
 *	probe_irq_off - end an interrupt autodetect
 *	@val: mask of potential interrupts (unused)
 *
 *	Scans the unused interrupt lines and returns the line which
 *	appears to have triggered the interrupt. If no interrupt was
 *	found then zero is returned. If more than one interrupt is
 *	found then minus the first candidate is returned to indicate
 *	that there is doubt.
 *
 *	The interrupt probe logic state is returned to its previous
 *	value.
 *
 *	BUGS: When used in a module (which arguably shouldn't happen)
 *	nothing prevents two IRQ probe callers from overlapping. The
 *	results of this are non-optimal.
 */
int probe_irq_off(unsigned long val)
{
	int i, irq_found, nr_irqs;

	nr_irqs = 0;
	irq_found = 0;
	for (i = 0; i < NR_IRQS; i++) {
		irq_desc_t *desc = irq_desc + i;
		unsigned int status;

		spin_lock_irq(&desc->lock);
		status = desc->status;

		if (status & IRQ_AUTODETECT) {
			if (!(status & IRQ_WAITING)) {
				if (!nr_irqs)
					irq_found = i;
				nr_irqs++;
			}
			desc->status = status & ~IRQ_AUTODETECT;
			desc->handler->shutdown(i);
		}
		spin_unlock_irq(&desc->lock);
	}
	up(&probe_sem);

	if (nr_irqs > 1)
		irq_found = -irq_found;
	return irq_found;
}

EXPORT_SYMBOL(probe_irq_off);
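/*
 * Illustrative sketch: the canonical autoprobe sequence built on
 * probe_irq_on()/probe_irq_off().  hypo_dev_trigger_test_irq() is an
 * invented way of making the card raise its line.
 */
#if 0	/* example only */
static int hypo_dev_find_irq(struct hypo_dev *dev)
{
	unsigned long mask;
	int irq;

	mask = probe_irq_on();		/* arm all unassigned lines */
	hypo_dev_trigger_test_irq(dev);	/* make the card interrupt */
	mdelay(10);			/* give the interrupt time to arrive */
	irq = probe_irq_off(mask);	/* >0: found, <0: ambiguous, 0: none */

	return irq > 0 ? irq : -ENODEV;
}
#endif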
/* this was setup_x86_irq but it seems pretty generic */
int setup_irq(unsigned int irq, struct irqaction *new)
{
	int shared = 0;
	unsigned long flags;
	struct irqaction *old, **p;
	irq_desc_t *desc = irq_desc + irq;

	/*
	 * Some drivers like serial.c use request_irq() heavily,
	 * so we have to be careful not to interfere with a
	 * running system.
	 */
	if (new->flags & SA_SAMPLE_RANDOM) {
		/*
		 * This function might sleep, we want to call it first,
		 * outside of the atomic block.
		 * Yes, this might clear the entropy pool if the wrong
		 * driver is attempted to be loaded, without actually
		 * installing a new handler, but is this really a
		 * problem?  Only the sysadmin is able to do this.
		 */
		rand_initialize_irq(irq);
	}

	/*
	 * The following block of code has to be executed atomically
	 */
	spin_lock_irqsave(&desc->lock, flags);
	p = &desc->action;
	if ((old = *p) != NULL) {
		/* Can't share interrupts unless both agree to */
		if (!(old->flags & new->flags & SA_SHIRQ)) {
			spin_unlock_irqrestore(&desc->lock, flags);
			return -EBUSY;
		}

		/* add new interrupt at end of irq queue */
		do {
			p = &old->next;
			old = *p;
		} while (old);
		shared = 1;
	}

	*p = new;

	if (!shared) {
		desc->depth = 0;
		desc->status &= ~(IRQ_DISABLED | IRQ_AUTODETECT |
				  IRQ_WAITING | IRQ_INPROGRESS);
		desc->handler->startup(irq);
	}
	spin_unlock_irqrestore(&desc->lock, flags);

	register_irq_proc(irq);
	return 0;
}
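/*
 * Illustrative sketch: setup_irq() is the interface for arch code that
 * needs a static irqaction, wired up before kmalloc() works (classically
 * the timer tick).  All names and the irq number are invented.
 */
#if 0	/* example only */
static irqreturn_t hypo_timer_interrupt(int irq, void *dev_id,
					struct pt_regs *regs);

#define HYPO_TIMER_IRQ	7

static struct irqaction hypo_timer_irqaction = {
	.handler = hypo_timer_interrupt,
	.flags	 = SA_INTERRUPT,	/* run with local irqs disabled */
	.name	 = "timer",
};

void __init hypo_time_init(void)
{
	setup_irq(HYPO_TIMER_IRQ, &hypo_timer_irqaction);
}
#endif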
void __init init_generic_irq(void)
{
	int i;

	for (i = 0; i < NR_IRQS; i++) {
		irq_desc[i].status = IRQ_DISABLED;
		irq_desc[i].action = NULL;
		irq_desc[i].depth = 1;
		irq_desc[i].handler = &no_irq_type;
	}
}

EXPORT_SYMBOL(disable_irq_nosync);
EXPORT_SYMBOL(disable_irq);
EXPORT_SYMBOL(enable_irq);
EXPORT_SYMBOL(probe_irq_mask);

static struct proc_dir_entry *root_irq_dir;
static struct proc_dir_entry *irq_dir[NR_IRQS];

#define HEX_DIGITS 8

static unsigned int parse_hex_value(const char *buffer,
		unsigned long count, unsigned long *ret)
{
	unsigned char hexnum[HEX_DIGITS];
	unsigned long value;
	int i;

	if (!count)
		return -EINVAL;
	if (count > HEX_DIGITS)
		count = HEX_DIGITS;
	if (copy_from_user(hexnum, buffer, count))
		return -EFAULT;

	/*
	 * Parse the first 8 characters as a hex string, any non-hex char
	 * is end-of-string. '00e1', 'e1', '00E1', 'E1' are all the same.
	 */
	value = 0;

	for (i = 0; i < count; i++) {
		unsigned int c = hexnum[i];

		switch (c) {
		case '0' ... '9': c -= '0'; break;
		case 'a' ... 'f': c -= 'a'-10; break;
		case 'A' ... 'F': c -= 'A'-10; break;
		default:
			goto out;
		}
		value = (value << 4) | c;
	}
out:
	*ret = value;
	return 0;
}

#ifdef CONFIG_SMP

static struct proc_dir_entry *smp_affinity_entry[NR_IRQS];

static cpumask_t irq_affinity[NR_IRQS] = { [0 ... NR_IRQS-1] = ~0UL };
static int irq_affinity_read_proc(char *page, char **start, off_t off,
			int count, int *eof, void *data)
{
	int len = 0, k;
	cpumask_t tmp = irq_affinity[(long)data];

	if (count < HEX_DIGITS+1)
		return -EINVAL;
	for (k = 0; k < sizeof(cpumask_t)/sizeof(u16); ++k) {
		int j = sprintf(page, "%04hx", cpus_coerce(tmp));
		len += j;
		page += j;
		cpus_shift_right(tmp, tmp, 16);
	}
	len += sprintf(page, "\n");
	return len;
}

static int irq_affinity_write_proc(struct file *file, const char *buffer,
			unsigned long count, void *data)
{
	int irq = (long) data, full_count = count, err;
	cpumask_t new_value, tmp;

	if (!irq_desc[irq].handler->set_affinity)
		return -EIO;

	err = parse_hex_value(buffer, count, &new_value);
	if (err)
		return err;

	/*
	 * Do not allow disabling IRQs completely - it's a too easy
	 * way to make the system unusable accidentally :-) At least
	 * one online CPU still has to be targeted.
	 */
	cpus_and(tmp, new_value, cpu_online_map);
	if (cpus_empty(tmp))
		return -EINVAL;

	irq_affinity[irq] = new_value;
	irq_desc[irq].handler->set_affinity(irq, new_value);

	return full_count;
}

#endif
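/*
 * Usage note (illustrative, not from the original file): the smp_affinity
 * file takes the mask as hex, so from a shell
 *
 *	echo 2 > /proc/irq/9/smp_affinity	(steer IRQ 9 to CPU 1)
 *
 * The write handler above rejects a mask that contains no online CPU, and
 * returns -EIO for controllers without a set_affinity method.
 */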
static int prof_cpu_mask_read_proc(char *page, char **start, off_t off,
			int count, int *eof, void *data)
{
	int len = 0, k;
	cpumask_t *mask = (cpumask_t *)data, tmp;

	if (count < HEX_DIGITS+1)
		return -EINVAL;
	tmp = *mask;

	for (k = 0; k < sizeof(cpumask_t)/sizeof(u16); ++k) {
		int j = sprintf(page, "%04hx", cpus_coerce(tmp));
		len += j;
		page += j;
		cpus_shift_right(tmp, tmp, 16);
	}
	len += sprintf(page, "\n");
	return len;
}

static int prof_cpu_mask_write_proc(struct file *file, const char *buffer,
			unsigned long count, void *data)
{
	cpumask_t *mask = (cpumask_t *)data, new_value;
	unsigned long full_count = count, err;

	err = parse_hex_value(buffer, count, &new_value);
	if (err)
		return err;

	*mask = new_value;
	return full_count;
}

#define MAX_NAMELEN 10

static void register_irq_proc(unsigned int irq)
{
	char name[MAX_NAMELEN];

	if (!root_irq_dir || (irq_desc[irq].handler == &no_irq_type) ||
	    irq_dir[irq])
		return;

	memset(name, 0, MAX_NAMELEN);
	sprintf(name, "%d", irq);

	/* create /proc/irq/1234 */
	irq_dir[irq] = proc_mkdir(name, root_irq_dir);

#ifdef CONFIG_SMP
	{
		struct proc_dir_entry *entry;

		/* create /proc/irq/1234/smp_affinity */
		entry = create_proc_entry("smp_affinity", 0600, irq_dir[irq]);

		if (entry) {
			entry->nlink = 1;
			entry->data = (void *)(long)irq;
			entry->read_proc = irq_affinity_read_proc;
			entry->write_proc = irq_affinity_write_proc;
		}

		smp_affinity_entry[irq] = entry;
	}
#endif
}

unsigned long prof_cpu_mask = -1;

void init_irq_proc(void)
{
	struct proc_dir_entry *entry;
	int i;

	/* create /proc/irq */
	root_irq_dir = proc_mkdir("irq", 0);

	/* create /proc/irq/prof_cpu_mask */
	entry = create_proc_entry("prof_cpu_mask", 0600, root_irq_dir);

	if (!entry)
		return;

	entry->nlink = 1;
	entry->data = (void *)&prof_cpu_mask;
	entry->read_proc = prof_cpu_mask_read_proc;
	entry->write_proc = prof_cpu_mask_write_proc;

	/*
	 * Create entries for all existing IRQs.
	 */
	for (i = 0; i < NR_IRQS; i++)
		register_irq_proc(i);
}
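/*
 * Usage note (illustrative, not from the original file): after
 * init_irq_proc() the tree looks like
 *
 *	/proc/irq/prof_cpu_mask
 *	/proc/irq/<n>/smp_affinity	(SMP only, one per wired-up irq)
 *
 * register_irq_proc() skips lines still owned by no_irq_type, so a
 * directory appears only for interrupts with a real controller behind them.
 */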