/*
 * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2007-2009 PetaLogix
 * Copyright (C) 2006 Atmark Techno, Inc.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <linux/init.h>
#include <linux/ftrace.h>
#include <linux/kernel.h>
#include <linux/hardirq.h>
#include <linux/interrupt.h>
#include <linux/irqflags.h>
#include <linux/seq_file.h>
#include <linux/kernel_stat.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/of_irq.h>

void __irq_entry do_IRQ(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	trace_hardirqs_off();

	irq_enter();
	handle_arch_irq(regs);
	irq_exit();
	set_irq_regs(old_regs);
	trace_hardirqs_on();
}

void __init init_IRQ(void)
{
	/* process the entire interrupt tree in one go */
	irqchip_init();
}

/*
 *	linux/arch/i386/kernel/irq.c
 *
 *	Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 *
 * This file contains the code used by various IRQ handling routines:
 * asking for different IRQ's should be done through these routines
 * instead of just grabbing them. Thus setups with different IRQ numbers
 * shouldn't result in any weird surprises, and installing new handlers
 * should be easier.
 */

/*
 * (mostly architecture independent, will move to kernel/irq.c in 2.5.)
 *
 * IRQs are in fact implemented a bit like signal handlers for the kernel.
 * Naturally it's not a 1:1 relation, but there are similarities.
 */

#include <linux/config.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/kernel_stat.h>
#include <linux/irq.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <asm/atomic.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/system.h>
#include <asm/bitops.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/delay.h>
#include <asm/desc.h>
#include <asm/irq.h>



/*
 * Linux has a controller-independent x86 interrupt architecture.
 * Every controller has a 'controller-template' that is used
 * by the main code to do the right thing. Each driver-visible
 * interrupt source is transparently wired to the appropriate
 * controller. Thus drivers need not be aware of the
 * interrupt-controller.
 *
 * Various interrupt controllers we handle: 8259 PIC, SMP IO-APIC,
 * PIIX4's internal 8259 PIC and SGI's Visual Workstation Cobalt (IO-)APIC.
 * (IO-APICs assumed to be messaging to Pentium local-APICs)
 *
 * The code is designed to be easily extended with new/different
 * interrupt controllers, without having to do assembly magic.
 */

/*
 * Controller mappings for all interrupt sources:
 */
irq_desc_t irq_desc[NR_IRQS] __cacheline_aligned =
	{ [0 ... NR_IRQS-1] = { 0, &no_irq_type, NULL, 0, SPIN_LOCK_UNLOCKED}};

static void register_irq_proc (unsigned int irq);
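
/*
 * Illustration (not part of the original file): a minimal
 * 'controller-template' as described above. All my_pic_* names are
 * hypothetical; a real driver would poke its PIC hardware in these
 * hooks and point irq_desc[irq].handler at its hw_interrupt_type,
 * mirroring the no_irq_type template further down.
 */
#if 0
static unsigned int my_pic_startup(unsigned int irq)	{ /* unmask line */ return 0; }
static void my_pic_shutdown(unsigned int irq)		{ /* mask line */ }
static void my_pic_enable(unsigned int irq)		{ /* unmask line */ }
static void my_pic_disable(unsigned int irq)		{ /* mask line */ }
static void my_pic_ack(unsigned int irq)		{ /* ack/EOI cycle */ }
static void my_pic_end(unsigned int irq)		{ /* re-enable unless IRQ_DISABLED */ }

static struct hw_interrupt_type my_pic_type = {
	"MY-PIC",		/* same member order as no_irq_type below */
	my_pic_startup,
	my_pic_shutdown,
	my_pic_enable,
	my_pic_disable,
	my_pic_ack,
	my_pic_end
};
#endif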

/*
 * Special irq handlers.
 */

void no_action(int cpl, void *dev_id, struct pt_regs *regs) { }

/*
 * Generic no controller code
 */

static void enable_none(unsigned int irq) { }
static unsigned int startup_none(unsigned int irq) { return 0; }
static void disable_none(unsigned int irq) { }
static void ack_none(unsigned int irq)
{
	/*
	 * 'What should we do if we get a hw irq event on an illegal vector?'
	 * Each architecture has to answer this themselves; it doesn't
	 * deserve a generic callback, I think.
	 */
#if CONFIG_X86
	printk("unexpected IRQ trap at vector %02x\n", irq);
#ifdef CONFIG_X86_LOCAL_APIC
	/*
	 * Currently unexpected vectors happen only on SMP and APIC.
	 * We _must_ ack these because every local APIC has only N
	 * irq slots per priority level, and a 'hanging, unacked' IRQ
	 * holds up an irq slot - in excessive cases (when multiple
	 * unexpected vectors occur) that might lock up the APIC
	 * completely.
	 */
	ack_APIC_irq();
#endif
#endif
}

/* startup is the same as "enable", shutdown is same as "disable" */
#define shutdown_none	disable_none
#define end_none	enable_none

struct hw_interrupt_type no_irq_type = {
	"none",
	startup_none,
	shutdown_none,
	enable_none,
	disable_none,
	ack_none,
	end_none
};

atomic_t irq_err_count;
#ifdef CONFIG_X86_IO_APIC
#ifdef APIC_MISMATCH_DEBUG
atomic_t irq_mis_count;
#endif
#endif

/*
 * Generic, controller-independent functions:
 */

int show_interrupts(struct seq_file *p, void *v)
{
	int i, j;
	struct irqaction * action;

	seq_printf(p, "           ");
	for (j=0; j<smp_num_cpus; j++)
		seq_printf(p, "CPU%d       ",j);
	seq_putc(p,'\n');

	for (i = 0 ; i < NR_IRQS ; i++) {
		action = irq_desc[i].action;
		if (!action)
			continue;
		seq_printf(p, "%3d: ",i);
#ifndef CONFIG_SMP
		seq_printf(p, "%10u ", kstat_irqs(i));
#else
		for (j = 0; j < smp_num_cpus; j++)
			seq_printf(p, "%10u ",
				kstat.irqs[cpu_logical_map(j)][i]);
#endif
		seq_printf(p, " %14s", irq_desc[i].handler->typename);
		seq_printf(p, "  %s", action->name);

		for (action=action->next; action; action = action->next)
			seq_printf(p, ", %s", action->name);
		seq_putc(p,'\n');
	}
	seq_printf(p, "NMI: ");
	for (j = 0; j < smp_num_cpus; j++)
		seq_printf(p, "%10u ",
			nmi_count(cpu_logical_map(j)));
	seq_printf(p, "\n");
#if CONFIG_X86_LOCAL_APIC
	seq_printf(p, "LOC: ");
	for (j = 0; j < smp_num_cpus; j++)
		seq_printf(p, "%10u ",
			apic_timer_irqs[cpu_logical_map(j)]);
	seq_printf(p, "\n");
#endif
	seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
#ifdef CONFIG_X86_IO_APIC
#ifdef APIC_MISMATCH_DEBUG
	seq_printf(p, "MIS: %10u\n", atomic_read(&irq_mis_count));
#endif
#endif

	return 0;
}
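
/*
 * The resulting /proc/interrupts looks roughly like this (illustrative
 * two-CPU output; exact counts, controller names and widths depend on
 * the machine):
 *
 *	           CPU0       CPU1
 *	  0:     123456     123450    IO-APIC-edge  timer
 *	  1:       1234        567    IO-APIC-edge  keyboard
 *	NMI:          0          0
 *	LOC:     123455     123451
 *	ERR:          0
 */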

/*
 * Global interrupt locks for SMP. Allow interrupts to come in on any
 * CPU, yet make cli/sti act globally to protect critical regions..
 */

#ifdef CONFIG_SMP
unsigned char global_irq_holder = NO_PROC_ID;
unsigned volatile long global_irq_lock; /* pedantic: long for set_bit --RR */

extern void show_stack(unsigned long* esp);

static void show(char * str)
{
	int i;
	int cpu = smp_processor_id();

	printk("\n%s, CPU %d:\n", str, cpu);
	printk("irq:  %d [",irqs_running());
	for(i=0;i < smp_num_cpus;i++)
		printk(" %d",local_irq_count(i));
	printk(" ]\nbh:   %d [",spin_is_locked(&global_bh_lock) ? 1 : 0);
	for(i=0;i < smp_num_cpus;i++)
		printk(" %d",local_bh_count(i));

	printk(" ]\nStack dumps:");
	for(i = 0; i < smp_num_cpus; i++) {
		unsigned long esp;
		if (i == cpu)
			continue;
		printk("\nCPU %d:",i);
		esp = init_tss[i].esp0;
		if (!esp) {
			/* tss->esp0 is set to NULL in cpu_init(),
			 * it's initialized when the cpu returns to user
			 * space. -- manfreds
			 */
			printk(" <unknown> ");
			continue;
		}
		esp &= ~(THREAD_SIZE-1);
		esp += sizeof(struct task_struct);
		show_stack((void*)esp);
	}
	printk("\nCPU %d:",cpu);
	show_stack(NULL);
	printk("\n");
}

#define MAXCOUNT 100000000

/*
 * I had a lockup scenario where a tight loop doing
 * spin_unlock()/spin_lock() on CPU#1 was racing with
 * spin_lock() on CPU#0. CPU#0 should have noticed spin_unlock(), but
 * apparently the spin_unlock() information did not make it
 * through to CPU#0 ... nasty, is this by design, do we have to limit
 * 'memory update oscillation frequency' artificially like here?
 *
 * Such 'high frequency update' races can be avoided by careful design, but
 * some of our major constructs like spinlocks use similar techniques;
 * it would be nice to clarify this issue. Set this define to 0 if you
 * want to check whether your system freezes. I suspect the delay done
 * by SYNC_OTHER_CORES() is in correlation with 'snooping latency', but
 * I thought that such things are guaranteed by design, since we use
 * the 'LOCK' prefix.
 */
#define SUSPECTED_CPU_OR_CHIPSET_BUG_WORKAROUND 0

#if SUSPECTED_CPU_OR_CHIPSET_BUG_WORKAROUND
# define SYNC_OTHER_CORES(x) udelay(x+1)
#else
/*
 * We have to allow irqs to arrive between __sti and __cli
 */
# define SYNC_OTHER_CORES(x) __asm__ __volatile__ ("nop")
#endif

static inline void wait_on_irq(int cpu)
{
	int count = MAXCOUNT;

	for (;;) {

		/*
		 * Wait until all interrupts are gone. Wait
		 * for bottom half handlers unless we're
		 * already executing in one..
		 */
		if (!irqs_running())
			if (local_bh_count(cpu) || !spin_is_locked(&global_bh_lock))
				break;

		/* Duh, we have to loop. Release the lock to avoid deadlocks */
		clear_bit(0,&global_irq_lock);

		for (;;) {
			if (!--count) {
				show("wait_on_irq");
				count = ~0;
			}
			__sti();
			SYNC_OTHER_CORES(cpu);
			__cli();
			if (irqs_running())
				continue;
			if (global_irq_lock)
				continue;
			if (!local_bh_count(cpu) && spin_is_locked(&global_bh_lock))
				continue;
			if (!test_and_set_bit(0,&global_irq_lock))
				break;
		}
	}
}

/*
 * This is called when we want to synchronize with
 * interrupts. We may for example tell a device to
 * stop sending interrupts: but to make sure there
 * are no interrupts that are executing on another
 * CPU we need to call this function.
 */
void synchronize_irq(void)
{
	if (irqs_running()) {
		/* Stupid approach */
		cli();
		sti();
	}
}
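
/*
 * Typical use of synchronize_irq() (sketch; the device register and
 * names are hypothetical): quiesce the card, then make sure no handler
 * instance is still running on another CPU before tearing anything down:
 *
 *	writel(0, dev->base + MY_IRQ_MASK);	-- hypothetical mask register
 *	synchronize_irq();
 *	-- from here on, no CPU is inside our interrupt handler
 */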

static inline void get_irqlock(int cpu)
{
	if (test_and_set_bit(0,&global_irq_lock)) {
		/* do we already hold the lock? */
		if ((unsigned char) cpu == global_irq_holder)
			return;
		/* Uhhuh.. Somebody else got it. Wait.. */
		do {
			do {
				rep_nop();
			} while (test_bit(0,&global_irq_lock));
		} while (test_and_set_bit(0,&global_irq_lock));
	}
	/*
	 * We also need to make sure that nobody else is running
	 * in an interrupt context.
	 */
	wait_on_irq(cpu);

	/*
	 * Ok, finally..
	 */
	global_irq_holder = cpu;
}

#define EFLAGS_IF_SHIFT 9

/*
 * A global "cli()" while in an interrupt context
 * turns into just a local cli(). Interrupts
 * should use spinlocks for the (very unlikely)
 * case that they ever want to protect against
 * each other.
 *
 * If we already have local interrupts disabled,
 * this will not turn a local disable into a
 * global one (problems with spinlocks: this makes
 * save_flags+cli+sti usable inside a spinlock).
 */
void __global_cli(void)
{
	unsigned int flags;

	__save_flags(flags);
	if (flags & (1 << EFLAGS_IF_SHIFT)) {
		int cpu = smp_processor_id();
		__cli();
		if (!local_irq_count(cpu))
			get_irqlock(cpu);
	}
}

void __global_sti(void)
{
	int cpu = smp_processor_id();

	if (!local_irq_count(cpu))
		release_irqlock(cpu);
	__sti();
}

/*
 * SMP flags value to restore to:
 * 0 - global cli
 * 1 - global sti
 * 2 - local cli
 * 3 - local sti
 */
unsigned long __global_save_flags(void)
{
	int retval;
	int local_enabled;
	unsigned long flags;
	int cpu = smp_processor_id();

	__save_flags(flags);
	local_enabled = (flags >> EFLAGS_IF_SHIFT) & 1;
	/* default to local */
	retval = 2 + local_enabled;

	/* check for global flags if we're not in an interrupt */
	if (!local_irq_count(cpu)) {
		if (local_enabled)
			retval = 1;
		if (global_irq_holder == cpu)
			retval = 0;
	}
	return retval;
}

void __global_restore_flags(unsigned long flags)
{
	switch (flags) {
	case 0:
		__global_cli();
		break;
	case 1:
		__global_sti();
		break;
	case 2:
		__cli();
		break;
	case 3:
		__sti();
		break;
	default:
		printk("global_restore_flags: %08lx (%08lx)\n",
			flags, (&flags)[-1]);
	}
}

#endif
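
/*
 * The classic 2.4 idiom built on the functions above (sketch):
 *
 *	unsigned long flags;
 *
 *	save_flags(flags);
 *	cli();			-- global on SMP, local in irq context
 *	-- critical section --
 *	restore_flags(flags);
 *
 * Inside an interrupt handler the same sequence only disables local
 * interrupts, which is why the comment above tells interrupt code to
 * protect against other handlers with spinlocks instead.
 */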

/*
 * This should really return information about whether
 * we should do bottom half handling etc. Right now we
 * end up _always_ checking the bottom half, which is a
 * waste of time and is not what some drivers would
 * prefer.
 */
int handle_IRQ_event(unsigned int irq, struct pt_regs * regs, struct irqaction * action)
{
	int status;
	int cpu = smp_processor_id();

	irq_enter(cpu, irq);

	status = 1;	/* Force the "do bottom halves" bit */

	if (!(action->flags & SA_INTERRUPT))
		__sti();

	do {
		status |= action->flags;
		action->handler(irq, action->dev_id, regs);
		action = action->next;
	} while (action);
	if (status & SA_SAMPLE_RANDOM)
		add_interrupt_randomness(irq);
	__cli();

	irq_exit(cpu, irq);

	return status;
}

/*
 * Generic enable/disable code: this just calls
 * down into the PIC-specific version for the actual
 * hardware disable after having gotten the irq
 * controller lock.
 */

/**
 *	disable_irq_nosync - disable an irq without waiting
 *	@irq: Interrupt to disable
 *
 *	Disable the selected interrupt line.  Disables and Enables are
 *	nested.
 *	Unlike disable_irq(), this function does not ensure existing
 *	instances of the IRQ handler have completed before returning.
 *
 *	This function may be called from IRQ context.
 */

inline void disable_irq_nosync(unsigned int irq)
{
	irq_desc_t *desc = irq_desc + irq;
	unsigned long flags;

	spin_lock_irqsave(&desc->lock, flags);
	if (!desc->depth++) {
		desc->status |= IRQ_DISABLED;
		desc->handler->disable(irq);
	}
	spin_unlock_irqrestore(&desc->lock, flags);
}

/**
 *	disable_irq - disable an irq and wait for completion
 *	@irq: Interrupt to disable
 *
 *	Disable the selected interrupt line.  Enables and Disables are
 *	nested.
 *	This function waits for any pending IRQ handlers for this interrupt
 *	to complete before returning. If you use this function while
 *	holding a resource the IRQ handler may need, you will deadlock.
 *
 *	This function may be called - with care - from IRQ context.
 */

void disable_irq(unsigned int irq)
{
	disable_irq_nosync(irq);

	if (!local_irq_count(smp_processor_id())) {
		do {
			barrier();
			cpu_relax();
		} while (irq_desc[irq].status & IRQ_INPROGRESS);
	}
}

/**
 *	enable_irq - enable handling of an irq
 *	@irq: Interrupt to enable
 *
 *	Undoes the effect of one call to disable_irq().  If this
 *	matches the last disable, processing of interrupts on this
 *	IRQ line is re-enabled.
 *
 *	This function may be called from IRQ context.
 */

void enable_irq(unsigned int irq)
{
	irq_desc_t *desc = irq_desc + irq;
	unsigned long flags;

	spin_lock_irqsave(&desc->lock, flags);
	switch (desc->depth) {
	case 1: {
		unsigned int status = desc->status & ~IRQ_DISABLED;
		desc->status = status;
		if ((status & (IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) {
			desc->status = status | IRQ_REPLAY;
			hw_resend_irq(desc->handler,irq);
		}
		desc->handler->enable(irq);
		/* fall-through */
	}
	default:
		desc->depth--;
		break;
	case 0:
		printk("enable_irq(%u) unbalanced from %p\n", irq,
		       __builtin_return_address(0));
	}
	spin_unlock_irqrestore(&desc->lock, flags);
}
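
/*
 * disable_irq()/enable_irq() nest via desc->depth, so a sequence like
 * this (sketch) is safe even if someone else disabled the line first:
 *
 *	disable_irq(dev->irq);	-- depth 0 -> 1, line masked
 *	disable_irq(dev->irq);	-- depth 1 -> 2, no hardware access
 *	enable_irq(dev->irq);	-- depth 2 -> 1, still masked
 *	enable_irq(dev->irq);	-- depth 1 -> 0, line live again
 *
 * One enable_irq() too many hits the "unbalanced" printk above.
 */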

/*
 * do_IRQ handles all normal device IRQ's (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 */
asmlinkage unsigned int do_IRQ(struct pt_regs regs)
{
	/*
	 * We ack quickly, we don't want the irq controller
	 * thinking we're snobs just because some other CPU has
	 * disabled global interrupts (we have already done the
	 * INT_ACK cycles, it's too late to try to pretend to the
	 * controller that we aren't taking the interrupt).
	 *
	 * 0 return value means that this irq is already being
	 * handled by some other CPU. (or is disabled)
	 */
	int irq = regs.orig_eax & 0xff; /* high bits used in ret_from_ code  */
	int cpu = smp_processor_id();
	irq_desc_t *desc = irq_desc + irq;
	struct irqaction * action;
	unsigned int status;
#ifdef CONFIG_DEBUG_STACKOVERFLOW
	long esp;

	/* Debugging check for stack overflow: is there less than 1KB free? */
	__asm__ __volatile__("andl %%esp,%0" :
				"=r" (esp) : "0" (8191));
	if (unlikely(esp < (sizeof(struct task_struct) + 1024))) {
		extern void show_stack(unsigned long *);

		printk("do_IRQ: stack overflow: %ld\n",
			esp - sizeof(struct task_struct));
		__asm__ __volatile__("movl %%esp,%0" : "=r" (esp));
		show_stack((void *)esp);
	}
#endif

	kstat.irqs[cpu][irq]++;
	spin_lock(&desc->lock);
	desc->handler->ack(irq);
	/*
	 * REPLAY is when Linux resends an IRQ that was dropped earlier;
	 * WAITING is used by probe to mark irqs that are being tested.
	 */
	status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
	status |= IRQ_PENDING; /* we _want_ to handle it */

	/*
	 * If the IRQ is disabled for whatever reason, we cannot
	 * use the action we have.
	 */
	action = NULL;
	if (!(status & (IRQ_DISABLED | IRQ_INPROGRESS))) {
		action = desc->action;
		status &= ~IRQ_PENDING; /* we commit to handling */
		status |= IRQ_INPROGRESS; /* we are handling it */
	}
	desc->status = status;

	/*
	 * If there is no IRQ handler or it was disabled, exit early.
	 * Since we set PENDING, if another processor is handling
	 * a different instance of this same irq, the other processor
	 * will take care of it.
	 */
	if (!action)
		goto out;

	/*
	 * Edge triggered interrupts need to remember
	 * pending events.
	 * This applies to any hw interrupts that allow a second
	 * instance of the same irq to arrive while we are in do_IRQ
	 * or in the handler. But the code here only handles the _second_
	 * instance of the irq, not the third or fourth. So it is mostly
	 * useful for irq hardware that does not mask cleanly in an
	 * SMP environment.
	 */
	for (;;) {
		spin_unlock(&desc->lock);
		handle_IRQ_event(irq, &regs, action);
		spin_lock(&desc->lock);

		if (!(desc->status & IRQ_PENDING))
			break;
		desc->status &= ~IRQ_PENDING;
	}
	desc->status &= ~IRQ_INPROGRESS;
out:
	/*
	 * The ->end() handler has to deal with interrupts which got
	 * disabled while the handler was running.
	 */
	desc->handler->end(irq);
	spin_unlock(&desc->lock);

	if (softirq_pending(cpu))
		do_softirq();
	return 1;
}
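
/*
 * Illustrative two-CPU walk through the IRQ_PENDING/IRQ_INPROGRESS
 * handshake above:
 *
 *	CPU0: takes irq N, sets IRQ_INPROGRESS, unlocks, runs handlers
 *	CPU1: takes irq N again, sees IRQ_INPROGRESS, leaves IRQ_PENDING
 *	      set and returns without touching the handlers
 *	CPU0: retakes the lock, sees IRQ_PENDING, clears it and loops to
 *	      run the handlers once more, then clears IRQ_INPROGRESS
 *
 * A third edge arriving while IRQ_PENDING is still set merges with the
 * second - the "only handles the _second_ instance" caveat above.
 */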

/**
 *	request_irq - allocate an interrupt line
 *	@irq: Interrupt line to allocate
 *	@handler: Function to be called when the IRQ occurs
 *	@irqflags: Interrupt type flags
 *	@devname: An ascii name for the claiming device
 *	@dev_id: A cookie passed back to the handler function
 *
 *	This call allocates interrupt resources and enables the
 *	interrupt line and IRQ handling. From the point this
 *	call is made your handler function may be invoked. Since
 *	your handler function must clear any interrupt the board
 *	raises, you must take care both to initialise your hardware
 *	and to set up the interrupt handler in the right order.
 *
 *	Dev_id must be globally unique. Normally the address of the
 *	device data structure is used as the cookie. Since the handler
 *	receives this value it makes sense to use it.
 *
 *	If your interrupt is shared you must pass a non NULL dev_id
 *	as this is required when freeing the interrupt.
 *
 *	Flags:
 *
 *	SA_SHIRQ		Interrupt is shared
 *
 *	SA_INTERRUPT		Disable local interrupts while processing
 *
 *	SA_SAMPLE_RANDOM	The interrupt can be used for entropy
 *
 */

int request_irq(unsigned int irq,
		void (*handler)(int, void *, struct pt_regs *),
		unsigned long irqflags,
		const char * devname,
		void *dev_id)
{
	int retval;
	struct irqaction * action;

#if 1
	/*
	 * Sanity-check: shared interrupts should REALLY pass in
	 * a real dev-ID, otherwise we'll have trouble later trying
	 * to figure out which interrupt is which (messes up the
	 * interrupt freeing logic etc).
	 */
	if (irqflags & SA_SHIRQ) {
		if (!dev_id)
			printk("Bad boy: %s (at 0x%x) called us without a dev_id!\n", devname, (&irq)[-1]);
	}
#endif

	if (irq >= NR_IRQS)
		return -EINVAL;
	if (!handler)
		return -EINVAL;

	action = (struct irqaction *)
			kmalloc(sizeof(struct irqaction), GFP_KERNEL);
	if (!action)
		return -ENOMEM;

	action->handler = handler;
	action->flags = irqflags;
	action->mask = 0;
	action->name = devname;
	action->next = NULL;
	action->dev_id = dev_id;

	retval = setup_irq(irq, action);
	if (retval)
		kfree(action);
	return retval;
}

/**
 *	free_irq - free an interrupt
 *	@irq: Interrupt line to free
 *	@dev_id: Device identity to free
 *
 *	Remove an interrupt handler. The handler is removed and if the
 *	interrupt line is no longer in use by any driver it is disabled.
 *	On a shared IRQ the caller must ensure the interrupt is disabled
 *	on the card it drives before calling this function. The function
 *	does not return until any executing interrupts for this IRQ
 *	have completed.
 *
 *	This function may be called from interrupt context.
 *
 *	Bugs: Attempting to free an irq in a handler for the same irq hangs
 *	the machine.
 */

void free_irq(unsigned int irq, void *dev_id)
{
	irq_desc_t *desc;
	struct irqaction **p;
	unsigned long flags;

	if (irq >= NR_IRQS)
		return;

	desc = irq_desc + irq;
	spin_lock_irqsave(&desc->lock,flags);
	p = &desc->action;
	for (;;) {
		struct irqaction * action = *p;
		if (action) {
			struct irqaction **pp = p;
			p = &action->next;
			if (action->dev_id != dev_id)
				continue;

			/* Found it - now remove it from the list of entries */
			*pp = action->next;
			if (!desc->action) {
				desc->status |= IRQ_DISABLED;
				desc->handler->shutdown(irq);
			}
			spin_unlock_irqrestore(&desc->lock,flags);

#ifdef CONFIG_SMP
			/* Wait to make sure it's not being used on another CPU */
			while (desc->status & IRQ_INPROGRESS) {
				barrier();
				cpu_relax();
			}
#endif
			kfree(action);
			return;
		}
		printk("Trying to free free IRQ%d\n",irq);
		spin_unlock_irqrestore(&desc->lock,flags);
		return;
	}
}
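
/*
 * Typical driver usage (sketch; my_handler and struct my_dev are
 * hypothetical):
 *
 *	static void my_handler(int irq, void *dev_id, struct pt_regs *regs)
 *	{
 *		struct my_dev *dev = dev_id;
 *		-- ack the board, then process the event
 *	}
 *
 *	if (request_irq(dev->irq, my_handler, SA_SHIRQ, "my_dev", dev))
 *		goto fail;
 *	...
 *	free_irq(dev->irq, dev);	-- same dev_id selects our action
 */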

/*
 * IRQ autodetection code..
 *
 * This depends on the fact that any interrupt that
 * comes in on to an unassigned handler will get stuck
 * with "IRQ_WAITING" cleared and the interrupt
 * disabled.
 */

static DECLARE_MUTEX(probe_sem);

/**
 *	probe_irq_on	- begin an interrupt autodetect
 *
 *	Commence probing for an interrupt. The interrupts are scanned
 *	and a mask of potential interrupt lines is returned.
 *
 */

unsigned long probe_irq_on(void)
{
	unsigned int i;
	irq_desc_t *desc;
	unsigned long val;
	unsigned long delay;

	down(&probe_sem);
	/*
	 * something may have generated an irq long ago and we want to
	 * flush such a longstanding irq before considering it as spurious.
	 */
	for (i = NR_IRQS-1; i > 0; i--) {
		desc = irq_desc + i;

		spin_lock_irq(&desc->lock);
		if (!irq_desc[i].action)
			irq_desc[i].handler->startup(i);
		spin_unlock_irq(&desc->lock);
	}

	/* Wait for longstanding interrupts to trigger. */
	for (delay = jiffies + HZ/50; time_after(delay, jiffies); )
		/* about 20ms delay */ synchronize_irq();

	/*
	 * enable any unassigned irqs
	 * (we must startup again here because if a longstanding irq
	 * happened in the previous stage, it may have masked itself)
	 */
	for (i = NR_IRQS-1; i > 0; i--) {
		desc = irq_desc + i;

		spin_lock_irq(&desc->lock);
		if (!desc->action) {
			desc->status |= IRQ_AUTODETECT | IRQ_WAITING;
			if (desc->handler->startup(i))
				desc->status |= IRQ_PENDING;
		}
		spin_unlock_irq(&desc->lock);
	}

	/*
	 * Wait for spurious interrupts to trigger
	 */
	for (delay = jiffies + HZ/10; time_after(delay, jiffies); )
		/* about 100ms delay */ synchronize_irq();

	/*
	 * Now filter out any obviously spurious interrupts
	 */
	val = 0;
	for (i = 0; i < NR_IRQS; i++) {
		irq_desc_t *desc = irq_desc + i;
		unsigned int status;

		spin_lock_irq(&desc->lock);
		status = desc->status;

		if (status & IRQ_AUTODETECT) {
			/* It triggered already - consider it spurious. */
			if (!(status & IRQ_WAITING)) {
				desc->status = status & ~IRQ_AUTODETECT;
				desc->handler->shutdown(i);
			} else
				if (i < 32)
					val |= 1 << i;
		}
		spin_unlock_irq(&desc->lock);
	}

	return val;
}
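
/*
 * The canonical autoprobe sequence (sketch; the device poke in the
 * middle is hypothetical and driver-specific):
 *
 *	unsigned long mask = probe_irq_on();
 *	-- make the device raise an interrupt here
 *	irq = probe_irq_off(mask);	-- >0 found, 0 none, <0 ambiguous
 */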

/*
 * Return a mask of triggered interrupts (this
 * can handle only legacy ISA interrupts).
 */

/**
 *	probe_irq_mask - scan a bitmap of interrupt lines
 *	@val:	mask of interrupts to consider
 *
 *	Scan the ISA bus interrupt lines and return a bitmap of
 *	active interrupts. The interrupt probe logic state is then
 *	returned to its previous value.
 *
 *	Note: we need to scan all the irq's even though we will
 *	only return ISA irq numbers - just so that we reset them
 *	all to a known state.
 */
unsigned int probe_irq_mask(unsigned long val)
{
	int i;
	unsigned int mask;

	mask = 0;
	for (i = 0; i < NR_IRQS; i++) {
		irq_desc_t *desc = irq_desc + i;
		unsigned int status;

		spin_lock_irq(&desc->lock);
		status = desc->status;

		if (status & IRQ_AUTODETECT) {
			if (i < 16 && !(status & IRQ_WAITING))
				mask |= 1 << i;

			desc->status = status & ~IRQ_AUTODETECT;
			desc->handler->shutdown(i);
		}
		spin_unlock_irq(&desc->lock);
	}
	up(&probe_sem);

	return mask & val;
}

/*
 * Return the one interrupt that triggered (this can
 * handle any interrupt source).
 */

/**
 *	probe_irq_off	- end an interrupt autodetect
 *	@val: mask of potential interrupts (unused)
 *
 *	Scans the unused interrupt lines and returns the line which
 *	appears to have triggered the interrupt. If no interrupt was
 *	found then zero is returned. If more than one interrupt is
 *	found then minus the first candidate is returned to indicate
 *	there is doubt.
 *
 *	The interrupt probe logic state is returned to its previous
 *	value.
 *
 *	BUGS: When used in a module (which arguably shouldn't happen)
 *	nothing prevents two IRQ probe callers from overlapping. The
 *	results of this are non-optimal.
 */

int probe_irq_off(unsigned long val)
{
	int i, irq_found, nr_irqs;

	nr_irqs = 0;
	irq_found = 0;
	for (i = 0; i < NR_IRQS; i++) {
		irq_desc_t *desc = irq_desc + i;
		unsigned int status;

		spin_lock_irq(&desc->lock);
		status = desc->status;

		if (status & IRQ_AUTODETECT) {
			if (!(status & IRQ_WAITING)) {
				if (!nr_irqs)
					irq_found = i;
				nr_irqs++;
			}
			desc->status = status & ~IRQ_AUTODETECT;
			desc->handler->shutdown(i);
		}
		spin_unlock_irq(&desc->lock);
	}
	up(&probe_sem);

	if (nr_irqs > 1)
		irq_found = -irq_found;
	return irq_found;
}

/* this was setup_x86_irq but it seems pretty generic */
int setup_irq(unsigned int irq, struct irqaction * new)
{
	int shared = 0;
	unsigned long flags;
	struct irqaction *old, **p;
	irq_desc_t *desc = irq_desc + irq;

	/*
	 * Some drivers like serial.c use request_irq() heavily,
	 * so we have to be careful not to interfere with a
	 * running system.
	 */
	if (new->flags & SA_SAMPLE_RANDOM) {
		/*
		 * This function might sleep, so we want to call it first,
		 * outside of the atomic block.
		 * Yes, this might clear the entropy pool if the wrong
		 * driver is attempted to be loaded, without actually
		 * installing a new handler, but is this really a
		 * problem? Only the sysadmin is able to do this.
		 */
		rand_initialize_irq(irq);
	}

	/*
	 * The following block of code has to be executed atomically
	 */
	spin_lock_irqsave(&desc->lock,flags);
	p = &desc->action;
	if ((old = *p) != NULL) {
		/* Can't share interrupts unless both agree to */
		if (!(old->flags & new->flags & SA_SHIRQ)) {
			spin_unlock_irqrestore(&desc->lock,flags);
			return -EBUSY;
		}

		/* add new interrupt at end of irq queue */
		do {
			p = &old->next;
			old = *p;
		} while (old);
		shared = 1;
	}

	*p = new;

	if (!shared) {
		desc->depth = 0;
		desc->status &= ~(IRQ_DISABLED | IRQ_AUTODETECT | IRQ_WAITING | IRQ_INPROGRESS);
		desc->handler->startup(irq);
	}
	spin_unlock_irqrestore(&desc->lock,flags);

	register_irq_proc(irq);
	return 0;
}
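
/*
 * After two successful SA_SHIRQ setup_irq() calls on one line, the
 * descriptor holds a chain that handle_IRQ_event() walks in order
 * (illustrative; names are hypothetical):
 *
 *	irq_desc[irq].action -> { "eth0", dev0 } -> { "eth1", dev1 } -> NULL
 *
 * Sharing is refused with -EBUSY unless every action on the line, old
 * and new, was registered with SA_SHIRQ.
 */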

static struct proc_dir_entry * root_irq_dir;
static struct proc_dir_entry * irq_dir [NR_IRQS];

#define HEX_DIGITS 8

static unsigned int parse_hex_value (const char *buffer,
		unsigned long count, unsigned long *ret)
{
	unsigned char hexnum [HEX_DIGITS];
	unsigned long value;
	int i;

	if (!count)
		return -EINVAL;
	if (count > HEX_DIGITS)
		count = HEX_DIGITS;
	if (copy_from_user(hexnum, buffer, count))
		return -EFAULT;

	/*
	 * Parse the first 8 characters as a hex string, any non-hex char
	 * is end-of-string. '00e1', 'e1', '00E1', 'E1' are all the same.
	 */
	value = 0;

	for (i = 0; i < count; i++) {
		unsigned int c = hexnum[i];

		switch (c) {
		case '0' ... '9': c -= '0'; break;
		case 'a' ... 'f': c -= 'a'-10; break;
		case 'A' ... 'F': c -= 'A'-10; break;
		default:
			goto out;
		}
		value = (value << 4) | c;
	}
out:
	*ret = value;
	return 0;
}

#if CONFIG_SMP

static struct proc_dir_entry * smp_affinity_entry [NR_IRQS];

static unsigned long irq_affinity [NR_IRQS] = { [0 ... NR_IRQS-1] = ~0UL };
static int irq_affinity_read_proc (char *page, char **start, off_t off,
			int count, int *eof, void *data)
{
	if (count < HEX_DIGITS+1)
		return -EINVAL;
	return sprintf (page, "%08lx\n", irq_affinity[(long)data]);
}

static int irq_affinity_write_proc (struct file *file, const char *buffer,
					unsigned long count, void *data)
{
	int irq = (long) data, full_count = count, err;
	unsigned long new_value;

	if (!irq_desc[irq].handler->set_affinity)
		return -EIO;

	err = parse_hex_value(buffer, count, &new_value);

	/*
	 * Do not allow disabling IRQs completely - it's a too easy
	 * way to make the system unusable accidentally :-) At least
	 * one online CPU still has to be targeted.
	 */
	if (!(new_value & cpu_online_map))
		return -EINVAL;

	irq_affinity[irq] = new_value;
	irq_desc[irq].handler->set_affinity(irq, new_value);

	return full_count;
}

#endif
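
/*
 * Example: pin IRQ 9 to CPU 0 from user space. The written value goes
 * through parse_hex_value() above and is rejected unless it intersects
 * cpu_online_map:
 *
 *	echo 00000001 > /proc/irq/9/smp_affinity
 *	cat /proc/irq/9/smp_affinity		-- prints 00000001
 */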

static int prof_cpu_mask_read_proc (char *page, char **start, off_t off,
			int count, int *eof, void *data)
{
	unsigned long *mask = (unsigned long *) data;
	if (count < HEX_DIGITS+1)
		return -EINVAL;
	return sprintf (page, "%08lx\n", *mask);
}

static int prof_cpu_mask_write_proc (struct file *file, const char *buffer,
					unsigned long count, void *data)
{
	unsigned long *mask = (unsigned long *) data, full_count = count, err;
	unsigned long new_value;

	err = parse_hex_value(buffer, count, &new_value);
	if (err)
		return err;

	*mask = new_value;
	return full_count;
}

#define MAX_NAMELEN 10

static void register_irq_proc (unsigned int irq)
{
	char name [MAX_NAMELEN];

	if (!root_irq_dir || (irq_desc[irq].handler == &no_irq_type) ||
			irq_dir[irq])
		return;

	memset(name, 0, MAX_NAMELEN);
	sprintf(name, "%d", irq);

	/* create /proc/irq/1234 */
	irq_dir[irq] = proc_mkdir(name, root_irq_dir);

#if CONFIG_SMP
	{
		struct proc_dir_entry *entry;

		/* create /proc/irq/1234/smp_affinity */
		entry = create_proc_entry("smp_affinity", 0600, irq_dir[irq]);

		if (entry) {
			entry->nlink = 1;
			entry->data = (void *)(long)irq;
			entry->read_proc = irq_affinity_read_proc;
			entry->write_proc = irq_affinity_write_proc;
		}

		smp_affinity_entry[irq] = entry;
	}
#endif
}

unsigned long prof_cpu_mask = -1;

void init_irq_proc (void)
{
	struct proc_dir_entry *entry;
	int i;

	/* create /proc/irq */
	root_irq_dir = proc_mkdir("irq", 0);

	/* create /proc/irq/prof_cpu_mask */
	entry = create_proc_entry("prof_cpu_mask", 0600, root_irq_dir);

	if (!entry)
		return;

	entry->nlink = 1;
	entry->data = (void *)&prof_cpu_mask;
	entry->read_proc = prof_cpu_mask_read_proc;
	entry->write_proc = prof_cpu_mask_write_proc;

	/*
	 * Create entries for all existing IRQs.
	 */
	for (i = 0; i < NR_IRQS; i++)
		register_irq_proc(i);
}
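
/*
 * Resulting /proc layout after init_irq_proc() (illustrative):
 *
 *	/proc/irq/
 *	    prof_cpu_mask		(prof_cpu_mask_{read,write}_proc)
 *	    0/smp_affinity		(SMP only; one directory per irq)
 *	    ...
 */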