1 // SPDX-License-Identifier: GPL-2.0-or-later !! 1 /* $Id: irq.c,v 1.114 2002/01/11 08:45:38 davem Exp $ 2 /* !! 2 * irq.c: UltraSparc IRQ handling/init/registry. 3 * OpenRISC irq.c << 4 * << 5 * Linux architectural port borrowing liberall << 6 * others. All original copyrights apply as p << 7 * declaration. << 8 * 3 * 9 * Modifications for the OpenRISC architecture !! 4 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu) 10 * Copyright (C) 2010-2011 Jonas Bonn <jonas@s !! 5 * Copyright (C) 1998 Eddie C. Dost (ecd@skynet.be) >> 6 * Copyright (C) 1998 Jakub Jelinek (jj@ultra.linux.cz) 11 */ 7 */ 12 8 >> 9 #include <linux/config.h> >> 10 #include <linux/module.h> >> 11 #include <linux/sched.h> >> 12 #include <linux/ptrace.h> >> 13 #include <linux/errno.h> >> 14 #include <linux/kernel_stat.h> >> 15 #include <linux/signal.h> >> 16 #include <linux/mm.h> 13 #include <linux/interrupt.h> 17 #include <linux/interrupt.h> >> 18 #include <linux/slab.h> >> 19 #include <linux/random.h> 14 #include <linux/init.h> 20 #include <linux/init.h> 15 #include <linux/ftrace.h> !! 21 #include <linux/delay.h> 16 #include <linux/irq.h> !! 22 #include <linux/proc_fs.h> 17 #include <linux/irqchip.h> !! 23 #include <linux/seq_file.h> 18 #include <linux/export.h> !! 24 19 #include <linux/irqflags.h> !! 25 #include <asm/ptrace.h> >> 26 #include <asm/processor.h> >> 27 #include <asm/atomic.h> >> 28 #include <asm/system.h> >> 29 #include <asm/irq.h> >> 30 #include <asm/sbus.h> >> 31 #include <asm/iommu.h> >> 32 #include <asm/upa.h> >> 33 #include <asm/oplib.h> >> 34 #include <asm/timer.h> >> 35 #include <asm/smp.h> >> 36 #include <asm/hardirq.h> >> 37 #include <asm/starfire.h> >> 38 #include <asm/uaccess.h> >> 39 #include <asm/cache.h> >> 40 #include <asm/cpudata.h> >> 41 >> 42 #ifdef CONFIG_SMP >> 43 static void distribute_irqs(void); >> 44 #endif >> 45 >> 46 /* UPA nodes send interrupt packet to UltraSparc with first data reg >> 47 * value low 5 (7 on Starfire) bits holding the IRQ identifier being >> 48 * delivered. We must translate this into a non-vector IRQ so we can >> 49 * set the softint on this cpu. >> 50 * >> 51 * To make processing these packets efficient and race free we use >> 52 * an array of irq buckets below. The interrupt vector handler in >> 53 * entry.S feeds incoming packets into per-cpu pil-indexed lists. >> 54 * The IVEC handler does not need to act atomically, the PIL dispatch >> 55 * code uses CAS to get an atomic snapshot of the list and clear it >> 56 * at the same time. >> 57 */ >> 58 >> 59 struct ino_bucket ivector_table[NUM_IVECS] __attribute__ ((aligned (SMP_CACHE_BYTES))); >> 60 >> 61 /* This has to be in the main kernel image, it cannot be >> 62 * turned into per-cpu data. The reason is that the main >> 63 * kernel image is locked into the TLB and this structure >> 64 * is accessed from the vectored interrupt trap handler. If >> 65 * access to this structure takes a TLB miss it could cause >> 66 * the 5-level sparc v9 trap stack to overflow. >> 67 */ >> 68 struct irq_work_struct { >> 69 unsigned int irq_worklists[16]; >> 70 }; >> 71 struct irq_work_struct __irq_work[NR_CPUS]; >> 72 #define irq_work(__cpu, __pil) &(__irq_work[(__cpu)].irq_worklists[(__pil)]) >> 73 >> 74 #ifdef CONFIG_PCI >> 75 /* This is a table of physical addresses used to deal with IBF_DMA_SYNC. >> 76 * It is used for PCI only to synchronize DMA transfers with IRQ delivery >> 77 * for devices behind busses other than APB on Sabre systems. 
>> 78 * >> 79 * Currently these physical addresses are just config space accesses >> 80 * to the command register for that device. >> 81 */ >> 82 unsigned long pci_dma_wsync; >> 83 unsigned long dma_sync_reg_table[256]; >> 84 unsigned char dma_sync_reg_table_entry = 0; >> 85 #endif >> 86 >> 87 /* This is based upon code in the 32-bit Sparc kernel written mostly by >> 88 * David Redman (djhr@tadpole.co.uk). >> 89 */ >> 90 #define MAX_STATIC_ALLOC 4 >> 91 static struct irqaction static_irqaction[MAX_STATIC_ALLOC]; >> 92 static int static_irq_count; >> 93 >> 94 /* This is exported so that fast IRQ handlers can get at it... -DaveM */ >> 95 struct irqaction *irq_action[NR_IRQS+1] = { >> 96 NULL, NULL, NULL, NULL, NULL, NULL , NULL, NULL, >> 97 NULL, NULL, NULL, NULL, NULL, NULL , NULL, NULL >> 98 }; >> 99 >> 100 /* This only synchronizes entities which modify IRQ handler >> 101 * state and some selected user-level spots that want to >> 102 * read things in the table. IRQ handler processing orders >> 103 * its' accesses such that no locking is needed. >> 104 */ >> 105 static spinlock_t irq_action_lock = SPIN_LOCK_UNLOCKED; >> 106 >> 107 static void register_irq_proc (unsigned int irq); >> 108 >> 109 /* >> 110 * Upper 2b of irqaction->flags holds the ino. >> 111 * irqaction->mask holds the smp affinity information. >> 112 */ >> 113 #define put_ino_in_irqaction(action, irq) \ >> 114 action->flags &= 0xffffffffffffUL; \ >> 115 if (__bucket(irq) == &pil0_dummy_bucket) \ >> 116 action->flags |= 0xdeadUL << 48; \ >> 117 else \ >> 118 action->flags |= __irq_ino(irq) << 48; >> 119 #define get_ino_in_irqaction(action) (action->flags >> 48) >> 120 >> 121 #if NR_CPUS > 64 >> 122 #error irqaction embedded smp affinity does not work with > 64 cpus, FIXME >> 123 #endif >> 124 >> 125 #define put_smpaff_in_irqaction(action, smpaff) (action)->mask = (smpaff) >> 126 #define get_smpaff_in_irqaction(action) ((action)->mask) >> 127 >> 128 int show_interrupts(struct seq_file *p, void *v) >> 129 { >> 130 unsigned long flags; >> 131 int i; >> 132 struct irqaction *action; >> 133 #ifdef CONFIG_SMP >> 134 int j; >> 135 #endif >> 136 >> 137 spin_lock_irqsave(&irq_action_lock, flags); >> 138 for (i = 0; i < (NR_IRQS + 1); i++) { >> 139 if (!(action = *(i + irq_action))) >> 140 continue; >> 141 seq_printf(p, "%3d: ", i); >> 142 #ifndef CONFIG_SMP >> 143 seq_printf(p, "%10u ", kstat_irqs(i)); >> 144 #else >> 145 for (j = 0; j < NR_CPUS; j++) { >> 146 if (!cpu_online(j)) >> 147 continue; >> 148 seq_printf(p, "%10u ", >> 149 kstat_cpu(j).irqs[i]); >> 150 } >> 151 #endif >> 152 seq_printf(p, " %s:%lx", action->name, >> 153 get_ino_in_irqaction(action)); >> 154 for (action = action->next; action; action = action->next) { >> 155 seq_printf(p, ", %s:%lx", action->name, >> 156 get_ino_in_irqaction(action)); >> 157 } >> 158 seq_putc(p, '\n'); >> 159 } >> 160 spin_unlock_irqrestore(&irq_action_lock, flags); >> 161 >> 162 return 0; >> 163 } >> 164 >> 165 /* Now these are always passed a true fully specified sun4u INO. */ >> 166 void enable_irq(unsigned int irq) >> 167 { >> 168 struct ino_bucket *bucket = __bucket(irq); >> 169 unsigned long imap; >> 170 unsigned long tid; >> 171 >> 172 imap = bucket->imap; >> 173 if (imap == 0UL) >> 174 return; >> 175 >> 176 if (tlb_type == cheetah || tlb_type == cheetah_plus) { >> 177 unsigned long ver; >> 178 >> 179 __asm__ ("rdpr %%ver, %0" : "=r" (ver)); >> 180 if ((ver >> 32) == 0x003e0016) { >> 181 /* We set it to our JBUS ID. 
*/ >> 182 __asm__ __volatile__("ldxa [%%g0] %1, %0" >> 183 : "=r" (tid) >> 184 : "i" (ASI_JBUS_CONFIG)); >> 185 tid = ((tid & (0x1fUL<<17)) << 9); >> 186 tid &= IMAP_TID_JBUS; >> 187 } else { >> 188 /* We set it to our Safari AID. */ >> 189 __asm__ __volatile__("ldxa [%%g0] %1, %0" >> 190 : "=r" (tid) >> 191 : "i" (ASI_SAFARI_CONFIG)); >> 192 tid = ((tid & (0x3ffUL<<17)) << 9); >> 193 tid &= IMAP_AID_SAFARI; >> 194 } >> 195 } else if (this_is_starfire == 0) { >> 196 /* We set it to our UPA MID. */ >> 197 __asm__ __volatile__("ldxa [%%g0] %1, %0" >> 198 : "=r" (tid) >> 199 : "i" (ASI_UPA_CONFIG)); >> 200 tid = ((tid & UPA_CONFIG_MID) << 9); >> 201 tid &= IMAP_TID_UPA; >> 202 } else { >> 203 tid = (starfire_translate(imap, smp_processor_id()) << 26); >> 204 tid &= IMAP_TID_UPA; >> 205 } >> 206 >> 207 /* NOTE NOTE NOTE, IGN and INO are read-only, IGN is a product >> 208 * of this SYSIO's preconfigured IGN in the SYSIO Control >> 209 * Register, the hardware just mirrors that value here. >> 210 * However for Graphics and UPA Slave devices the full >> 211 * IMAP_INR field can be set by the programmer here. >> 212 * >> 213 * Things like FFB can now be handled via the new IRQ mechanism. >> 214 */ >> 215 upa_writel(tid | IMAP_VALID, imap); >> 216 } >> 217 >> 218 /* This now gets passed true ino's as well. */ >> 219 void disable_irq(unsigned int irq) >> 220 { >> 221 struct ino_bucket *bucket = __bucket(irq); >> 222 unsigned long imap; >> 223 >> 224 imap = bucket->imap; >> 225 if (imap != 0UL) { >> 226 u32 tmp; >> 227 >> 228 /* NOTE: We do not want to futz with the IRQ clear registers >> 229 * and move the state to IDLE, the SCSI code does call >> 230 * disable_irq() to assure atomicity in the queue cmd >> 231 * SCSI adapter driver code. Thus we'd lose interrupts. >> 232 */ >> 233 tmp = upa_readl(imap); >> 234 tmp &= ~IMAP_VALID; >> 235 upa_writel(tmp, imap); >> 236 } >> 237 } >> 238 >> 239 /* The timer is the one "weird" interrupt which is generated by >> 240 * the CPU %tick register and not by some normal vectored interrupt >> 241 * source. To handle this special case, we use this dummy INO bucket. >> 242 */ >> 243 static struct ino_bucket pil0_dummy_bucket = { >> 244 0, /* irq_chain */ >> 245 0, /* pil */ >> 246 0, /* pending */ >> 247 0, /* flags */ >> 248 0, /* __unused */ >> 249 NULL, /* irq_info */ >> 250 0UL, /* iclr */ >> 251 0UL, /* imap */ >> 252 }; >> 253 >> 254 unsigned int build_irq(int pil, int inofixup, unsigned long iclr, unsigned long imap) >> 255 { >> 256 struct ino_bucket *bucket; >> 257 int ino; >> 258 >> 259 if (pil == 0) { >> 260 if (iclr != 0UL || imap != 0UL) { >> 261 prom_printf("Invalid dummy bucket for PIL0 (%lx:%lx)\n", >> 262 iclr, imap); >> 263 prom_halt(); >> 264 } >> 265 return __irq(&pil0_dummy_bucket); >> 266 } >> 267 >> 268 /* RULE: Both must be specified in all other cases. */ >> 269 if (iclr == 0UL || imap == 0UL) { >> 270 prom_printf("Invalid build_irq %d %d %016lx %016lx\n", >> 271 pil, inofixup, iclr, imap); >> 272 prom_halt(); >> 273 } >> 274 >> 275 ino = (upa_readl(imap) & (IMAP_IGN | IMAP_INO)) + inofixup; >> 276 if (ino > NUM_IVECS) { >> 277 prom_printf("Invalid INO %04x (%d:%d:%016lx:%016lx)\n", >> 278 ino, pil, inofixup, iclr, imap); >> 279 prom_halt(); >> 280 } >> 281 >> 282 /* Ok, looks good, set it up. Don't touch the irq_chain or >> 283 * the pending flag. >> 284 */ >> 285 bucket = &ivector_table[ino]; >> 286 if ((bucket->flags & IBF_ACTIVE) || >> 287 (bucket->irq_info != NULL)) { >> 288 /* This is a gross fatal error if it happens here. 
*/ >> 289 prom_printf("IRQ: Trying to reinit INO bucket, fatal error.\n"); >> 290 prom_printf("IRQ: Request INO %04x (%d:%d:%016lx:%016lx)\n", >> 291 ino, pil, inofixup, iclr, imap); >> 292 prom_printf("IRQ: Existing (%d:%016lx:%016lx)\n", >> 293 bucket->pil, bucket->iclr, bucket->imap); >> 294 prom_printf("IRQ: Cannot continue, halting...\n"); >> 295 prom_halt(); >> 296 } >> 297 bucket->imap = imap; >> 298 bucket->iclr = iclr; >> 299 bucket->pil = pil; >> 300 bucket->flags = 0; >> 301 >> 302 bucket->irq_info = NULL; >> 303 >> 304 return __irq(bucket); >> 305 } >> 306 >> 307 static void atomic_bucket_insert(struct ino_bucket *bucket) >> 308 { >> 309 unsigned long pstate; >> 310 unsigned int *ent; >> 311 >> 312 __asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate)); >> 313 __asm__ __volatile__("wrpr %0, %1, %%pstate" >> 314 : : "r" (pstate), "i" (PSTATE_IE)); >> 315 ent = irq_work(smp_processor_id(), bucket->pil); >> 316 bucket->irq_chain = *ent; >> 317 *ent = __irq(bucket); >> 318 __asm__ __volatile__("wrpr %0, 0x0, %%pstate" : : "r" (pstate)); >> 319 } >> 320 >> 321 int request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *, struct pt_regs *), >> 322 unsigned long irqflags, const char *name, void *dev_id) >> 323 { >> 324 struct irqaction *action, *tmp = NULL; >> 325 struct ino_bucket *bucket = __bucket(irq); >> 326 unsigned long flags; >> 327 int pending = 0; >> 328 >> 329 if ((bucket != &pil0_dummy_bucket) && >> 330 (bucket < &ivector_table[0] || >> 331 bucket >= &ivector_table[NUM_IVECS])) { >> 332 unsigned int *caller; >> 333 >> 334 __asm__ __volatile__("mov %%i7, %0" : "=r" (caller)); >> 335 printk(KERN_CRIT "request_irq: Old style IRQ registry attempt " >> 336 "from %p, irq %08x.\n", caller, irq); >> 337 return -EINVAL; >> 338 } >> 339 if (!handler) >> 340 return -EINVAL; >> 341 >> 342 if ((bucket != &pil0_dummy_bucket) && (irqflags & SA_SAMPLE_RANDOM)) { >> 343 /* >> 344 * This function might sleep, we want to call it first, >> 345 * outside of the atomic block. In SA_STATIC_ALLOC case, >> 346 * random driver's kmalloc will fail, but it is safe. >> 347 * If already initialized, random driver will not reinit. >> 348 * Yes, this might clear the entropy pool if the wrong >> 349 * driver is attempted to be loaded, without actually >> 350 * installing a new handler, but is this really a problem, >> 351 * only the sysadmin is able to do this. >> 352 */ >> 353 rand_initialize_irq(irq); >> 354 } >> 355 >> 356 spin_lock_irqsave(&irq_action_lock, flags); >> 357 >> 358 action = *(bucket->pil + irq_action); >> 359 if (action) { >> 360 if ((action->flags & SA_SHIRQ) && (irqflags & SA_SHIRQ)) >> 361 for (tmp = action; tmp->next; tmp = tmp->next) >> 362 ; >> 363 else { >> 364 spin_unlock_irqrestore(&irq_action_lock, flags); >> 365 return -EBUSY; >> 366 } >> 367 action = NULL; /* Or else! */ >> 368 } 20 369 21 /* read interrupt enabled status */ !! 370 /* If this is flagged as statically allocated then we use our 22 unsigned long arch_local_save_flags(void) !! 371 * private struct which is never freed. 
>> 372 */ >> 373 if (irqflags & SA_STATIC_ALLOC) { >> 374 if (static_irq_count < MAX_STATIC_ALLOC) >> 375 action = &static_irqaction[static_irq_count++]; >> 376 else >> 377 printk("Request for IRQ%d (%s) SA_STATIC_ALLOC failed " >> 378 "using kmalloc\n", irq, name); >> 379 } >> 380 if (action == NULL) >> 381 action = (struct irqaction *)kmalloc(sizeof(struct irqaction), >> 382 GFP_ATOMIC); >> 383 >> 384 if (!action) { >> 385 spin_unlock_irqrestore(&irq_action_lock, flags); >> 386 return -ENOMEM; >> 387 } >> 388 >> 389 if (bucket == &pil0_dummy_bucket) { >> 390 bucket->irq_info = action; >> 391 bucket->flags |= IBF_ACTIVE; >> 392 } else { >> 393 if ((bucket->flags & IBF_ACTIVE) != 0) { >> 394 void *orig = bucket->irq_info; >> 395 void **vector = NULL; >> 396 >> 397 if ((bucket->flags & IBF_PCI) == 0) { >> 398 printk("IRQ: Trying to share non-PCI bucket.\n"); >> 399 goto free_and_ebusy; >> 400 } >> 401 if ((bucket->flags & IBF_MULTI) == 0) { >> 402 vector = kmalloc(sizeof(void *) * 4, GFP_ATOMIC); >> 403 if (vector == NULL) >> 404 goto free_and_enomem; >> 405 >> 406 /* We might have slept. */ >> 407 if ((bucket->flags & IBF_MULTI) != 0) { >> 408 int ent; >> 409 >> 410 kfree(vector); >> 411 vector = (void **)bucket->irq_info; >> 412 for(ent = 0; ent < 4; ent++) { >> 413 if (vector[ent] == NULL) { >> 414 vector[ent] = action; >> 415 break; >> 416 } >> 417 } >> 418 if (ent == 4) >> 419 goto free_and_ebusy; >> 420 } else { >> 421 vector[0] = orig; >> 422 vector[1] = action; >> 423 vector[2] = NULL; >> 424 vector[3] = NULL; >> 425 bucket->irq_info = vector; >> 426 bucket->flags |= IBF_MULTI; >> 427 } >> 428 } else { >> 429 int ent; >> 430 >> 431 vector = (void **)orig; >> 432 for (ent = 0; ent < 4; ent++) { >> 433 if (vector[ent] == NULL) { >> 434 vector[ent] = action; >> 435 break; >> 436 } >> 437 } >> 438 if (ent == 4) >> 439 goto free_and_ebusy; >> 440 } >> 441 } else { >> 442 bucket->irq_info = action; >> 443 bucket->flags |= IBF_ACTIVE; >> 444 } >> 445 pending = bucket->pending; >> 446 if (pending) >> 447 bucket->pending = 0; >> 448 } >> 449 >> 450 action->handler = handler; >> 451 action->flags = irqflags; >> 452 action->name = name; >> 453 action->next = NULL; >> 454 action->dev_id = dev_id; >> 455 put_ino_in_irqaction(action, irq); >> 456 put_smpaff_in_irqaction(action, 0); >> 457 >> 458 if (tmp) >> 459 tmp->next = action; >> 460 else >> 461 *(bucket->pil + irq_action) = action; >> 462 >> 463 enable_irq(irq); >> 464 >> 465 /* We ate the IVEC already, this makes sure it does not get lost. 
*/ >> 466 if (pending) { >> 467 atomic_bucket_insert(bucket); >> 468 set_softint(1 << bucket->pil); >> 469 } >> 470 spin_unlock_irqrestore(&irq_action_lock, flags); >> 471 if ((bucket != &pil0_dummy_bucket) && (!(irqflags & SA_STATIC_ALLOC))) >> 472 register_irq_proc(__irq_ino(irq)); >> 473 >> 474 #ifdef CONFIG_SMP >> 475 distribute_irqs(); >> 476 #endif >> 477 return 0; >> 478 >> 479 free_and_ebusy: >> 480 kfree(action); >> 481 spin_unlock_irqrestore(&irq_action_lock, flags); >> 482 return -EBUSY; >> 483 >> 484 free_and_enomem: >> 485 kfree(action); >> 486 spin_unlock_irqrestore(&irq_action_lock, flags); >> 487 return -ENOMEM; >> 488 } >> 489 >> 490 EXPORT_SYMBOL(request_irq); >> 491 >> 492 void free_irq(unsigned int irq, void *dev_id) >> 493 { >> 494 struct irqaction *action; >> 495 struct irqaction *tmp = NULL; >> 496 unsigned long flags; >> 497 struct ino_bucket *bucket = __bucket(irq), *bp; >> 498 >> 499 if ((bucket != &pil0_dummy_bucket) && >> 500 (bucket < &ivector_table[0] || >> 501 bucket >= &ivector_table[NUM_IVECS])) { >> 502 unsigned int *caller; >> 503 >> 504 __asm__ __volatile__("mov %%i7, %0" : "=r" (caller)); >> 505 printk(KERN_CRIT "free_irq: Old style IRQ removal attempt " >> 506 "from %p, irq %08x.\n", caller, irq); >> 507 return; >> 508 } >> 509 >> 510 spin_lock_irqsave(&irq_action_lock, flags); >> 511 >> 512 action = *(bucket->pil + irq_action); >> 513 if (!action->handler) { >> 514 printk("Freeing free IRQ %d\n", bucket->pil); >> 515 return; >> 516 } >> 517 if (dev_id) { >> 518 for ( ; action; action = action->next) { >> 519 if (action->dev_id == dev_id) >> 520 break; >> 521 tmp = action; >> 522 } >> 523 if (!action) { >> 524 printk("Trying to free free shared IRQ %d\n", bucket->pil); >> 525 spin_unlock_irqrestore(&irq_action_lock, flags); >> 526 return; >> 527 } >> 528 } else if (action->flags & SA_SHIRQ) { >> 529 printk("Trying to free shared IRQ %d with NULL device ID\n", bucket->pil); >> 530 spin_unlock_irqrestore(&irq_action_lock, flags); >> 531 return; >> 532 } >> 533 >> 534 if (action->flags & SA_STATIC_ALLOC) { >> 535 printk("Attempt to free statically allocated IRQ %d (%s)\n", >> 536 bucket->pil, action->name); >> 537 spin_unlock_irqrestore(&irq_action_lock, flags); >> 538 return; >> 539 } >> 540 >> 541 if (action && tmp) >> 542 tmp->next = action->next; >> 543 else >> 544 *(bucket->pil + irq_action) = action->next; >> 545 >> 546 spin_unlock_irqrestore(&irq_action_lock, flags); >> 547 >> 548 synchronize_irq(irq); >> 549 >> 550 spin_lock_irqsave(&irq_action_lock, flags); >> 551 >> 552 if (bucket != &pil0_dummy_bucket) { >> 553 unsigned long imap = bucket->imap; >> 554 void **vector, *orig; >> 555 int ent; >> 556 >> 557 orig = bucket->irq_info; >> 558 vector = (void **)orig; >> 559 >> 560 if ((bucket->flags & IBF_MULTI) != 0) { >> 561 int other = 0; >> 562 void *orphan = NULL; >> 563 for (ent = 0; ent < 4; ent++) { >> 564 if (vector[ent] == action) >> 565 vector[ent] = NULL; >> 566 else if (vector[ent] != NULL) { >> 567 orphan = vector[ent]; >> 568 other++; >> 569 } >> 570 } >> 571 >> 572 /* Only free when no other shared irq >> 573 * uses this bucket. >> 574 */ >> 575 if (other) { >> 576 if (other == 1) { >> 577 /* Convert back to non-shared bucket. */ >> 578 bucket->irq_info = orphan; >> 579 bucket->flags &= ~(IBF_MULTI); >> 580 kfree(vector); >> 581 } >> 582 goto out; >> 583 } >> 584 } else { >> 585 bucket->irq_info = NULL; >> 586 } >> 587 >> 588 /* This unique interrupt source is now inactive. 
*/ >> 589 bucket->flags &= ~IBF_ACTIVE; >> 590 >> 591 /* See if any other buckets share this bucket's IMAP >> 592 * and are still active. >> 593 */ >> 594 for (ent = 0; ent < NUM_IVECS; ent++) { >> 595 bp = &ivector_table[ent]; >> 596 if (bp != bucket && >> 597 bp->imap == imap && >> 598 (bp->flags & IBF_ACTIVE) != 0) >> 599 break; >> 600 } >> 601 >> 602 /* Only disable when no other sub-irq levels of >> 603 * the same IMAP are active. >> 604 */ >> 605 if (ent == NUM_IVECS) >> 606 disable_irq(irq); >> 607 } >> 608 >> 609 out: >> 610 kfree(action); >> 611 spin_unlock_irqrestore(&irq_action_lock, flags); >> 612 } >> 613 >> 614 EXPORT_SYMBOL(free_irq); >> 615 >> 616 #ifdef CONFIG_SMP >> 617 void synchronize_irq(unsigned int irq) 23 { 618 { 24 return mfspr(SPR_SR) & (SPR_SR_IEE|SPR !! 619 struct ino_bucket *bucket = __bucket(irq); >> 620 >> 621 #if 0 >> 622 /* The following is how I wish I could implement this. >> 623 * Unfortunately the ICLR registers are read-only, you can >> 624 * only write ICLR_foo values to them. To get the current >> 625 * IRQ status you would need to get at the IRQ diag registers >> 626 * in the PCI/SBUS controller and the layout of those vary >> 627 * from one controller to the next, sigh... -DaveM >> 628 */ >> 629 unsigned long iclr = bucket->iclr; >> 630 >> 631 while (1) { >> 632 u32 tmp = upa_readl(iclr); >> 633 >> 634 if (tmp == ICLR_TRANSMIT || >> 635 tmp == ICLR_PENDING) { >> 636 cpu_relax(); >> 637 continue; >> 638 } >> 639 break; >> 640 } >> 641 #else >> 642 /* So we have to do this with a INPROGRESS bit just like x86. */ >> 643 while (bucket->flags & IBF_INPROGRESS) >> 644 cpu_relax(); >> 645 #endif 25 } 646 } 26 EXPORT_SYMBOL(arch_local_save_flags); !! 647 #endif /* CONFIG_SMP */ 27 648 28 /* set interrupt enabled status */ !! 649 void catch_disabled_ivec(struct pt_regs *regs) 29 void arch_local_irq_restore(unsigned long flag << 30 { 650 { 31 mtspr(SPR_SR, ((mfspr(SPR_SR) & ~(SPR_ !! 651 int cpu = smp_processor_id(); >> 652 struct ino_bucket *bucket = __bucket(*irq_work(cpu, 0)); >> 653 >> 654 /* We can actually see this on Ultra/PCI PCI cards, which are bridges >> 655 * to other devices. Here a single IMAP enabled potentially multiple >> 656 * unique interrupt sources (which each do have a unique ICLR register. >> 657 * >> 658 * So what we do is just register that the IVEC arrived, when registered >> 659 * for real the request_irq() code will check the bit and signal >> 660 * a local CPU interrupt for it. >> 661 */ >> 662 #if 0 >> 663 printk("IVEC: Spurious interrupt vector (%x) received at (%016lx)\n", >> 664 bucket - &ivector_table[0], regs->tpc); >> 665 #endif >> 666 *irq_work(cpu, 0) = 0; >> 667 bucket->pending = 1; 32 } 668 } 33 EXPORT_SYMBOL(arch_local_irq_restore); << 34 669 >> 670 /* Tune this... */ >> 671 #define FORWARD_VOLUME 12 >> 672 >> 673 #ifdef CONFIG_SMP >> 674 >> 675 static inline void redirect_intr(int cpu, struct ino_bucket *bp) >> 676 { >> 677 /* Ok, here is what is going on: >> 678 * 1) Retargeting IRQs on Starfire is very >> 679 * expensive so just forget about it on them. >> 680 * 2) Moving around very high priority interrupts >> 681 * is a losing game. >> 682 * 3) If the current cpu is idle, interrupts are >> 683 * useful work, so keep them here. But do not >> 684 * pass to our neighbour if he is not very idle. >> 685 * 4) If sysadmin explicitly asks for directed intrs, >> 686 * Just Do It. 
>> 687 */ >> 688 struct irqaction *ap = bp->irq_info; >> 689 cpumask_t cpu_mask = get_smpaff_in_irqaction(ap); >> 690 unsigned int buddy, ticks; >> 691 >> 692 cpus_and(cpu_mask, cpu_mask, cpu_online_map); >> 693 if (cpus_empty(cpu_mask)) >> 694 cpu_mask = cpu_online_map; >> 695 >> 696 if (this_is_starfire != 0 || >> 697 bp->pil >= 10 || current->pid == 0) >> 698 goto out; >> 699 >> 700 /* 'cpu' is the MID (ie. UPAID), calculate the MID >> 701 * of our buddy. >> 702 */ >> 703 buddy = cpu + 1; >> 704 if (buddy >= NR_CPUS) >> 705 buddy = 0; >> 706 >> 707 ticks = 0; >> 708 while (!cpu_isset(buddy, cpu_mask)) { >> 709 if (++buddy >= NR_CPUS) >> 710 buddy = 0; >> 711 if (++ticks > NR_CPUS) { >> 712 put_smpaff_in_irqaction(ap, 0); >> 713 goto out; >> 714 } >> 715 } >> 716 >> 717 if (buddy == cpu) >> 718 goto out; >> 719 >> 720 /* Voo-doo programming. */ >> 721 if (cpu_data(buddy).idle_volume < FORWARD_VOLUME) >> 722 goto out; >> 723 >> 724 /* This just so happens to be correct on Cheetah >> 725 * at the moment. >> 726 */ >> 727 buddy <<= 26; >> 728 >> 729 /* Push it to our buddy. */ >> 730 upa_writel(buddy | IMAP_VALID, bp->imap); >> 731 >> 732 out: >> 733 return; >> 734 } >> 735 >> 736 #endif >> 737 >> 738 void handler_irq(int irq, struct pt_regs *regs) >> 739 { >> 740 struct ino_bucket *bp, *nbp; >> 741 int cpu = smp_processor_id(); >> 742 >> 743 #ifndef CONFIG_SMP >> 744 /* >> 745 * Check for TICK_INT on level 14 softint. >> 746 */ >> 747 { >> 748 unsigned long clr_mask = 1 << irq; >> 749 unsigned long tick_mask = tick_ops->softint_mask; >> 750 >> 751 if ((irq == 14) && (get_softint() & tick_mask)) { >> 752 irq = 0; >> 753 clr_mask = tick_mask; >> 754 } >> 755 clear_softint(clr_mask); >> 756 } >> 757 #else >> 758 int should_forward = 1; >> 759 >> 760 clear_softint(1 << irq); >> 761 #endif >> 762 >> 763 irq_enter(); >> 764 kstat_this_cpu.irqs[irq]++; >> 765 >> 766 /* Sliiiick... */ >> 767 #ifndef CONFIG_SMP >> 768 bp = ((irq != 0) ? >> 769 __bucket(xchg32(irq_work(cpu, irq), 0)) : >> 770 &pil0_dummy_bucket); >> 771 #else >> 772 bp = __bucket(xchg32(irq_work(cpu, irq), 0)); >> 773 #endif >> 774 for ( ; bp != NULL; bp = nbp) { >> 775 unsigned char flags = bp->flags; >> 776 unsigned char random = 0; >> 777 >> 778 nbp = __bucket(bp->irq_chain); >> 779 bp->irq_chain = 0; >> 780 >> 781 bp->flags |= IBF_INPROGRESS; >> 782 >> 783 if ((flags & IBF_ACTIVE) != 0) { >> 784 #ifdef CONFIG_PCI >> 785 if ((flags & IBF_DMA_SYNC) != 0) { >> 786 upa_readl(dma_sync_reg_table[bp->synctab_ent]); >> 787 upa_readq(pci_dma_wsync); >> 788 } >> 789 #endif >> 790 if ((flags & IBF_MULTI) == 0) { >> 791 struct irqaction *ap = bp->irq_info; >> 792 ap->handler(__irq(bp), ap->dev_id, regs); >> 793 random |= ap->flags & SA_SAMPLE_RANDOM; >> 794 } else { >> 795 void **vector = (void **)bp->irq_info; >> 796 int ent; >> 797 for (ent = 0; ent < 4; ent++) { >> 798 struct irqaction *ap = vector[ent]; >> 799 if (ap != NULL) { >> 800 ap->handler(__irq(bp), ap->dev_id, regs); >> 801 random |= ap->flags & SA_SAMPLE_RANDOM; >> 802 } >> 803 } >> 804 } >> 805 /* Only the dummy bucket lacks IMAP/ICLR. 
*/ >> 806 if (bp->pil != 0) { >> 807 #ifdef CONFIG_SMP >> 808 if (should_forward) { >> 809 redirect_intr(cpu, bp); >> 810 should_forward = 0; >> 811 } >> 812 #endif >> 813 upa_writel(ICLR_IDLE, bp->iclr); >> 814 /* Test and add entropy */ >> 815 if (random) >> 816 add_interrupt_randomness(irq); >> 817 } >> 818 } else >> 819 bp->pending = 1; >> 820 >> 821 bp->flags &= ~IBF_INPROGRESS; >> 822 } >> 823 irq_exit(); >> 824 } >> 825 >> 826 #ifdef CONFIG_BLK_DEV_FD >> 827 extern void floppy_interrupt(int irq, void *dev_cookie, struct pt_regs *regs); >> 828 >> 829 void sparc_floppy_irq(int irq, void *dev_cookie, struct pt_regs *regs) >> 830 { >> 831 struct irqaction *action = *(irq + irq_action); >> 832 struct ino_bucket *bucket; >> 833 int cpu = smp_processor_id(); >> 834 >> 835 irq_enter(); >> 836 kstat_this_cpu.irqs[irq]++; >> 837 >> 838 *(irq_work(cpu, irq)) = 0; >> 839 bucket = get_ino_in_irqaction(action) + ivector_table; >> 840 >> 841 bucket->flags |= IBF_INPROGRESS; >> 842 >> 843 floppy_interrupt(irq, dev_cookie, regs); >> 844 upa_writel(ICLR_IDLE, bucket->iclr); >> 845 >> 846 bucket->flags &= ~IBF_INPROGRESS; >> 847 >> 848 irq_exit(); >> 849 } >> 850 #endif >> 851 >> 852 /* The following assumes that the branch lies before the place we >> 853 * are branching to. This is the case for a trap vector... >> 854 * You have been warned. >> 855 */ >> 856 #define SPARC_BRANCH(dest_addr, inst_addr) \ >> 857 (0x10800000 | ((((dest_addr)-(inst_addr))>>2)&0x3fffff)) >> 858 >> 859 #define SPARC_NOP (0x01000000) >> 860 >> 861 static void install_fast_irq(unsigned int cpu_irq, >> 862 irqreturn_t (*handler)(int, void *, struct pt_regs *)) >> 863 { >> 864 extern unsigned long sparc64_ttable_tl0; >> 865 unsigned long ttent = (unsigned long) &sparc64_ttable_tl0; >> 866 unsigned int *insns; >> 867 >> 868 ttent += 0x820; >> 869 ttent += (cpu_irq - 1) << 5; >> 870 insns = (unsigned int *) ttent; >> 871 insns[0] = SPARC_BRANCH(((unsigned long) handler), >> 872 ((unsigned long)&insns[0])); >> 873 insns[1] = SPARC_NOP; >> 874 __asm__ __volatile__("membar #StoreStore; flush %0" : : "r" (ttent)); >> 875 } >> 876 >> 877 int request_fast_irq(unsigned int irq, >> 878 irqreturn_t (*handler)(int, void *, struct pt_regs *), >> 879 unsigned long irqflags, const char *name, void *dev_id) >> 880 { >> 881 struct irqaction *action; >> 882 struct ino_bucket *bucket = __bucket(irq); >> 883 unsigned long flags; >> 884 >> 885 /* No pil0 dummy buckets allowed here. 
*/ >> 886 if (bucket < &ivector_table[0] || >> 887 bucket >= &ivector_table[NUM_IVECS]) { >> 888 unsigned int *caller; >> 889 >> 890 __asm__ __volatile__("mov %%i7, %0" : "=r" (caller)); >> 891 printk(KERN_CRIT "request_fast_irq: Old style IRQ registry attempt " >> 892 "from %p, irq %08x.\n", caller, irq); >> 893 return -EINVAL; >> 894 } >> 895 >> 896 if (!handler) >> 897 return -EINVAL; >> 898 >> 899 if ((bucket->pil == 0) || (bucket->pil == 14)) { >> 900 printk("request_fast_irq: Trying to register shared IRQ 0 or 14.\n"); >> 901 return -EBUSY; >> 902 } >> 903 >> 904 spin_lock_irqsave(&irq_action_lock, flags); >> 905 >> 906 action = *(bucket->pil + irq_action); >> 907 if (action) { >> 908 if (action->flags & SA_SHIRQ) >> 909 panic("Trying to register fast irq when already shared.\n"); >> 910 if (irqflags & SA_SHIRQ) >> 911 panic("Trying to register fast irq as shared.\n"); >> 912 printk("request_fast_irq: Trying to register yet already owned.\n"); >> 913 spin_unlock_irqrestore(&irq_action_lock, flags); >> 914 return -EBUSY; >> 915 } >> 916 >> 917 /* >> 918 * We do not check for SA_SAMPLE_RANDOM in this path. Neither do we >> 919 * support smp intr affinity in this path. >> 920 */ >> 921 if (irqflags & SA_STATIC_ALLOC) { >> 922 if (static_irq_count < MAX_STATIC_ALLOC) >> 923 action = &static_irqaction[static_irq_count++]; >> 924 else >> 925 printk("Request for IRQ%d (%s) SA_STATIC_ALLOC failed " >> 926 "using kmalloc\n", bucket->pil, name); >> 927 } >> 928 if (action == NULL) >> 929 action = (struct irqaction *)kmalloc(sizeof(struct irqaction), >> 930 GFP_ATOMIC); >> 931 if (!action) { >> 932 spin_unlock_irqrestore(&irq_action_lock, flags); >> 933 return -ENOMEM; >> 934 } >> 935 install_fast_irq(bucket->pil, handler); >> 936 >> 937 bucket->irq_info = action; >> 938 bucket->flags |= IBF_ACTIVE; >> 939 >> 940 action->handler = handler; >> 941 action->flags = irqflags; >> 942 action->dev_id = NULL; >> 943 action->name = name; >> 944 action->next = NULL; >> 945 put_ino_in_irqaction(action, irq); >> 946 put_smpaff_in_irqaction(action, 0); >> 947 >> 948 *(bucket->pil + irq_action) = action; >> 949 enable_irq(irq); >> 950 >> 951 spin_unlock_irqrestore(&irq_action_lock, flags); >> 952 >> 953 #ifdef CONFIG_SMP >> 954 distribute_irqs(); >> 955 #endif >> 956 return 0; >> 957 } >> 958 >> 959 /* We really don't need these at all on the Sparc. We only have >> 960 * stubs here because they are exported to modules. 
>> 961 */ >> 962 unsigned long probe_irq_on(void) >> 963 { >> 964 return 0; >> 965 } >> 966 >> 967 EXPORT_SYMBOL(probe_irq_on); >> 968 >> 969 int probe_irq_off(unsigned long mask) >> 970 { >> 971 return 0; >> 972 } >> 973 >> 974 EXPORT_SYMBOL(probe_irq_off); >> 975 >> 976 #ifdef CONFIG_SMP >> 977 static int retarget_one_irq(struct irqaction *p, int goal_cpu) >> 978 { >> 979 struct ino_bucket *bucket = get_ino_in_irqaction(p) + ivector_table; >> 980 unsigned long imap = bucket->imap; >> 981 unsigned int tid; >> 982 >> 983 while (!cpu_online(goal_cpu)) { >> 984 if (++goal_cpu >= NR_CPUS) >> 985 goal_cpu = 0; >> 986 } >> 987 >> 988 if (tlb_type == cheetah || tlb_type == cheetah_plus) { >> 989 tid = goal_cpu << 26; >> 990 tid &= IMAP_AID_SAFARI; >> 991 } else if (this_is_starfire == 0) { >> 992 tid = goal_cpu << 26; >> 993 tid &= IMAP_TID_UPA; >> 994 } else { >> 995 tid = (starfire_translate(imap, goal_cpu) << 26); >> 996 tid &= IMAP_TID_UPA; >> 997 } >> 998 upa_writel(tid | IMAP_VALID, imap); >> 999 >> 1000 while (!cpu_online(goal_cpu)) { >> 1001 if (++goal_cpu >= NR_CPUS) >> 1002 goal_cpu = 0; >> 1003 } >> 1004 >> 1005 return goal_cpu; >> 1006 } >> 1007 >> 1008 /* Called from request_irq. */ >> 1009 static void distribute_irqs(void) >> 1010 { >> 1011 unsigned long flags; >> 1012 int cpu, level; >> 1013 >> 1014 spin_lock_irqsave(&irq_action_lock, flags); >> 1015 cpu = 0; >> 1016 >> 1017 /* >> 1018 * Skip the timer at [0], and very rare error/power intrs at [15]. >> 1019 * Also level [12], it causes problems on Ex000 systems. >> 1020 */ >> 1021 for (level = 1; level < NR_IRQS; level++) { >> 1022 struct irqaction *p = irq_action[level]; >> 1023 if (level == 12) continue; >> 1024 while(p) { >> 1025 cpu = retarget_one_irq(p, cpu); >> 1026 p = p->next; >> 1027 } >> 1028 } >> 1029 spin_unlock_irqrestore(&irq_action_lock, flags); >> 1030 } >> 1031 #endif >> 1032 >> 1033 >> 1034 struct sun5_timer *prom_timers; >> 1035 static u64 prom_limit0, prom_limit1; >> 1036 >> 1037 static void map_prom_timers(void) >> 1038 { >> 1039 unsigned int addr[3]; >> 1040 int tnode, err; >> 1041 >> 1042 /* PROM timer node hangs out in the top level of device siblings... */ >> 1043 tnode = prom_finddevice("/counter-timer"); >> 1044 >> 1045 /* Assume if node is not present, PROM uses different tick mechanism >> 1046 * which we should not care about. >> 1047 */ >> 1048 if (tnode == 0 || tnode == -1) { >> 1049 prom_timers = (struct sun5_timer *) 0; >> 1050 return; >> 1051 } >> 1052 >> 1053 /* If PROM is really using this, it must be mapped by him. */ >> 1054 err = prom_getproperty(tnode, "address", (char *)addr, sizeof(addr)); >> 1055 if (err == -1) { >> 1056 prom_printf("PROM does not have timer mapped, trying to continue.\n"); >> 1057 prom_timers = (struct sun5_timer *) 0; >> 1058 return; >> 1059 } >> 1060 prom_timers = (struct sun5_timer *) ((unsigned long)addr[0]); >> 1061 } >> 1062 >> 1063 static void kill_prom_timer(void) >> 1064 { >> 1065 if (!prom_timers) >> 1066 return; >> 1067 >> 1068 /* Save them away for later. */ >> 1069 prom_limit0 = prom_timers->limit0; >> 1070 prom_limit1 = prom_timers->limit1; >> 1071 >> 1072 /* Just as in sun4c/sun4m PROM uses timer which ticks at IRQ 14. >> 1073 * We turn both off here just to be paranoid. >> 1074 */ >> 1075 prom_timers->limit0 = 0; >> 1076 prom_timers->limit1 = 0; >> 1077 >> 1078 /* Wheee, eat the interrupt packet too... 
*/ >> 1079 __asm__ __volatile__( >> 1080 " mov 0x40, %%g2\n" >> 1081 " ldxa [%%g0] %0, %%g1\n" >> 1082 " ldxa [%%g2] %1, %%g1\n" >> 1083 " stxa %%g0, [%%g0] %0\n" >> 1084 " membar #Sync\n" >> 1085 : /* no outputs */ >> 1086 : "i" (ASI_INTR_RECEIVE), "i" (ASI_INTR_R) >> 1087 : "g1", "g2"); >> 1088 } >> 1089 >> 1090 void enable_prom_timer(void) >> 1091 { >> 1092 if (!prom_timers) >> 1093 return; >> 1094 >> 1095 /* Set it to whatever was there before. */ >> 1096 prom_timers->limit1 = prom_limit1; >> 1097 prom_timers->count1 = 0; >> 1098 prom_timers->limit0 = prom_limit0; >> 1099 prom_timers->count0 = 0; >> 1100 } >> 1101 >> 1102 void init_irqwork_curcpu(void) >> 1103 { >> 1104 register struct irq_work_struct *workp asm("o2"); >> 1105 unsigned long tmp; >> 1106 >> 1107 memset(__irq_work + smp_processor_id(), 0, sizeof(*workp)); >> 1108 >> 1109 /* Make sure we are called with PSTATE_IE disabled. */ >> 1110 __asm__ __volatile__("rdpr %%pstate, %0\n\t" >> 1111 : "=r" (tmp)); >> 1112 if (tmp & PSTATE_IE) { >> 1113 prom_printf("BUG: init_irqwork_curcpu() called with " >> 1114 "PSTATE_IE enabled, bailing.\n"); >> 1115 __asm__ __volatile__("mov %%i7, %0\n\t" >> 1116 : "=r" (tmp)); >> 1117 prom_printf("BUG: Called from %lx\n", tmp); >> 1118 prom_halt(); >> 1119 } >> 1120 >> 1121 /* Set interrupt globals. */ >> 1122 workp = &__irq_work[smp_processor_id()]; >> 1123 __asm__ __volatile__( >> 1124 "rdpr %%pstate, %0\n\t" >> 1125 "wrpr %0, %1, %%pstate\n\t" >> 1126 "mov %2, %%g6\n\t" >> 1127 "wrpr %0, 0x0, %%pstate\n\t" >> 1128 : "=&r" (tmp) >> 1129 : "i" (PSTATE_IG), "r" (workp)); >> 1130 } >> 1131 >> 1132 /* Only invoked on boot processor. */ 35 void __init init_IRQ(void) 1133 void __init init_IRQ(void) 36 { 1134 { 37 irqchip_init(); !! 1135 map_prom_timers(); >> 1136 kill_prom_timer(); >> 1137 memset(&ivector_table[0], 0, sizeof(ivector_table)); >> 1138 >> 1139 /* We need to clear any IRQ's pending in the soft interrupt >> 1140 * registers, a spurious one could be left around from the >> 1141 * PROM timer which we just disabled. >> 1142 */ >> 1143 clear_softint(get_softint()); >> 1144 >> 1145 /* Now that ivector table is initialized, it is safe >> 1146 * to receive IRQ vector traps. We will normally take >> 1147 * one or two right now, in case some device PROM used >> 1148 * to boot us wants to speak to us. We just ignore them. >> 1149 */ >> 1150 __asm__ __volatile__("rdpr %%pstate, %%g1\n\t" >> 1151 "or %%g1, %0, %%g1\n\t" >> 1152 "wrpr %%g1, 0x0, %%pstate" >> 1153 : /* No outputs */ >> 1154 : "i" (PSTATE_IE) >> 1155 : "g1"); 38 } 1156 } >> 1157 >> 1158 static struct proc_dir_entry * root_irq_dir; >> 1159 static struct proc_dir_entry * irq_dir [NUM_IVECS]; >> 1160 >> 1161 #ifdef CONFIG_SMP >> 1162 >> 1163 #define HEX_DIGITS 16 >> 1164 >> 1165 static unsigned int parse_hex_value (const char *buffer, >> 1166 unsigned long count, unsigned long *ret) >> 1167 { >> 1168 unsigned char hexnum [HEX_DIGITS]; >> 1169 unsigned long value; >> 1170 int i; >> 1171 >> 1172 if (!count) >> 1173 return -EINVAL; >> 1174 if (count > HEX_DIGITS) >> 1175 count = HEX_DIGITS; >> 1176 if (copy_from_user(hexnum, buffer, count)) >> 1177 return -EFAULT; >> 1178 >> 1179 /* >> 1180 * Parse the first 8 characters as a hex string, any non-hex char >> 1181 * is end-of-string. '00e1', 'e1', '00E1', 'E1' are all the same. >> 1182 */ >> 1183 value = 0; >> 1184 >> 1185 for (i = 0; i < count; i++) { >> 1186 unsigned int c = hexnum[i]; >> 1187 >> 1188 switch (c) { >> 1189 case '' ... '9': c -= ''; break; >> 1190 case 'a' ... 
'f': c -= 'a'-10; break; >> 1191 case 'A' ... 'F': c -= 'A'-10; break; >> 1192 default: >> 1193 goto out; >> 1194 } >> 1195 value = (value << 4) | c; >> 1196 } >> 1197 out: >> 1198 *ret = value; >> 1199 return 0; >> 1200 } >> 1201 >> 1202 static int irq_affinity_read_proc (char *page, char **start, off_t off, >> 1203 int count, int *eof, void *data) >> 1204 { >> 1205 struct ino_bucket *bp = ivector_table + (long)data; >> 1206 struct irqaction *ap = bp->irq_info; >> 1207 unsigned long mask = get_smpaff_in_irqaction(ap); >> 1208 >> 1209 if (count < HEX_DIGITS+1) >> 1210 return -EINVAL; >> 1211 return sprintf (page, "%016lx\n", mask == 0 ? ~0UL : mask); >> 1212 } >> 1213 >> 1214 static inline void set_intr_affinity(int irq, unsigned long hw_aff) >> 1215 { >> 1216 struct ino_bucket *bp = ivector_table + irq; >> 1217 >> 1218 /* Users specify affinity in terms of hw cpu ids. >> 1219 * As soon as we do this, handler_irq() might see and take action. >> 1220 */ >> 1221 put_smpaff_in_irqaction((struct irqaction *)bp->irq_info, hw_aff); >> 1222 >> 1223 /* Migration is simply done by the next cpu to service this >> 1224 * interrupt. >> 1225 */ >> 1226 } >> 1227 >> 1228 static int irq_affinity_write_proc (struct file *file, const char *buffer, >> 1229 unsigned long count, void *data) >> 1230 { >> 1231 int irq = (long) data, full_count = count, err; >> 1232 unsigned long new_value, i; >> 1233 >> 1234 err = parse_hex_value(buffer, count, &new_value); >> 1235 >> 1236 /* >> 1237 * Do not allow disabling IRQs completely - it's a too easy >> 1238 * way to make the system unusable accidentally :-) At least >> 1239 * one online CPU still has to be targeted. >> 1240 */ >> 1241 for (i = 0; i < NR_CPUS; i++) { >> 1242 if ((new_value & (1UL << i)) != 0 && >> 1243 !cpu_online(i)) >> 1244 new_value &= ~(1UL << i); >> 1245 } >> 1246 >> 1247 if (!new_value) >> 1248 return -EINVAL; >> 1249 >> 1250 set_intr_affinity(irq, new_value); >> 1251 >> 1252 return full_count; >> 1253 } >> 1254 >> 1255 #endif >> 1256 >> 1257 #define MAX_NAMELEN 10 >> 1258 >> 1259 static void register_irq_proc (unsigned int irq) >> 1260 { >> 1261 char name [MAX_NAMELEN]; >> 1262 >> 1263 if (!root_irq_dir || irq_dir[irq]) >> 1264 return; >> 1265 >> 1266 memset(name, 0, MAX_NAMELEN); >> 1267 sprintf(name, "%x", irq); >> 1268 >> 1269 /* create /proc/irq/1234 */ >> 1270 irq_dir[irq] = proc_mkdir(name, root_irq_dir); >> 1271 >> 1272 #ifdef CONFIG_SMP >> 1273 /* XXX SMP affinity not supported on starfire yet. */ >> 1274 if (this_is_starfire == 0) { >> 1275 struct proc_dir_entry *entry; >> 1276 >> 1277 /* create /proc/irq/1234/smp_affinity */ >> 1278 entry = create_proc_entry("smp_affinity", 0600, irq_dir[irq]); >> 1279 >> 1280 if (entry) { >> 1281 entry->nlink = 1; >> 1282 entry->data = (void *)(long)irq; >> 1283 entry->read_proc = irq_affinity_read_proc; >> 1284 entry->write_proc = irq_affinity_write_proc; >> 1285 } >> 1286 } >> 1287 #endif >> 1288 } >> 1289 >> 1290 void init_irq_proc (void) >> 1291 { >> 1292 /* create /proc/irq */ >> 1293 root_irq_dir = proc_mkdir("irq", 0); >> 1294 } >> 1295 39 1296
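The request_irq()/free_irq() implementation above uses the old conventions of this kernel generation: handlers take a struct pt_regs pointer as a third argument, sharing is requested with SA_SHIRQ rather than the later IRQF_* flags, and the dev_id cookie passed at registration must be repeated at free_irq() time so the right action can be unlinked from the shared chain. A minimal driver-side sketch of that calling convention follows; the device structure, the "mydev" name and the status-bit test are hypothetical and only illustrate how a handler on a shared line is expected to behave against this API.

#include <linux/interrupt.h>
#include <linux/types.h>

/* Hypothetical per-device state; any unique, non-NULL pointer serves as
 * the dev_id cookie required for SA_SHIRQ registration and for free_irq().
 */
struct mydev {
	unsigned int irq;		/* IRQ cookie handed back by the bus probe code */
	volatile u32 *status;		/* hypothetical MMIO status register */
};

/* Old-style handler signature: the third argument is the trap-time registers. */
static irqreturn_t mydev_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	struct mydev *dev = dev_id;

	if (!(*dev->status & 0x1))	/* hypothetical "interrupt pending" bit */
		return IRQ_NONE;	/* shared line, not ours */

	/* ... acknowledge and service the device here ... */
	return IRQ_HANDLED;
}

static int mydev_attach(struct mydev *dev)
{
	/* Shared line with entropy sampling; handler_irq() above honours
	 * SA_SAMPLE_RANDOM when acknowledging the interrupt.
	 */
	return request_irq(dev->irq, mydev_interrupt,
			   SA_SHIRQ | SA_SAMPLE_RANDOM, "mydev", dev);
}

static void mydev_detach(struct mydev *dev)
{
	free_irq(dev->irq, dev);	/* dev_id must match the registration */
}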