// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the core interrupt handling code. Detailed
 * information is available in Documentation/core-api/genericirq.rst
 *
 */

#include <linux/irq.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>

#include <asm/irq_regs.h>

#include <trace/events/irq.h>

#include "internals.h"

#ifdef CONFIG_GENERIC_IRQ_MULTI_HANDLER
void (*handle_arch_irq)(struct pt_regs *) __ro_after_init;
#endif

/**
 * handle_bad_irq - handle spurious and unhandled irqs
 * @desc:	description of the interrupt
 *
 * Handles spurious and unhandled IRQs. It also prints a debug message.
 */
void handle_bad_irq(struct irq_desc *desc)
{
	unsigned int irq = irq_desc_get_irq(desc);

	print_irq_desc(irq, desc);
	kstat_incr_irqs_this_cpu(desc);
	ack_bad_irq(irq);
}
EXPORT_SYMBOL_GPL(handle_bad_irq);

/*
 * Special, empty irq handler:
 */
irqreturn_t no_action(int cpl, void *dev_id)
{
	return IRQ_NONE;
}
EXPORT_SYMBOL_GPL(no_action);

static void warn_no_thread(unsigned int irq, struct irqaction *action)
{
	if (test_and_set_bit(IRQTF_WARNED, &action->thread_flags))
		return;

	printk(KERN_WARNING "IRQ %d device %s returned IRQ_WAKE_THREAD "
	       "but no thread function available.", irq, action->name);
}

void __irq_wake_thread(struct irq_desc *desc, struct irqaction *action)
{
	/*
	 * In case the thread crashed and was killed we just pretend that
	 * we handled the interrupt. The hardirq handler has disabled the
	 * device interrupt, so no irq storm is lurking.
	 */
	if (action->thread->flags & PF_EXITING)
		return;

	/*
	 * Wake up the handler thread for this action. If the
	 * RUNTHREAD bit is already set, nothing to do.
	 */
	if (test_and_set_bit(IRQTF_RUNTHREAD, &action->thread_flags))
		return;

	/*
	 * It's safe to OR the mask lockless here. We have only two
	 * places which write to threads_oneshot: This code and the
	 * irq thread.
	 *
	 * This code is the hard irq context and can never run on two
	 * cpus in parallel. If it ever does we have more serious
	 * problems than this bitmask.
	 *
	 * The irq threads of this irq which clear their "running" bit
	 * in threads_oneshot are serialized via desc->lock against
	 * each other and they are serialized against this code by
	 * IRQS_INPROGRESS.
	 *
	 * Hard irq handler:
	 *
	 *	spin_lock(desc->lock);
	 *	desc->state |= IRQS_INPROGRESS;
	 *	spin_unlock(desc->lock);
	 *	set_bit(IRQTF_RUNTHREAD, &action->thread_flags);
	 *	desc->threads_oneshot |= mask;
	 *	spin_lock(desc->lock);
	 *	desc->state &= ~IRQS_INPROGRESS;
	 *	spin_unlock(desc->lock);
	 *
	 * irq thread:
	 *
	 * again:
	 *	spin_lock(desc->lock);
	 *	if (desc->state & IRQS_INPROGRESS) {
	 *		spin_unlock(desc->lock);
	 *		while (desc->state & IRQS_INPROGRESS)
	 *			cpu_relax();
	 *		goto again;
	 *	}
	 *	if (!test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
	 *		desc->threads_oneshot &= ~mask;
	 *	spin_unlock(desc->lock);
	 *
	 * So either the thread waits for us to clear IRQS_INPROGRESS
	 * or we are waiting in the flow handler for desc->lock to be
	 * released before we reach this point. The thread also checks
	 * IRQTF_RUNTHREAD under desc->lock. If set it leaves
	 * threads_oneshot untouched and runs the thread another time.
	 */
	desc->threads_oneshot |= action->thread_mask;

	/*
	 * We increment the threads_active counter in case we wake up
	 * the irq thread. The irq thread decrements the counter when
	 * it returns from the handler or in the exit path and wakes
	 * up waiters which are stuck in synchronize_irq() when the
	 * active count becomes zero. synchronize_irq() is serialized
	 * against this code (hard irq handler) via IRQS_INPROGRESS
	 * like the finalize_oneshot() code. See comment above.
	 */
	atomic_inc(&desc->threads_active);

	wake_up_process(action->thread);
}

irqreturn_t __handle_irq_event_percpu(struct irq_desc *desc)
{
	irqreturn_t retval = IRQ_NONE;
	unsigned int irq = desc->irq_data.irq;
	struct irqaction *action;

	record_irq_time(desc);

	for_each_action_of_desc(desc, action) {
		irqreturn_t res;

		/*
		 * If this IRQ would be threaded under force_irqthreads,
		 * mark it so.
		 */
		if (irq_settings_can_thread(desc) &&
		    !(action->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT)))
			lockdep_hardirq_threaded();

		trace_irq_handler_entry(irq, action);
		res = action->handler(irq, action->dev_id);
		trace_irq_handler_exit(irq, action, res);

		if (WARN_ONCE(!irqs_disabled(),
			      "irq %u handler %pS enabled interrupts\n",
			      irq, action->handler))
			local_irq_disable();

		switch (res) {
		case IRQ_WAKE_THREAD:
			/*
			 * Catch drivers which return WAKE_THREAD but
			 * did not set up a thread function
			 */
			if (unlikely(!action->thread_fn)) {
				warn_no_thread(irq, action);
				break;
			}

			__irq_wake_thread(desc, action);
			break;

		default:
			break;
		}

		retval |= res;
	}

	return retval;
}

irqreturn_t handle_irq_event_percpu(struct irq_desc *desc)
{
	irqreturn_t retval;

	retval = __handle_irq_event_percpu(desc);

	add_interrupt_randomness(desc->irq_data.irq);

	if (!irq_settings_no_debug(desc))
		note_interrupt(desc, retval);
	return retval;
}

irqreturn_t handle_irq_event(struct irq_desc *desc)
{
	irqreturn_t ret;

	desc->istate &= ~IRQS_PENDING;
	irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
	raw_spin_unlock(&desc->lock);

	ret = handle_irq_event_percpu(desc);

	raw_spin_lock(&desc->lock);
	irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
	return ret;
}

#ifdef CONFIG_GENERIC_IRQ_MULTI_HANDLER
int __init set_handle_irq(void (*handle_irq)(struct pt_regs *))
{
	if (handle_arch_irq)
		return -EBUSY;

	handle_arch_irq = handle_irq;
	return 0;
}

/**
 * generic_handle_arch_irq - root irq handler for architectures which do no
 *                           entry accounting themselves
 * @regs:	Register file coming from the low-level handling code
 */
asmlinkage void noinstr generic_handle_arch_irq(struct pt_regs *regs)
{
	struct pt_regs *old_regs;

	irq_enter();
	old_regs = set_irq_regs(regs);
	handle_arch_irq(regs);
	set_irq_regs(old_regs);
	irq_exit();
}
#endif
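
/*
 * Illustrative driver-side sketch (not part of this file): how the
 * IRQ_WAKE_THREAD path handled by __handle_irq_event_percpu() and
 * __irq_wake_thread() above is typically exercised. The "foo_*" device,
 * its helpers and the irq number are hypothetical placeholders;
 * request_threaded_irq(), IRQF_ONESHOT, IRQ_NONE, IRQ_WAKE_THREAD and
 * IRQ_HANDLED are the real kernel API.
 *
 *	static irqreturn_t foo_hardirq(int irq, void *dev_id)
 *	{
 *		struct foo_device *foo = dev_id;
 *
 *		if (!foo_irq_pending(foo))
 *			return IRQ_NONE;
 *
 *		foo_quiesce_irq(foo);
 *		return IRQ_WAKE_THREAD;		(wakes foo_thread_fn below)
 *	}
 *
 *	static irqreturn_t foo_thread_fn(int irq, void *dev_id)
 *	{
 *		foo_handle_event(dev_id);	(runs in the irq thread, may sleep)
 *		return IRQ_HANDLED;
 *	}
 *
 *	err = request_threaded_irq(foo->irq, foo_hardirq, foo_thread_fn,
 *				   IRQF_ONESHOT, "foo", foo);
 */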