/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Code to handle x86 style IRQs plus some generic interrupt stuff.
 *
 * Copyright (C) 1992 Linus Torvalds
 * Copyright (C) 1994 - 2000 Ralf Baechle
 */
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/proc_fs.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/kallsyms.h>
#include <linux/kgdb.h>
#include <linux/ftrace.h>
#include <linux/irqdomain.h>

#include <linux/atomic.h>
#include <linux/uaccess.h>
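
/*
 * Base addresses of the per-CPU IRQ stacks, allocated in init_IRQ().
 * The low-level interrupt entry code switches onto one of these stacks
 * before calling into the C-level handlers below, so interrupt handling
 * does not run on the interrupted task's kernel stack.
 */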
void *irq_stack[NR_CPUS];

/*
 * 'what should we do if we get a hw irq event on an illegal vector'.
 * each architecture has to answer this themselves.
 */
void ack_bad_irq(unsigned int irq)
{
        printk("unexpected IRQ # %d\n", irq);
}
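
/*
 * Count of spurious/unhandled interrupts; reported as the "ERR" line
 * in /proc/interrupts by arch_show_interrupts() below.
 */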
atomic_t irq_err_count;
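
/*
 * Arch hook invoked by the generic /proc/interrupts code to append
 * architecture-specific lines; here it prints the error counter.
 */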
int arch_show_interrupts(struct seq_file *p, int prec)
{
        seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
        return 0;
}
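
/*
 * Called by the interrupt dispatch code when an interrupt fires that
 * cannot be attributed to any source; just account for it.
 */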
asmlinkage void spurious_interrupt(void)
{
        atomic_inc(&irq_err_count);
}
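
/*
 * Arch-level IRQ initialisation: mark every line as non-probeable, mask
 * the CPU interrupt lines when a vectored external interrupt controller
 * (VEIC) is in use, let the platform set up its controllers via
 * arch_init_irq(), then allocate the per-CPU IRQ stacks.
 */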
void __init init_IRQ(void)
{
        int i;
        unsigned int order = get_order(IRQ_STACK_SIZE);

        for (i = 0; i < NR_IRQS; i++)
                irq_set_noprobe(i);

        if (cpu_has_veic)
                clear_c0_status(ST0_IM);

        arch_init_irq();

        for_each_possible_cpu(i) {
                void *s = (void *)__get_free_pages(GFP_KERNEL, order);

                irq_stack[i] = s;
                pr_debug("CPU%d IRQ stack at 0x%p - 0x%p\n", i,
                         irq_stack[i], irq_stack[i] + IRQ_STACK_SIZE);
        }
}

#ifdef CONFIG_DEBUG_STACKOVERFLOW
static inline void check_stack_overflow(void)
{
        unsigned long sp;

        __asm__ __volatile__("move %0, $sp" : "=r" (sp));
        sp &= THREAD_MASK;

        /*
         * Check for stack overflow: is there less than STACK_WARN free?
         * STACK_WARN is defined as 1/8 of THREAD_SIZE by default.
         */
        if (unlikely(sp < (sizeof(struct thread_info) + STACK_WARN))) {
                printk("do_IRQ: stack overflow: %ld\n",
                       sp - sizeof(struct thread_info));
                dump_stack();
        }
}
#else
static inline void check_stack_overflow(void) {}
#endif

/*
 * do_IRQ handles all normal device IRQ's (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 */
void __irq_entry do_IRQ(unsigned int irq)
{
        irq_enter();
        check_stack_overflow();
        generic_handle_irq(irq);
        irq_exit();
}
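
/*
 * A typical caller (a sketch, not part of this file): a platform's
 * plat_irq_dispatch() reads the pending CPU interrupt bits from the
 * coprocessor 0 Cause and Status registers and feeds each pending line
 * to do_IRQ(), along the lines of:
 *
 *      asmlinkage void plat_irq_dispatch(void)
 *      {
 *              unsigned int pending = read_c0_cause() &
 *                                     read_c0_status() & ST0_IM;
 *
 *              if (pending & CAUSEF_IP7)
 *                      do_IRQ(MIPS_CPU_IRQ_BASE + 7);
 *      }
 */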

#ifdef CONFIG_IRQ_DOMAIN
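/*
 * Like do_IRQ(), but takes a hardware interrupt number relative to an
 * irq_domain; generic_handle_domain_irq() translates it to the Linux
 * irq before handling it.
 */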
void __irq_entry do_domain_IRQ(struct irq_domain *domain, unsigned int hwirq)
{
        irq_enter();
        check_stack_overflow();
        generic_handle_domain_irq(domain, hwirq);
        irq_exit();
}
#endif