  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * linux/arch/arm/mach-pxa/irq.c
  4. *
  5. * Generic PXA IRQ handling
  6. *
  7. * Author: Nicolas Pitre
  8. * Created: Jun 15, 2001
  9. * Copyright: MontaVista Software Inc.
  10. */
  11. #include <linux/bitops.h>
  12. #include <linux/init.h>
  13. #include <linux/module.h>
  14. #include <linux/interrupt.h>
  15. #include <linux/syscore_ops.h>
  16. #include <linux/io.h>
  17. #include <linux/irq.h>
  18. #include <linux/of_address.h>
  19. #include <linux/of_irq.h>
  20. #include <linux/soc/pxa/cpu.h>
  21. #include <asm/exception.h>
  22. #include "irqs.h"
  23. #include "generic.h"
  24. #include "pxa-regs.h"
/* Interrupt controller register offsets (relative to pxa_irq_base) */
#define ICIP			(0x000)	/* IRQ Pending Register */
#define ICMR			(0x004)	/* Mask Register */
#define ICLR			(0x008)	/* Level Register (IRQ vs FIQ routing) */
#define ICFR			(0x00c)	/* FIQ Pending Register */
#define ICPR			(0x010)	/* Pending Register (unmasked) */
#define ICCR			(0x014)	/* Control Register (idle-mask behaviour) */
#define ICHP			(0x018)	/* Highest Priority Register */

/*
 * Priority register offset for IRQ i: the three banks of 32 IRQs sit
 * at non-contiguous offsets in the register map.
 */
#define IPR(i)			(((i) < 32) ? (0x01c + ((i) << 2)) :		\
				((i) < 64) ? (0x0b0 + (((i) - 32) << 2)) :	\
				      (0x144 + (((i) - 64) << 2)))

#define ICHP_VAL_IRQ		(1 << 31)		/* ICHP reports a valid pending IRQ */
#define ICHP_IRQ(i)		(((i) >> 16) & 0x7fff)	/* extract IRQ number from ICHP */
#define IPR_VALID		(1 << 31)		/* marks an IPR entry as valid */

#define MAX_INTERNAL_IRQS	128
/*
 * This is for peripheral IRQs internal to the PXA chip.
 */
static void __iomem *pxa_irq_base;	/* virtual base of the interrupt controller */
static int pxa_internal_irq_nr;		/* number of internal IRQs on this SoC */
static bool cpu_has_ipr;		/* true when per-IRQ priority registers exist */
static struct irq_domain *pxa_irq_domain;
  46. static inline void __iomem *irq_base(int i)
  47. {
  48. static unsigned long phys_base_offset[] = {
  49. 0x0,
  50. 0x9c,
  51. 0x130,
  52. };
  53. return pxa_irq_base + phys_base_offset[i];
  54. }
  55. void pxa_mask_irq(struct irq_data *d)
  56. {
  57. void __iomem *base = irq_data_get_irq_chip_data(d);
  58. irq_hw_number_t irq = irqd_to_hwirq(d);
  59. uint32_t icmr = __raw_readl(base + ICMR);
  60. icmr &= ~BIT(irq & 0x1f);
  61. __raw_writel(icmr, base + ICMR);
  62. }
  63. void pxa_unmask_irq(struct irq_data *d)
  64. {
  65. void __iomem *base = irq_data_get_irq_chip_data(d);
  66. irq_hw_number_t irq = irqd_to_hwirq(d);
  67. uint32_t icmr = __raw_readl(base + ICMR);
  68. icmr |= BIT(irq & 0x1f);
  69. __raw_writel(icmr, base + ICMR);
  70. }
/* irq_chip for the on-chip ("SC") interrupt controller */
static struct irq_chip pxa_internal_irq_chip = {
	.name		= "SC",
	/* level-triggered sources: ack is simply mask */
	.irq_ack	= pxa_mask_irq,
	.irq_mask	= pxa_mask_irq,
	.irq_unmask	= pxa_unmask_irq,
};
  77. asmlinkage void __exception_irq_entry icip_handle_irq(struct pt_regs *regs)
  78. {
  79. uint32_t icip, icmr, mask;
  80. do {
  81. icip = __raw_readl(pxa_irq_base + ICIP);
  82. icmr = __raw_readl(pxa_irq_base + ICMR);
  83. mask = icip & icmr;
  84. if (mask == 0)
  85. break;
  86. handle_IRQ(PXA_IRQ(fls(mask) - 1), regs);
  87. } while (1);
  88. }
  89. asmlinkage void __exception_irq_entry ichp_handle_irq(struct pt_regs *regs)
  90. {
  91. uint32_t ichp;
  92. do {
  93. __asm__ __volatile__("mrc p6, 0, %0, c5, c0, 0\n": "=r"(ichp));
  94. if ((ichp & ICHP_VAL_IRQ) == 0)
  95. break;
  96. handle_IRQ(PXA_IRQ(ICHP_IRQ(ichp)), regs);
  97. } while (1);
  98. }
  99. static int pxa_irq_map(struct irq_domain *h, unsigned int virq,
  100. irq_hw_number_t hw)
  101. {
  102. void __iomem *base = irq_base(hw / 32);
  103. /* initialize interrupt priority */
  104. if (cpu_has_ipr)
  105. __raw_writel(hw | IPR_VALID, pxa_irq_base + IPR(hw));
  106. irq_set_chip_and_handler(virq, &pxa_internal_irq_chip,
  107. handle_level_irq);
  108. irq_set_chip_data(virq, base);
  109. return 0;
  110. }
/* Domain ops: one-cell DT specifier, mapping handled by pxa_irq_map() */
static const struct irq_domain_ops pxa_irq_ops = {
	.map    = pxa_irq_map,
	.xlate  = irq_domain_xlate_onecell,
};
  115. static __init void
  116. pxa_init_irq_common(struct device_node *node, int irq_nr,
  117. int (*fn)(struct irq_data *, unsigned int))
  118. {
  119. int n;
  120. pxa_internal_irq_nr = irq_nr;
  121. pxa_irq_domain = irq_domain_add_legacy(node, irq_nr,
  122. PXA_IRQ(0), 0,
  123. &pxa_irq_ops, NULL);
  124. if (!pxa_irq_domain)
  125. panic("Unable to add PXA IRQ domain\n");
  126. irq_set_default_host(pxa_irq_domain);
  127. for (n = 0; n < irq_nr; n += 32) {
  128. void __iomem *base = irq_base(n >> 5);
  129. __raw_writel(0, base + ICMR); /* disable all IRQs */
  130. __raw_writel(0, base + ICLR); /* all IRQs are IRQ, not FIQ */
  131. }
  132. /* only unmasked interrupts kick us out of idle */
  133. __raw_writel(1, irq_base(0) + ICCR);
  134. pxa_internal_irq_chip.irq_set_wake = fn;
  135. }
  136. void __init pxa_init_irq(int irq_nr, int (*fn)(struct irq_data *, unsigned int))
  137. {
  138. BUG_ON(irq_nr > MAX_INTERNAL_IRQS);
  139. pxa_irq_base = io_p2v(0x40d00000);
  140. cpu_has_ipr = !cpu_is_pxa25x();
  141. pxa_init_irq_common(NULL, irq_nr, fn);
  142. }
#ifdef CONFIG_PM
/* Mask and priority register contents saved across suspend/resume */
static unsigned long saved_icmr[MAX_INTERNAL_IRQS/32];
static unsigned long saved_ipr[MAX_INTERNAL_IRQS];

/*
 * Save every bank's mask register (then mask everything) and, when
 * the CPU has them, the per-interrupt priority registers.
 * Always returns 0.
 */
static int pxa_irq_suspend(void)
{
	int i;

	for (i = 0; i < DIV_ROUND_UP(pxa_internal_irq_nr, 32); i++) {
		void __iomem *base = irq_base(i);

		saved_icmr[i] = __raw_readl(base + ICMR);
		__raw_writel(0, base + ICMR);	/* mask all while suspended */
	}
	if (cpu_has_ipr) {
		for (i = 0; i < pxa_internal_irq_nr; i++)
			saved_ipr[i] = __raw_readl(pxa_irq_base + IPR(i));
	}
	return 0;
}

/*
 * Restore the masks and priorities saved by pxa_irq_suspend(),
 * re-route all sources as IRQ, and re-enable ICCR so only unmasked
 * interrupts wake the core from idle.
 */
static void pxa_irq_resume(void)
{
	int i;

	for (i = 0; i < DIV_ROUND_UP(pxa_internal_irq_nr, 32); i++) {
		void __iomem *base = irq_base(i);

		__raw_writel(saved_icmr[i], base + ICMR);
		__raw_writel(0, base + ICLR);	/* all IRQs are IRQ, not FIQ */
	}
	if (cpu_has_ipr)
		for (i = 0; i < pxa_internal_irq_nr; i++)
			__raw_writel(saved_ipr[i], pxa_irq_base + IPR(i));
	__raw_writel(1, pxa_irq_base + ICCR);
}
#else
#define pxa_irq_suspend		NULL
#define pxa_irq_resume		NULL
#endif
/* Syscore hooks for suspend/resume (NULL no-ops without CONFIG_PM) */
struct syscore_ops pxa_irq_syscore_ops = {
	.suspend	= pxa_irq_suspend,
	.resume		= pxa_irq_resume,
};
#ifdef CONFIG_OF
/* Device-tree match table for the PXA interrupt controller node */
static const struct of_device_id intc_ids[] __initconst = {
	{ .compatible = "marvell,pxa-intc", },
	{}
};
/*
 * Device-tree entry point: locate the interrupt controller node,
 * read the IRQ count and register window from it, allocate the Linux
 * IRQ descriptors, and run the common setup.  Each probe failure is
 * logged and aborts initialization.
 *
 * @fn: optional irq_set_wake implementation
 */
void __init pxa_dt_irq_init(int (*fn)(struct irq_data *, unsigned int))
{
	struct device_node *node;
	struct resource res;
	int ret;

	node = of_find_matching_node(NULL, intc_ids);
	if (!node) {
		pr_err("Failed to find interrupt controller in arch-pxa\n");
		return;
	}
	ret = of_property_read_u32(node, "marvell,intc-nr-irqs",
				   &pxa_internal_irq_nr);
	if (ret) {
		pr_err("Not found marvell,intc-nr-irqs property\n");
		return;
	}
	ret = of_address_to_resource(node, 0, &res);
	if (ret < 0) {
		pr_err("No registers defined for node\n");
		return;
	}
	pxa_irq_base = io_p2v(res.start);

	/* per-interrupt priority registers are optional */
	if (of_find_property(node, "marvell,intc-priority", NULL))
		cpu_has_ipr = 1;

	ret = irq_alloc_descs(-1, 0, pxa_internal_irq_nr, 0);
	if (ret < 0) {
		pr_err("Failed to allocate IRQ numbers\n");
		return;
	}

	pxa_init_irq_common(node, pxa_internal_irq_nr, fn);
}
  217. #endif /* CONFIG_OF */