// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright (C) 2010 John Crispin <[email protected]>
 * Copyright (C) 2010 Thomas Langer <[email protected]>
 */

#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <linux/of_platform.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>

#include <asm/bootinfo.h>
#include <asm/irq_cpu.h>

#include <lantiq_soc.h>
#include <irq.h>

/* register definitions - internal irqs */
#define LTQ_ICU_ISR		0x0000
#define LTQ_ICU_IER		0x0008
#define LTQ_ICU_IOSR		0x0010
#define LTQ_ICU_IRSR		0x0018
#define LTQ_ICU_IMR		0x0020

#define LTQ_ICU_IM_SIZE		0x28

/* register definitions - external irqs */
#define LTQ_EIU_EXIN_C		0x0000
#define LTQ_EIU_EXIN_INIC	0x0004
#define LTQ_EIU_EXIN_INC	0x0008
#define LTQ_EIU_EXIN_INEN	0x000C

/* number of external interrupts */
#define MAX_EIU			6

/* the performance counter */
#define LTQ_PERF_IRQ		(INT_NUM_IM4_IRL0 + 31)

/*
 * irqs generated by devices attached to the EBU need to be acked in
 * a special manner
 */
#define LTQ_ICU_EBU_IRQ		22
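
/*
 * Each VPE has its own ICU register window. The accessors below take
 * the VPE and the interrupt module (IM) index and derive the register
 * address from the per-module stride LTQ_ICU_IM_SIZE.
 */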
#define ltq_icu_w32(vpe, m, x, y)	\
	ltq_w32((x), ltq_icu_membase[vpe] + m*LTQ_ICU_IM_SIZE + (y))

#define ltq_icu_r32(vpe, m, x)		\
	ltq_r32(ltq_icu_membase[vpe] + m*LTQ_ICU_IM_SIZE + (x))

#define ltq_eiu_w32(x, y)	ltq_w32((x), ltq_eiu_membase + (y))
#define ltq_eiu_r32(x)		ltq_r32(ltq_eiu_membase + (x))

/* we have a cascade of 8 irqs */
#define MIPS_CPU_IRQ_CASCADE		8

static int exin_avail;
static u32 ltq_eiu_irq[MAX_EIU];
static void __iomem *ltq_icu_membase[NR_CPUS];
static void __iomem *ltq_eiu_membase;
static struct irq_domain *ltq_domain;
static DEFINE_SPINLOCK(ltq_eiu_lock);
static DEFINE_RAW_SPINLOCK(ltq_icu_lock);
static int ltq_perfcount_irq;
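
/*
 * hwirq layout used throughout this driver: hwirqs below
 * MIPS_CPU_IRQ_CASCADE are the MIPS CPU irqs; the ICU lines follow,
 * each interrupt module (IM) occupying INT_NUM_IM_OFFSET consecutive
 * hwirqs.
 */

/* return the hwirq backing external interrupt line @exin, or -1 */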
int ltq_eiu_get_irq(int exin)
{
	if (exin < exin_avail)
		return ltq_eiu_irq[exin];
	return -1;
}
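
/*
 * Masking clears the IER bit on every present VPE: the line may have
 * last been unmasked on any of them, so clear it everywhere.
 */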
void ltq_disable_irq(struct irq_data *d)
{
	unsigned long offset = d->hwirq - MIPS_CPU_IRQ_CASCADE;
	unsigned long im = offset / INT_NUM_IM_OFFSET;
	unsigned long flags;
	int vpe;

	offset %= INT_NUM_IM_OFFSET;

	raw_spin_lock_irqsave(&ltq_icu_lock, flags);
	for_each_present_cpu(vpe) {
		ltq_icu_w32(vpe, im,
			    ltq_icu_r32(vpe, im, LTQ_ICU_IER) & ~BIT(offset),
			    LTQ_ICU_IER);
	}
	raw_spin_unlock_irqrestore(&ltq_icu_lock, flags);
}

void ltq_mask_and_ack_irq(struct irq_data *d)
{
	unsigned long offset = d->hwirq - MIPS_CPU_IRQ_CASCADE;
	unsigned long im = offset / INT_NUM_IM_OFFSET;
	unsigned long flags;
	int vpe;

	offset %= INT_NUM_IM_OFFSET;

	raw_spin_lock_irqsave(&ltq_icu_lock, flags);
	for_each_present_cpu(vpe) {
		ltq_icu_w32(vpe, im,
			    ltq_icu_r32(vpe, im, LTQ_ICU_IER) & ~BIT(offset),
			    LTQ_ICU_IER);
		ltq_icu_w32(vpe, im, BIT(offset), LTQ_ICU_ISR);
	}
	raw_spin_unlock_irqrestore(&ltq_icu_lock, flags);
}

static void ltq_ack_irq(struct irq_data *d)
{
	unsigned long offset = d->hwirq - MIPS_CPU_IRQ_CASCADE;
	unsigned long im = offset / INT_NUM_IM_OFFSET;
	unsigned long flags;
	int vpe;

	offset %= INT_NUM_IM_OFFSET;

	raw_spin_lock_irqsave(&ltq_icu_lock, flags);
	for_each_present_cpu(vpe) {
		ltq_icu_w32(vpe, im, BIT(offset), LTQ_ICU_ISR);
	}
	raw_spin_unlock_irqrestore(&ltq_icu_lock, flags);
}
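
/*
 * Unmasking sets the IER bit only on the first VPE of the effective
 * affinity mask; this is what actually routes the interrupt to one VPE.
 */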
void ltq_enable_irq(struct irq_data *d)
{
	unsigned long offset = d->hwirq - MIPS_CPU_IRQ_CASCADE;
	unsigned long im = offset / INT_NUM_IM_OFFSET;
	unsigned long flags;
	int vpe;

	offset %= INT_NUM_IM_OFFSET;

	vpe = cpumask_first(irq_data_get_effective_affinity_mask(d));

	/* This shouldn't even be possible, except maybe during CPU hotplug churn */
	if (unlikely(vpe >= nr_cpu_ids))
		vpe = smp_processor_id();

	raw_spin_lock_irqsave(&ltq_icu_lock, flags);

	ltq_icu_w32(vpe, im, ltq_icu_r32(vpe, im, LTQ_ICU_IER) | BIT(offset),
		    LTQ_ICU_IER);

	raw_spin_unlock_irqrestore(&ltq_icu_lock, flags);
}
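
/*
 * The trigger mode lives in EXIN_C as one 3-bit field per external
 * irq, with a stride of 4 bits (hence the 7 << (i * 4) mask below):
 * 1 = rising, 2 = falling, 3 = both edges, 5 = level high,
 * 6 = level low.
 */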
static int ltq_eiu_settype(struct irq_data *d, unsigned int type)
{
	int i;
	unsigned long flags;

	for (i = 0; i < exin_avail; i++) {
		if (d->hwirq == ltq_eiu_irq[i]) {
			int val = 0;
			int edge = 0;

			switch (type) {
			case IRQF_TRIGGER_NONE:
				break;
			case IRQF_TRIGGER_RISING:
				val = 1;
				edge = 1;
				break;
			case IRQF_TRIGGER_FALLING:
				val = 2;
				edge = 1;
				break;
			case IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING:
				val = 3;
				edge = 1;
				break;
			case IRQF_TRIGGER_HIGH:
				val = 5;
				break;
			case IRQF_TRIGGER_LOW:
				val = 6;
				break;
			default:
				pr_err("invalid type %d for irq %ld\n",
					type, d->hwirq);
				return -EINVAL;
			}

			if (edge)
				irq_set_handler(d->hwirq, handle_edge_irq);

			spin_lock_irqsave(&ltq_eiu_lock, flags);
			ltq_eiu_w32((ltq_eiu_r32(LTQ_EIU_EXIN_C) &
				    (~(7 << (i * 4)))) | (val << (i * 4)),
				    LTQ_EIU_EXIN_C);
			spin_unlock_irqrestore(&ltq_eiu_lock, flags);
		}
	}

	return 0;
}

static unsigned int ltq_startup_eiu_irq(struct irq_data *d)
{
	int i;

	ltq_enable_irq(d);
	for (i = 0; i < exin_avail; i++) {
		if (d->hwirq == ltq_eiu_irq[i]) {
			/* by default we are low level triggered */
			ltq_eiu_settype(d, IRQF_TRIGGER_LOW);
			/* clear all pending */
			ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INC) & ~BIT(i),
				LTQ_EIU_EXIN_INC);
			/* enable */
			ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INEN) | BIT(i),
				LTQ_EIU_EXIN_INEN);
			break;
		}
	}

	return 0;
}

static void ltq_shutdown_eiu_irq(struct irq_data *d)
{
	int i;

	ltq_disable_irq(d);
	for (i = 0; i < exin_avail; i++) {
		if (d->hwirq == ltq_eiu_irq[i]) {
			/* disable */
			ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INEN) & ~BIT(i),
				LTQ_EIU_EXIN_INEN);
			break;
		}
	}
}
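
/*
 * Only record the new effective affinity here; the line is actually
 * moved the next time ltq_enable_irq() unmasks it on the first VPE of
 * that mask.
 */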
#if defined(CONFIG_SMP)
static int ltq_icu_irq_set_affinity(struct irq_data *d,
				    const struct cpumask *cpumask, bool force)
{
	struct cpumask tmask;

	if (!cpumask_and(&tmask, cpumask, cpu_online_mask))
		return -EINVAL;

	irq_data_update_effective_affinity(d, &tmask);

	return IRQ_SET_MASK_OK;
}
#endif

static struct irq_chip ltq_irq_type = {
	.name = "icu",
	.irq_enable = ltq_enable_irq,
	.irq_disable = ltq_disable_irq,
	.irq_unmask = ltq_enable_irq,
	.irq_ack = ltq_ack_irq,
	.irq_mask = ltq_disable_irq,
	.irq_mask_ack = ltq_mask_and_ack_irq,
#if defined(CONFIG_SMP)
	.irq_set_affinity = ltq_icu_irq_set_affinity,
#endif
};

static struct irq_chip ltq_eiu_type = {
	.name = "eiu",
	.irq_startup = ltq_startup_eiu_irq,
	.irq_shutdown = ltq_shutdown_eiu_irq,
	.irq_enable = ltq_enable_irq,
	.irq_disable = ltq_disable_irq,
	.irq_unmask = ltq_enable_irq,
	.irq_ack = ltq_ack_irq,
	.irq_mask = ltq_disable_irq,
	.irq_mask_ack = ltq_mask_and_ack_irq,
	.irq_set_type = ltq_eiu_settype,
#if defined(CONFIG_SMP)
	.irq_set_affinity = ltq_icu_irq_set_affinity,
#endif
};
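
/*
 * Chained handler for one interrupt module. The parent is a MIPS CPU
 * irq; CPU irqs 0 and 1 are the software interrupts, so module n is
 * cascaded off CPU irq n + 2 (see icu_of_init()), hence the "- 2".
 */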
static void ltq_hw_irq_handler(struct irq_desc *desc)
{
	unsigned int module = irq_desc_get_irq(desc) - 2;
	u32 irq;
	irq_hw_number_t hwirq;
	int vpe = smp_processor_id();

	irq = ltq_icu_r32(vpe, module, LTQ_ICU_IOSR);
	if (irq == 0)
		return;

	/*
	 * a silicon bug causes only the most significant set bit to be
	 * valid; all other bits may be bogus
	 */
	irq = __fls(irq);
	hwirq = irq + MIPS_CPU_IRQ_CASCADE + (INT_NUM_IM_OFFSET * module);
	generic_handle_domain_irq(ltq_domain, hwirq);

	/* if this is an EBU irq, we need to ack it or we get a deadlock */
	if (irq == LTQ_ICU_EBU_IRQ && !module && LTQ_EBU_PCC_ISTAT != 0)
		ltq_ebu_w32(ltq_ebu_r32(LTQ_EBU_PCC_ISTAT) | 0x10,
			LTQ_EBU_PCC_ISTAT);
}
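
/*
 * Domain map callback: hwirqs below the cascade stay with the MIPS CPU
 * irq chip, EXIN lines get the EIU chip, everything else the plain ICU
 * chip.
 */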
static int icu_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw)
{
	struct irq_chip *chip = &ltq_irq_type;
	struct irq_data *data;
	int i;

	if (hw < MIPS_CPU_IRQ_CASCADE)
		return 0;

	for (i = 0; i < exin_avail; i++)
		if (hw == ltq_eiu_irq[i])
			chip = &ltq_eiu_type;

	data = irq_get_irq_data(irq);

	irq_data_update_effective_affinity(data, cpumask_of(0));

	irq_set_chip_and_handler(irq, chip, handle_level_irq);

	return 0;
}

static const struct irq_domain_ops irq_domain_ops = {
	.xlate = irq_domain_xlate_onetwocell,
	.map = icu_map,
};
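
/*
 * Illustrative device tree sketch of what this init code consumes.
 * The unit addresses, reg values and irq numbers below are made up
 * for illustration; consult the SoC's .dtsi for real values.
 *
 *	icu0: icu@80200 {
 *		compatible = "lantiq,icu";
 *		reg = <0x80200 0xc8>,	// one ICU register window per VPE
 *		      <0x80300 0xc8>;
 *	};
 *
 *	eiu0: eiu@101000 {
 *		compatible = "lantiq,eiu-xway";
 *		reg = <0x101000 0x1000>;
 *		lantiq,eiu-irqs = <166 135 66>;
 *	};
 */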
int __init icu_of_init(struct device_node *node, struct device_node *parent)
{
	struct device_node *eiu_node;
	struct resource res;
	int i, ret, vpe;

	/* load register regions of available ICUs */
	for_each_possible_cpu(vpe) {
		if (of_address_to_resource(node, vpe, &res))
			panic("Failed to get icu%i memory range", vpe);

		if (!request_mem_region(res.start, resource_size(&res),
					res.name))
			pr_err("Failed to request icu%i memory\n", vpe);

		ltq_icu_membase[vpe] = ioremap(res.start,
					resource_size(&res));

		if (!ltq_icu_membase[vpe])
			panic("Failed to remap icu%i memory", vpe);
	}

	/* turn off all irqs by default */
	for_each_possible_cpu(vpe) {
		for (i = 0; i < MAX_IM; i++) {
			ltq_icu_w32(vpe, i, 0, LTQ_ICU_IER);

			/* clear all possibly pending interrupts */
			ltq_icu_w32(vpe, i, ~0, LTQ_ICU_ISR);
			ltq_icu_w32(vpe, i, ~0, LTQ_ICU_IMR);

			/* clear resend */
			ltq_icu_w32(vpe, i, 0, LTQ_ICU_IRSR);
		}
	}

	mips_cpu_irq_init();

	for (i = 0; i < MAX_IM; i++)
		irq_set_chained_handler(i + 2, ltq_hw_irq_handler);

	ltq_domain = irq_domain_add_linear(node,
		(MAX_IM * INT_NUM_IM_OFFSET) + MIPS_CPU_IRQ_CASCADE,
		&irq_domain_ops, 0);

	/* tell oprofile which irq to use */
	ltq_perfcount_irq = irq_create_mapping(ltq_domain, LTQ_PERF_IRQ);

	/* the external interrupts are optional and xway only */
	eiu_node = of_find_compatible_node(NULL, NULL, "lantiq,eiu-xway");
	if (eiu_node && !of_address_to_resource(eiu_node, 0, &res)) {
		/* find out how many external irq sources we have */
		exin_avail = of_property_count_u32_elems(eiu_node,
							 "lantiq,eiu-irqs");

		if (exin_avail > MAX_EIU)
			exin_avail = MAX_EIU;

		ret = of_property_read_u32_array(eiu_node, "lantiq,eiu-irqs",
						ltq_eiu_irq, exin_avail);
		if (ret)
			panic("failed to load external irq resources");

		if (!request_mem_region(res.start, resource_size(&res),
					res.name))
			pr_err("Failed to request eiu memory\n");

		ltq_eiu_membase = ioremap(res.start,
					resource_size(&res));

		if (!ltq_eiu_membase)
			panic("Failed to remap eiu memory");
	}
	of_node_put(eiu_node);

	return 0;
}
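
/*
 * Hooks used by the generic MIPS time and perf code to learn which
 * irqs drive the CP0 performance counter and the CP0 compare timer.
 */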
int get_c0_perfcount_int(void)
{
	return ltq_perfcount_irq;
}
EXPORT_SYMBOL_GPL(get_c0_perfcount_int);

unsigned int get_c0_compare_int(void)
{
	return CP0_LEGACY_COMPARE_IRQ;
}

IRQCHIP_DECLARE(lantiq_icu, "lantiq,icu", icu_of_init);

void __init arch_init_irq(void)
{
	irqchip_init();
}