// SPDX-License-Identifier: GPL-2.0
/*
 * ip27-irq.c: High-level interrupt handling for IP27 architecture.
 *
 * Copyright (C) 1999, 2000 Ralf Baechle ([email protected])
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 1999 - 2001 Kanoj Sarcar
 */

#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/sched.h>

#include <asm/io.h>
#include <asm/irq_cpu.h>
#include <asm/sn/addrs.h>
#include <asm/sn/agent.h>
#include <asm/sn/arch.h>
#include <asm/sn/intr.h>
#include <asm/sn/irq_alloc.h>

struct hub_irq_data {
        u64     *irq_mask[2];
        cpuid_t cpu;
};

static DECLARE_BITMAP(hub_irq_map, IP27_HUB_IRQ_COUNT);

static DEFINE_PER_CPU(unsigned long [2], irq_enable_mask);

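/*
 * Find and claim a free software interrupt level in hub_irq_map. The
 * bitmap is scanned without a lock; test_and_set_bit() is atomic, so
 * if another CPU grabs the same level first we simply rescan.
 */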
static inline int alloc_level(void)
{
        int level;

again:
        level = find_first_zero_bit(hub_irq_map, IP27_HUB_IRQ_COUNT);
        if (level >= IP27_HUB_IRQ_COUNT)
                return -ENOSPC;

        if (test_and_set_bit(level, hub_irq_map))
                goto again;

        return level;
}

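/*
 * Interrupts are enabled and disabled by updating the per-cpu software
 * copy of the mask and then writing both 64-bit halves back to the hub
 * PI mask registers chosen for that CPU by setup_hub_mask().
 */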
static void enable_hub_irq(struct irq_data *d)
{
        struct hub_irq_data *hd = irq_data_get_irq_chip_data(d);
        unsigned long *mask = per_cpu(irq_enable_mask, hd->cpu);

        set_bit(d->hwirq, mask);
        __raw_writeq(mask[0], hd->irq_mask[0]);
        __raw_writeq(mask[1], hd->irq_mask[1]);
}

static void disable_hub_irq(struct irq_data *d)
{
        struct hub_irq_data *hd = irq_data_get_irq_chip_data(d);
        unsigned long *mask = per_cpu(irq_enable_mask, hd->cpu);

        clear_bit(d->hwirq, mask);
        __raw_writeq(mask[0], hd->irq_mask[0]);
        __raw_writeq(mask[1], hd->irq_mask[1]);
}

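/*
 * Pick the CPU that will receive the interrupt and remember which set
 * of hub PI mask registers belongs to it: slice 0 of a node uses the
 * _A registers, the other slice uses the _B registers.
 */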
static void setup_hub_mask(struct hub_irq_data *hd, const struct cpumask *mask)
{
        nasid_t nasid;
        int cpu;

        cpu = cpumask_first_and(mask, cpu_online_mask);
        if (cpu >= nr_cpu_ids)
                cpu = cpumask_any(cpu_online_mask);

        nasid = cpu_to_node(cpu);
        hd->cpu = cpu;
        if (!cputoslice(cpu)) {
                hd->irq_mask[0] = REMOTE_HUB_PTR(nasid, PI_INT_MASK0_A);
                hd->irq_mask[1] = REMOTE_HUB_PTR(nasid, PI_INT_MASK1_A);
        } else {
                hd->irq_mask[0] = REMOTE_HUB_PTR(nasid, PI_INT_MASK0_B);
                hd->irq_mask[1] = REMOTE_HUB_PTR(nasid, PI_INT_MASK1_B);
        }
}

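/*
 * Changing affinity means retargeting the interrupt to a different
 * CPU's mask registers, so a started interrupt is masked on the old
 * CPU before the switch and unmasked on the new one afterwards.
 */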
static int set_affinity_hub_irq(struct irq_data *d, const struct cpumask *mask,
                                bool force)
{
        struct hub_irq_data *hd = irq_data_get_irq_chip_data(d);

        if (!hd)
                return -EINVAL;

        if (irqd_is_started(d))
                disable_hub_irq(d);

        setup_hub_mask(hd, mask);

        if (irqd_is_started(d))
                enable_hub_irq(d);

        irq_data_update_effective_affinity(d, cpumask_of(hd->cpu));

        return 0;
}

static struct irq_chip hub_irq_type = {
        .name             = "HUB",
        .irq_mask         = disable_hub_irq,
        .irq_unmask       = enable_hub_irq,
        .irq_set_affinity = set_affinity_hub_irq,
};

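/*
 * Allocate one hub interrupt: claim a free software level, hook up the
 * HUB irq_chip, and route the interrupt to a CPU attached to the hub
 * named by info->nasid so device interrupts stay node-local.
 */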
static int hub_domain_alloc(struct irq_domain *domain, unsigned int virq,
                            unsigned int nr_irqs, void *arg)
{
        struct irq_alloc_info *info = arg;
        struct hub_irq_data *hd;
        struct hub_data *hub;
        struct irq_desc *desc;
        int swlevel;

        if (nr_irqs > 1 || !info)
                return -EINVAL;

        hd = kzalloc(sizeof(*hd), GFP_KERNEL);
        if (!hd)
                return -ENOMEM;

        swlevel = alloc_level();
        if (unlikely(swlevel < 0)) {
                kfree(hd);
                return -EAGAIN;
        }
        irq_domain_set_info(domain, virq, swlevel, &hub_irq_type, hd,
                            handle_level_irq, NULL, NULL);

        /* use CPU connected to nearest hub */
        hub = hub_data(info->nasid);
        setup_hub_mask(hd, &hub->h_cpus);
        info->nasid = cpu_to_node(hd->cpu);

        /* Make sure it's not already pending when we connect it. */
        REMOTE_HUB_CLR_INTR(info->nasid, swlevel);

        desc = irq_to_desc(virq);
        desc->irq_common_data.node = info->nasid;
        cpumask_copy(desc->irq_common_data.affinity, &hub->h_cpus);

        return 0;
}

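/*
 * Free the per-interrupt chip data; only single-interrupt allocations
 * are ever made (see hub_domain_alloc()), so anything else is ignored.
 */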
static void hub_domain_free(struct irq_domain *domain,
                            unsigned int virq, unsigned int nr_irqs)
{
        struct irq_data *irqd;

        if (nr_irqs > 1)
                return;

        irqd = irq_domain_get_irq_data(domain, virq);
        if (irqd && irqd->chip_data)
                kfree(irqd->chip_data);
}

static const struct irq_domain_ops hub_domain_ops = {
        .alloc = hub_domain_alloc,
        .free  = hub_domain_free,
};

/*
 * This code is unnecessarily complex, because we do
 * intr enabling. Basically, once we grab the set of intrs we need
 * to service, we must mask _all_ these interrupts; firstly, to make
 * sure the same intr does not intr again, causing recursion that
 * can lead to stack overflow. Secondly, we can not just mask the
 * one intr we are do_IRQing, because the non-masked intrs in the
 * first set might intr again, causing multiple servicings of the
 * same intr. This effect is mostly seen for intercpu intrs.
 * Kanoj 05.13.00
 */
static void ip27_do_irq_mask0(struct irq_desc *desc)
{
        cpuid_t cpu = smp_processor_id();
        unsigned long *mask = per_cpu(irq_enable_mask, cpu);
        struct irq_domain *domain;
        u64 pend0;
        int ret;

        /* copied from Irix intpend0() */
        pend0 = LOCAL_HUB_L(PI_INT_PEND0);

        pend0 &= mask[0];               /* Pick intrs we should look at */
        if (!pend0)
                return;

#ifdef CONFIG_SMP
        if (pend0 & (1UL << CPU_RESCHED_A_IRQ)) {
                LOCAL_HUB_CLR_INTR(CPU_RESCHED_A_IRQ);
                scheduler_ipi();
        } else if (pend0 & (1UL << CPU_RESCHED_B_IRQ)) {
                LOCAL_HUB_CLR_INTR(CPU_RESCHED_B_IRQ);
                scheduler_ipi();
        } else if (pend0 & (1UL << CPU_CALL_A_IRQ)) {
                LOCAL_HUB_CLR_INTR(CPU_CALL_A_IRQ);
                generic_smp_call_function_interrupt();
        } else if (pend0 & (1UL << CPU_CALL_B_IRQ)) {
                LOCAL_HUB_CLR_INTR(CPU_CALL_B_IRQ);
                generic_smp_call_function_interrupt();
        } else
#endif
        {
                domain = irq_desc_get_handler_data(desc);
                ret = generic_handle_domain_irq(domain, __ffs(pend0));
                if (ret)
                        spurious_interrupt();
        }

        LOCAL_HUB_L(PI_INT_PEND0);
}

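/*
 * Like ip27_do_irq_mask0(), but for the upper 64 interrupt sources in
 * PI_INT_PEND1; domain hwirq numbers here are offset by 64. The
 * trailing PEND register read-back (here and above) is presumably
 * there to make sure the interrupt-clear writes have reached the hub
 * before returning.
 */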
static void ip27_do_irq_mask1(struct irq_desc *desc)
{
        cpuid_t cpu = smp_processor_id();
        unsigned long *mask = per_cpu(irq_enable_mask, cpu);
        struct irq_domain *domain;
        u64 pend1;
        int ret;

        /* copied from Irix intpend0() */
        pend1 = LOCAL_HUB_L(PI_INT_PEND1);

        pend1 &= mask[1];               /* Pick intrs we should look at */
        if (!pend1)
                return;

        domain = irq_desc_get_handler_data(desc);
        ret = generic_handle_domain_irq(domain, __ffs(pend1) + 64);
        if (ret)
                spurious_interrupt();

        LOCAL_HUB_L(PI_INT_PEND1);
}

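/*
 * Enable the resched and call IPIs for the calling CPU's hub slice and
 * clear any interrupt left pending from before boot. Slice 0 gets the
 * _A IRQ numbers and mask registers; slice 1 the adjacent _B ones.
 */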
void install_ipi(void)
{
        int cpu = smp_processor_id();
        unsigned long *mask = per_cpu(irq_enable_mask, cpu);
        int slice = LOCAL_HUB_L(PI_CPU_NUM);
        int resched, call;

        resched = CPU_RESCHED_A_IRQ + slice;
        set_bit(resched, mask);
        LOCAL_HUB_CLR_INTR(resched);

        call = CPU_CALL_A_IRQ + slice;
        set_bit(call, mask);
        LOCAL_HUB_CLR_INTR(call);

        if (slice == 0) {
                LOCAL_HUB_S(PI_INT_MASK0_A, mask[0]);
                LOCAL_HUB_S(PI_INT_MASK1_A, mask[1]);
        } else {
                LOCAL_HUB_S(PI_INT_MASK0_B, mask[0]);
                LOCAL_HUB_S(PI_INT_MASK1_B, mask[1]);
        }
}

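/*
 * Set up the CPU interrupt controller, reserve the hub levels used for
 * IPIs and hardware error interrupts, create the linear HUB irq domain
 * and chain the two hub pending registers into it.
 */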
void __init arch_init_irq(void)
{
        struct irq_domain *domain;
        struct fwnode_handle *fn;
        int i;

        mips_cpu_irq_init();

        /*
         * Some interrupts are reserved by hardware or by software convention.
         * Mark these as reserved right away so they won't be used accidentally
         * later.
         */
        for (i = 0; i <= CPU_CALL_B_IRQ; i++)
                set_bit(i, hub_irq_map);

        for (i = NI_BRDCAST_ERR_A; i <= MSC_PANIC_INTR; i++)
                set_bit(i, hub_irq_map);

        fn = irq_domain_alloc_named_fwnode("HUB");
        WARN_ON(fn == NULL);
        if (!fn)
                return;
        domain = irq_domain_create_linear(fn, IP27_HUB_IRQ_COUNT,
                                          &hub_domain_ops, NULL);
        WARN_ON(domain == NULL);
        if (!domain)
                return;

        irq_set_default_host(domain);

        irq_set_percpu_devid(IP27_HUB_PEND0_IRQ);
        irq_set_chained_handler_and_data(IP27_HUB_PEND0_IRQ, ip27_do_irq_mask0,
                                         domain);
        irq_set_percpu_devid(IP27_HUB_PEND1_IRQ);
        irq_set_chained_handler_and_data(IP27_HUB_PEND1_IRQ, ip27_do_irq_mask1,
                                         domain);
}