// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Cell Internal Interrupt Controller
 *
 * Copyright (C) 2006 Benjamin Herrenschmidt ([email protected])
 *                    IBM, Corp.
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
 *
 * Author: Arnd Bergmann <[email protected]>
 *
 * TODO:
 * - Fix various assumptions related to HW CPU numbers vs. linux CPU numbers
 *   vs node numbers in the setup code
 * - Implement proper handling of maxcpus=1/2 (that is, routing of irqs from
 *   a non-active node to the active node)
 */

#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/types.h>
#include <linux/ioport.h>
#include <linux/kernel_stat.h>
#include <linux/pgtable.h>
#include <linux/of_address.h>

#include <asm/io.h>
#include <asm/ptrace.h>
#include <asm/machdep.h>
#include <asm/cell-regs.h>

#include "interrupt.h"

struct iic {
	struct cbe_iic_thread_regs __iomem *regs;
	u8 target_id;
	u8 eoi_stack[16];
	int eoi_ptr;
	struct device_node *node;
};

static DEFINE_PER_CPU(struct iic, cpu_iic);
#define IIC_NODE_COUNT	2
static struct irq_domain *iic_host;
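
/*
 * Hardware IRQ numbers used by this driver encode the BE node above
 * IIC_IRQ_NODE_SHIFT, the interrupt class in bits 5:4 and the unit in
 * bits 3:0; IPIs are flagged with IIC_IRQ_TYPE_IPI and carry the upper
 * nibble of their priority in the low bits.
 */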

/* Convert between "pending" bits and hw irq number */
static irq_hw_number_t iic_pending_to_hwnum(struct cbe_iic_pending_bits bits)
{
	unsigned char unit = bits.source & 0xf;
	unsigned char node = bits.source >> 4;
	unsigned char class = bits.class & 3;

	/* Decode IPIs */
	if (bits.flags & CBE_IIC_IRQ_IPI)
		return IIC_IRQ_TYPE_IPI | (bits.prio >> 4);
	else
		return (node << IIC_IRQ_NODE_SHIFT) | (class << 4) | unit;
}

static void iic_mask(struct irq_data *d)
{
}

static void iic_unmask(struct irq_data *d)
{
}
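
/*
 * iic_get_irq() pushes the priority of each interrupt it accepts onto
 * eoi_stack; iic_eoi() pops the stack and writes the entry underneath
 * back to the priority register, restoring the level that was in effect
 * before the interrupt was taken (0xff from eoi_stack[0] when nothing
 * was nested).
 */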

static void iic_eoi(struct irq_data *d)
{
	struct iic *iic = this_cpu_ptr(&cpu_iic);

	out_be64(&iic->regs->prio, iic->eoi_stack[--iic->eoi_ptr]);
	BUG_ON(iic->eoi_ptr < 0);
}

static struct irq_chip iic_chip = {
	.name = "CELL-IIC",
	.irq_mask = iic_mask,
	.irq_unmask = iic_unmask,
	.irq_eoi = iic_eoi,
};

static void iic_ioexc_eoi(struct irq_data *d)
{
}

static void iic_ioexc_cascade(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct cbe_iic_regs __iomem *node_iic =
		(void __iomem *)irq_desc_get_handler_data(desc);
	unsigned int irq = irq_desc_get_irq(desc);
	unsigned int base = (irq & 0xffffff00) | IIC_IRQ_TYPE_IOEXC;
	unsigned long bits, ack;
	int cascade;

	for (;;) {
		bits = in_be64(&node_iic->iic_is);
		if (bits == 0)
			break;
		/* pre-ack edge interrupts */
		ack = bits & IIC_ISR_EDGE_MASK;
		if (ack)
			out_be64(&node_iic->iic_is, ack);
		/* handle them */
		for (cascade = 63; cascade >= 0; cascade--)
			if (bits & (0x8000000000000000UL >> cascade))
				generic_handle_domain_irq(iic_host,
							  base | cascade);
		/* post-ack level interrupts */
		ack = bits & ~IIC_ISR_EDGE_MASK;
		if (ack)
			out_be64(&node_iic->iic_is, ack);
	}
	chip->irq_eoi(&desc->irq_data);
}

static struct irq_chip iic_ioexc_chip = {
	.name = "CELL-IOEX",
	.irq_mask = iic_mask,
	.irq_unmask = iic_unmask,
	.irq_eoi = iic_ioexc_eoi,
};

/* Get an IRQ number from the pending state register of the IIC */
static unsigned int iic_get_irq(void)
{
	struct cbe_iic_pending_bits pending;
	struct iic *iic;
	unsigned int virq;

	iic = this_cpu_ptr(&cpu_iic);
	*(unsigned long *) &pending =
		in_be64((u64 __iomem *) &iic->regs->pending_destr);
	if (!(pending.flags & CBE_IIC_IRQ_VALID))
		return 0;
	virq = irq_linear_revmap(iic_host, iic_pending_to_hwnum(pending));
	if (!virq)
		return 0;
	iic->eoi_stack[++iic->eoi_ptr] = pending.prio;
	BUG_ON(iic->eoi_ptr > 15);
	return virq;
}

void iic_setup_cpu(void)
{
	out_be64(&this_cpu_ptr(&cpu_iic)->regs->prio, 0xff);
}

u8 iic_get_target_id(int cpu)
{
	return per_cpu(cpu_iic, cpu).target_id;
}
EXPORT_SYMBOL_GPL(iic_get_target_id);

#ifdef CONFIG_SMP

/* Use the highest interrupt priorities for IPI */
static inline int iic_msg_to_irq(int msg)
{
	return IIC_IRQ_TYPE_IPI + 0xf - msg;
}
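
/*
 * A message is sent by writing priority (0xf - msg) << 4 to the target
 * thread's "generate" register; when the resulting IPI is taken,
 * iic_pending_to_hwnum() decodes it back to the same hw irq that
 * iic_msg_to_irq() produced here.
 */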

void iic_message_pass(int cpu, int msg)
{
	out_be64(&per_cpu(cpu_iic, cpu).regs->generate, (0xf - msg) << 4);
}

static void iic_request_ipi(int msg)
{
	int virq;

	virq = irq_create_mapping(iic_host, iic_msg_to_irq(msg));
	if (!virq) {
		printk(KERN_ERR
		       "iic: failed to map IPI %s\n", smp_ipi_name[msg]);
		return;
	}

	/*
	 * If smp_request_message_ipi encounters an error it will report
	 * it by itself; if the message is not needed it returns non-zero,
	 * in which case we dispose of the mapping again.
	 */
	if (smp_request_message_ipi(virq, msg))
		irq_dispose_mapping(virq);
}

void iic_request_IPIs(void)
{
	iic_request_ipi(PPC_MSG_CALL_FUNCTION);
	iic_request_ipi(PPC_MSG_RESCHEDULE);
	iic_request_ipi(PPC_MSG_TICK_BROADCAST);
	iic_request_ipi(PPC_MSG_NMI_IPI);
}

#endif /* CONFIG_SMP */

static int iic_host_match(struct irq_domain *h, struct device_node *node,
			  enum irq_domain_bus_token bus_token)
{
	return of_device_is_compatible(node,
				    "IBM,CBEA-Internal-Interrupt-Controller");
}

static int iic_host_map(struct irq_domain *h, unsigned int virq,
			irq_hw_number_t hw)
{
	switch (hw & IIC_IRQ_TYPE_MASK) {
	case IIC_IRQ_TYPE_IPI:
		irq_set_chip_and_handler(virq, &iic_chip, handle_percpu_irq);
		break;
	case IIC_IRQ_TYPE_IOEXC:
		irq_set_chip_and_handler(virq, &iic_ioexc_chip,
					 handle_edge_eoi_irq);
		break;
	default:
		irq_set_chip_and_handler(virq, &iic_chip, handle_edge_eoi_irq);
	}
	return 0;
}
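
/*
 * Device tree interrupt specifiers are a single cell: the BE node in
 * bits 31:24, the IO exception number in bits 23:16, the class in
 * bits 15:8 and the unit in bits 7:0.
 */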

static int iic_host_xlate(struct irq_domain *h, struct device_node *ct,
			  const u32 *intspec, unsigned int intsize,
			  irq_hw_number_t *out_hwirq, unsigned int *out_flags)
{
	unsigned int node, ext, unit, class;
	const u32 *val;

	if (!of_device_is_compatible(ct,
				     "IBM,CBEA-Internal-Interrupt-Controller"))
		return -ENODEV;
	if (intsize != 1)
		return -ENODEV;
	val = of_get_property(ct, "#interrupt-cells", NULL);
	if (val == NULL || *val != 1)
		return -ENODEV;

	node = intspec[0] >> 24;
	ext = (intspec[0] >> 16) & 0xff;
	class = (intspec[0] >> 8) & 0xff;
	unit = intspec[0] & 0xff;

	/* Check if node is in supported range */
	if (node > 1)
		return -EINVAL;

	/* Build up interrupt number, special case for IO exceptions */
	*out_hwirq = (node << IIC_IRQ_NODE_SHIFT);
	if (unit == IIC_UNIT_IIC && class == 1)
		*out_hwirq |= IIC_IRQ_TYPE_IOEXC | ext;
	else
		*out_hwirq |= IIC_IRQ_TYPE_NORMAL |
			(class << IIC_IRQ_CLASS_SHIFT) | unit;

	/* Dummy flags, ignored by iic code */
	*out_flags = IRQ_TYPE_EDGE_RISING;

	return 0;
}

static const struct irq_domain_ops iic_host_ops = {
	.match = iic_host_match,
	.map = iic_host_map,
	.xlate = iic_host_xlate,
};
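
/*
 * The target id programmed for each hw thread puts the BE node number in
 * bit 4 and selects destination unit 0xe for hw thread 0 or 0xf for hw
 * thread 1; it is handed out through iic_get_target_id() for use by
 * other platform code.
 */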

static void __init init_one_iic(unsigned int hw_cpu, unsigned long addr,
				struct device_node *node)
{
	/* XXX FIXME: should locate the linux CPU number from the HW cpu
	 * number properly. We are lucky for now
	 */
	struct iic *iic = &per_cpu(cpu_iic, hw_cpu);

	iic->regs = ioremap(addr, sizeof(struct cbe_iic_thread_regs));
	BUG_ON(iic->regs == NULL);

	iic->target_id = ((hw_cpu & 2) << 3) | ((hw_cpu & 1) ? 0xf : 0xe);
	iic->eoi_stack[0] = 0xff;
	iic->node = of_node_get(node);
	out_be64(&iic->regs->prio, 0);

	printk(KERN_INFO "IIC for CPU %d target id 0x%x : %pOF\n",
	       hw_cpu, iic->target_id, node);
}
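
/*
 * Walk the "interrupt-controller" nodes, map the two per-thread register
 * blocks described by "ibm,interrupt-server-ranges", and hook up the IO
 * exception cascade for each BE node.
 */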

static int __init setup_iic(void)
{
	struct device_node *dn;
	struct resource r0, r1;
	unsigned int node, cascade, found = 0;
	struct cbe_iic_regs __iomem *node_iic;
	const u32 *np;

	for_each_node_by_name(dn, "interrupt-controller") {
		if (!of_device_is_compatible(dn,
				     "IBM,CBEA-Internal-Interrupt-Controller"))
			continue;
		np = of_get_property(dn, "ibm,interrupt-server-ranges", NULL);
		if (np == NULL) {
			printk(KERN_WARNING "IIC: CPU association not found\n");
			of_node_put(dn);
			return -ENODEV;
		}
		if (of_address_to_resource(dn, 0, &r0) ||
		    of_address_to_resource(dn, 1, &r1)) {
			printk(KERN_WARNING "IIC: Can't resolve addresses\n");
			of_node_put(dn);
			return -ENODEV;
		}
		found++;
		init_one_iic(np[0], r0.start, dn);
		init_one_iic(np[1], r1.start, dn);

		/* Setup cascade for IO exceptions. XXX cleanup tricks to get
		 * node vs CPU etc...
		 * Note that we configure the IIC_IRR here with a hard coded
		 * priority of 1. We might want to improve that later.
		 */
		node = np[0] >> 1;
		node_iic = cbe_get_cpu_iic_regs(np[0]);
		cascade = node << IIC_IRQ_NODE_SHIFT;
		cascade |= 1 << IIC_IRQ_CLASS_SHIFT;
		cascade |= IIC_UNIT_IIC;
		cascade = irq_create_mapping(iic_host, cascade);
		if (!cascade)
			continue;
		/*
		 * irq_data is a generic pointer that gets passed back
		 * to us later, so the forced cast is fine.
		 */
		irq_set_handler_data(cascade, (void __force *)node_iic);
		irq_set_chained_handler(cascade, iic_ioexc_cascade);
		out_be64(&node_iic->iic_ir,
			 (1 << 12)         /* priority */ |
			 (node << 4)       /* dest node */ |
			 IIC_UNIT_THREAD_0 /* route them to thread 0 */);

		/* Flush pending (make sure it triggers if there is
		 * anything pending)
		 */
		out_be64(&node_iic->iic_is, 0xfffffffffffffffful);
	}

	if (found)
		return 0;
	else
		return -ENODEV;
}

void __init iic_init_IRQ(void)
{
	/* Setup an irq host data structure */
	iic_host = irq_domain_add_linear(NULL, IIC_SOURCE_COUNT, &iic_host_ops,
					 NULL);
	BUG_ON(iic_host == NULL);
	irq_set_default_host(iic_host);

	/* Discover and initialize iics */
	if (setup_iic() < 0)
		panic("IIC: Failed to initialize !\n");

	/* Set master interrupt handling function */
	ppc_md.get_irq = iic_get_irq;

	/* Enable on current CPU */
	iic_setup_cpu();
}
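
/*
 * Re-route interrupt delivery for a given CPU: the node number is
 * cpu >> 1 since each BE node exposes two hw threads, and the
 * destination unit picks thread 0 or thread 1 within that node.
 */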

void iic_set_interrupt_routing(int cpu, int thread, int priority)
{
	struct cbe_iic_regs __iomem *iic_regs = cbe_get_cpu_iic_regs(cpu);
	u64 iic_ir = 0;
	int node = cpu >> 1;

	/* Set which node and thread will handle the next interrupt */
	iic_ir |= CBE_IIC_IR_PRIO(priority) |
		  CBE_IIC_IR_DEST_NODE(node);
	if (thread == 0)
		iic_ir |= CBE_IIC_IR_DEST_UNIT(CBE_IIC_IR_PT_0);
	else
		iic_ir |= CBE_IIC_IR_DEST_UNIT(CBE_IIC_IR_PT_1);
	out_be64(&iic_regs->iic_ir, iic_ir);
}