  1. // SPDX-License-Identifier: GPL-2.0-or-later
  2. /*
  3. * Copyright 2011 IBM Corporation.
  4. */
  5. #include <linux/types.h>
  6. #include <linux/threads.h>
  7. #include <linux/kernel.h>
  8. #include <linux/irq.h>
  9. #include <linux/irqdomain.h>
  10. #include <linux/debugfs.h>
  11. #include <linux/smp.h>
  12. #include <linux/interrupt.h>
  13. #include <linux/seq_file.h>
  14. #include <linux/init.h>
  15. #include <linux/cpu.h>
  16. #include <linux/of.h>
  17. #include <linux/slab.h>
  18. #include <linux/spinlock.h>
  19. #include <linux/delay.h>
  20. #include <asm/io.h>
  21. #include <asm/smp.h>
  22. #include <asm/machdep.h>
  23. #include <asm/irq.h>
  24. #include <asm/errno.h>
  25. #include <asm/rtas.h>
  26. #include <asm/xics.h>
  27. #include <asm/firmware.h>
/* Globals common to all ICP/ICS implementations */
const struct icp_ops *icp_ops;

/* Default (single-CPU) interrupt server, set from the boot CPU */
unsigned int xics_default_server = 0xff;
/* Global distribution server, taken from the device tree when present */
unsigned int xics_default_distrib_server = 0;
/* Width in bits of an interrupt server number; may be overridden from DT */
unsigned int xics_interrupt_server_size = 8;

/* Per-CPU saved-CPPR state, used e.g. by xics_teardown_cpu() */
DEFINE_PER_CPU(struct xics_cppr, xics_cppr);

/* The XICS IRQ domain, created in xics_allocate_domain() */
struct irq_domain *xics_host;
/* The single registered interrupt source controller (may be NULL) */
static struct ics *xics_ics;
/*
 * Record the interrupt server numbers for the boot CPU in
 * xics_default_server and xics_default_distrib_server. The distribution
 * server comes from the "ibm,ppc-interrupt-gserver#s" property; when the
 * property is absent, both default to the boot CPU's hardware id.
 */
void xics_update_irq_servers(void)
{
	int i, j;
	struct device_node *np;
	u32 ilen;
	const __be32 *ireg;
	u32 hcpuid;

	/* Find the server numbers for the boot cpu. */
	np = of_get_cpu_node(boot_cpuid, NULL);
	BUG_ON(!np);

	hcpuid = get_hard_smp_processor_id(boot_cpuid);
	xics_default_server = xics_default_distrib_server = hcpuid;

	pr_devel("xics: xics_default_server = 0x%x\n", xics_default_server);

	ireg = of_get_property(np, "ibm,ppc-interrupt-gserver#s", &ilen);
	if (!ireg) {
		of_node_put(np);
		return;
	}

	/* ilen is in bytes; the property is a list of (server, gserver) pairs */
	i = ilen / sizeof(int);

	/* Global interrupt distribution server is specified in the last
	 * entry of "ibm,ppc-interrupt-gserver#s" property. Get the last
	 * entry from this property for current boot cpu id and use it as
	 * default distribution server
	 */
	for (j = 0; j < i; j += 2) {
		if (be32_to_cpu(ireg[j]) == hcpuid) {
			xics_default_distrib_server = be32_to_cpu(ireg[j+1]);
			break;
		}
	}

	pr_devel("xics: xics_default_distrib_server = 0x%x\n",
		 xics_default_distrib_server);

	of_node_put(np);
}
/* GIQ stuff, currently only supported on RTAS setups, will have
 * to be sorted properly for bare metal
 */
/*
 * Join (@join != 0) or leave (@join == 0) the Global Interrupt Queue for
 * server @gserver via the RTAS set-indicator call. No-op when
 * CONFIG_PPC_RTAS is not built in or the indicator is absent.
 */
void xics_set_cpu_giq(unsigned int gserver, unsigned int join)
{
#ifdef CONFIG_PPC_RTAS
	int index;
	int status;

	if (!rtas_indicator_present(GLOBAL_INTERRUPT_QUEUE, NULL))
		return;

	/* GIQ indicator indexes count down from the top of the server-number space */
	index = (1UL << xics_interrupt_server_size) - 1 - gserver;

	status = rtas_set_indicator_fast(GLOBAL_INTERRUPT_QUEUE, index, join);

	WARN(status < 0, "set-indicator(%d, %d, %u) returned %d\n",
	     GLOBAL_INTERRUPT_QUEUE, index, join, status);
#endif
}
/*
 * Bring the calling CPU online from the XICS point of view: open its
 * interrupt priority to LOWEST_PRIORITY and join the global interrupt
 * queue on the default distribution server.
 */
void xics_setup_cpu(void)
{
	icp_ops->set_priority(LOWEST_PRIORITY);

	xics_set_cpu_giq(xics_default_distrib_server, 1);
}
/*
 * Handle an interrupt vector that no handler claims: log it and ask the
 * source controller to mask it so it cannot fire again.
 */
void xics_mask_unknown_vec(unsigned int vec)
{
	pr_err("Interrupt 0x%x (real) is invalid, disabling it.\n", vec);

	if (WARN_ON(!xics_ics))
		return;

	xics_ics->mask_unknown(xics_ics, vec);
}
#ifdef CONFIG_SMP
/*
 * Map and request the XICS_IPI interrupt. The flow handler itself was
 * installed by the domain map hook; here we only attach the ICP's IPI
 * action. Failure is fatal (BUG) since SMP cannot work without IPIs.
 */
static void __init xics_request_ipi(void)
{
	unsigned int ipi;

	ipi = irq_create_mapping(xics_host, XICS_IPI);
	BUG_ON(!ipi);

	/*
	 * IPIs are marked IRQF_PERCPU. The handler was set in map.
	 */
	BUG_ON(request_irq(ipi, icp_ops->ipi_action,
			   IRQF_NO_DEBUG | IRQF_PERCPU | IRQF_NO_THREAD, "IPI", NULL));
}

/* Hook up IPI delivery for SMP bring-up */
void __init xics_smp_probe(void)
{
	/* Register all the IPIs */
	xics_request_ipi();

	/* Setup cause_ipi callback based on which ICP is used */
	smp_ops->cause_ipi = icp_ops->cause_ipi;
}
#endif /* CONFIG_SMP */
/*
 * Quiesce XICS on the calling CPU: reset the saved-CPPR stack, raise the
 * CPU priority to 0 (mask everything) and let the ICP backend do its own
 * teardown. noinstr: runs on low-level paths where instrumentation is
 * unsafe.
 */
noinstr void xics_teardown_cpu(void)
{
	struct xics_cppr *os_cppr = this_cpu_ptr(&xics_cppr);

	/*
	 * we have to reset the cppr index to 0 because we're
	 * not going to return from the IPI
	 */
	os_cppr->index = 0;
	icp_ops->set_priority(0);
	icp_ops->teardown_cpu();
}
/*
 * Teardown for kexec: quiesce the CPU, flush any pending IPI, and (for
 * secondary CPUs only) drop out of the global interrupt queue.
 */
noinstr void xics_kexec_teardown_cpu(int secondary)
{
	xics_teardown_cpu();

	icp_ops->flush_ipi();

	/*
	 * Some machines need to have at least one cpu in the GIQ,
	 * so leave the master cpu in the group.
	 */
	if (secondary)
		xics_set_cpu_giq(xics_default_distrib_server, 0);
}
#ifdef CONFIG_HOTPLUG_CPU

/* Interrupts are disabled. */
/*
 * Move interrupts away from this (offlining) CPU: leave the GIQ, then
 * walk every IRQ descriptor and re-target any interrupt whose server is
 * this CPU to the all-cpus mask. Called with interrupts disabled.
 */
void xics_migrate_irqs_away(void)
{
	int cpu = smp_processor_id(), hw_cpu = hard_smp_processor_id();
	unsigned int irq, virq;
	struct irq_desc *desc;

	pr_debug("%s: CPU %u\n", __func__, cpu);

	/* If we used to be the default server, move to the new "boot_cpuid" */
	if (hw_cpu == xics_default_server)
		xics_update_irq_servers();

	/* Reject any interrupt that was queued to us... */
	icp_ops->set_priority(0);

	/* Remove ourselves from the global interrupt queue */
	xics_set_cpu_giq(xics_default_distrib_server, 0);

	for_each_irq_desc(virq, desc) {
		struct irq_chip *chip;
		long server;
		unsigned long flags;
		struct irq_data *irqd;

		/* We can't set affinity on ISA interrupts */
		if (virq < NR_IRQS_LEGACY)
			continue;
		/* We only need to migrate enabled IRQS */
		if (!desc->action)
			continue;
		/* We need a mapping in the XICS IRQ domain */
		irqd = irq_domain_get_irq_data(xics_host, virq);
		if (!irqd)
			continue;
		irq = irqd_to_hwirq(irqd);
		/* We need to get IPIs still. */
		if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS)
			continue;
		chip = irq_desc_get_chip(desc);
		if (!chip || !chip->irq_set_affinity)
			continue;

		raw_spin_lock_irqsave(&desc->lock, flags);

		/* Locate interrupt server */
		server = xics_ics->get_server(xics_ics, irq);
		if (server < 0) {
			pr_err("%s: Can't find server for irq %d/%x\n",
			       __func__, virq, irq);
			goto unlock;
		}

		/* We only support delivery to all cpus or to one cpu.
		 * The irq has to be migrated only in the single cpu
		 * case.
		 */
		if (server != hw_cpu)
			goto unlock;

		/* This is expected during cpu offline. */
		if (cpu_online(cpu))
			pr_warn("IRQ %u affinity broken off cpu %u\n",
				virq, cpu);

		/* Reset affinity to all cpus */
		raw_spin_unlock_irqrestore(&desc->lock, flags);
		irq_set_affinity(virq, cpu_all_mask);
		continue;
unlock:
		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}

	/* Allow "sufficient" time to drop any inflight IRQ's */
	mdelay(5);

	/*
	 * Allow IPIs again. This is done at the very end, after migrating all
	 * interrupts, the expectation is that we'll only get woken up by an IPI
	 * interrupt beyond this point, but leave externals masked just to be
	 * safe. If we're using icp-opal this may actually allow all
	 * interrupts anyway, but that should be OK.
	 */
	icp_ops->set_priority(DEFAULT_PRIORITY);
}
#endif /* CONFIG_HOTPLUG_CPU */
  214. #ifdef CONFIG_SMP
  215. /*
  216. * For the moment we only implement delivery to all cpus or one cpu.
  217. *
  218. * If the requested affinity is cpu_all_mask, we set global affinity.
  219. * If not we set it to the first cpu in the mask, even if multiple cpus
  220. * are set. This is so things like irqbalance (which set core and package
  221. * wide affinities) do the right thing.
  222. *
  223. * We need to fix this to implement support for the links
  224. */
  225. int xics_get_irq_server(unsigned int virq, const struct cpumask *cpumask,
  226. unsigned int strict_check)
  227. {
  228. if (!distribute_irqs)
  229. return xics_default_server;
  230. if (!cpumask_subset(cpu_possible_mask, cpumask)) {
  231. int server = cpumask_first_and(cpu_online_mask, cpumask);
  232. if (server < nr_cpu_ids)
  233. return get_hard_smp_processor_id(server);
  234. if (strict_check)
  235. return -1;
  236. }
  237. /*
  238. * Workaround issue with some versions of JS20 firmware that
  239. * deliver interrupts to cpus which haven't been started. This
  240. * happens when using the maxcpus= boot option.
  241. */
  242. if (cpumask_equal(cpu_online_mask, cpu_present_mask))
  243. return xics_default_distrib_server;
  244. return xics_default_server;
  245. }
  246. #endif /* CONFIG_SMP */
/*
 * irq_domain match hook: defer to the registered source controller to
 * decide whether @node belongs to this domain. Returns 0 (no match)
 * when no ICS has been registered.
 */
static int xics_host_match(struct irq_domain *h, struct device_node *node,
			   enum irq_domain_bus_token bus_token)
{
	if (WARN_ON(!xics_ics))
		return 0;
	return xics_ics->host_match(xics_ics, node) ? 1 : 0;
}
/* Dummies */
/* IPIs are always enabled; mask/unmask have nothing to do */
static void xics_ipi_unmask(struct irq_data *d) { }
static void xics_ipi_mask(struct irq_data *d) { }

/*
 * Minimal chip used for the IPI virq. irq_eoi is filled in by
 * xics_init() once the ICP backend is known.
 */
static struct irq_chip xics_ipi_chip = {
	.name = "XICS",
	.irq_eoi = NULL, /* Patched at init time */
	.irq_mask = xics_ipi_mask,
	.irq_unmask = xics_ipi_unmask,
};
/*
 * irq_domain map hook: install chip and flow handler for a freshly
 * mapped virq. IPIs get the dummy per-cpu chip; everything else is
 * validated by and handed to the source controller.
 */
static int xics_host_map(struct irq_domain *domain, unsigned int virq,
			 irq_hw_number_t hwirq)
{
	pr_devel("xics: map virq %d, hwirq 0x%lx\n", virq, hwirq);

	/*
	 * Mark interrupts as edge sensitive by default so that resend
	 * actually works. The device-tree parsing will turn the LSIs
	 * back to level.
	 */
	irq_clear_status_flags(virq, IRQ_LEVEL);

	/* Don't call into ICS for IPIs */
	if (hwirq == XICS_IPI) {
		irq_set_chip_and_handler(virq, &xics_ipi_chip,
					 handle_percpu_irq);
		return 0;
	}

	if (WARN_ON(!xics_ics))
		return -EINVAL;

	/* Reject hwirqs the source controller does not accept */
	if (xics_ics->check(xics_ics, hwirq))
		return -EINVAL;

	/* Let the ICS be the chip data for the XICS domain. For ICS native */
	irq_domain_set_info(domain, virq, hwirq, xics_ics->chip,
			    xics_ics, handle_fasteoi_irq, NULL, NULL);

	return 0;
}
  288. static int xics_host_xlate(struct irq_domain *h, struct device_node *ct,
  289. const u32 *intspec, unsigned int intsize,
  290. irq_hw_number_t *out_hwirq, unsigned int *out_flags)
  291. {
  292. *out_hwirq = intspec[0];
  293. /*
  294. * If intsize is at least 2, we look for the type in the second cell,
  295. * we assume the LSB indicates a level interrupt.
  296. */
  297. if (intsize > 1) {
  298. if (intspec[1] & 1)
  299. *out_flags = IRQ_TYPE_LEVEL_LOW;
  300. else
  301. *out_flags = IRQ_TYPE_EDGE_RISING;
  302. } else
  303. *out_flags = IRQ_TYPE_LEVEL_LOW;
  304. return 0;
  305. }
  306. int xics_set_irq_type(struct irq_data *d, unsigned int flow_type)
  307. {
  308. /*
  309. * We only support these. This has really no effect other than setting
  310. * the corresponding descriptor bits mind you but those will in turn
  311. * affect the resend function when re-enabling an edge interrupt.
  312. *
  313. * Set set the default to edge as explained in map().
  314. */
  315. if (flow_type == IRQ_TYPE_DEFAULT || flow_type == IRQ_TYPE_NONE)
  316. flow_type = IRQ_TYPE_EDGE_RISING;
  317. if (flow_type != IRQ_TYPE_EDGE_RISING &&
  318. flow_type != IRQ_TYPE_LEVEL_LOW)
  319. return -EINVAL;
  320. irqd_set_trigger_type(d, flow_type);
  321. return IRQ_SET_MASK_OK_NOCOPY;
  322. }
/*
 * irq_chip retrigger hook: prepare CPPR state for a software resend and
 * let the core perform it (return 0).
 */
int xics_retrigger(struct irq_data *data)
{
	/*
	 * We need to push a dummy CPPR when retriggering, since the subsequent
	 * EOI will try to pop it. Passing 0 works, as the function hard codes
	 * the priority value anyway.
	 */
	xics_push_cppr(0);

	/* Tell the core to do a soft retrigger */
	return 0;
}
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
/*
 * Hierarchical-domain translate hook: reuse the legacy xlate logic on
 * the fwspec's OF node and parameter cells.
 */
static int xics_host_domain_translate(struct irq_domain *d, struct irq_fwspec *fwspec,
				      unsigned long *hwirq, unsigned int *type)
{
	return xics_host_xlate(d, to_of_node(fwspec->fwnode), fwspec->param,
			       fwspec->param_count, hwirq, type);
}
  341. static int xics_host_domain_alloc(struct irq_domain *domain, unsigned int virq,
  342. unsigned int nr_irqs, void *arg)
  343. {
  344. struct irq_fwspec *fwspec = arg;
  345. irq_hw_number_t hwirq;
  346. unsigned int type = IRQ_TYPE_NONE;
  347. int i, rc;
  348. rc = xics_host_domain_translate(domain, fwspec, &hwirq, &type);
  349. if (rc)
  350. return rc;
  351. pr_debug("%s %d/%lx #%d\n", __func__, virq, hwirq, nr_irqs);
  352. for (i = 0; i < nr_irqs; i++)
  353. irq_domain_set_info(domain, virq + i, hwirq + i, xics_ics->chip,
  354. xics_ics, handle_fasteoi_irq, NULL, NULL);
  355. return 0;
  356. }
/*
 * Hierarchical-domain free hook: nothing to undo per-IRQ, just trace.
 */
static void xics_host_domain_free(struct irq_domain *domain,
				  unsigned int virq, unsigned int nr_irqs)
{
	pr_debug("%s %d #%d\n", __func__, virq, nr_irqs);
}
#endif
/* Domain ops; the hierarchy hooks only exist with IRQ_DOMAIN_HIERARCHY */
static const struct irq_domain_ops xics_host_ops = {
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
	.alloc = xics_host_domain_alloc,
	.free = xics_host_domain_free,
	.translate = xics_host_domain_translate,
#endif
	.match = xics_host_match,
	.map = xics_host_map,
	.xlate = xics_host_xlate,
};
/*
 * Create the XICS radix-tree IRQ domain and make it the default host.
 * Returns 0 on success or -ENOMEM.
 */
static int __init xics_allocate_domain(void)
{
	struct fwnode_handle *fn;

	fn = irq_domain_alloc_named_fwnode("XICS");
	if (!fn)
		return -ENOMEM;

	xics_host = irq_domain_create_tree(fn, &xics_host_ops, NULL);
	if (!xics_host) {
		/* Domain creation failed: release the fwnode we allocated */
		irq_domain_free_fwnode(fn);
		return -ENOMEM;
	}

	irq_set_default_host(xics_host);
	return 0;
}
/*
 * Record the interrupt source controller. Only a single ICS is
 * supported; further registrations are ignored with a one-time warning.
 */
void __init xics_register_ics(struct ics *ics)
{
	if (WARN_ONCE(xics_ics, "XICS: Source Controller is already defined !"))
		return;
	xics_ics = ics;
}
/*
 * Read the interrupt server number width from the device tree,
 * overriding the default in xics_interrupt_server_size when found.
 */
static void __init xics_get_server_size(void)
{
	struct device_node *np;
	const __be32 *isize;

	/* We fetch the interrupt server size from the first ICS node
	 * we find if any
	 */
	np = of_find_compatible_node(NULL, NULL, "ibm,ppc-xics");
	if (!np)
		return;

	isize = of_get_property(np, "ibm,interrupt-server#-size", NULL);
	if (isize)
		xics_interrupt_server_size = be32_to_cpu(*isize);

	of_node_put(np);
}
  408. void __init xics_init(void)
  409. {
  410. int rc = -1;
  411. /* Fist locate ICP */
  412. if (firmware_has_feature(FW_FEATURE_LPAR))
  413. rc = icp_hv_init();
  414. if (rc < 0) {
  415. rc = icp_native_init();
  416. if (rc == -ENODEV)
  417. rc = icp_opal_init();
  418. }
  419. if (rc < 0) {
  420. pr_warn("XICS: Cannot find a Presentation Controller !\n");
  421. return;
  422. }
  423. /* Copy get_irq callback over to ppc_md */
  424. ppc_md.get_irq = icp_ops->get_irq;
  425. /* Patch up IPI chip EOI */
  426. xics_ipi_chip.irq_eoi = icp_ops->eoi;
  427. /* Now locate ICS */
  428. rc = ics_rtas_init();
  429. if (rc < 0)
  430. rc = ics_opal_init();
  431. if (rc < 0)
  432. rc = ics_native_init();
  433. if (rc < 0)
  434. pr_warn("XICS: Cannot find a Source Controller !\n");
  435. /* Initialize common bits */
  436. xics_get_server_size();
  437. xics_update_irq_servers();
  438. rc = xics_allocate_domain();
  439. if (rc < 0)
  440. pr_err("XICS: Failed to create IRQ domain");
  441. xics_setup_cpu();
  442. }