/* icp-hv.c */
  1. // SPDX-License-Identifier: GPL-2.0-or-later
  2. /*
  3. * Copyright 2011 IBM Corporation.
  4. */
  5. #include <linux/types.h>
  6. #include <linux/kernel.h>
  7. #include <linux/irq.h>
  8. #include <linux/smp.h>
  9. #include <linux/interrupt.h>
  10. #include <linux/irqdomain.h>
  11. #include <linux/cpu.h>
  12. #include <linux/of.h>
  13. #include <asm/smp.h>
  14. #include <asm/irq.h>
  15. #include <asm/errno.h>
  16. #include <asm/xics.h>
  17. #include <asm/io.h>
  18. #include <asm/hvcall.h>
  19. static inline unsigned int icp_hv_get_xirr(unsigned char cppr)
  20. {
  21. unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
  22. long rc;
  23. unsigned int ret = XICS_IRQ_SPURIOUS;
  24. rc = plpar_hcall(H_XIRR, retbuf, cppr);
  25. if (rc == H_SUCCESS) {
  26. ret = (unsigned int)retbuf[0];
  27. } else {
  28. pr_err("%s: bad return code xirr cppr=0x%x returned %ld\n",
  29. __func__, cppr, rc);
  30. WARN_ON_ONCE(1);
  31. }
  32. return ret;
  33. }
  34. static inline void icp_hv_set_cppr(u8 value)
  35. {
  36. long rc = plpar_hcall_norets(H_CPPR, value);
  37. if (rc != H_SUCCESS) {
  38. pr_err("%s: bad return code cppr cppr=0x%x returned %ld\n",
  39. __func__, value, rc);
  40. WARN_ON_ONCE(1);
  41. }
  42. }
  43. static inline void icp_hv_set_xirr(unsigned int value)
  44. {
  45. long rc = plpar_hcall_norets(H_EOI, value);
  46. if (rc != H_SUCCESS) {
  47. pr_err("%s: bad return code eoi xirr=0x%x returned %ld\n",
  48. __func__, value, rc);
  49. WARN_ON_ONCE(1);
  50. icp_hv_set_cppr(value >> 24);
  51. }
  52. }
  53. static inline void icp_hv_set_qirr(int n_cpu , u8 value)
  54. {
  55. int hw_cpu = get_hard_smp_processor_id(n_cpu);
  56. long rc;
  57. /* Make sure all previous accesses are ordered before IPI sending */
  58. mb();
  59. rc = plpar_hcall_norets(H_IPI, hw_cpu, value);
  60. if (rc != H_SUCCESS) {
  61. pr_err("%s: bad return code qirr cpu=%d hw_cpu=%d mfrr=0x%x "
  62. "returned %ld\n", __func__, n_cpu, hw_cpu, value, rc);
  63. WARN_ON_ONCE(1);
  64. }
  65. }
  66. static void icp_hv_eoi(struct irq_data *d)
  67. {
  68. unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
  69. iosync();
  70. icp_hv_set_xirr((xics_pop_cppr() << 24) | hw_irq);
  71. }
  72. static void icp_hv_teardown_cpu(void)
  73. {
  74. int cpu = smp_processor_id();
  75. /* Clear any pending IPI */
  76. icp_hv_set_qirr(cpu, 0xff);
  77. }
  78. static void icp_hv_flush_ipi(void)
  79. {
  80. /* We take the ipi irq but and never return so we
  81. * need to EOI the IPI, but want to leave our priority 0
  82. *
  83. * should we check all the other interrupts too?
  84. * should we be flagging idle loop instead?
  85. * or creating some task to be scheduled?
  86. */
  87. icp_hv_set_xirr((0x00 << 24) | XICS_IPI);
  88. }
  89. static unsigned int icp_hv_get_irq(void)
  90. {
  91. unsigned int xirr = icp_hv_get_xirr(xics_cppr_top());
  92. unsigned int vec = xirr & 0x00ffffff;
  93. unsigned int irq;
  94. if (vec == XICS_IRQ_SPURIOUS)
  95. return 0;
  96. irq = irq_find_mapping(xics_host, vec);
  97. if (likely(irq)) {
  98. xics_push_cppr(vec);
  99. return irq;
  100. }
  101. /* We don't have a linux mapping, so have rtas mask it. */
  102. xics_mask_unknown_vec(vec);
  103. /* We might learn about it later, so EOI it */
  104. icp_hv_set_xirr(xirr);
  105. return 0;
  106. }
/*
 * Set this CPU's base interrupt priority: record it in the per-cpu
 * XICS state, then push it to the hypervisor via H_CPPR.
 */
static void icp_hv_set_cpu_priority(unsigned char cppr)
{
	xics_set_base_cppr(cppr);
	icp_hv_set_cppr(cppr);
	/* order the priority update before any subsequent accesses */
	iosync();
}
  113. #ifdef CONFIG_SMP
/* Raise an IPI on @cpu by setting its MFRR to IPI_PRIORITY. */
static void icp_hv_cause_ipi(int cpu)
{
	icp_hv_set_qirr(cpu, IPI_PRIORITY);
}
  118. static irqreturn_t icp_hv_ipi_action(int irq, void *dev_id)
  119. {
  120. int cpu = smp_processor_id();
  121. icp_hv_set_qirr(cpu, 0xff);
  122. return smp_ipi_demux();
  123. }
  124. #endif /* CONFIG_SMP */
/* ICP backend operations for the hypervisor (hcall-based) XICS flavour. */
static const struct icp_ops icp_hv_ops = {
	.get_irq = icp_hv_get_irq,
	.eoi = icp_hv_eoi,
	.set_priority = icp_hv_set_cpu_priority,
	.teardown_cpu = icp_hv_teardown_cpu,
	.flush_ipi = icp_hv_flush_ipi,
#ifdef CONFIG_SMP
	.ipi_action = icp_hv_ipi_action,
	.cause_ipi = icp_hv_cause_ipi,
#endif
};
  136. int __init icp_hv_init(void)
  137. {
  138. struct device_node *np;
  139. np = of_find_compatible_node(NULL, NULL, "ibm,ppc-xicp");
  140. if (!np)
  141. np = of_find_node_by_type(NULL,
  142. "PowerPC-External-Interrupt-Presentation");
  143. if (!np)
  144. return -ENODEV;
  145. icp_ops = &icp_hv_ops;
  146. of_node_put(np);
  147. return 0;
  148. }