irq-sp7021-intc.c
  1. // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
  2. /*
  3. * Copyright (C) Sunplus Technology Co., Ltd.
  4. * All rights reserved.
  5. */
  6. #include <linux/irq.h>
  7. #include <linux/irqdomain.h>
  8. #include <linux/io.h>
  9. #include <linux/irqchip.h>
  10. #include <linux/irqchip/chained_irq.h>
  11. #include <linux/of_address.h>
  12. #include <linux/of_irq.h>
/* hwirq numbers handled by this controller */
#define SP_INTC_HWIRQ_MIN 0
#define SP_INTC_HWIRQ_MAX 223
#define SP_INTC_NR_IRQS (SP_INTC_HWIRQ_MAX - SP_INTC_HWIRQ_MIN + 1)
/* sources are banked 32 per 32-bit register ("group") */
#define SP_INTC_NR_GROUPS DIV_ROUND_UP(SP_INTC_NR_IRQS, 32)
#define SP_INTC_REG_SIZE (SP_INTC_NR_GROUPS * 4)
/* REG_GROUP_0 regs: consecutive per-group register arrays from base g0 */
#define REG_INTR_TYPE (sp_intc.g0)
#define REG_INTR_POLARITY (REG_INTR_TYPE + SP_INTC_REG_SIZE)
#define REG_INTR_PRIORITY (REG_INTR_POLARITY + SP_INTC_REG_SIZE)
#define REG_INTR_MASK (REG_INTR_PRIORITY + SP_INTC_REG_SIZE)
/* REG_GROUP_1 regs: consecutive per-group register arrays from base g1 */
#define REG_INTR_CLEAR (sp_intc.g1)
#define REG_MASKED_EXT1 (REG_INTR_CLEAR + SP_INTC_REG_SIZE)
#define REG_MASKED_EXT0 (REG_MASKED_EXT1 + SP_INTC_REG_SIZE)
/* single register: bitmaps of groups with pending EXT_INT0/EXT_INT1 irqs */
#define REG_INTR_GROUP (REG_INTR_CLEAR + 31 * 4)
#define GROUP_MASK (BIT(SP_INTC_NR_GROUPS) - 1)
#define GROUP_SHIFT_EXT1 (0)
#define GROUP_SHIFT_EXT0 (8)
/*
 * When GPIO_INT0~7 set to edge trigger, doesn't work properly.
 * WORKAROUND: change it to level trigger, and toggle the polarity
 * at ACK/Handler to make the HW work.
 */
#define GPIO_INT0_HWIRQ 120
#define GPIO_INT7_HWIRQ 127
#define IS_GPIO_INT(irq) \
({ \
	u32 i = irq; \
	(i >= GPIO_INT0_HWIRQ) && (i <= GPIO_INT7_HWIRQ); \
})
/* index of states */
enum {
	_IS_EDGE = 0,
	_IS_LOW,
	_IS_ACTIVE
};
/* 3 state bits per GPIO_INT hwirq inside the sp_intc.states bitmap */
#define STATE_BIT(irq, idx) (((irq) - GPIO_INT0_HWIRQ) * 3 + (idx))
#define ASSIGN_STATE(irq, idx, v) assign_bit(STATE_BIT(irq, idx), sp_intc.states, v)
#define TEST_STATE(irq, idx) test_bit(STATE_BIT(irq, idx), sp_intc.states)
/* Driver-global controller state (single instance per SoC). */
static struct sp_intctl {
	/*
	 * REG_GROUP_0: include type/polarity/priority/mask regs.
	 * REG_GROUP_1: include clear/masked_ext0/masked_ext1/group regs.
	 */
	void __iomem *g0; // REG_GROUP_0 base
	void __iomem *g1; // REG_GROUP_1 base
	struct irq_domain *domain;
	raw_spinlock_t lock; /* serializes read-modify-write of banked regs */
	/*
	 * store GPIO_INT states
	 * each interrupt has 3 states: is_edge, is_low, is_active
	 */
	DECLARE_BITMAP(states, (GPIO_INT7_HWIRQ - GPIO_INT0_HWIRQ + 1) * 3);
} sp_intc;
/* forward declaration: referenced by the domain .map callback below */
static struct irq_chip sp_intc_chip;
  68. static void sp_intc_assign_bit(u32 hwirq, void __iomem *base, bool value)
  69. {
  70. u32 offset, mask;
  71. unsigned long flags;
  72. void __iomem *reg;
  73. offset = (hwirq / 32) * 4;
  74. reg = base + offset;
  75. raw_spin_lock_irqsave(&sp_intc.lock, flags);
  76. mask = readl_relaxed(reg);
  77. if (value)
  78. mask |= BIT(hwirq % 32);
  79. else
  80. mask &= ~BIT(hwirq % 32);
  81. writel_relaxed(mask, reg);
  82. raw_spin_unlock_irqrestore(&sp_intc.lock, flags);
  83. }
/*
 * .irq_ack: acknowledge @d by writing its bit to the clear register.
 *
 * WORKAROUND: a GPIO_INT requested as edge is actually programmed as
 * level (see sp_intc_set_type()).  Flip the polarity here so the level
 * condition is consumed, and mark the line active so the cascaded
 * handler can restore the polarity instead of re-dispatching it.
 */
static void sp_intc_ack_irq(struct irq_data *d)
{
	u32 hwirq = d->hwirq;

	if (unlikely(IS_GPIO_INT(hwirq) && TEST_STATE(hwirq, _IS_EDGE))) { // WORKAROUND
		/* invert polarity relative to the requested edge direction */
		sp_intc_assign_bit(hwirq, REG_INTR_POLARITY, !TEST_STATE(hwirq, _IS_LOW));
		ASSIGN_STATE(hwirq, _IS_ACTIVE, true);
	}
	sp_intc_assign_bit(hwirq, REG_INTR_CLEAR, 1);
}
  93. static void sp_intc_mask_irq(struct irq_data *d)
  94. {
  95. sp_intc_assign_bit(d->hwirq, REG_INTR_MASK, 0);
  96. }
  97. static void sp_intc_unmask_irq(struct irq_data *d)
  98. {
  99. sp_intc_assign_bit(d->hwirq, REG_INTR_MASK, 1);
  100. }
/*
 * .irq_set_type: program trigger type and polarity for @d.
 *
 * WORKAROUND for GPIO_INT0-7: edge trigger does not work in HW, so the
 * requested edge configuration is recorded in sp_intc.states and the
 * line is programmed as level instead; ack and the cascaded handler
 * toggle the polarity to emulate edge behaviour.  Always returns 0.
 */
static int sp_intc_set_type(struct irq_data *d, unsigned int type)
{
	u32 hwirq = d->hwirq;
	bool is_edge = !(type & IRQ_TYPE_LEVEL_MASK);
	bool is_low = (type == IRQ_TYPE_LEVEL_LOW || type == IRQ_TYPE_EDGE_FALLING);

	irq_set_handler_locked(d, is_edge ? handle_edge_irq : handle_level_irq);
	if (unlikely(IS_GPIO_INT(hwirq) && is_edge)) { // WORKAROUND
		/* store states */
		ASSIGN_STATE(hwirq, _IS_EDGE, is_edge);
		ASSIGN_STATE(hwirq, _IS_LOW, is_low);
		ASSIGN_STATE(hwirq, _IS_ACTIVE, false);
		/* change to level */
		is_edge = false;
	}
	sp_intc_assign_bit(hwirq, REG_INTR_TYPE, is_edge);
	sp_intc_assign_bit(hwirq, REG_INTR_POLARITY, is_low);
	return 0;
}
  119. static int sp_intc_get_ext_irq(int ext_num)
  120. {
  121. void __iomem *base = ext_num ? REG_MASKED_EXT1 : REG_MASKED_EXT0;
  122. u32 shift = ext_num ? GROUP_SHIFT_EXT1 : GROUP_SHIFT_EXT0;
  123. u32 groups;
  124. u32 pending_group;
  125. u32 group;
  126. u32 pending_irq;
  127. groups = readl_relaxed(REG_INTR_GROUP);
  128. pending_group = (groups >> shift) & GROUP_MASK;
  129. if (!pending_group)
  130. return -1;
  131. group = fls(pending_group) - 1;
  132. pending_irq = readl_relaxed(base + group * 4);
  133. if (!pending_irq)
  134. return -1;
  135. return (group * 32) + fls(pending_irq) - 1;
  136. }
/*
 * Chained handler for the EXT_INT0/EXT_INT1 parent lines: dispatch every
 * pending child hwirq.  A GPIO_INT marked _IS_ACTIVE already had its
 * "edge" consumed when ack flipped the polarity; restore the polarity
 * here instead of dispatching it a second time.
 */
static void sp_intc_handle_ext_cascaded(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	int ext_num = (uintptr_t)irq_desc_get_handler_data(desc); /* 0 or 1, set at map time */
	int hwirq;

	chained_irq_enter(chip, desc);
	while ((hwirq = sp_intc_get_ext_irq(ext_num)) >= 0) {
		if (unlikely(IS_GPIO_INT(hwirq) && TEST_STATE(hwirq, _IS_ACTIVE))) { // WORKAROUND
			ASSIGN_STATE(hwirq, _IS_ACTIVE, false);
			sp_intc_assign_bit(hwirq, REG_INTR_POLARITY, TEST_STATE(hwirq, _IS_LOW));
		} else {
			generic_handle_domain_irq(sp_intc.domain, hwirq);
		}
	}
	chained_irq_exit(chip, desc);
}
/* irq_chip callbacks for every hwirq in this controller's domain. */
static struct irq_chip sp_intc_chip = {
	.name = "sp_intc",
	.irq_ack = sp_intc_ack_irq,
	.irq_mask = sp_intc_mask_irq,
	.irq_unmask = sp_intc_unmask_irq,
	.irq_set_type = sp_intc_set_type,
};
/* .map: bind a freshly-mapped virq to sp_intc_chip (level handler by default). */
static int sp_intc_irq_domain_map(struct irq_domain *domain,
		unsigned int irq, irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &sp_intc_chip, handle_level_irq);
	irq_set_chip_data(irq, &sp_intc_chip);
	irq_set_noprobe(irq);
	return 0;
}
/* DT specifier is two cells: <hwirq trigger-type>. */
static const struct irq_domain_ops sp_intc_dm_ops = {
	.xlate = irq_domain_xlate_twocell,
	.map = sp_intc_irq_domain_map,
};
  172. static int sp_intc_irq_map(struct device_node *node, int i)
  173. {
  174. unsigned int irq;
  175. irq = irq_of_parse_and_map(node, i);
  176. if (!irq)
  177. return -ENOENT;
  178. irq_set_chained_handler_and_data(irq, sp_intc_handle_ext_cascaded, (void *)(uintptr_t)i);
  179. return 0;
  180. }
  181. static int __init sp_intc_init_dt(struct device_node *node, struct device_node *parent)
  182. {
  183. int i, ret;
  184. sp_intc.g0 = of_iomap(node, 0);
  185. if (!sp_intc.g0)
  186. return -ENXIO;
  187. sp_intc.g1 = of_iomap(node, 1);
  188. if (!sp_intc.g1) {
  189. ret = -ENXIO;
  190. goto out_unmap0;
  191. }
  192. ret = sp_intc_irq_map(node, 0); // EXT_INT0
  193. if (ret)
  194. goto out_unmap1;
  195. ret = sp_intc_irq_map(node, 1); // EXT_INT1
  196. if (ret)
  197. goto out_unmap1;
  198. /* initial regs */
  199. for (i = 0; i < SP_INTC_NR_GROUPS; i++) {
  200. /* all mask */
  201. writel_relaxed(0, REG_INTR_MASK + i * 4);
  202. /* all edge */
  203. writel_relaxed(~0, REG_INTR_TYPE + i * 4);
  204. /* all high-active */
  205. writel_relaxed(0, REG_INTR_POLARITY + i * 4);
  206. /* all EXT_INT0 */
  207. writel_relaxed(~0, REG_INTR_PRIORITY + i * 4);
  208. /* all clear */
  209. writel_relaxed(~0, REG_INTR_CLEAR + i * 4);
  210. }
  211. sp_intc.domain = irq_domain_add_linear(node, SP_INTC_NR_IRQS,
  212. &sp_intc_dm_ops, &sp_intc);
  213. if (!sp_intc.domain) {
  214. ret = -ENOMEM;
  215. goto out_unmap1;
  216. }
  217. raw_spin_lock_init(&sp_intc.lock);
  218. return 0;
  219. out_unmap1:
  220. iounmap(sp_intc.g1);
  221. out_unmap0:
  222. iounmap(sp_intc.g0);
  223. return ret;
  224. }
  225. IRQCHIP_DECLARE(sp_intc, "sunplus,sp7021-intc", sp_intc_init_dt);