irq-armada-370-xp.c

/*
 * Marvell Armada 370 and Armada XP SoC IRQ handling
 *
 * Copyright (C) 2012 Marvell
 *
 * Lior Amsalem <[email protected]>
 * Gregory CLEMENT <[email protected]>
 * Thomas Petazzoni <[email protected]>
 * Ben Dooks <[email protected]>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/cpu.h>
#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include <linux/irqdomain.h>
#include <linux/slab.h>
#include <linux/syscore_ops.h>
#include <linux/msi.h>

#include <asm/mach/arch.h>
#include <asm/exception.h>
#include <asm/smp_plat.h>
#include <asm/mach/irq.h>

/*
 * Overall diagram of the Armada XP interrupt controller:
 *
 *    To CPU 0                 To CPU 1
 *
 *       /\                       /\
 *       ||                       ||
 * +---------------+      +---------------+
 * |               |      |               |
 * |    per-CPU    |      |    per-CPU    |
 * |  mask/unmask  |      |  mask/unmask  |
 * |     CPU0      |      |     CPU1      |
 * |               |      |               |
 * +---------------+      +---------------+
 *       /\                       /\
 *       ||                       ||
 *       \\_______________________//
 *                  ||
 *        +-------------------+
 *        |                   |
 *        | Global interrupt  |
 *        |    mask/unmask    |
 *        |                   |
 *        +-------------------+
 *                  /\
 *                  ||
 *            interrupt from
 *                device
  64. * The "global interrupt mask/unmask" is modified using the
  65. * ARMADA_370_XP_INT_SET_ENABLE_OFFS and
  66. * ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS registers, which are relative
  67. * to "main_int_base".
  68. *
  69. * The "per-CPU mask/unmask" is modified using the
  70. * ARMADA_370_XP_INT_SET_MASK_OFFS and
  71. * ARMADA_370_XP_INT_CLEAR_MASK_OFFS registers, which are relative to
  72. * "per_cpu_int_base". This base address points to a special address,
  73. * which automatically accesses the registers of the current CPU.
  74. *
  75. * The per-CPU mask/unmask can also be adjusted using the global
  76. * per-interrupt ARMADA_370_XP_INT_SOURCE_CTL register, which we use
  77. * to configure interrupt affinity.
  78. *
  79. * Due to this model, all interrupts need to be mask/unmasked at two
  80. * different levels: at the global level and at the per-CPU level.
  81. *
  82. * This driver takes the following approach to deal with this:
  83. *
  84. * - For global interrupts:
  85. *
  86. * At ->map() time, a global interrupt is unmasked at the per-CPU
  87. * mask/unmask level. It is therefore unmasked at this level for
  88. * the current CPU, running the ->map() code. This allows to have
  89. * the interrupt unmasked at this level in non-SMP
  90. * configurations. In SMP configurations, the ->set_affinity()
  91. * callback is called, which using the
  92. * ARMADA_370_XP_INT_SOURCE_CTL() readjusts the per-CPU mask/unmask
  93. * for the interrupt.
  94. *
  95. * The ->mask() and ->unmask() operations only mask/unmask the
  96. * interrupt at the "global" level.
  97. *
  98. * So, a global interrupt is enabled at the per-CPU level as soon
  99. * as it is mapped. At run time, the masking/unmasking takes place
  100. * at the global level.
  101. *
  102. * - For per-CPU interrupts
  103. *
  104. * At ->map() time, a per-CPU interrupt is unmasked at the global
  105. * mask/unmask level.
  106. *
  107. * The ->mask() and ->unmask() operations mask/unmask the interrupt
  108. * at the per-CPU level.
  109. *
  110. * So, a per-CPU interrupt is enabled at the global level as soon
  111. * as it is mapped. At run time, the masking/unmasking takes place
  112. * at the per-CPU level.
  113. */

/* Registers relative to main_int_base */
#define ARMADA_370_XP_INT_CONTROL              (0x00)
#define ARMADA_370_XP_SW_TRIG_INT_OFFS         (0x04)
#define ARMADA_370_XP_INT_SET_ENABLE_OFFS      (0x30)
#define ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS    (0x34)
#define ARMADA_370_XP_INT_SOURCE_CTL(irq)      (0x100 + irq*4)
#define ARMADA_370_XP_INT_SOURCE_CPU_MASK      0xF
#define ARMADA_370_XP_INT_IRQ_FIQ_MASK(cpuid)  ((BIT(0) | BIT(8)) << cpuid)

/* Registers relative to per_cpu_int_base */
#define ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS      (0x08)
#define ARMADA_370_XP_IN_DRBEL_MSK_OFFS        (0x0c)
#define ARMADA_375_PPI_CAUSE                   (0x10)
#define ARMADA_370_XP_CPU_INTACK_OFFS          (0x44)
#define ARMADA_370_XP_INT_SET_MASK_OFFS        (0x48)
#define ARMADA_370_XP_INT_CLEAR_MASK_OFFS      (0x4C)
#define ARMADA_370_XP_INT_FABRIC_MASK_OFFS     (0x54)
#define ARMADA_370_XP_INT_CAUSE_PERF(cpu)      (1 << cpu)

#define ARMADA_370_XP_MAX_PER_CPU_IRQS         (28)

#define IPI_DOORBELL_START                     (0)
#define IPI_DOORBELL_END                       (8)
#define IPI_DOORBELL_MASK                      0xFF
#define PCI_MSI_DOORBELL_START                 (16)
#define PCI_MSI_DOORBELL_NR                    (16)
#define PCI_MSI_DOORBELL_END                   (32)
#define PCI_MSI_DOORBELL_MASK                  0xFFFF0000

static void __iomem *per_cpu_int_base;
static void __iomem *main_int_base;
static struct irq_domain *armada_370_xp_mpic_domain;
static u32 doorbell_mask_reg;
static int parent_irq;
#ifdef CONFIG_PCI_MSI
static struct irq_domain *armada_370_xp_msi_domain;
static struct irq_domain *armada_370_xp_msi_inner_domain;
static DECLARE_BITMAP(msi_used, PCI_MSI_DOORBELL_NR);
static DEFINE_MUTEX(msi_used_lock);
static phys_addr_t msi_doorbell_addr;
#endif
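
/*
 * The first ARMADA_370_XP_MAX_PER_CPU_IRQS hardware interrupts are
 * banked per CPU; every hwirq above that is a shared, global
 * interrupt.
 */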
static inline bool is_percpu_irq(irq_hw_number_t irq)
{
        if (irq <= ARMADA_370_XP_MAX_PER_CPU_IRQS)
                return true;

        return false;
}

/*
 * In SMP mode:
 * For shared global interrupts, mask/unmask global enable bit
 * For CPU interrupts, mask/unmask the calling CPU's bit
 */
static void armada_370_xp_irq_mask(struct irq_data *d)
{
        irq_hw_number_t hwirq = irqd_to_hwirq(d);

        if (!is_percpu_irq(hwirq))
                writel(hwirq, main_int_base +
                       ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS);
        else
                writel(hwirq, per_cpu_int_base +
                       ARMADA_370_XP_INT_SET_MASK_OFFS);
}

static void armada_370_xp_irq_unmask(struct irq_data *d)
{
        irq_hw_number_t hwirq = irqd_to_hwirq(d);

        if (!is_percpu_irq(hwirq))
                writel(hwirq, main_int_base +
                       ARMADA_370_XP_INT_SET_ENABLE_OFFS);
        else
                writel(hwirq, per_cpu_int_base +
                       ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
}

#ifdef CONFIG_PCI_MSI

static struct irq_chip armada_370_xp_msi_irq_chip = {
        .name = "MPIC MSI",
        .irq_mask = pci_msi_mask_irq,
        .irq_unmask = pci_msi_unmask_irq,
};

static struct msi_domain_info armada_370_xp_msi_domain_info = {
        .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
                  MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX),
        .chip = &armada_370_xp_msi_irq_chip,
};
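
/*
 * An MSI is a posted write to the software-triggered interrupt
 * register: the data value selects the destination CPU in bits
 * [8..] and carries the doorbell number assigned to the MSI in the
 * low bits.
 */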
static void armada_370_xp_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
        unsigned int cpu = cpumask_first(irq_data_get_effective_affinity_mask(data));

        msg->address_lo = lower_32_bits(msi_doorbell_addr);
        msg->address_hi = upper_32_bits(msi_doorbell_addr);
        msg->data = BIT(cpu + 8) | (data->hwirq + PCI_MSI_DOORBELL_START);
}

static int armada_370_xp_msi_set_affinity(struct irq_data *irq_data,
                                          const struct cpumask *mask, bool force)
{
        unsigned int cpu;

        if (!force)
                cpu = cpumask_any_and(mask, cpu_online_mask);
        else
                cpu = cpumask_first(mask);

        if (cpu >= nr_cpu_ids)
                return -EINVAL;

        irq_data_update_effective_affinity(irq_data, cpumask_of(cpu));

        return IRQ_SET_MASK_OK;
}

static struct irq_chip armada_370_xp_msi_bottom_irq_chip = {
        .name = "MPIC MSI",
        .irq_compose_msi_msg = armada_370_xp_compose_msi_msg,
        .irq_set_affinity = armada_370_xp_msi_set_affinity,
};
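
/*
 * Allocate a contiguous, power-of-two aligned block of doorbells
 * (hence order_base_2(nr_irqs)), so that multi-MSI allocations,
 * which the PCI spec requires to be size-aligned, can be honoured.
 */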
static int armada_370_xp_msi_alloc(struct irq_domain *domain, unsigned int virq,
                                   unsigned int nr_irqs, void *args)
{
        int hwirq, i;

        mutex_lock(&msi_used_lock);
        hwirq = bitmap_find_free_region(msi_used, PCI_MSI_DOORBELL_NR,
                                        order_base_2(nr_irqs));
        mutex_unlock(&msi_used_lock);

        if (hwirq < 0)
                return -ENOSPC;

        for (i = 0; i < nr_irqs; i++) {
                irq_domain_set_info(domain, virq + i, hwirq + i,
                                    &armada_370_xp_msi_bottom_irq_chip,
                                    domain->host_data, handle_simple_irq,
                                    NULL, NULL);
        }

        return 0;
}

static void armada_370_xp_msi_free(struct irq_domain *domain,
                                   unsigned int virq, unsigned int nr_irqs)
{
        struct irq_data *d = irq_domain_get_irq_data(domain, virq);

        mutex_lock(&msi_used_lock);
        bitmap_release_region(msi_used, d->hwirq, order_base_2(nr_irqs));
        mutex_unlock(&msi_used_lock);
}

static const struct irq_domain_ops armada_370_xp_msi_domain_ops = {
        .alloc = armada_370_xp_msi_alloc,
        .free = armada_370_xp_msi_free,
};

static void armada_370_xp_msi_reenable_percpu(void)
{
        u32 reg;

        /* Enable MSI doorbell mask and combined cpu local interrupt */
        reg = readl(per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_MSK_OFFS)
                | PCI_MSI_DOORBELL_MASK;
        writel(reg, per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_MSK_OFFS);
        /* Unmask local doorbell interrupt */
        writel(1, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
}

static int armada_370_xp_msi_init(struct device_node *node,
                                  phys_addr_t main_int_phys_base)
{
        msi_doorbell_addr = main_int_phys_base +
                ARMADA_370_XP_SW_TRIG_INT_OFFS;

        armada_370_xp_msi_inner_domain =
                irq_domain_add_linear(NULL, PCI_MSI_DOORBELL_NR,
                                      &armada_370_xp_msi_domain_ops, NULL);
        if (!armada_370_xp_msi_inner_domain)
                return -ENOMEM;

        armada_370_xp_msi_domain =
                pci_msi_create_irq_domain(of_node_to_fwnode(node),
                                          &armada_370_xp_msi_domain_info,
                                          armada_370_xp_msi_inner_domain);
        if (!armada_370_xp_msi_domain) {
                irq_domain_remove(armada_370_xp_msi_inner_domain);
                return -ENOMEM;
        }

        armada_370_xp_msi_reenable_percpu();

        return 0;
}
#else
static void armada_370_xp_msi_reenable_percpu(void) {}

static inline int armada_370_xp_msi_init(struct device_node *node,
                                         phys_addr_t main_int_phys_base)
{
        return 0;
}
#endif

static void armada_xp_mpic_perf_init(void)
{
        unsigned long cpuid;

        /*
         * This Performance Counter Overflow interrupt is specific for
         * Armada 370 and XP. It is not available on Armada 375, 38x and 39x.
         */
        if (!of_machine_is_compatible("marvell,armada-370-xp"))
                return;

        cpuid = cpu_logical_map(smp_processor_id());

        /* Enable Performance Counter Overflow interrupts */
        writel(ARMADA_370_XP_INT_CAUSE_PERF(cpuid),
               per_cpu_int_base + ARMADA_370_XP_INT_FABRIC_MASK_OFFS);
}

#ifdef CONFIG_SMP
static struct irq_domain *ipi_domain;

static void armada_370_xp_ipi_mask(struct irq_data *d)
{
        u32 reg;

        reg = readl(per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_MSK_OFFS);
        reg &= ~BIT(d->hwirq);
        writel(reg, per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_MSK_OFFS);
}

static void armada_370_xp_ipi_unmask(struct irq_data *d)
{
        u32 reg;

        reg = readl(per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_MSK_OFFS);
        reg |= BIT(d->hwirq);
        writel(reg, per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_MSK_OFFS);
}

static void armada_370_xp_ipi_send_mask(struct irq_data *d,
                                        const struct cpumask *mask)
{
        unsigned long map = 0;
        int cpu;

        /* Convert our logical CPU mask into a physical one. */
        for_each_cpu(cpu, mask)
                map |= 1 << cpu_logical_map(cpu);

        /*
         * Ensure that stores to Normal memory are visible to the
         * other CPUs before issuing the IPI.
         */
        dsb();

        /* submit softirq */
        writel((map << 8) | d->hwirq, main_int_base +
               ARMADA_370_XP_SW_TRIG_INT_OFFS);
}

static void armada_370_xp_ipi_ack(struct irq_data *d)
{
        writel(~BIT(d->hwirq), per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS);
}

static struct irq_chip ipi_irqchip = {
        .name = "IPI",
        .irq_ack = armada_370_xp_ipi_ack,
        .irq_mask = armada_370_xp_ipi_mask,
        .irq_unmask = armada_370_xp_ipi_unmask,
        .ipi_send_mask = armada_370_xp_ipi_send_mask,
};

static int armada_370_xp_ipi_alloc(struct irq_domain *d,
                                   unsigned int virq,
                                   unsigned int nr_irqs, void *args)
{
        int i;

        for (i = 0; i < nr_irqs; i++) {
                irq_set_percpu_devid(virq + i);
                irq_domain_set_info(d, virq + i, i, &ipi_irqchip,
                                    d->host_data,
                                    handle_percpu_devid_irq,
                                    NULL, NULL);
        }

        return 0;
}

static void armada_370_xp_ipi_free(struct irq_domain *d,
                                   unsigned int virq,
                                   unsigned int nr_irqs)
{
        /* Not freeing IPIs */
}

static const struct irq_domain_ops ipi_domain_ops = {
        .alloc = armada_370_xp_ipi_alloc,
        .free = armada_370_xp_ipi_free,
};
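
/*
 * On resume, and when a secondary CPU comes online, re-open the
 * per-CPU doorbell mask for every IPI that the core code has left
 * enabled.
 */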
static void ipi_resume(void)
{
        int i;

        for (i = 0; i < IPI_DOORBELL_END; i++) {
                int irq;

                irq = irq_find_mapping(ipi_domain, i);
                if (irq <= 0)
                        continue;
                if (irq_percpu_is_enabled(irq)) {
                        struct irq_data *d;

                        d = irq_domain_get_irq_data(ipi_domain, irq);
                        armada_370_xp_ipi_unmask(d);
                }
        }
}
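
/*
 * IPIs are backed by the first eight per-CPU doorbells
 * (IPI_DOORBELL_START..IPI_DOORBELL_END) and live in their own IRQ
 * domain, tagged with DOMAIN_BUS_IPI.
 */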
static __init void armada_xp_ipi_init(struct device_node *node)
{
        int base_ipi;

        ipi_domain = irq_domain_create_linear(of_node_to_fwnode(node),
                                              IPI_DOORBELL_END,
                                              &ipi_domain_ops, NULL);
        if (WARN_ON(!ipi_domain))
                return;

        irq_domain_update_bus_token(ipi_domain, DOMAIN_BUS_IPI);
        base_ipi = __irq_domain_alloc_irqs(ipi_domain, -1, IPI_DOORBELL_END,
                                           NUMA_NO_NODE, NULL, false, NULL);
        if (WARN_ON(!base_ipi))
                return;

        set_smp_ipi_range(base_ipi, IPI_DOORBELL_END);
}

static DEFINE_RAW_SPINLOCK(irq_controller_lock);

static int armada_xp_set_affinity(struct irq_data *d,
                                  const struct cpumask *mask_val, bool force)
{
        irq_hw_number_t hwirq = irqd_to_hwirq(d);
        unsigned long reg, mask;
        int cpu;

        /* Select a single core from the affinity mask which is online */
        cpu = cpumask_any_and(mask_val, cpu_online_mask);
        mask = 1UL << cpu_logical_map(cpu);

        raw_spin_lock(&irq_controller_lock);
        reg = readl(main_int_base + ARMADA_370_XP_INT_SOURCE_CTL(hwirq));
        reg = (reg & (~ARMADA_370_XP_INT_SOURCE_CPU_MASK)) | mask;
        writel(reg, main_int_base + ARMADA_370_XP_INT_SOURCE_CTL(hwirq));
        raw_spin_unlock(&irq_controller_lock);

        irq_data_update_effective_affinity(d, cpumask_of(cpu));

        return IRQ_SET_MASK_OK;
}
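
/*
 * Per-CPU bring-up: mask every interrupt at the per-CPU level and
 * quiesce the doorbells, leaving only the IPI summary interrupt
 * (hwirq 0) unmasked; individual doorbells are re-enabled later via
 * the doorbell mask register.
 */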
static void armada_xp_mpic_smp_cpu_init(void)
{
        u32 control;
        int nr_irqs, i;

        control = readl(main_int_base + ARMADA_370_XP_INT_CONTROL);
        nr_irqs = (control >> 2) & 0x3ff;

        for (i = 0; i < nr_irqs; i++)
                writel(i, per_cpu_int_base + ARMADA_370_XP_INT_SET_MASK_OFFS);

        /* Disable all IPIs */
        writel(0, per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_MSK_OFFS);

        /* Clear pending IPIs */
        writel(0, per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS);

        /* Unmask IPI interrupt */
        writel(0, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
}

static void armada_xp_mpic_reenable_percpu(void)
{
        unsigned int irq;

        /* Re-enable per-CPU interrupts that were enabled before suspend */
        for (irq = 0; irq < ARMADA_370_XP_MAX_PER_CPU_IRQS; irq++) {
                struct irq_data *data;
                int virq;

                virq = irq_linear_revmap(armada_370_xp_mpic_domain, irq);
                if (virq == 0)
                        continue;

                data = irq_get_irq_data(virq);

                if (!irq_percpu_is_enabled(virq))
                        continue;

                armada_370_xp_irq_unmask(data);
        }

        ipi_resume();

        armada_370_xp_msi_reenable_percpu();
}

static int armada_xp_mpic_starting_cpu(unsigned int cpu)
{
        armada_xp_mpic_perf_init();
        armada_xp_mpic_smp_cpu_init();
        armada_xp_mpic_reenable_percpu();
        return 0;
}

static int mpic_cascaded_starting_cpu(unsigned int cpu)
{
        armada_xp_mpic_perf_init();
        armada_xp_mpic_reenable_percpu();
        enable_percpu_irq(parent_irq, IRQ_TYPE_NONE);
        return 0;
}
#else
static void armada_xp_mpic_smp_cpu_init(void) {}
static void ipi_resume(void) {}
#endif

static struct irq_chip armada_370_xp_irq_chip = {
        .name = "MPIC",
        .irq_mask = armada_370_xp_irq_mask,
        .irq_mask_ack = armada_370_xp_irq_mask,
        .irq_unmask = armada_370_xp_irq_unmask,
#ifdef CONFIG_SMP
        .irq_set_affinity = armada_xp_set_affinity,
#endif
        .flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MASK_ON_SUSPEND,
};
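
/*
 * As described at the top of this file: at map time a global
 * interrupt is unmasked at the per-CPU level while a per-CPU
 * interrupt is unmasked at the global level; run-time
 * masking/unmasking then happens at the other level.
 */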
static int armada_370_xp_mpic_irq_map(struct irq_domain *h,
                                      unsigned int virq, irq_hw_number_t hw)
{
        armada_370_xp_irq_mask(irq_get_irq_data(virq));
        if (!is_percpu_irq(hw))
                writel(hw, per_cpu_int_base +
                       ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
        else
                writel(hw, main_int_base + ARMADA_370_XP_INT_SET_ENABLE_OFFS);
        irq_set_status_flags(virq, IRQ_LEVEL);

        if (is_percpu_irq(hw)) {
                irq_set_percpu_devid(virq);
                irq_set_chip_and_handler(virq, &armada_370_xp_irq_chip,
                                         handle_percpu_devid_irq);
        } else {
                irq_set_chip_and_handler(virq, &armada_370_xp_irq_chip,
                                         handle_level_irq);
                irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq)));
        }
        irq_set_probe(virq);

        return 0;
}

static const struct irq_domain_ops armada_370_xp_mpic_irq_ops = {
        .map = armada_370_xp_mpic_irq_map,
        .xlate = irq_domain_xlate_onecell,
};

#ifdef CONFIG_PCI_MSI
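/*
 * Handle the MSI doorbell summary interrupt (hwirq 1): read and
 * clear the pending MSI doorbells, then dispatch each one into the
 * inner MSI domain.
 */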
static void armada_370_xp_handle_msi_irq(struct pt_regs *regs, bool is_chained)
{
        u32 msimask, msinr;

        msimask = readl_relaxed(per_cpu_int_base +
                                ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS)
                & PCI_MSI_DOORBELL_MASK;

        writel(~msimask, per_cpu_int_base +
               ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS);

        for (msinr = PCI_MSI_DOORBELL_START;
             msinr < PCI_MSI_DOORBELL_END; msinr++) {
                unsigned int irq;

                if (!(msimask & BIT(msinr)))
                        continue;

                irq = msinr - PCI_MSI_DOORBELL_START;

                generic_handle_domain_irq(armada_370_xp_msi_inner_domain, irq);
        }
}
#else
static void armada_370_xp_handle_msi_irq(struct pt_regs *r, bool b) {}
#endif
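
/*
 * Chained handler used when the MPIC is not the primary interrupt
 * controller (e.g. Armada 375/38x, where it is cascaded behind the
 * GIC): the per-CPU cause register tells us which summary lines
 * fired.
 */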
static void armada_370_xp_mpic_handle_cascade_irq(struct irq_desc *desc)
{
        struct irq_chip *chip = irq_desc_get_chip(desc);
        unsigned long irqmap, irqn, irqsrc, cpuid;

        chained_irq_enter(chip, desc);

        irqmap = readl_relaxed(per_cpu_int_base + ARMADA_375_PPI_CAUSE);
        cpuid = cpu_logical_map(smp_processor_id());

        for_each_set_bit(irqn, &irqmap, BITS_PER_LONG) {
                irqsrc = readl_relaxed(main_int_base +
                                       ARMADA_370_XP_INT_SOURCE_CTL(irqn));

                /* Check if the interrupt is not masked on current CPU.
                 * Test IRQ (0-1) and FIQ (8-9) mask bits.
                 */
                if (!(irqsrc & ARMADA_370_XP_INT_IRQ_FIQ_MASK(cpuid)))
                        continue;

                if (irqn == 1) {
                        armada_370_xp_handle_msi_irq(NULL, true);
                        continue;
                }

                generic_handle_domain_irq(armada_370_xp_mpic_domain, irqn);
        }

        chained_irq_exit(chip, desc);
}
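
/*
 * Top-level entry point when the MPIC is the primary controller:
 * CPU_INTACK returns the highest-priority pending interrupt, with
 * hwirq 0 reserved for IPIs, hwirq 1 for MSIs, and values above
 * 1022 meaning that nothing is pending.
 */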
static void __exception_irq_entry
armada_370_xp_handle_irq(struct pt_regs *regs)
{
        u32 irqstat, irqnr;

        do {
                irqstat = readl_relaxed(per_cpu_int_base +
                                        ARMADA_370_XP_CPU_INTACK_OFFS);
                irqnr = irqstat & 0x3FF;

                if (irqnr > 1022)
                        break;

                if (irqnr > 1) {
                        generic_handle_domain_irq(armada_370_xp_mpic_domain,
                                                  irqnr);
                        continue;
                }

                /* MSI handling */
                if (irqnr == 1)
                        armada_370_xp_handle_msi_irq(regs, false);

#ifdef CONFIG_SMP
                /* IPI Handling */
                if (irqnr == 0) {
                        unsigned long ipimask;
                        int ipi;

                        ipimask = readl_relaxed(per_cpu_int_base +
                                                ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS)
                                & IPI_DOORBELL_MASK;

                        for_each_set_bit(ipi, &ipimask, IPI_DOORBELL_END)
                                generic_handle_domain_irq(ipi_domain, ipi);
                }
#endif

        } while (1);
}
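
/*
 * Syscore suspend/resume: only the doorbell mask needs to be saved
 * explicitly; everything else is reconstructed on resume from the
 * enabled/disabled state recorded in the IRQ descriptors.
 */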
static int armada_370_xp_mpic_suspend(void)
{
        doorbell_mask_reg = readl(per_cpu_int_base +
                                  ARMADA_370_XP_IN_DRBEL_MSK_OFFS);
        return 0;
}

static void armada_370_xp_mpic_resume(void)
{
        int nirqs;
        irq_hw_number_t irq;

        /* Re-enable interrupts */
        nirqs = (readl(main_int_base + ARMADA_370_XP_INT_CONTROL) >> 2) & 0x3ff;
        for (irq = 0; irq < nirqs; irq++) {
                struct irq_data *data;
                int virq;

                virq = irq_linear_revmap(armada_370_xp_mpic_domain, irq);
                if (virq == 0)
                        continue;

                data = irq_get_irq_data(virq);

                if (!is_percpu_irq(irq)) {
                        /* Non per-CPU interrupts */
                        writel(irq, per_cpu_int_base +
                               ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
                        if (!irqd_irq_disabled(data))
                                armada_370_xp_irq_unmask(data);
                } else {
                        /* Per-CPU interrupts */
                        writel(irq, main_int_base +
                               ARMADA_370_XP_INT_SET_ENABLE_OFFS);

                        /*
                         * Re-enable on the current CPU,
                         * armada_xp_mpic_reenable_percpu() will take
                         * care of secondary CPUs when they come up.
                         */
                        if (irq_percpu_is_enabled(virq))
                                armada_370_xp_irq_unmask(data);
                }
        }

        /* Reconfigure doorbells for IPIs and MSIs */
        writel(doorbell_mask_reg,
               per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_MSK_OFFS);
        if (doorbell_mask_reg & IPI_DOORBELL_MASK)
                writel(0, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
        if (doorbell_mask_reg & PCI_MSI_DOORBELL_MASK)
                writel(1, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS);

        ipi_resume();
}

static struct syscore_ops armada_370_xp_mpic_syscore_ops = {
        .suspend = armada_370_xp_mpic_suspend,
        .resume = armada_370_xp_mpic_resume,
};

static int __init armada_370_xp_mpic_of_init(struct device_node *node,
                                             struct device_node *parent)
{
        struct resource main_int_res, per_cpu_int_res;
        int nr_irqs, i;
        u32 control;

        BUG_ON(of_address_to_resource(node, 0, &main_int_res));
        BUG_ON(of_address_to_resource(node, 1, &per_cpu_int_res));

        BUG_ON(!request_mem_region(main_int_res.start,
                                   resource_size(&main_int_res),
                                   node->full_name));
        BUG_ON(!request_mem_region(per_cpu_int_res.start,
                                   resource_size(&per_cpu_int_res),
                                   node->full_name));

        main_int_base = ioremap(main_int_res.start,
                                resource_size(&main_int_res));
        BUG_ON(!main_int_base);

        per_cpu_int_base = ioremap(per_cpu_int_res.start,
                                   resource_size(&per_cpu_int_res));
        BUG_ON(!per_cpu_int_base);

        control = readl(main_int_base + ARMADA_370_XP_INT_CONTROL);
        nr_irqs = (control >> 2) & 0x3ff;

        for (i = 0; i < nr_irqs; i++)
                writel(i, main_int_base + ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS);

        armada_370_xp_mpic_domain =
                irq_domain_add_linear(node, nr_irqs,
                                      &armada_370_xp_mpic_irq_ops, NULL);
        BUG_ON(!armada_370_xp_mpic_domain);
        irq_domain_update_bus_token(armada_370_xp_mpic_domain, DOMAIN_BUS_WIRED);

        /* Setup for the boot CPU */
        armada_xp_mpic_perf_init();
        armada_xp_mpic_smp_cpu_init();

        armada_370_xp_msi_init(node, main_int_res.start);

        parent_irq = irq_of_parse_and_map(node, 0);
        if (parent_irq <= 0) {
                irq_set_default_host(armada_370_xp_mpic_domain);
                set_handle_irq(armada_370_xp_handle_irq);
#ifdef CONFIG_SMP
                armada_xp_ipi_init(node);
                cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_ARMADA_XP_STARTING,
                                          "irqchip/armada/ipi:starting",
                                          armada_xp_mpic_starting_cpu, NULL);
#endif
        } else {
#ifdef CONFIG_SMP
                cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_ARMADA_XP_STARTING,
                                          "irqchip/armada/cascade:starting",
                                          mpic_cascaded_starting_cpu, NULL);
#endif
                irq_set_chained_handler(parent_irq,
                                        armada_370_xp_mpic_handle_cascade_irq);
        }

        register_syscore_ops(&armada_370_xp_mpic_syscore_ops);

        return 0;
}

IRQCHIP_DECLARE(armada_370_xp_mpic, "marvell,mpic", armada_370_xp_mpic_of_init);