pcie-iproc-msi.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015 Broadcom Corporation
 */

#include <linux/interrupt.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/msi.h>
#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include <linux/pci.h>

#include "pcie-iproc.h"

#define IPROC_MSI_INTR_EN_SHIFT        11
#define IPROC_MSI_INTR_EN              BIT(IPROC_MSI_INTR_EN_SHIFT)
#define IPROC_MSI_INT_N_EVENT_SHIFT    1
#define IPROC_MSI_INT_N_EVENT          BIT(IPROC_MSI_INT_N_EVENT_SHIFT)
#define IPROC_MSI_EQ_EN_SHIFT          0
#define IPROC_MSI_EQ_EN                BIT(IPROC_MSI_EQ_EN_SHIFT)

#define IPROC_MSI_EQ_MASK              0x3f

/* Max number of GIC interrupts */
#define NR_HW_IRQS                     6

/* Number of entries in each event queue */
#define EQ_LEN                         64

/* Size of each event queue memory region */
#define EQ_MEM_REGION_SIZE             SZ_4K

/* Size of each MSI address region */
#define MSI_MEM_REGION_SIZE            SZ_4K

enum iproc_msi_reg {
        IPROC_MSI_EQ_PAGE = 0,
        IPROC_MSI_EQ_PAGE_UPPER,
        IPROC_MSI_PAGE,
        IPROC_MSI_PAGE_UPPER,
        IPROC_MSI_CTRL,
        IPROC_MSI_EQ_HEAD,
        IPROC_MSI_EQ_TAIL,
        IPROC_MSI_INTS_EN,
        IPROC_MSI_REG_SIZE,
};

struct iproc_msi;

/**
 * struct iproc_msi_grp - iProc MSI group
 *
 * One MSI group is allocated per GIC interrupt, serviced by one iProc MSI
 * event queue.
 *
 * @msi: pointer to iProc MSI data
 * @gic_irq: GIC interrupt
 * @eq: Event queue number
 */
struct iproc_msi_grp {
        struct iproc_msi *msi;
        int gic_irq;
        unsigned int eq;
};

/**
 * struct iproc_msi - iProc event queue based MSI
 *
 * Only meant to be used on platforms without MSI support integrated into the
 * GIC.
 *
 * @pcie: pointer to iProc PCIe data
 * @reg_offsets: MSI register offsets
 * @grps: MSI groups
 * @nr_irqs: number of total interrupts connected to GIC
 * @nr_cpus: number of total CPUs
 * @has_inten_reg: indicates the MSI interrupt enable register needs to be
 *  set explicitly (required for some legacy platforms)
 * @bitmap: MSI vector bitmap
 * @bitmap_lock: lock to protect access to the MSI bitmap
 * @nr_msi_vecs: total number of MSI vectors
 * @inner_domain: inner IRQ domain
 * @msi_domain: MSI IRQ domain
 * @nr_eq_region: required number of 4K aligned memory regions for MSI event
 *  queues
 * @nr_msi_region: required number of 4K aligned address regions for MSI
 *  posted writes
 * @eq_cpu: pointer to allocated memory region for MSI event queues
 * @eq_dma: DMA address of MSI event queues
 * @msi_addr: MSI address
 */
struct iproc_msi {
        struct iproc_pcie *pcie;
        const u16 (*reg_offsets)[IPROC_MSI_REG_SIZE];
        struct iproc_msi_grp *grps;
        int nr_irqs;
        int nr_cpus;
        bool has_inten_reg;
        unsigned long *bitmap;
        struct mutex bitmap_lock;
        unsigned int nr_msi_vecs;
        struct irq_domain *inner_domain;
        struct irq_domain *msi_domain;
        unsigned int nr_eq_region;
        unsigned int nr_msi_region;
        void *eq_cpu;
        dma_addr_t eq_dma;
        phys_addr_t msi_addr;
};

static const u16 iproc_msi_reg_paxb[NR_HW_IRQS][IPROC_MSI_REG_SIZE] = {
        { 0x200, 0x2c0, 0x204, 0x2c4, 0x210, 0x250, 0x254, 0x208 },
        { 0x200, 0x2c0, 0x204, 0x2c4, 0x214, 0x258, 0x25c, 0x208 },
        { 0x200, 0x2c0, 0x204, 0x2c4, 0x218, 0x260, 0x264, 0x208 },
        { 0x200, 0x2c0, 0x204, 0x2c4, 0x21c, 0x268, 0x26c, 0x208 },
        { 0x200, 0x2c0, 0x204, 0x2c4, 0x220, 0x270, 0x274, 0x208 },
        { 0x200, 0x2c0, 0x204, 0x2c4, 0x224, 0x278, 0x27c, 0x208 },
};

static const u16 iproc_msi_reg_paxc[NR_HW_IRQS][IPROC_MSI_REG_SIZE] = {
        { 0xc00, 0xc04, 0xc08, 0xc0c, 0xc40, 0xc50, 0xc60 },
        { 0xc10, 0xc14, 0xc18, 0xc1c, 0xc44, 0xc54, 0xc64 },
        { 0xc20, 0xc24, 0xc28, 0xc2c, 0xc48, 0xc58, 0xc68 },
        { 0xc30, 0xc34, 0xc38, 0xc3c, 0xc4c, 0xc5c, 0xc6c },
};

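/*
 * A note on the tables above (added commentary, derived from the values):
 * for PAXB the EQ_PAGE/PAGE and INTS_EN offsets are identical in every row,
 * i.e. shared across all MSI groups, while CTRL, EQ_HEAD and EQ_TAIL are per
 * group.  For PAXC each group has its own register block; no INTS_EN offset
 * is listed, so it stays zero-initialized and is only used on legacy
 * platforms that set 'has_inten_reg'.
 */
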
static inline u32 iproc_msi_read_reg(struct iproc_msi *msi,
                                     enum iproc_msi_reg reg,
                                     unsigned int eq)
{
        struct iproc_pcie *pcie = msi->pcie;

        return readl_relaxed(pcie->base + msi->reg_offsets[eq][reg]);
}

static inline void iproc_msi_write_reg(struct iproc_msi *msi,
                                       enum iproc_msi_reg reg,
                                       int eq, u32 val)
{
        struct iproc_pcie *pcie = msi->pcie;

        writel_relaxed(val, pcie->base + msi->reg_offsets[eq][reg]);
}

static inline u32 hwirq_to_group(struct iproc_msi *msi, unsigned long hwirq)
{
        return (hwirq % msi->nr_irqs);
}

static inline unsigned int iproc_msi_addr_offset(struct iproc_msi *msi,
                                                 unsigned long hwirq)
{
        if (msi->nr_msi_region > 1)
                return hwirq_to_group(msi, hwirq) * MSI_MEM_REGION_SIZE;
        else
                return hwirq_to_group(msi, hwirq) * sizeof(u32);
}

static inline unsigned int iproc_msi_eq_offset(struct iproc_msi *msi, u32 eq)
{
        if (msi->nr_eq_region > 1)
                return eq * EQ_MEM_REGION_SIZE;
        else
                return eq * EQ_LEN * sizeof(u32);
}

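/*
 * A note on the two offset helpers above: with a single region (PAXB, where
 * nr_eq_region/nr_msi_region are set to 1 in iproc_msi_init()), all event
 * queues are packed into one 4K region at EQ_LEN * sizeof(u32) = 256 bytes
 * per queue, and each group gets one 32-bit slot in the MSI address region.
 * With multiple regions (PAXC), every event queue and every MSI address gets
 * its own dedicated 4K region.
 */
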
static struct irq_chip iproc_msi_irq_chip = {
        .name = "iProc-MSI",
};

static struct msi_domain_info iproc_msi_domain_info = {
        .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
                 MSI_FLAG_PCI_MSIX,
        .chip = &iproc_msi_irq_chip,
};

/*
 * In iProc PCIe core, each MSI group is serviced by a GIC interrupt and a
 * dedicated event queue.  Each MSI group can support up to 64 MSI vectors.
 *
 * The number of MSI groups varies between different iProc SoCs.  The total
 * number of CPU cores also varies.  To support MSI IRQ affinity, we
 * distribute GIC interrupts across all available CPUs.  An MSI vector is
 * moved from one GIC interrupt to another to steer it to the target CPU.
 *
 * Assuming:
 * - the number of MSI groups is M
 * - the number of CPU cores is N
 * - M is always a multiple of N
 *
 * Total number of raw MSI vectors = M * 64
 * Total number of supported MSI vectors = (M * 64) / N
 */

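/*
 * Worked example (illustrative numbers only, not a fixed configuration):
 * with M = 6 MSI groups and N = 2 CPUs, there are 6 * 64 = 384 raw MSI
 * vectors, of which (6 * 64) / 2 = 192 can be handed out to devices, since
 * each allocated vector reserves one raw vector per CPU so that it can be
 * steered to either CPU.
 */
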
static inline int hwirq_to_cpu(struct iproc_msi *msi, unsigned long hwirq)
{
        return (hwirq % msi->nr_cpus);
}

static inline unsigned long hwirq_to_canonical_hwirq(struct iproc_msi *msi,
                                                     unsigned long hwirq)
{
        return (hwirq - hwirq_to_cpu(msi, hwirq));
}

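/*
 * Commentary on the helpers above: each MSI allocation consumes a block of
 * 'nr_cpus' consecutive hwirqs, one per CPU.  The canonical hwirq is the
 * first (CPU0) entry of that block; changing affinity simply selects a
 * different entry within the block, which lands the vector on a different
 * event queue/GIC interrupt and hence a different CPU.
 */
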
static int iproc_msi_irq_set_affinity(struct irq_data *data,
                                      const struct cpumask *mask, bool force)
{
        struct iproc_msi *msi = irq_data_get_irq_chip_data(data);
        int target_cpu = cpumask_first(mask);
        int curr_cpu;
        int ret;

        curr_cpu = hwirq_to_cpu(msi, data->hwirq);
        if (curr_cpu == target_cpu)
                ret = IRQ_SET_MASK_OK_DONE;
        else {
                /* steer MSI to the target CPU */
                data->hwirq = hwirq_to_canonical_hwirq(msi, data->hwirq) +
                              target_cpu;
                ret = IRQ_SET_MASK_OK;
        }

        irq_data_update_effective_affinity(data, cpumask_of(target_cpu));

        return ret;
}

static void iproc_msi_irq_compose_msi_msg(struct irq_data *data,
                                          struct msi_msg *msg)
{
        struct iproc_msi *msi = irq_data_get_irq_chip_data(data);
        dma_addr_t addr;

        addr = msi->msi_addr + iproc_msi_addr_offset(msi, data->hwirq);
        msg->address_lo = lower_32_bits(addr);
        msg->address_hi = upper_32_bits(addr);
        msg->data = data->hwirq << 5;
}

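/*
 * A note on the message data composed above: the hwirq is placed in bits
 * [31:5] and the low 5 bits are left clear.  decode_msi_hwirq() reverses
 * this by shifting right and adding back (data & 0x1f), which appears to
 * leave room for a multi-MSI endpoint to encode its vector index in the low
 * bits of the data.
 */
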
static struct irq_chip iproc_msi_bottom_irq_chip = {
        .name = "MSI",
        .irq_set_affinity = iproc_msi_irq_set_affinity,
        .irq_compose_msi_msg = iproc_msi_irq_compose_msi_msg,
};

static int iproc_msi_irq_domain_alloc(struct irq_domain *domain,
                                      unsigned int virq, unsigned int nr_irqs,
                                      void *args)
{
        struct iproc_msi *msi = domain->host_data;
        int hwirq, i;

        if (msi->nr_cpus > 1 && nr_irqs > 1)
                return -EINVAL;

        mutex_lock(&msi->bitmap_lock);

        /*
         * Allocate 'nr_irqs' multiplied by 'nr_cpus' MSI vectors each time,
         * so that every allocated vector has one slot per CPU.
         */
        hwirq = bitmap_find_free_region(msi->bitmap, msi->nr_msi_vecs,
                                        order_base_2(msi->nr_cpus * nr_irqs));

        mutex_unlock(&msi->bitmap_lock);

        if (hwirq < 0)
                return -ENOSPC;

        for (i = 0; i < nr_irqs; i++) {
                irq_domain_set_info(domain, virq + i, hwirq + i,
                                    &iproc_msi_bottom_irq_chip,
                                    domain->host_data, handle_simple_irq,
                                    NULL, NULL);
        }

        return 0;
}

static void iproc_msi_irq_domain_free(struct irq_domain *domain,
                                      unsigned int virq, unsigned int nr_irqs)
{
        struct irq_data *data = irq_domain_get_irq_data(domain, virq);
        struct iproc_msi *msi = irq_data_get_irq_chip_data(data);
        unsigned int hwirq;

        mutex_lock(&msi->bitmap_lock);

        hwirq = hwirq_to_canonical_hwirq(msi, data->hwirq);
        bitmap_release_region(msi->bitmap, hwirq,
                              order_base_2(msi->nr_cpus * nr_irqs));

        mutex_unlock(&msi->bitmap_lock);

        irq_domain_free_irqs_parent(domain, virq, nr_irqs);
}

static const struct irq_domain_ops msi_domain_ops = {
        .alloc = iproc_msi_irq_domain_alloc,
        .free = iproc_msi_irq_domain_free,
};

static inline u32 decode_msi_hwirq(struct iproc_msi *msi, u32 eq, u32 head)
{
        u32 __iomem *msg;
        u32 hwirq;
        unsigned int offs;

        offs = iproc_msi_eq_offset(msi, eq) + head * sizeof(u32);
        msg = (u32 __iomem *)(msi->eq_cpu + offs);
        hwirq = readl(msg);
        hwirq = (hwirq >> 5) + (hwirq & 0x1f);

        /*
         * Since we have multiple hwirqs mapped to a single MSI vector,
         * we now need to derive the hwirq at CPU0.  It can then be used to
         * map back to the virq.
         */
        return hwirq_to_canonical_hwirq(msi, hwirq);
}

static void iproc_msi_handler(struct irq_desc *desc)
{
        struct irq_chip *chip = irq_desc_get_chip(desc);
        struct iproc_msi_grp *grp;
        struct iproc_msi *msi;
        u32 eq, head, tail, nr_events;
        unsigned long hwirq;

        chained_irq_enter(chip, desc);

        grp = irq_desc_get_handler_data(desc);
        msi = grp->msi;
        eq = grp->eq;

        /*
         * iProc MSI event queue is tracked by head and tail pointers.  Head
         * pointer indicates the next entry (MSI data) to be consumed by SW in
         * the queue and needs to be updated by SW.  iProc MSI core uses the
         * tail pointer as the next data insertion point.
         *
         * Entries between head and tail pointers contain valid MSI data.  MSI
         * data is guaranteed to be in the event queue memory before the tail
         * pointer is updated by the iProc MSI core.
         */
        head = iproc_msi_read_reg(msi, IPROC_MSI_EQ_HEAD,
                                  eq) & IPROC_MSI_EQ_MASK;
        do {
                tail = iproc_msi_read_reg(msi, IPROC_MSI_EQ_TAIL,
                                          eq) & IPROC_MSI_EQ_MASK;

                /*
                 * Figure out total number of events (MSI data) to be
                 * processed.
                 */
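                /*
                 * Worked example of the wrap-around case below (illustrative
                 * numbers only): with EQ_LEN = 64, head = 60 and tail = 2,
                 * the queue has wrapped and nr_events = 64 - (60 - 2) = 6.
                 */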
                nr_events = (tail < head) ?
                        (EQ_LEN - (head - tail)) : (tail - head);
                if (!nr_events)
                        break;

                /* process all outstanding events */
                while (nr_events--) {
                        hwirq = decode_msi_hwirq(msi, eq, head);
                        generic_handle_domain_irq(msi->inner_domain, hwirq);

                        head++;
                        head %= EQ_LEN;
                }

                /*
                 * Now all outstanding events have been processed.  Update the
                 * head pointer.
                 */
                iproc_msi_write_reg(msi, IPROC_MSI_EQ_HEAD, eq, head);

                /*
                 * Now go read the tail pointer again to see if there are new
                 * outstanding events that came in during the above window.
                 */
        } while (true);

        chained_irq_exit(chip, desc);
}

static void iproc_msi_enable(struct iproc_msi *msi)
{
        int i, eq;
        u32 val;

        /* Program memory region for each event queue */
        for (i = 0; i < msi->nr_eq_region; i++) {
                dma_addr_t addr = msi->eq_dma + (i * EQ_MEM_REGION_SIZE);

                iproc_msi_write_reg(msi, IPROC_MSI_EQ_PAGE, i,
                                    lower_32_bits(addr));
                iproc_msi_write_reg(msi, IPROC_MSI_EQ_PAGE_UPPER, i,
                                    upper_32_bits(addr));
        }

        /* Program address region for MSI posted writes */
        for (i = 0; i < msi->nr_msi_region; i++) {
                phys_addr_t addr = msi->msi_addr + (i * MSI_MEM_REGION_SIZE);

                iproc_msi_write_reg(msi, IPROC_MSI_PAGE, i,
                                    lower_32_bits(addr));
                iproc_msi_write_reg(msi, IPROC_MSI_PAGE_UPPER, i,
                                    upper_32_bits(addr));
        }

        for (eq = 0; eq < msi->nr_irqs; eq++) {
                /* Enable MSI event queue */
                val = IPROC_MSI_INTR_EN | IPROC_MSI_INT_N_EVENT |
                      IPROC_MSI_EQ_EN;
                iproc_msi_write_reg(msi, IPROC_MSI_CTRL, eq, val);

                /*
                 * Some legacy platforms require the MSI interrupt enable
                 * register to be set explicitly.
                 */
                if (msi->has_inten_reg) {
                        val = iproc_msi_read_reg(msi, IPROC_MSI_INTS_EN, eq);
                        val |= BIT(eq);
                        iproc_msi_write_reg(msi, IPROC_MSI_INTS_EN, eq, val);
                }
        }
}

static void iproc_msi_disable(struct iproc_msi *msi)
{
        u32 eq, val;

        for (eq = 0; eq < msi->nr_irqs; eq++) {
                if (msi->has_inten_reg) {
                        val = iproc_msi_read_reg(msi, IPROC_MSI_INTS_EN, eq);
                        val &= ~BIT(eq);
                        iproc_msi_write_reg(msi, IPROC_MSI_INTS_EN, eq, val);
                }

                val = iproc_msi_read_reg(msi, IPROC_MSI_CTRL, eq);
                val &= ~(IPROC_MSI_INTR_EN | IPROC_MSI_INT_N_EVENT |
                         IPROC_MSI_EQ_EN);
                iproc_msi_write_reg(msi, IPROC_MSI_CTRL, eq, val);
        }
}

static int iproc_msi_alloc_domains(struct device_node *node,
                                   struct iproc_msi *msi)
{
        msi->inner_domain = irq_domain_add_linear(NULL, msi->nr_msi_vecs,
                                                  &msi_domain_ops, msi);
        if (!msi->inner_domain)
                return -ENOMEM;

        msi->msi_domain = pci_msi_create_irq_domain(of_node_to_fwnode(node),
                                                    &iproc_msi_domain_info,
                                                    msi->inner_domain);
        if (!msi->msi_domain) {
                irq_domain_remove(msi->inner_domain);
                return -ENOMEM;
        }

        return 0;
}

static void iproc_msi_free_domains(struct iproc_msi *msi)
{
        if (msi->msi_domain)
                irq_domain_remove(msi->msi_domain);

        if (msi->inner_domain)
                irq_domain_remove(msi->inner_domain);
}

static void iproc_msi_irq_free(struct iproc_msi *msi, unsigned int cpu)
{
        int i;

        for (i = cpu; i < msi->nr_irqs; i += msi->nr_cpus) {
                irq_set_chained_handler_and_data(msi->grps[i].gic_irq,
                                                 NULL, NULL);
        }
}

static int iproc_msi_irq_setup(struct iproc_msi *msi, unsigned int cpu)
{
        int i, ret;
        cpumask_var_t mask;
        struct iproc_pcie *pcie = msi->pcie;

        for (i = cpu; i < msi->nr_irqs; i += msi->nr_cpus) {
                irq_set_chained_handler_and_data(msi->grps[i].gic_irq,
                                                 iproc_msi_handler,
                                                 &msi->grps[i]);

                /* Dedicate GIC interrupt to each CPU core */
                if (alloc_cpumask_var(&mask, GFP_KERNEL)) {
                        cpumask_clear(mask);
                        cpumask_set_cpu(cpu, mask);
                        ret = irq_set_affinity(msi->grps[i].gic_irq, mask);
                        if (ret)
                                dev_err(pcie->dev,
                                        "failed to set affinity for IRQ%d\n",
                                        msi->grps[i].gic_irq);
                        free_cpumask_var(mask);
                } else {
                        dev_err(pcie->dev, "failed to alloc CPU mask\n");
                        ret = -EINVAL;
                }

                if (ret) {
                        /* Free all configured/unconfigured IRQs */
                        iproc_msi_irq_free(msi, cpu);
                        return ret;
                }
        }

        return 0;
}

int iproc_msi_init(struct iproc_pcie *pcie, struct device_node *node)
{
        struct iproc_msi *msi;
        int i, ret;
        unsigned int cpu;

        if (!of_device_is_compatible(node, "brcm,iproc-msi"))
                return -ENODEV;

        if (!of_find_property(node, "msi-controller", NULL))
                return -ENODEV;

        if (pcie->msi)
                return -EBUSY;

        msi = devm_kzalloc(pcie->dev, sizeof(*msi), GFP_KERNEL);
        if (!msi)
                return -ENOMEM;

        msi->pcie = pcie;
        pcie->msi = msi;
        msi->msi_addr = pcie->base_addr;
        mutex_init(&msi->bitmap_lock);
        msi->nr_cpus = num_possible_cpus();

        if (msi->nr_cpus == 1)
                iproc_msi_domain_info.flags |= MSI_FLAG_MULTI_PCI_MSI;

        msi->nr_irqs = of_irq_count(node);
        if (!msi->nr_irqs) {
                dev_err(pcie->dev, "found no MSI GIC interrupt\n");
                return -ENODEV;
        }

        if (msi->nr_irqs > NR_HW_IRQS) {
                dev_warn(pcie->dev, "too many MSI GIC interrupts defined %d\n",
                         msi->nr_irqs);
                msi->nr_irqs = NR_HW_IRQS;
        }

        if (msi->nr_irqs < msi->nr_cpus) {
                dev_err(pcie->dev,
                        "not enough GIC interrupts for MSI affinity\n");
                return -EINVAL;
        }

        if (msi->nr_irqs % msi->nr_cpus != 0) {
                msi->nr_irqs -= msi->nr_irqs % msi->nr_cpus;
                dev_warn(pcie->dev, "Reducing number of interrupts to %d\n",
                         msi->nr_irqs);
        }

        switch (pcie->type) {
        case IPROC_PCIE_PAXB_BCMA:
        case IPROC_PCIE_PAXB:
                msi->reg_offsets = iproc_msi_reg_paxb;
                msi->nr_eq_region = 1;
                msi->nr_msi_region = 1;
                break;
        case IPROC_PCIE_PAXC:
                msi->reg_offsets = iproc_msi_reg_paxc;
                msi->nr_eq_region = msi->nr_irqs;
                msi->nr_msi_region = msi->nr_irqs;
                break;
        default:
                dev_err(pcie->dev, "incompatible iProc PCIe interface\n");
                return -EINVAL;
        }

        if (of_find_property(node, "brcm,pcie-msi-inten", NULL))
                msi->has_inten_reg = true;

        msi->nr_msi_vecs = msi->nr_irqs * EQ_LEN;
        msi->bitmap = devm_bitmap_zalloc(pcie->dev, msi->nr_msi_vecs,
                                         GFP_KERNEL);
        if (!msi->bitmap)
                return -ENOMEM;

        msi->grps = devm_kcalloc(pcie->dev, msi->nr_irqs, sizeof(*msi->grps),
                                 GFP_KERNEL);
        if (!msi->grps)
                return -ENOMEM;

        for (i = 0; i < msi->nr_irqs; i++) {
                unsigned int irq = irq_of_parse_and_map(node, i);

                if (!irq) {
                        dev_err(pcie->dev, "unable to parse/map interrupt\n");
                        ret = -ENODEV;
                        goto free_irqs;
                }
                msi->grps[i].gic_irq = irq;
                msi->grps[i].msi = msi;
                msi->grps[i].eq = i;
        }

        /* Reserve memory for the event queues and make sure it is zeroed */
        msi->eq_cpu = dma_alloc_coherent(pcie->dev,
                                         msi->nr_eq_region * EQ_MEM_REGION_SIZE,
                                         &msi->eq_dma, GFP_KERNEL);
        if (!msi->eq_cpu) {
                ret = -ENOMEM;
                goto free_irqs;
        }

        ret = iproc_msi_alloc_domains(node, msi);
        if (ret) {
                dev_err(pcie->dev, "failed to create MSI domains\n");
                goto free_eq_dma;
        }

        for_each_online_cpu(cpu) {
                ret = iproc_msi_irq_setup(msi, cpu);
                if (ret)
                        goto free_msi_irq;
        }

        iproc_msi_enable(msi);

        return 0;

free_msi_irq:
        for_each_online_cpu(cpu)
                iproc_msi_irq_free(msi, cpu);
        iproc_msi_free_domains(msi);

free_eq_dma:
        dma_free_coherent(pcie->dev, msi->nr_eq_region * EQ_MEM_REGION_SIZE,
                          msi->eq_cpu, msi->eq_dma);

free_irqs:
        for (i = 0; i < msi->nr_irqs; i++) {
                if (msi->grps[i].gic_irq)
                        irq_dispose_mapping(msi->grps[i].gic_irq);
        }
        pcie->msi = NULL;

        return ret;
}
EXPORT_SYMBOL(iproc_msi_init);

void iproc_msi_exit(struct iproc_pcie *pcie)
{
        struct iproc_msi *msi = pcie->msi;
        unsigned int i, cpu;

        if (!msi)
                return;

        iproc_msi_disable(msi);

        for_each_online_cpu(cpu)
                iproc_msi_irq_free(msi, cpu);

        iproc_msi_free_domains(msi);

        dma_free_coherent(pcie->dev, msi->nr_eq_region * EQ_MEM_REGION_SIZE,
                          msi->eq_cpu, msi->eq_dma);

        for (i = 0; i < msi->nr_irqs; i++) {
                if (msi->grps[i].gic_irq)
                        irq_dispose_mapping(msi->grps[i].gic_irq);
        }
}
EXPORT_SYMBOL(iproc_msi_exit);