/* pci-msm-msi.c (extraction banner and scrollbar line-number runs removed) */
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. */
  3. #include <linux/interrupt.h>
  4. #include <linux/iommu.h>
  5. #include <linux/ipc_logging.h>
  6. #include <linux/irqchip/chained_irq.h>
  7. #include <linux/irqdomain.h>
  8. #include <linux/msi.h>
  9. #include <linux/msm_pcie.h>
  10. #include <linux/of_address.h>
  11. #include <linux/of_irq.h>
  12. #include <linux/of_pci.h>
  13. #include <linux/pci.h>
  14. #include <linux/platform_device.h>
/*
 * Synopsys MSI controller register map, offsets into the "dm_core" PCIe
 * register space. Each group (n) of MSI_IRQ_PER_GRP vectors has its own
 * enable/mask/status register triplet, 0xc bytes apart.
 */
#define PCIE_MSI_CTRL_BASE (0x820)
#define PCIE_MSI_CTRL_ADDR_OFFS (PCIE_MSI_CTRL_BASE)
#define PCIE_MSI_CTRL_UPPER_ADDR_OFFS (PCIE_MSI_CTRL_BASE + 0x4)
#define PCIE_MSI_CTRL_INT_N_EN_OFFS(n) (PCIE_MSI_CTRL_BASE + 0x8 + 0xc * (n))
#define PCIE_MSI_CTRL_INT_N_MASK_OFFS(n) (PCIE_MSI_CTRL_BASE + 0xc + 0xc * (n))
#define PCIE_MSI_CTRL_INT_N_STATUS_OFFS(n) \
	(PCIE_MSI_CTRL_BASE + 0x10 + 0xc * (n))

/* number of MSI vectors served by one hardware interrupt/group */
#define MSI_IRQ_PER_GRP (32)
/* Flavor of MSI termination hardware behind the PCIe root complex. */
enum msi_type {
	MSM_MSI_TYPE_QCOM,	/* QGIC doorbell, outside the PCIe core */
	MSM_MSI_TYPE_SNPS,	/* Synopsys controller inside the PCIe core */
};

/* Per-vector state for one MSI interrupt. */
struct msm_msi_irq {
	struct msm_msi_client *client;	/* owning endpoint client */
	struct msm_msi_grp *grp; /* group the irq belongs to */
	u32 grp_index; /* index in the group */
	unsigned int hwirq; /* MSI controller hwirq */
	unsigned int virq; /* MSI controller virq */
	u32 pos; /* position in MSI bitmap */
};

/* A bank of MSI_IRQ_PER_GRP vectors sharing one set of control registers. */
struct msm_msi_grp {
	/* registers for SNPS only */
	void __iomem *int_en_reg;
	void __iomem *int_mask_reg;
	void __iomem *int_status_reg;
	u32 mask; /* tracks masked/unmasked MSI */
	struct msm_msi_irq irqs[MSI_IRQ_PER_GRP];
};

/* MSI controller state; one instance per "msi-parent" DT node. */
struct msm_msi {
	struct list_head clients;	/* registered msm_msi_client entries */
	struct device *dev;
	struct device_node *of_node;
	int nr_hwirqs;	/* GIC interrupts wired to the controller */
	int nr_virqs;	/* MSI vectors exposed to endpoints */
	int nr_grps;	/* allocated msm_msi_grp entries */
	struct msm_msi_grp *grps;
	unsigned long *bitmap; /* tracks used/unused MSI */
	struct mutex mutex; /* mutex for modifying MSI client list and bitmap */
	struct irq_domain *inner_domain; /* parent domain; gen irq related */
	struct irq_domain *msi_domain; /* child domain; pci related */
	phys_addr_t msi_addr;	/* doorbell address from the DT 'reg' entry */
	u32 msi_addr_size;	/* doorbell region size in bytes */
	enum msi_type type;
	spinlock_t cfg_lock; /* lock for configuring Synopsys MSI registers */
	bool cfg_access; /* control access to MSI registers */
	void __iomem *pcie_cfg;	/* "dm_core" register mapping; SNPS only */
	void (*mask_irq)(struct irq_data *data);	/* type-specific mask */
	void (*unmask_irq)(struct irq_data *data);	/* type-specific unmask */
};

/* structure for each client of MSI controller */
struct msm_msi_client {
	struct list_head node;	/* entry in msm_msi.clients */
	struct msm_msi *msi;
	struct device *dev; /* client's dev of pci_dev */
	u32 nr_irqs; /* nr_irqs allocated for client */
	dma_addr_t msi_addr;	/* doorbell address as seen by this client */
};
/*
 * Chained handler for one Synopsys MSI group interrupt.
 *
 * Reads the group's status register, re-applies the mask value cached by
 * msm_msi_snps_mask_irq() (which does not touch hardware itself), acks the
 * unmasked pending bits and dispatches the corresponding virqs.
 */
static void msm_msi_snps_handler(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct msm_msi_grp *msi_grp;
	int i;
	u32 status, mask;

	chained_irq_enter(chip, desc);

	msi_grp = irq_desc_get_handler_data(desc);

	status = readl_relaxed(msi_grp->int_status_reg);
	/* always update the mask set in msm_msi_snps_mask_irq */
	mask = msi_grp->mask;
	writel_relaxed(mask, msi_grp->int_mask_reg);

	/* process only interrupts which are not masked */
	status ^= (status & mask);
	/* write-to-clear the bits we are about to handle */
	writel_relaxed(status, msi_grp->int_status_reg);

	for (i = 0; status; i++, status >>= 1)
		if (status & 0x1)
			generic_handle_irq(msi_grp->irqs[i].virq);

	chained_irq_exit(chip, desc);
}
  92. static void msm_msi_qgic_handler(struct irq_desc *desc)
  93. {
  94. struct irq_chip *chip = irq_desc_get_chip(desc);
  95. struct msm_msi *msi;
  96. unsigned int virq;
  97. chained_irq_enter(chip, desc);
  98. msi = irq_desc_get_handler_data(desc);
  99. virq = irq_find_mapping(msi->inner_domain, irq_desc_get_irq(desc));
  100. generic_handle_irq(virq);
  101. chained_irq_exit(chip, desc);
  102. }
  103. static void msm_msi_snps_mask_irq(struct irq_data *data)
  104. {
  105. struct msm_msi_irq *msi_irq = irq_data_get_irq_chip_data(data);
  106. struct msm_msi_grp *msi_grp = msi_irq->grp;
  107. struct msm_msi *msi = msi_irq->client->msi;
  108. unsigned long flags;
  109. spin_lock_irqsave(&msi->cfg_lock, flags);
  110. msi_grp->mask |= BIT(msi_irq->grp_index);
  111. spin_unlock_irqrestore(&msi->cfg_lock, flags);
  112. }
  113. static void msm_msi_qgic_mask_irq(struct irq_data *data)
  114. {
  115. struct irq_data *parent_data;
  116. parent_data = irq_get_irq_data(irqd_to_hwirq(data));
  117. if (!parent_data || !parent_data->chip)
  118. return;
  119. parent_data->chip->irq_mask(parent_data);
  120. }
/*
 * Top-level mask for the PCI MSI (child) domain irq_chip.
 *
 * Masks at the endpoint's MSI capability first (skipped while cfg_access
 * is false, i.e. config space is off limits), then at the MSM controller
 * through the type-specific mask_irq callback. Mirror image of
 * msm_msi_unmask_irq(), which works in the opposite order.
 */
static void msm_msi_mask_irq(struct irq_data *data)
{
	struct irq_data *parent_data;
	struct msm_msi_irq *msi_irq;
	struct msm_msi *msi;
	unsigned long flags;

	parent_data = data->parent_data;
	if (!parent_data)
		return;

	msi_irq = irq_data_get_irq_chip_data(parent_data);
	msi = msi_irq->client->msi;

	spin_lock_irqsave(&msi->cfg_lock, flags);
	if (msi->cfg_access)
		pci_msi_mask_irq(data);
	spin_unlock_irqrestore(&msi->cfg_lock, flags);

	msi->mask_irq(parent_data);
}
/*
 * Synopsys unmask: clear the cached mask bit and, only while register
 * access is allowed (cfg_access), write the new mask to hardware. If
 * access is currently blocked, msm_msi_config() restores the cached mask
 * when access is re-enabled.
 */
static void msm_msi_snps_unmask_irq(struct irq_data *data)
{
	struct msm_msi_irq *msi_irq = irq_data_get_irq_chip_data(data);
	struct msm_msi_grp *msi_grp = msi_irq->grp;
	struct msm_msi *msi = msi_irq->client->msi;
	unsigned long flags;

	spin_lock_irqsave(&msi->cfg_lock, flags);

	msi_grp->mask &= ~BIT(msi_irq->grp_index);
	if (msi->cfg_access)
		writel_relaxed(msi_grp->mask, msi_grp->int_mask_reg);

	spin_unlock_irqrestore(&msi->cfg_lock, flags);
}
  150. static void msm_msi_qgic_unmask_irq(struct irq_data *data)
  151. {
  152. struct irq_data *parent_data;
  153. parent_data = irq_get_irq_data(irqd_to_hwirq(data));
  154. if (!parent_data || !parent_data->chip)
  155. return;
  156. parent_data->chip->irq_unmask(parent_data);
  157. }
/*
 * Top-level unmask for the PCI MSI (child) domain irq_chip.
 *
 * Unmasks at the MSM controller first, then at the endpoint's MSI
 * capability (skipped while cfg_access is false) — the reverse order of
 * msm_msi_mask_irq().
 */
static void msm_msi_unmask_irq(struct irq_data *data)
{
	struct irq_data *parent_data;
	struct msm_msi_irq *msi_irq;
	struct msm_msi *msi;
	unsigned long flags;

	parent_data = data->parent_data;
	if (!parent_data)
		return;

	msi_irq = irq_data_get_irq_chip_data(parent_data);
	msi = msi_irq->client->msi;

	msi->unmask_irq(parent_data);

	spin_lock_irqsave(&msi->cfg_lock, flags);
	if (msi->cfg_access)
		pci_msi_unmask_irq(data);
	spin_unlock_irqrestore(&msi->cfg_lock, flags);
}
/* irq_chip presented to the PCI MSI (child) domain. */
static struct irq_chip msm_msi_irq_chip = {
	.name = "gic_msm_pci_msi",
	.irq_enable = msm_msi_unmask_irq,
	.irq_disable = msm_msi_mask_irq,
	.irq_mask = msm_msi_mask_irq,
	.irq_unmask = msm_msi_unmask_irq,
};
/*
 * .msi_prepare: called once per client device before vectors are
 * allocated. Registers a per-device client record and, for the QGIC
 * flavor, maps the doorbell through the client's DMA path (PCIe SMMU).
 * The client is torn down in msm_msi_irq_domain_free() once its last
 * vector is released.
 */
static int msm_msi_domain_prepare(struct irq_domain *domain, struct device *dev,
				  int nvec, msi_alloc_info_t *arg)
{
	struct msm_msi *msi = domain->parent->host_data;
	struct msm_msi_client *client;

	client = kzalloc(sizeof(*client), GFP_KERNEL);
	if (!client)
		return -ENOMEM;

	client->msi = msi;
	client->dev = dev;
	client->msi_addr = msi->msi_addr;

	/*
	 * Accesses to QGIC MSI doorbell register goes through PCIe SMMU and
	 * needs to be mapped. Synopsys MSI doorbell is within the PCIe core
	 * and does not need to be mapped.
	 */
	if (msi->type == MSM_MSI_TYPE_QCOM) {
		client->msi_addr = dma_map_resource(client->dev, msi->msi_addr,
						    msi->msi_addr_size,
						    DMA_FROM_DEVICE, 0);
		if (dma_mapping_error(client->dev, client->msi_addr)) {
			dev_err(msi->dev, "MSI: failed to map msi address\n");
			client->msi_addr = 0;
			kfree(client);
			return -ENOMEM;
		}
	}

	mutex_lock(&msi->mutex);
	list_add_tail(&client->node, &msi->clients);
	mutex_unlock(&msi->mutex);

	/* zero out struct for framework */
	memset(arg, 0, sizeof(*arg));

	return 0;
}
static struct msi_domain_ops msm_msi_domain_ops = {
	.msi_prepare = msm_msi_domain_prepare,
};

/* Child (PCI) domain info: default ops plus multi-MSI and MSI-X support. */
static struct msi_domain_info msm_msi_domain_info = {
	.flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
		 MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX,
	.ops = &msm_msi_domain_ops,
	.chip = &msm_msi_irq_chip,
};
  224. static int msm_msi_irq_set_affinity(struct irq_data *data,
  225. const struct cpumask *mask, bool force)
  226. {
  227. struct irq_data *parent_data = irq_get_irq_data(irqd_to_hwirq(data));
  228. if (!parent_data)
  229. return -ENODEV;
  230. /* set affinity for MSM MSI HW IRQ */
  231. if (parent_data->chip->irq_set_affinity)
  232. return parent_data->chip->irq_set_affinity(parent_data,
  233. mask, force);
  234. return -EINVAL;
  235. }
/*
 * Compose the MSI message for an allocated vector: the doorbell address
 * recorded for the client, with a data payload of either the parent GIC
 * hwirq (QGIC flavor) or the vector's bitmap position (Synopsys flavor).
 */
static void msm_msi_irq_compose_msi_msg(struct irq_data *data,
					struct msi_msg *msg)
{
	struct msm_msi_irq *msi_irq = irq_data_get_irq_chip_data(data);
	struct irq_data *parent_data = irq_get_irq_data(irqd_to_hwirq(data));
	struct msm_msi_client *client = msi_irq->client;
	struct msm_msi *msi = client->msi;

	if (!parent_data)
		return;

	msg->address_lo = lower_32_bits(client->msi_addr);
	msg->address_hi = upper_32_bits(client->msi_addr);

	msg->data = (msi->type == MSM_MSI_TYPE_QCOM) ?
			irqd_to_hwirq(parent_data) : msi_irq->pos;
}

/* irq_chip for the inner (parent) domain vectors. */
static struct irq_chip msm_msi_bottom_irq_chip = {
	.name = "msm_msi",
	.irq_set_affinity = msm_msi_irq_set_affinity,
	.irq_compose_msi_msg = msm_msi_irq_compose_msi_msg,
};
/*
 * Inner-domain .alloc: reserve nr_irqs contiguous positions in the MSI
 * bitmap (aligned via the nr_irqs - 1 align mask, as multi-MSI blocks
 * require) and bind each vector to the requesting client.
 */
static int msm_msi_irq_domain_alloc(struct irq_domain *domain,
				    unsigned int virq, unsigned int nr_irqs,
				    void *args)
{
	struct msm_msi *msi = domain->host_data;
	struct msm_msi_client *tmp, *client = NULL;
	struct device *dev = ((msi_alloc_info_t *)args)->desc->dev;
	int i, ret = 0;
	int pos;

	mutex_lock(&msi->mutex);
	/* client must have been registered by msm_msi_domain_prepare() */
	list_for_each_entry(tmp, &msi->clients, node) {
		if (tmp->dev == dev) {
			client = tmp;
			break;
		}
	}
	if (!client) {
		dev_err(msi->dev, "MSI: failed to find client dev\n");
		ret = -ENODEV;
		goto out;
	}

	pos = bitmap_find_next_zero_area(msi->bitmap, msi->nr_virqs, 0,
					 nr_irqs, nr_irqs - 1);
	if (pos < msi->nr_virqs) {
		bitmap_set(msi->bitmap, pos, nr_irqs);
	} else {
		/* no contiguous aligned block of vectors left */
		ret = -ENOSPC;
		goto out;
	}

	for (i = 0; i < nr_irqs; i++) {
		u32 grp = pos / MSI_IRQ_PER_GRP;
		u32 index = pos % MSI_IRQ_PER_GRP;
		struct msm_msi_irq *msi_irq = &msi->grps[grp].irqs[index];

		msi_irq->virq = virq + i;
		msi_irq->client = client;
		irq_domain_set_info(domain, msi_irq->virq,
				    msi_irq->hwirq,
				    &msm_msi_bottom_irq_chip, msi_irq,
				    handle_simple_irq, NULL, NULL);
		client->nr_irqs++;
		pos++;
	}

out:
	mutex_unlock(&msi->mutex);
	return ret;
}
  301. static void msm_msi_irq_domain_free(struct irq_domain *domain,
  302. unsigned int virq, unsigned int nr_irqs)
  303. {
  304. struct irq_data *data = irq_domain_get_irq_data(domain, virq);
  305. struct msm_msi_irq *msi_irq;
  306. struct msm_msi_client *client;
  307. struct msm_msi *msi;
  308. if (!data)
  309. return;
  310. msi_irq = irq_data_get_irq_chip_data(data);
  311. client = msi_irq->client;
  312. msi = client->msi;
  313. mutex_lock(&msi->mutex);
  314. bitmap_clear(msi->bitmap, msi_irq->pos, nr_irqs);
  315. client->nr_irqs -= nr_irqs;
  316. if (!client->nr_irqs) {
  317. if (msi->type == MSM_MSI_TYPE_QCOM)
  318. dma_unmap_resource(client->dev, client->msi_addr,
  319. PAGE_SIZE, DMA_FROM_DEVICE, 0);
  320. list_del(&client->node);
  321. kfree(client);
  322. }
  323. mutex_unlock(&msi->mutex);
  324. irq_domain_free_irqs_parent(domain, virq, nr_irqs);
  325. }
/* alloc/free ops for the inner (parent) IRQ domain. */
static const struct irq_domain_ops msi_domain_ops = {
	.alloc = msm_msi_irq_domain_alloc,
	.free = msm_msi_irq_domain_free,
};
  330. static int msm_msi_alloc_domains(struct msm_msi *msi)
  331. {
  332. msi->inner_domain = irq_domain_add_linear(NULL, msi->nr_virqs,
  333. &msi_domain_ops, msi);
  334. if (!msi->inner_domain) {
  335. dev_err(msi->dev, "MSI: failed to create IRQ domain\n");
  336. return -ENOMEM;
  337. }
  338. msi->msi_domain = pci_msi_create_irq_domain(
  339. of_node_to_fwnode(msi->of_node),
  340. &msm_msi_domain_info,
  341. msi->inner_domain);
  342. if (!msi->msi_domain) {
  343. dev_err(msi->dev, "MSI: failed to create MSI domain\n");
  344. irq_domain_remove(msi->inner_domain);
  345. return -ENOMEM;
  346. }
  347. return 0;
  348. }
/*
 * Wire up each Synopsys MSI group: map the DT interrupt, mark it wake
 * capable, record the group's control registers and install the chained
 * handler. On failure, fully unwinds the groups set up so far.
 */
static int msm_msi_snps_irq_setup(struct msm_msi *msi)
{
	int i, index, ret;
	struct msm_msi_grp *msi_grp;
	struct msm_msi_irq *msi_irq;
	unsigned int irq = 0;

	/* setup each MSI group. nr_hwirqs == nr_grps */
	for (i = 0; i < msi->nr_hwirqs; i++) {
		irq = irq_of_parse_and_map(msi->of_node, i);
		if (!irq) {
			dev_err(msi->dev,
				"MSI: failed to parse/map interrupt\n");
			ret = -ENODEV;
			goto free_irqs;
		}

		ret = enable_irq_wake(irq);
		if (ret) {
			dev_err(msi->dev,
				"MSI: Unable to set enable_irq_wake for interrupt: %d: %d\n",
				i, irq);
			goto free_irq;
		}

		msi_grp = &msi->grps[i];
		msi_grp->int_en_reg = msi->pcie_cfg +
				PCIE_MSI_CTRL_INT_N_EN_OFFS(i);
		msi_grp->int_mask_reg = msi->pcie_cfg +
				PCIE_MSI_CTRL_INT_N_MASK_OFFS(i);
		msi_grp->int_status_reg = msi->pcie_cfg +
				PCIE_MSI_CTRL_INT_N_STATUS_OFFS(i);

		for (index = 0; index < MSI_IRQ_PER_GRP; index++) {
			msi_irq = &msi_grp->irqs[index];

			msi_irq->grp = msi_grp;
			msi_irq->grp_index = index;
			msi_irq->pos = (i * MSI_IRQ_PER_GRP) + index;
			/* all vectors of a group share one GIC interrupt */
			msi_irq->hwirq = irq;
		}

		irq_set_chained_handler_and_data(irq, msm_msi_snps_handler,
						 msi_grp);
	}

	return 0;

free_irq:
	/* group i was mapped but not fully set up */
	irq_dispose_mapping(irq);
free_irqs:
	/* unwind the groups that were fully set up */
	for (--i; i >= 0; i--) {
		irq = msi->grps[i].irqs[0].hwirq;

		irq_set_chained_handler_and_data(irq, NULL, NULL);
		disable_irq_wake(irq);
		irq_dispose_mapping(irq);
	}

	return ret;
}
/*
 * Wire up each QGIC-backed MSI vector: one GIC interrupt per vector,
 * bucketed MSI_IRQ_PER_GRP at a time for bookkeeping. On failure, fully
 * unwinds the vectors set up so far.
 *
 * NOTE(review): msm_msi_init() allocates nr_grps == 1 for the QGIC
 * flavor, yet grp = i / MSI_IRQ_PER_GRP indexes msi->grps — more than
 * MSI_IRQ_PER_GRP hwirqs would index past the allocation. Confirm DT
 * never supplies more than 32 interrupts, or size grps accordingly.
 */
static int msm_msi_qgic_irq_setup(struct msm_msi *msi)
{
	int i, ret;
	u32 index, grp;
	struct msm_msi_grp *msi_grp;
	struct msm_msi_irq *msi_irq;
	unsigned int irq = 0;

	for (i = 0; i < msi->nr_hwirqs; i++) {
		irq = irq_of_parse_and_map(msi->of_node, i);
		if (!irq) {
			dev_err(msi->dev,
				"MSI: failed to parse/map interrupt\n");
			ret = -ENODEV;
			goto free_irqs;
		}

		ret = enable_irq_wake(irq);
		if (ret) {
			dev_err(msi->dev,
				"MSI: Unable to set enable_irq_wake for interrupt: %d: %d\n",
				i, irq);
			goto free_irq;
		}

		grp = i / MSI_IRQ_PER_GRP;
		index = i % MSI_IRQ_PER_GRP;
		msi_grp = &msi->grps[grp];
		msi_irq = &msi_grp->irqs[index];

		msi_irq->grp = msi_grp;
		msi_irq->grp_index = index;
		msi_irq->pos = i;
		msi_irq->hwirq = irq;

		irq_set_chained_handler_and_data(irq, msm_msi_qgic_handler,
						 msi);
	}

	return 0;

free_irq:
	/* vector i was mapped but not fully set up */
	irq_dispose_mapping(irq);
free_irqs:
	/* unwind the vectors that were fully set up */
	for (--i; i >= 0; i--) {
		grp = i / MSI_IRQ_PER_GRP;
		index = i % MSI_IRQ_PER_GRP;

		irq = msi->grps[grp].irqs[index].hwirq;
		irq_set_chained_handler_and_data(irq, NULL, NULL);
		disable_irq_wake(irq);
		irq_dispose_mapping(irq);
	}

	return ret;
}
  447. /* control access to PCIe MSI registers */
  448. void msm_msi_config_access(struct irq_domain *domain, bool allow)
  449. {
  450. struct msm_msi *msi = domain->parent->host_data;
  451. unsigned long flags;
  452. spin_lock_irqsave(&msi->cfg_lock, flags);
  453. msi->cfg_access = allow;
  454. spin_unlock_irqrestore(&msi->cfg_lock, flags);
  455. }
  456. EXPORT_SYMBOL(msm_msi_config_access);
  457. void msm_msi_config(struct irq_domain *domain)
  458. {
  459. struct msm_msi *msi;
  460. int i;
  461. msi = domain->parent->host_data;
  462. /* PCIe core driver sets to false during LPM */
  463. msm_msi_config_access(domain, true);
  464. if (msi->type == MSM_MSI_TYPE_QCOM)
  465. return;
  466. /* program Synopsys MSI termination address */
  467. writel_relaxed(msi->msi_addr, msi->pcie_cfg + PCIE_MSI_CTRL_ADDR_OFFS);
  468. writel_relaxed(0, msi->pcie_cfg + PCIE_MSI_CTRL_UPPER_ADDR_OFFS);
  469. /* restore mask and enable all interrupts for each group */
  470. for (i = 0; i < msi->nr_grps; i++) {
  471. struct msm_msi_grp *msi_grp = &msi->grps[i];
  472. writel_relaxed(msi_grp->mask, msi_grp->int_mask_reg);
  473. writel_relaxed(~0, msi_grp->int_en_reg);
  474. }
  475. }
  476. EXPORT_SYMBOL(msm_msi_config);
  477. int msm_msi_init(struct device *dev)
  478. {
  479. int ret;
  480. struct msm_msi *msi;
  481. struct device_node *of_node;
  482. const __be32 *prop_val;
  483. struct of_phandle_args irq;
  484. u32 size_exp = 0;
  485. struct resource *res;
  486. int (*msi_irq_setup)(struct msm_msi *msi);
  487. if (!dev->of_node) {
  488. dev_err(dev, "MSI: missing DT node\n");
  489. return -EINVAL;
  490. }
  491. of_node = of_parse_phandle(dev->of_node, "msi-parent", 0);
  492. if (!of_node) {
  493. dev_err(dev, "MSI: no phandle for MSI found\n");
  494. return -ENODEV;
  495. }
  496. if (!of_device_is_compatible(of_node, "qcom,pci-msi")) {
  497. dev_err(dev, "MSI: no compatible qcom,pci-msi found\n");
  498. ret = -ENODEV;
  499. goto err;
  500. }
  501. if (!of_find_property(of_node, "msi-controller", NULL)) {
  502. ret = -ENODEV;
  503. goto err;
  504. }
  505. msi = kzalloc(sizeof(*msi), GFP_KERNEL);
  506. if (!msi) {
  507. ret = -ENOMEM;
  508. goto err;
  509. }
  510. msi->dev = dev;
  511. msi->of_node = of_node;
  512. mutex_init(&msi->mutex);
  513. spin_lock_init(&msi->cfg_lock);
  514. INIT_LIST_HEAD(&msi->clients);
  515. prop_val = of_get_address(msi->of_node, 0, NULL, NULL);
  516. if (!prop_val) {
  517. dev_err(msi->dev, "MSI: missing 'reg' devicetree\n");
  518. ret = -EINVAL;
  519. goto err;
  520. }
  521. msi->msi_addr = be32_to_cpup(prop_val);
  522. if (!msi->msi_addr) {
  523. dev_err(msi->dev, "MSI: failed to get MSI address\n");
  524. ret = -EINVAL;
  525. goto err;
  526. }
  527. of_property_read_u32(of_node, "qcom,msi-addr-size-exp", &size_exp);
  528. size_exp = (size_exp > PAGE_SHIFT) ? size_exp : PAGE_SHIFT;
  529. msi->msi_addr_size = 1 << size_exp;
  530. msi->type = of_property_read_bool(msi->of_node, "qcom,snps") ?
  531. MSM_MSI_TYPE_SNPS : MSM_MSI_TYPE_QCOM;
  532. dev_info(msi->dev, "MSI: %s controller is present\n",
  533. msi->type == MSM_MSI_TYPE_SNPS ? "synopsys" : "qgic");
  534. while (of_irq_parse_one(msi->of_node, msi->nr_hwirqs, &irq) == 0)
  535. msi->nr_hwirqs++;
  536. if (!msi->nr_hwirqs) {
  537. dev_err(msi->dev, "MSI: found no MSI interrupts\n");
  538. ret = -ENODEV;
  539. goto err;
  540. }
  541. if (msi->type == MSM_MSI_TYPE_SNPS) {
  542. res = platform_get_resource_byname(to_platform_device(dev),
  543. IORESOURCE_MEM, "dm_core");
  544. if (!res) {
  545. dev_err(msi->dev,
  546. "MSI: failed to get PCIe register base\n");
  547. ret = -ENODEV;
  548. goto err;
  549. }
  550. msi->pcie_cfg = ioremap(res->start, resource_size(res));
  551. if (!msi->pcie_cfg) {
  552. ret = -ENOMEM;
  553. goto free_msi;
  554. }
  555. msi->nr_virqs = msi->nr_hwirqs * MSI_IRQ_PER_GRP;
  556. msi->nr_grps = msi->nr_hwirqs;
  557. msi->mask_irq = msm_msi_snps_mask_irq;
  558. msi->unmask_irq = msm_msi_snps_unmask_irq;
  559. msi_irq_setup = msm_msi_snps_irq_setup;
  560. } else {
  561. msi->nr_virqs = msi->nr_hwirqs;
  562. msi->nr_grps = 1;
  563. msi->mask_irq = msm_msi_qgic_mask_irq;
  564. msi->unmask_irq = msm_msi_qgic_unmask_irq;
  565. msi_irq_setup = msm_msi_qgic_irq_setup;
  566. }
  567. msi->grps = kcalloc(msi->nr_grps, sizeof(*msi->grps), GFP_KERNEL);
  568. if (!msi->grps) {
  569. ret = -ENOMEM;
  570. goto unmap_cfg;
  571. }
  572. msi->bitmap = kcalloc(BITS_TO_LONGS(msi->nr_virqs),
  573. sizeof(*msi->bitmap), GFP_KERNEL);
  574. if (!msi->bitmap) {
  575. ret = -ENOMEM;
  576. goto free_grps;
  577. }
  578. ret = msm_msi_alloc_domains(msi);
  579. if (ret) {
  580. dev_err(msi->dev, "MSI: failed to allocate MSI domains\n");
  581. goto free_bitmap;
  582. }
  583. ret = msi_irq_setup(msi);
  584. if (ret)
  585. goto remove_domains;
  586. msm_msi_config(msi->msi_domain);
  587. return 0;
  588. remove_domains:
  589. irq_domain_remove(msi->msi_domain);
  590. irq_domain_remove(msi->inner_domain);
  591. free_bitmap:
  592. kfree(msi->bitmap);
  593. free_grps:
  594. kfree(msi->grps);
  595. unmap_cfg:
  596. iounmap(msi->pcie_cfg);
  597. free_msi:
  598. kfree(msi);
  599. err:
  600. of_node_put(of_node);
  601. return ret;
  602. }
  603. EXPORT_SYMBOL(msm_msi_init);