// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2006 Jake Moilanen <[email protected]>, IBM Corp.
 * Copyright 2006-2007 Michael Ellerman, IBM Corp.
 */

#include <linux/crash_dump.h>
#include <linux/device.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/msi.h>

#include <asm/rtas.h>
#include <asm/hw_irq.h>
#include <asm/ppc-pci.h>
#include <asm/machdep.h>
#include <asm/xive.h>

#include "pseries.h"

static int query_token, change_token;

#define RTAS_QUERY_FN		0
#define RTAS_CHANGE_FN		1
#define RTAS_RESET_FN		2
#define RTAS_CHANGE_MSI_FN	3
#define RTAS_CHANGE_MSIX_FN	4
#define RTAS_CHANGE_32MSI_FN	5

/* RTAS Helpers */

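/*
 * Call the ibm,change-msi RTAS method to request @num_irqs interrupts
 * for the device at @pdn, retrying while firmware reports busy and
 * feeding the returned sequence number back into each retry. Returns
 * the number of interrupts assigned on success, or a negative error.
 */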
static int rtas_change_msi(struct pci_dn *pdn, u32 func, u32 num_irqs)
{
	u32 addr, seq_num, rtas_ret[3];
	unsigned long buid;
	int rc;

	addr = rtas_config_addr(pdn->busno, pdn->devfn, 0);
	buid = pdn->phb->buid;

	seq_num = 1;
	do {
		if (func == RTAS_CHANGE_MSI_FN || func == RTAS_CHANGE_MSIX_FN ||
		    func == RTAS_CHANGE_32MSI_FN)
			rc = rtas_call(change_token, 6, 4, rtas_ret, addr,
				       BUID_HI(buid), BUID_LO(buid),
				       func, num_irqs, seq_num);
		else
			rc = rtas_call(change_token, 6, 3, rtas_ret, addr,
				       BUID_HI(buid), BUID_LO(buid),
				       func, num_irqs, seq_num);

		seq_num = rtas_ret[1];
	} while (rtas_busy_delay(rc));

	/*
	 * If the RTAS call succeeded, return the number of irqs allocated.
	 * If not, make sure we return a negative error code.
	 */
	if (rc == 0)
		rc = rtas_ret[0];
	else if (rc > 0)
		rc = -rc;

	pr_debug("rtas_msi: ibm,change_msi(func=%d,num=%d), got %d rc = %d\n",
		 func, num_irqs, rtas_ret[0], rc);

	return rc;
}

static void rtas_disable_msi(struct pci_dev *pdev)
{
	struct pci_dn *pdn;

	pdn = pci_get_pdn(pdev);
	if (!pdn)
		return;

	/*
	 * disabling MSI with the explicit interface also disables MSI-X
	 */
	if (rtas_change_msi(pdn, RTAS_CHANGE_MSI_FN, 0) != 0) {
		/*
		 * may have failed because explicit interface is not
		 * present
		 */
		if (rtas_change_msi(pdn, RTAS_CHANGE_FN, 0) != 0) {
			pr_debug("rtas_msi: Setting MSIs to 0 failed!\n");
		}
	}
}

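/*
 * Ask firmware (ibm,query-interrupt-source-number) which hardware IRQ
 * number backs MSI entry @offset of the device at @pdn.
 */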
static int rtas_query_irq_number(struct pci_dn *pdn, int offset)
{
	u32 addr, rtas_ret[2];
	unsigned long buid;
	int rc;

	addr = rtas_config_addr(pdn->busno, pdn->devfn, 0);
	buid = pdn->phb->buid;

	do {
		rc = rtas_call(query_token, 4, 3, rtas_ret, addr,
			       BUID_HI(buid), BUID_LO(buid), offset);
	} while (rtas_busy_delay(rc));

	if (rc) {
		pr_debug("rtas_msi: error (%d) querying source number\n", rc);
		return rc;
	}

	return rtas_ret[0];
}

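/*
 * Compare the requested vector count against the device tree property
 * @prop_name, which gives the number of MSIs firmware supports for the
 * device. Returns 0 if the request fits, the (smaller) supported count
 * if it doesn't, or a negative error code.
 */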
static int check_req(struct pci_dev *pdev, int nvec, char *prop_name)
{
	struct device_node *dn;
	const __be32 *p;
	u32 req_msi;

	dn = pci_device_to_OF_node(pdev);

	p = of_get_property(dn, prop_name, NULL);
	if (!p) {
		pr_debug("rtas_msi: No %s on %pOF\n", prop_name, dn);
		return -ENOENT;
	}

	req_msi = be32_to_cpup(p);
	if (req_msi < nvec) {
		pr_debug("rtas_msi: %s requests < %d MSIs\n", prop_name, nvec);

		if (req_msi == 0) /* Be paranoid */
			return -ENOSPC;

		return req_msi;
	}

	return 0;
}

static int check_req_msi(struct pci_dev *pdev, int nvec)
{
	return check_req(pdev, nvec, "ibm,req#msi");
}

static int check_req_msix(struct pci_dev *pdev, int nvec)
{
	return check_req(pdev, nvec, "ibm,req#msi-x");
}

/* Quota calculation */

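/*
 * Walk up the device tree from @node looking for a parent carrying
 * "ibm,pe-total-#msi", the total number of MSIs available to the
 * partitionable endpoint (PE). Returns that node with a reference held,
 * or NULL if the property isn't found.
 */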
static struct device_node *__find_pe_total_msi(struct device_node *node, int *total)
{
	struct device_node *dn;
	const __be32 *p;

	dn = of_node_get(node);
	while (dn) {
		p = of_get_property(dn, "ibm,pe-total-#msi", NULL);
		if (p) {
			pr_debug("rtas_msi: found prop on dn %pOF\n",
				 dn);
			*total = be32_to_cpup(p);
			return dn;
		}

		dn = of_get_next_parent(dn);
	}

	return NULL;
}

static struct device_node *find_pe_total_msi(struct pci_dev *dev, int *total)
{
	return __find_pe_total_msi(pci_device_to_OF_node(dev), total);
}

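/*
 * Fallback for older firmware that lacks "ibm,pe-total-#msi": find the
 * PE device node via EEH and assume a fixed quota of 8 MSIs for the PE.
 */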
static struct device_node *find_pe_dn(struct pci_dev *dev, int *total)
{
	struct device_node *dn;
	struct eeh_dev *edev;

	/* Found our PE and assume 8 at that point. */
	dn = pci_device_to_OF_node(dev);
	if (!dn)
		return NULL;

	/* Get the top level device in the PE */
	edev = pdn_to_eeh_dev(PCI_DN(dn));
	if (edev->pe)
		edev = list_first_entry(&edev->pe->edevs, struct eeh_dev,
					entry);
	dn = pci_device_to_OF_node(edev->pdev);
	if (!dn)
		return NULL;

	/* We actually want the parent */
	dn = of_get_parent(dn);
	if (!dn)
		return NULL;

	/* Hardcode of 8 for old firmwares */
	*total = 8;
	pr_debug("rtas_msi: using PE dn %pOF\n", dn);

	return dn;
}

struct msi_counts {
	struct device_node *requestor;
	int num_devices;
	int request;
	int quota;
	int spare;
	int over_quota;
};

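/*
 * pci_traverse_device_nodes() callback: count the non-bridge devices
 * below the PE; these are the devices that share the PE's MSI quota.
 */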
static void *count_non_bridge_devices(struct device_node *dn, void *data)
{
	struct msi_counts *counts = data;
	const __be32 *p;
	u32 class;

	pr_debug("rtas_msi: counting %pOF\n", dn);

	p = of_get_property(dn, "class-code", NULL);
	class = p ? be32_to_cpup(p) : 0;

	if ((class >> 8) != PCI_CLASS_BRIDGE_PCI)
		counts->num_devices++;

	return NULL;
}

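/*
 * pci_traverse_device_nodes() callback: compare each device's
 * firmware-advertised MSI/MSI-X request against the per-device quota,
 * accumulating unclaimed vectors in ->spare and counting requestors
 * that want more than their share in ->over_quota.
 */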
static void *count_spare_msis(struct device_node *dn, void *data)
{
	struct msi_counts *counts = data;
	const __be32 *p;
	int req;

	if (dn == counts->requestor)
		req = counts->request;
	else {
		/* We don't know if a driver will try to use MSI or MSI-X,
		 * so we just have to punt and use the larger of the two. */
		req = 0;
		p = of_get_property(dn, "ibm,req#msi", NULL);
		if (p)
			req = be32_to_cpup(p);

		p = of_get_property(dn, "ibm,req#msi-x", NULL);
		if (p)
			req = max(req, (int)be32_to_cpup(p));
	}

	if (req < counts->quota)
		counts->spare += counts->quota - req;
	else if (req > counts->quota)
		counts->over_quota++;

	return NULL;
}

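/*
 * Work out how many MSIs @dev may use: split the PE's total evenly
 * among its devices, then, if @request still exceeds that share, hand
 * out any spare vectors among the over-quota requestors.
 */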
static int msi_quota_for_device(struct pci_dev *dev, int request)
{
	struct device_node *pe_dn;
	struct msi_counts counts;
	int total;

	pr_debug("rtas_msi: calc quota for %s, request %d\n", pci_name(dev),
		 request);

	pe_dn = find_pe_total_msi(dev, &total);
	if (!pe_dn)
		pe_dn = find_pe_dn(dev, &total);

	if (!pe_dn) {
		pr_err("rtas_msi: couldn't find PE for %s\n", pci_name(dev));
		goto out;
	}

	pr_debug("rtas_msi: found PE %pOF\n", pe_dn);

	memset(&counts, 0, sizeof(struct msi_counts));

	/* Work out how many devices we have below this PE */
	pci_traverse_device_nodes(pe_dn, count_non_bridge_devices, &counts);

	if (counts.num_devices == 0) {
		pr_err("rtas_msi: found 0 devices under PE for %s\n",
		       pci_name(dev));
		goto out;
	}

	counts.quota = total / counts.num_devices;
	if (request <= counts.quota)
		goto out;

	/* else, we have some more calculating to do */
	counts.requestor = pci_device_to_OF_node(dev);
	counts.request = request;
	pci_traverse_device_nodes(pe_dn, count_spare_msis, &counts);

	/* If the total doesn't divide evenly among the devices, we can
	 * use the remainder as spare MSIs for anyone that wants them. */
	counts.spare += total % counts.num_devices;

	/* Divide any spare by the number of over-quota requestors */
	if (counts.over_quota)
		counts.quota += counts.spare / counts.over_quota;

	/* And finally clamp the request to the possibly adjusted quota */
	request = min(counts.quota, request);

	pr_debug("rtas_msi: request clamped to quota %d\n", request);
out:
	of_node_put(pe_dn);

	return request;
}

static void rtas_hack_32bit_msi_gen2(struct pci_dev *pdev)
{
	u32 addr_hi, addr_lo;

	/*
	 * We should only get in here for IODA1 configs. This is based on the
	 * fact that we're using RTAS for MSIs, we don't have the 32 bit MSI
	 * RTAS support, and we are in a PCIe Gen2 slot.
	 */
	dev_info(&pdev->dev,
		 "rtas_msi: No 32 bit MSI firmware support, forcing 32 bit MSI\n");
	pci_read_config_dword(pdev, pdev->msi_cap + PCI_MSI_ADDRESS_HI, &addr_hi);
	addr_lo = 0xffff0000 | ((addr_hi >> (48 - 32)) << 4);
	pci_write_config_dword(pdev, pdev->msi_cap + PCI_MSI_ADDRESS_LO, addr_lo);
	pci_write_config_dword(pdev, pdev->msi_cap + PCI_MSI_ADDRESS_HI, 0);
}

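/*
 * Allocate @nvec_in MSI or MSI-X vectors for @pdev from firmware: check
 * the request against the device's ibm,req#msi[-x] property and the PE
 * quota, round MSI-X counts up to a power of two where the quota allows,
 * then try the explicit RTAS interfaces before falling back to the
 * legacy one.
 */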
static int rtas_prepare_msi_irqs(struct pci_dev *pdev, int nvec_in, int type,
				 msi_alloc_info_t *arg)
{
	struct pci_dn *pdn;
	int quota, rc;
	int nvec = nvec_in;
	int use_32bit_msi_hack = 0;

	if (type == PCI_CAP_ID_MSIX)
		rc = check_req_msix(pdev, nvec);
	else
		rc = check_req_msi(pdev, nvec);

	if (rc)
		return rc;

	quota = msi_quota_for_device(pdev, nvec);

	if (quota && quota < nvec)
		return quota;

	/*
	 * Firmware currently refuses any non-power-of-two allocation,
	 * so we round up if the quota allows it.
	 */
	if (type == PCI_CAP_ID_MSIX) {
		int m = roundup_pow_of_two(nvec);
		quota = msi_quota_for_device(pdev, m);

		if (quota >= m)
			nvec = m;
	}

	pdn = pci_get_pdn(pdev);

	/*
	 * Try the new more explicit firmware interface, if that fails fall
	 * back to the old interface. The old interface is known to never
	 * return MSI-Xs.
	 */
again:
	if (type == PCI_CAP_ID_MSI) {
		if (pdev->no_64bit_msi) {
			rc = rtas_change_msi(pdn, RTAS_CHANGE_32MSI_FN, nvec);
			if (rc < 0) {
				/*
				 * We only want to run the 32 bit MSI hack below if
				 * the max bus speed is Gen2 speed
				 */
				if (pdev->bus->max_bus_speed != PCIE_SPEED_5_0GT)
					return rc;

				use_32bit_msi_hack = 1;
			}
		} else
			rc = -1;

		if (rc < 0)
			rc = rtas_change_msi(pdn, RTAS_CHANGE_MSI_FN, nvec);

		if (rc < 0) {
			pr_debug("rtas_msi: trying the old firmware call.\n");
			rc = rtas_change_msi(pdn, RTAS_CHANGE_FN, nvec);
		}

		if (use_32bit_msi_hack && rc > 0)
			rtas_hack_32bit_msi_gen2(pdev);
	} else
		rc = rtas_change_msi(pdn, RTAS_CHANGE_MSIX_FN, nvec);

	if (rc != nvec) {
		if (nvec != nvec_in) {
			nvec = nvec_in;
			goto again;
		}
		pr_debug("rtas_msi: rtas_change_msi() failed\n");
		return rc;
	}

	return 0;
}

static int pseries_msi_ops_prepare(struct irq_domain *domain, struct device *dev,
				   int nvec, msi_alloc_info_t *arg)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	int type = pdev->msix_enabled ? PCI_CAP_ID_MSIX : PCI_CAP_ID_MSI;

	return rtas_prepare_msi_irqs(pdev, nvec, type, arg);
}

/*
 * ->msi_free() is called before irq_domain_free_irqs_top() when the
 * handler data is still available. Use that to clear the XIVE
 * controller data.
 */
static void pseries_msi_ops_msi_free(struct irq_domain *domain,
				     struct msi_domain_info *info,
				     unsigned int irq)
{
	if (xive_enabled())
		xive_irq_free_data(irq);
}

/*
 * RTAS can not disable one MSI at a time. It's all or nothing. Do it
 * at the end after all IRQs have been freed.
 */
static void pseries_msi_domain_free_irqs(struct irq_domain *domain,
					 struct device *dev)
{
	if (WARN_ON_ONCE(!dev_is_pci(dev)))
		return;

	__msi_domain_free_irqs(domain, dev);

	rtas_disable_msi(to_pci_dev(dev));
}

static struct msi_domain_ops pseries_pci_msi_domain_ops = {
	.msi_prepare	= pseries_msi_ops_prepare,
	.msi_free	= pseries_msi_ops_msi_free,
	.domain_free_irqs = pseries_msi_domain_free_irqs,
};

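/* Relay shutdown to the parent domain's irq_chip, if it provides one. */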
static void pseries_msi_shutdown(struct irq_data *d)
{
	d = d->parent_data;
	if (d->chip->irq_shutdown)
		d->chip->irq_shutdown(d);
}

static void pseries_msi_mask(struct irq_data *d)
{
	pci_msi_mask_irq(d);
	irq_chip_mask_parent(d);
}

static void pseries_msi_unmask(struct irq_data *d)
{
	pci_msi_unmask_irq(d);
	irq_chip_unmask_parent(d);
}

static void pseries_msi_write_msg(struct irq_data *data, struct msi_msg *msg)
{
	struct msi_desc *entry = irq_data_get_msi_desc(data);

	/*
	 * Do not update the MSI-X vector table. It's not strictly necessary
	 * because the table is initialized by the underlying hypervisor, PowerVM
	 * or QEMU/KVM. However, if the MSI-X vector entry is cleared, any further
	 * activation will fail. This can happen in some drivers (eg. IPR) which
	 * deactivate an IRQ used for testing MSI support.
	 */
	entry->msg = *msg;
}

static struct irq_chip pseries_pci_msi_irq_chip = {
	.name		= "pSeries-PCI-MSI",
	.irq_shutdown	= pseries_msi_shutdown,
	.irq_mask	= pseries_msi_mask,
	.irq_unmask	= pseries_msi_unmask,
	.irq_eoi	= irq_chip_eoi_parent,
	.irq_write_msi_msg	= pseries_msi_write_msg,
};

/*
 * Set MSI_FLAG_MSIX_CONTIGUOUS as there is no way to express to
 * firmware to request a discontiguous or non-zero based range of
 * MSI-X entries. Core code will reject such setup attempts.
 */
static struct msi_domain_info pseries_msi_domain_info = {
	.flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
		  MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX |
		  MSI_FLAG_MSIX_CONTIGUOUS),
	.ops   = &pseries_pci_msi_domain_ops,
	.chip  = &pseries_pci_msi_irq_chip,
};

static void pseries_msi_compose_msg(struct irq_data *data, struct msi_msg *msg)
{
	__pci_read_msi_msg(irq_data_get_msi_desc(data), msg);
}

static struct irq_chip pseries_msi_irq_chip = {
	.name			= "pSeries-MSI",
	.irq_shutdown		= pseries_msi_shutdown,
	.irq_mask		= irq_chip_mask_parent,
	.irq_unmask		= irq_chip_unmask_parent,
	.irq_eoi		= irq_chip_eoi_parent,
	.irq_set_affinity	= irq_chip_set_affinity_parent,
	.irq_compose_msi_msg	= pseries_msi_compose_msg,
};

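/*
 * Allocate one interrupt in the parent domain for @hwirq, described as
 * a two-cell fwspec: the hardware IRQ number and an edge-rising trigger.
 */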
static int pseries_irq_parent_domain_alloc(struct irq_domain *domain, unsigned int virq,
					   irq_hw_number_t hwirq)
{
	struct irq_fwspec parent_fwspec;
	int ret;

	parent_fwspec.fwnode = domain->parent->fwnode;
	parent_fwspec.param_count = 2;
	parent_fwspec.param[0] = hwirq;
	parent_fwspec.param[1] = IRQ_TYPE_EDGE_RISING;

	ret = irq_domain_alloc_irqs_parent(domain, virq, 1, &parent_fwspec);
	if (ret)
		return ret;

	return 0;
}

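/*
 * Query firmware for the hardware IRQ backing the first MSI entry, then
 * allocate parent interrupts for the whole (contiguous) range and hook
 * each one up to pseries_msi_irq_chip.
 */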
static int pseries_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
				    unsigned int nr_irqs, void *arg)
{
	struct pci_controller *phb = domain->host_data;
	msi_alloc_info_t *info = arg;
	struct msi_desc *desc = info->desc;
	struct pci_dev *pdev = msi_desc_to_pci_dev(desc);
	int hwirq;
	int i, ret;

	hwirq = rtas_query_irq_number(pci_get_pdn(pdev), desc->msi_index);
	if (hwirq < 0) {
		dev_err(&pdev->dev, "Failed to query HW IRQ: %d\n", hwirq);
		return hwirq;
	}

	dev_dbg(&pdev->dev, "%s bridge %pOF %d/%x #%d\n", __func__,
		phb->dn, virq, hwirq, nr_irqs);

	for (i = 0; i < nr_irqs; i++) {
		ret = pseries_irq_parent_domain_alloc(domain, virq + i, hwirq + i);
		if (ret)
			goto out;

		irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i,
					      &pseries_msi_irq_chip, domain->host_data);
	}

	return 0;

out:
	/* TODO: handle RTAS cleanup in ->msi_finish() ? */
	/* Free only the i parent interrupts that were actually allocated */
	irq_domain_free_irqs_parent(domain, virq, i);
	return ret;
}

static void pseries_irq_domain_free(struct irq_domain *domain, unsigned int virq,
				    unsigned int nr_irqs)
{
	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
	struct pci_controller *phb = irq_data_get_irq_chip_data(d);

	pr_debug("%s bridge %pOF %d #%d\n", __func__, phb->dn, virq, nr_irqs);

	/* XIVE domain data is cleared through ->msi_free() */
}

static const struct irq_domain_ops pseries_irq_domain_ops = {
	.alloc  = pseries_irq_domain_alloc,
	.free   = pseries_irq_domain_free,
};

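/*
 * Create the per-PHB domain hierarchy: a device domain of @count
 * interrupts stacked on the default (parent) domain, with the PCI/MSI
 * domain that drivers allocate from layered on top.
 */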
static int __pseries_msi_allocate_domains(struct pci_controller *phb,
					  unsigned int count)
{
	struct irq_domain *parent = irq_get_default_host();

	phb->fwnode = irq_domain_alloc_named_id_fwnode("pSeries-MSI",
						       phb->global_number);
	if (!phb->fwnode)
		return -ENOMEM;

	phb->dev_domain = irq_domain_create_hierarchy(parent, 0, count,
						      phb->fwnode,
						      &pseries_irq_domain_ops, phb);
	if (!phb->dev_domain) {
		pr_err("PCI: failed to create IRQ domain bridge %pOF (domain %d)\n",
		       phb->dn, phb->global_number);
		irq_domain_free_fwnode(phb->fwnode);
		return -ENOMEM;
	}

	phb->msi_domain = pci_msi_create_irq_domain(of_node_to_fwnode(phb->dn),
						    &pseries_msi_domain_info,
						    phb->dev_domain);
	if (!phb->msi_domain) {
		pr_err("PCI: failed to create MSI IRQ domain bridge %pOF (domain %d)\n",
		       phb->dn, phb->global_number);
		irq_domain_free_fwnode(phb->fwnode);
		irq_domain_remove(phb->dev_domain);
		return -ENOMEM;
	}

	return 0;
}

int pseries_msi_allocate_domains(struct pci_controller *phb)
{
	int count;

	if (!__find_pe_total_msi(phb->dn, &count)) {
		pr_err("PCI: failed to find MSIs for bridge %pOF (domain %d)\n",
		       phb->dn, phb->global_number);
		return -ENOSPC;
	}

	return __pseries_msi_allocate_domains(phb, count);
}

void pseries_msi_free_domains(struct pci_controller *phb)
{
	if (phb->msi_domain)
		irq_domain_remove(phb->msi_domain);
	if (phb->dev_domain)
		irq_domain_remove(phb->dev_domain);
	if (phb->fwnode)
		irq_domain_free_fwnode(phb->fwnode);
}

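/*
 * The device may have been left with MSIs configured by firmware or by
 * a previous kernel. If it also has an LSI assigned, clear the stale
 * MSIs so the device starts out using the LSI.
 */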
static void rtas_msi_pci_irq_fixup(struct pci_dev *pdev)
{
	/* No LSI -> leave MSIs (if any) configured */
	if (!pdev->irq) {
		dev_dbg(&pdev->dev, "rtas_msi: no LSI, nothing to do.\n");
		return;
	}

	/* No MSI -> MSIs can't have been assigned by fw, leave LSI */
	if (check_req_msi(pdev, 1) && check_req_msix(pdev, 1)) {
		dev_dbg(&pdev->dev, "rtas_msi: no req#msi/x, nothing to do.\n");
		return;
	}

	dev_dbg(&pdev->dev, "rtas_msi: disabling existing MSI.\n");
	rtas_disable_msi(pdev);
}

static int rtas_msi_init(void)
{
	query_token  = rtas_token("ibm,query-interrupt-source-number");
	change_token = rtas_token("ibm,change-msi");

	if ((query_token == RTAS_UNKNOWN_SERVICE) ||
	    (change_token == RTAS_UNKNOWN_SERVICE)) {
		pr_debug("rtas_msi: no RTAS tokens, no MSI support.\n");
		return -1;
	}

	pr_debug("rtas_msi: Registering RTAS MSI callbacks.\n");

	WARN_ON(ppc_md.pci_irq_fixup);
	ppc_md.pci_irq_fixup = rtas_msi_pci_irq_fixup;

	return 0;
}
machine_arch_initcall(pseries, rtas_msi_init);