pci_irq.c 12 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530
  1. // SPDX-License-Identifier: GPL-2.0
  2. #define KMSG_COMPONENT "zpci"
  3. #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
  4. #include <linux/kernel.h>
  5. #include <linux/irq.h>
  6. #include <linux/kernel_stat.h>
  7. #include <linux/pci.h>
  8. #include <linux/msi.h>
  9. #include <linux/smp.h>
  10. #include <asm/isc.h>
  11. #include <asm/airq.h>
  12. #include <asm/tpi.h>
/* IRQ delivery mode, chosen once at boot in zpci_irq_init(). */
static enum {FLOATING, DIRECTED} irq_delivery;

/*
 * summary bit vector
 * FLOATING - summary bit per function
 * DIRECTED - summary bit per cpu (only used in fallback path)
 */
static struct airq_iv *zpci_sbv;

/*
 * interrupt bit vectors
 * FLOATING - interrupt bit vector per function
 * DIRECTED - interrupt bit vector per cpu
 */
static struct airq_iv **zpci_ibv;
  26. /* Modify PCI: Register floating adapter interruptions */
  27. static int zpci_set_airq(struct zpci_dev *zdev)
  28. {
  29. u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_REG_INT);
  30. struct zpci_fib fib = {0};
  31. u8 status;
  32. fib.fmt0.isc = PCI_ISC;
  33. fib.fmt0.sum = 1; /* enable summary notifications */
  34. fib.fmt0.noi = airq_iv_end(zdev->aibv);
  35. fib.fmt0.aibv = virt_to_phys(zdev->aibv->vector);
  36. fib.fmt0.aibvo = 0; /* each zdev has its own interrupt vector */
  37. fib.fmt0.aisb = virt_to_phys(zpci_sbv->vector) + (zdev->aisb / 64) * 8;
  38. fib.fmt0.aisbo = zdev->aisb & 63;
  39. fib.gd = zdev->gisa;
  40. return zpci_mod_fc(req, &fib, &status) ? -EIO : 0;
  41. }
  42. /* Modify PCI: Unregister floating adapter interruptions */
  43. static int zpci_clear_airq(struct zpci_dev *zdev)
  44. {
  45. u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_DEREG_INT);
  46. struct zpci_fib fib = {0};
  47. u8 cc, status;
  48. fib.gd = zdev->gisa;
  49. cc = zpci_mod_fc(req, &fib, &status);
  50. if (cc == 3 || (cc == 1 && status == 24))
  51. /* Function already gone or IRQs already deregistered. */
  52. cc = 0;
  53. return cc ? -EIO : 0;
  54. }
  55. /* Modify PCI: Register CPU directed interruptions */
  56. static int zpci_set_directed_irq(struct zpci_dev *zdev)
  57. {
  58. u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_REG_INT_D);
  59. struct zpci_fib fib = {0};
  60. u8 status;
  61. fib.fmt = 1;
  62. fib.fmt1.noi = zdev->msi_nr_irqs;
  63. fib.fmt1.dibvo = zdev->msi_first_bit;
  64. fib.gd = zdev->gisa;
  65. return zpci_mod_fc(req, &fib, &status) ? -EIO : 0;
  66. }
  67. /* Modify PCI: Unregister CPU directed interruptions */
  68. static int zpci_clear_directed_irq(struct zpci_dev *zdev)
  69. {
  70. u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_DEREG_INT_D);
  71. struct zpci_fib fib = {0};
  72. u8 cc, status;
  73. fib.fmt = 1;
  74. fib.gd = zdev->gisa;
  75. cc = zpci_mod_fc(req, &fib, &status);
  76. if (cc == 3 || (cc == 1 && status == 24))
  77. /* Function already gone or IRQs already deregistered. */
  78. cc = 0;
  79. return cc ? -EIO : 0;
  80. }
  81. /* Register adapter interruptions */
  82. static int zpci_set_irq(struct zpci_dev *zdev)
  83. {
  84. int rc;
  85. if (irq_delivery == DIRECTED)
  86. rc = zpci_set_directed_irq(zdev);
  87. else
  88. rc = zpci_set_airq(zdev);
  89. if (!rc)
  90. zdev->irqs_registered = 1;
  91. return rc;
  92. }
  93. /* Clear adapter interruptions */
  94. static int zpci_clear_irq(struct zpci_dev *zdev)
  95. {
  96. int rc;
  97. if (irq_delivery == DIRECTED)
  98. rc = zpci_clear_directed_irq(zdev);
  99. else
  100. rc = zpci_clear_airq(zdev);
  101. if (!rc)
  102. zdev->irqs_registered = 0;
  103. return rc;
  104. }
  105. static int zpci_set_irq_affinity(struct irq_data *data, const struct cpumask *dest,
  106. bool force)
  107. {
  108. struct msi_desc *entry = irq_get_msi_desc(data->irq);
  109. struct msi_msg msg = entry->msg;
  110. int cpu_addr = smp_cpu_get_cpu_address(cpumask_first(dest));
  111. msg.address_lo &= 0xff0000ff;
  112. msg.address_lo |= (cpu_addr << 8);
  113. pci_write_msi_msg(data->irq, &msg);
  114. return IRQ_SET_MASK_OK;
  115. }
/*
 * Minimal irq_chip for zPCI MSI interrupts.  .irq_set_affinity is filled
 * in at boot by zpci_directed_irq_init() when CPU directed delivery is
 * available.
 */
static struct irq_chip zpci_irq_chip = {
	.name = "PCI-MSI",
	.irq_unmask = pci_msi_unmask_irq,
	.irq_mask = pci_msi_mask_irq,
};
/*
 * Drain this CPU's directed interrupt bit vector and dispatch the mapped
 * Linux irqs.
 *
 * @rescan: when true, after the first complete scan re-enable directed
 *	    delivery (SIC_IRQ_MODE_D_SINGLE) and scan once more so that
 *	    bits set in the window before re-enablement are not lost.
 */
static void zpci_handle_cpu_local_irq(bool rescan)
{
	struct airq_iv *dibv = zpci_ibv[smp_processor_id()];
	union zpci_sic_iib iib = {{0}};
	unsigned long bit;
	int irqs_on = 0;

	for (bit = 0;;) {
		/* Scan the directed IRQ bit vector */
		bit = airq_iv_scan(dibv, bit, airq_iv_end(dibv));
		if (bit == -1UL) {
			if (!rescan || irqs_on++)
				/* End of second scan with interrupts on. */
				break;
			/* First scan complete, reenable interrupts. */
			if (zpci_set_irq_ctrl(SIC_IRQ_MODE_D_SINGLE, PCI_ISC, &iib))
				break;
			bit = 0;
			continue;
		}
		inc_irq_stat(IRQIO_MSI);
		generic_handle_irq(airq_iv_get_data(dibv, bit));
	}
}
/*
 * Per-CPU state for the directed IRQ fallback path: a CSD to kick the
 * target CPU asynchronously and a counter of pending invocations so
 * that repeated kicks coalesce into a single remote call.
 */
struct cpu_irq_data {
	call_single_data_t csd;
	atomic_t scheduled;
};
static DEFINE_PER_CPU_SHARED_ALIGNED(struct cpu_irq_data, irq_data);
/*
 * CSD callback run on the target CPU by the fallback path.  @data points
 * at that CPU's 'scheduled' counter; keep draining the local bit vector
 * until the counter reaches zero, so kicks that arrived while we were
 * already running are serviced by this invocation instead of another IPI.
 */
static void zpci_handle_remote_irq(void *data)
{
	atomic_t *scheduled = data;

	do {
		zpci_handle_cpu_local_irq(false);
	} while (atomic_dec_return(scheduled));
}
/*
 * Floating fallback for directed delivery: scan the per-CPU summary bit
 * vector and schedule zpci_handle_remote_irq() on every CPU whose bit is
 * set.  A CPU that is already scheduled (counter > 1) is not kicked
 * again - its running handler will loop until the counter drains.
 */
static void zpci_handle_fallback_irq(void)
{
	struct cpu_irq_data *cpu_data;
	union zpci_sic_iib iib = {{0}};
	unsigned long cpu;
	int irqs_on = 0;

	for (cpu = 0;;) {
		cpu = airq_iv_scan(zpci_sbv, cpu, airq_iv_end(zpci_sbv));
		if (cpu == -1UL) {
			if (irqs_on++)
				/* End of second scan with interrupts on. */
				break;
			/* First scan complete, reenable interrupts. */
			if (zpci_set_irq_ctrl(SIC_IRQ_MODE_SINGLE, PCI_ISC, &iib))
				break;
			cpu = 0;
			continue;
		}
		cpu_data = &per_cpu(irq_data, cpu);
		if (atomic_inc_return(&cpu_data->scheduled) > 1)
			continue;

		INIT_CSD(&cpu_data->csd, zpci_handle_remote_irq, &cpu_data->scheduled);
		smp_call_function_single_async(cpu, &cpu_data->csd);
	}
}
  181. static void zpci_directed_irq_handler(struct airq_struct *airq,
  182. struct tpi_info *tpi_info)
  183. {
  184. bool floating = !tpi_info->directed_irq;
  185. if (floating) {
  186. inc_irq_stat(IRQIO_PCF);
  187. zpci_handle_fallback_irq();
  188. } else {
  189. inc_irq_stat(IRQIO_PCD);
  190. zpci_handle_cpu_local_irq(true);
  191. }
  192. }
/*
 * Adapter interrupt entry point for floating delivery: walk the adapter
 * summary bit vector to find functions with pending interrupts, then
 * walk each function's own interrupt vector and dispatch the mapped
 * Linux irqs.  A second scan after re-enabling interrupts closes the
 * race with bits set during the first scan.
 */
static void zpci_floating_irq_handler(struct airq_struct *airq,
				      struct tpi_info *tpi_info)
{
	union zpci_sic_iib iib = {{0}};
	unsigned long si, ai;
	struct airq_iv *aibv;
	int irqs_on = 0;

	inc_irq_stat(IRQIO_PCF);
	for (si = 0;;) {
		/* Scan adapter summary indicator bit vector */
		si = airq_iv_scan(zpci_sbv, si, airq_iv_end(zpci_sbv));
		if (si == -1UL) {
			if (irqs_on++)
				/* End of second scan with interrupts on. */
				break;
			/* First scan complete, reenable interrupts. */
			if (zpci_set_irq_ctrl(SIC_IRQ_MODE_SINGLE, PCI_ISC, &iib))
				break;
			si = 0;
			continue;
		}

		/* Scan the adapter interrupt vector for this device. */
		aibv = zpci_ibv[si];
		for (ai = 0;;) {
			ai = airq_iv_scan(aibv, ai, airq_iv_end(aibv));
			if (ai == -1UL)
				break;
			inc_irq_stat(IRQIO_MSI);
			/* Lock the bit while its irq is handled. */
			airq_iv_lock(aibv, ai);
			generic_handle_irq(airq_iv_get_data(aibv, ai));
			airq_iv_unlock(aibv, ai);
		}
	}
}
/*
 * Allocate and wire up MSI interrupts for a zPCI function.
 *
 * @pdev: the PCI device
 * @nvec: number of vectors requested
 * @type: PCI_CAP_ID_MSI or PCI_CAP_ID_MSIX
 *
 * Returns 0 on success, a positive vector count if fewer than @nvec
 * vectors could be provided, or a negative errno.  On failure the
 * caller is expected to tear down via arch_teardown_msi_irqs().
 */
int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
{
	struct zpci_dev *zdev = to_zpci(pdev);
	unsigned int hwirq, msi_vecs, cpu;
	unsigned long bit;
	struct msi_desc *msi;
	struct msi_msg msg;
	int cpu_addr;
	int rc, irq;

	/* Mark per-mode resources as unallocated for teardown. */
	zdev->aisb = -1UL;
	zdev->msi_first_bit = -1U;
	/* Multiple vectors are only supported for MSI-X here. */
	if (type == PCI_CAP_ID_MSI && nvec > 1)
		return 1;
	msi_vecs = min_t(unsigned int, nvec, zdev->max_msi);

	if (irq_delivery == DIRECTED) {
		/* Allocate cpu vector bits */
		bit = airq_iv_alloc(zpci_ibv[0], msi_vecs);
		if (bit == -1UL)
			return -EIO;
	} else {
		/* Allocate adapter summary indicator bit */
		bit = airq_iv_alloc_bit(zpci_sbv);
		if (bit == -1UL)
			return -EIO;
		zdev->aisb = bit;

		/* Create adapter interrupt vector */
		zdev->aibv = airq_iv_create(msi_vecs, AIRQ_IV_DATA | AIRQ_IV_BITLOCK, NULL);
		if (!zdev->aibv)
			return -ENOMEM;

		/* Wire up shortcut pointer */
		zpci_ibv[bit] = zdev->aibv;
		/* Each function has its own interrupt vector */
		bit = 0;
	}

	/* Request MSI interrupts */
	hwirq = bit;
	msi_for_each_desc(msi, &pdev->dev, MSI_DESC_NOTASSOCIATED) {
		rc = -EIO;
		if (hwirq - bit >= msi_vecs)
			break;
		irq = __irq_alloc_descs(-1, 0, 1, 0, THIS_MODULE,
				(irq_delivery == DIRECTED) ?
				msi->affinity : NULL);
		if (irq < 0)
			return -ENOMEM;
		rc = irq_set_msi_desc(irq, msi);
		if (rc)
			return rc;
		irq_set_chip_and_handler(irq, &zpci_irq_chip,
					 handle_percpu_irq);
		/* MSI data carries the vector offset within this function. */
		msg.data = hwirq - bit;
		if (irq_delivery == DIRECTED) {
			if (msi->affinity)
				cpu = cpumask_first(&msi->affinity->mask);
			else
				cpu = 0;
			cpu_addr = smp_cpu_get_cpu_address(cpu);

			/* Encode the target CPU address into address_lo. */
			msg.address_lo = zdev->msi_addr & 0xff0000ff;
			msg.address_lo |= (cpu_addr << 8);

			/*
			 * The per-CPU vectors look the same (see
			 * zpci_directed_irq_init()), so record the irq
			 * number in every CPU's vector.
			 */
			for_each_possible_cpu(cpu) {
				airq_iv_set_data(zpci_ibv[cpu], hwirq, irq);
			}
		} else {
			msg.address_lo = zdev->msi_addr & 0xffffffff;
			airq_iv_set_data(zdev->aibv, hwirq, irq);
		}
		msg.address_hi = zdev->msi_addr >> 32;
		pci_write_msi_msg(irq, &msg);
		hwirq++;
	}

	zdev->msi_first_bit = bit;
	zdev->msi_nr_irqs = msi_vecs;

	/* Register the interrupts with the machine. */
	rc = zpci_set_irq(zdev);
	if (rc)
		return rc;

	return (msi_vecs == nvec) ? 0 : msi_vecs;
}
/*
 * Release all MSI interrupts of a zPCI function: deregister with the
 * machine, free the Linux irq descriptors and give back the bit-vector
 * resources allocated in arch_setup_msi_irqs().
 */
void arch_teardown_msi_irqs(struct pci_dev *pdev)
{
	struct zpci_dev *zdev = to_zpci(pdev);
	struct msi_desc *msi;
	int rc;

	/* Disable interrupts */
	rc = zpci_clear_irq(zdev);
	if (rc)
		/* Deregistration failed - keep resources allocated. */
		return;

	/* Release MSI interrupts */
	msi_for_each_desc(msi, &pdev->dev, MSI_DESC_ASSOCIATED) {
		irq_set_msi_desc(msi->irq, NULL);
		irq_free_desc(msi->irq);
		msi->msg.address_lo = 0;
		msi->msg.address_hi = 0;
		msi->msg.data = 0;
		msi->irq = 0;
	}

	/* Give back the summary bit (floating mode only). */
	if (zdev->aisb != -1UL) {
		zpci_ibv[zdev->aisb] = NULL;
		airq_iv_free_bit(zpci_sbv, zdev->aisb);
		zdev->aisb = -1UL;
	}
	/* Release the function's private interrupt vector. */
	if (zdev->aibv) {
		airq_iv_release(zdev->aibv);
		zdev->aibv = NULL;
	}

	/* Give back the range of cpu vector bits (directed mode only). */
	if ((irq_delivery == DIRECTED) && zdev->msi_first_bit != -1U)
		airq_iv_free(zpci_ibv[0], zdev->msi_first_bit, zdev->msi_nr_irqs);
}
  334. bool arch_restore_msi_irqs(struct pci_dev *pdev)
  335. {
  336. struct zpci_dev *zdev = to_zpci(pdev);
  337. if (!zdev->irqs_registered)
  338. zpci_set_irq(zdev);
  339. return true;
  340. }
/*
 * Adapter interrupt registration for the PCI ISC.  The handler is
 * switched to zpci_directed_irq_handler() in zpci_irq_init() when CPU
 * directed delivery is used.
 */
static struct airq_struct zpci_airq = {
	.handler = zpci_floating_irq_handler,
	.isc = PCI_ISC,
};
  345. static void __init cpu_enable_directed_irq(void *unused)
  346. {
  347. union zpci_sic_iib iib = {{0}};
  348. union zpci_sic_iib ziib = {{0}};
  349. iib.cdiib.dibv_addr = (u64) zpci_ibv[smp_processor_id()]->vector;
  350. zpci_set_irq_ctrl(SIC_IRQ_MODE_SET_CPU, 0, &iib);
  351. zpci_set_irq_ctrl(SIC_IRQ_MODE_D_SINGLE, PCI_ISC, &ziib);
  352. }
  353. static int __init zpci_directed_irq_init(void)
  354. {
  355. union zpci_sic_iib iib = {{0}};
  356. unsigned int cpu;
  357. zpci_sbv = airq_iv_create(num_possible_cpus(), 0, NULL);
  358. if (!zpci_sbv)
  359. return -ENOMEM;
  360. iib.diib.isc = PCI_ISC;
  361. iib.diib.nr_cpus = num_possible_cpus();
  362. iib.diib.disb_addr = virt_to_phys(zpci_sbv->vector);
  363. zpci_set_irq_ctrl(SIC_IRQ_MODE_DIRECT, 0, &iib);
  364. zpci_ibv = kcalloc(num_possible_cpus(), sizeof(*zpci_ibv),
  365. GFP_KERNEL);
  366. if (!zpci_ibv)
  367. return -ENOMEM;
  368. for_each_possible_cpu(cpu) {
  369. /*
  370. * Per CPU IRQ vectors look the same but bit-allocation
  371. * is only done on the first vector.
  372. */
  373. zpci_ibv[cpu] = airq_iv_create(cache_line_size() * BITS_PER_BYTE,
  374. AIRQ_IV_DATA |
  375. AIRQ_IV_CACHELINE |
  376. (!cpu ? AIRQ_IV_ALLOC : 0), NULL);
  377. if (!zpci_ibv[cpu])
  378. return -ENOMEM;
  379. }
  380. on_each_cpu(cpu_enable_directed_irq, NULL, 1);
  381. zpci_irq_chip.irq_set_affinity = zpci_set_irq_affinity;
  382. return 0;
  383. }
  384. static int __init zpci_floating_irq_init(void)
  385. {
  386. zpci_ibv = kcalloc(ZPCI_NR_DEVICES, sizeof(*zpci_ibv), GFP_KERNEL);
  387. if (!zpci_ibv)
  388. return -ENOMEM;
  389. zpci_sbv = airq_iv_create(ZPCI_NR_DEVICES, AIRQ_IV_ALLOC, NULL);
  390. if (!zpci_sbv)
  391. goto out_free;
  392. return 0;
  393. out_free:
  394. kfree(zpci_ibv);
  395. return -ENOMEM;
  396. }
/*
 * Initialize zPCI adapter interrupt handling: choose the delivery mode,
 * register the adapter interrupt for the PCI ISC and set up the
 * mode-specific bit vectors.  Returns 0 on success, negative errno
 * otherwise.
 */
int __init zpci_irq_init(void)
{
	union zpci_sic_iib iib = {{0}};
	int rc;

	/* Prefer directed delivery when available, unless forced off. */
	irq_delivery = sclp.has_dirq ? DIRECTED : FLOATING;
	if (s390_pci_force_floating)
		irq_delivery = FLOATING;
	if (irq_delivery == DIRECTED)
		zpci_airq.handler = zpci_directed_irq_handler;

	rc = register_adapter_interrupt(&zpci_airq);
	if (rc)
		goto out;
	/* Set summary to 1 to be called every time for the ISC. */
	*zpci_airq.lsi_ptr = 1;

	switch (irq_delivery) {
	case FLOATING:
		rc = zpci_floating_irq_init();
		break;
	case DIRECTED:
		rc = zpci_directed_irq_init();
		break;
	}
	if (rc)
		goto out_airq;

	/*
	 * Enable floating IRQs (with suppression after one IRQ). When using
	 * directed IRQs this enables the fallback path.
	 */
	zpci_set_irq_ctrl(SIC_IRQ_MODE_SINGLE, PCI_ISC, &iib);

	return 0;

out_airq:
	unregister_adapter_interrupt(&zpci_airq);
out:
	return rc;
}
/*
 * Undo zpci_irq_init(): release the per-CPU or per-function bit vectors
 * and unregister the adapter interrupt.
 */
void __init zpci_irq_exit(void)
{
	unsigned int cpu;

	if (irq_delivery == DIRECTED) {
		/* One interrupt bit vector was created per possible CPU. */
		for_each_possible_cpu(cpu) {
			airq_iv_release(zpci_ibv[cpu]);
		}
	}
	kfree(zpci_ibv);
	if (zpci_sbv)
		airq_iv_release(zpci_sbv);
	unregister_adapter_interrupt(&zpci_airq);
}