vfio_pci_intrs.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * VFIO PCI interrupt handling
 *
 * Copyright (C) 2012 Red Hat, Inc.  All rights reserved.
 *     Author: Alex Williamson <alex.williamson@redhat.com>
 *
 * Derived from original vfio:
 * Copyright 2010 Cisco Systems, Inc.  All rights reserved.
 * Author: Tom Lyon, pugs@cisco.com
 */

#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/eventfd.h>
#include <linux/msi.h>
#include <linux/pci.h>
#include <linux/file.h>
#include <linux/vfio.h>
#include <linux/wait.h>
#include <linux/slab.h>

#include "vfio_pci_priv.h"
struct vfio_pci_irq_ctx {
	struct eventfd_ctx	*trigger;
	struct virqfd		*unmask;
	struct virqfd		*mask;
	char			*name;
	bool			masked;
	struct irq_bypass_producer	producer;
};

static bool irq_is(struct vfio_pci_core_device *vdev, int type)
{
	return vdev->irq_type == type;
}

static bool is_intx(struct vfio_pci_core_device *vdev)
{
	return vdev->irq_type == VFIO_PCI_INTX_IRQ_INDEX;
}

static bool is_irq_none(struct vfio_pci_core_device *vdev)
{
	return !(vdev->irq_type == VFIO_PCI_INTX_IRQ_INDEX ||
		 vdev->irq_type == VFIO_PCI_MSI_IRQ_INDEX ||
		 vdev->irq_type == VFIO_PCI_MSIX_IRQ_INDEX);
}
/*
 * INTx
 */
static void vfio_send_intx_eventfd(void *opaque, void *unused)
{
	struct vfio_pci_core_device *vdev = opaque;

	if (likely(is_intx(vdev) && !vdev->virq_disabled))
		eventfd_signal(vdev->ctx[0].trigger, 1);
}

/* Returns true if the INTx vfio_pci_irq_ctx.masked value is changed. */
bool vfio_pci_intx_mask(struct vfio_pci_core_device *vdev)
{
	struct pci_dev *pdev = vdev->pdev;
	unsigned long flags;
	bool masked_changed = false;

	spin_lock_irqsave(&vdev->irqlock, flags);

	/*
	 * Masking can come from interrupt, ioctl, or config space
	 * via INTx disable. The latter means this can get called
	 * even when not using intx delivery. In this case, just
	 * try to have the physical bit follow the virtual bit.
	 */
	if (unlikely(!is_intx(vdev))) {
		if (vdev->pci_2_3)
			pci_intx(pdev, 0);
	} else if (!vdev->ctx[0].masked) {
		/*
		 * Can't use check_and_mask here because we always want to
		 * mask, not just when something is pending.
		 */
		if (vdev->pci_2_3)
			pci_intx(pdev, 0);
		else
			disable_irq_nosync(pdev->irq);

		vdev->ctx[0].masked = true;
		masked_changed = true;
	}

	spin_unlock_irqrestore(&vdev->irqlock, flags);

	return masked_changed;
}

/*
 * If this is triggered by an eventfd, we can't call eventfd_signal
 * or else we'll deadlock on the eventfd wait queue. Return >0 when
 * a signal is necessary, which can then be handled via a work queue
 * or directly depending on the caller.
 */
static int vfio_pci_intx_unmask_handler(void *opaque, void *unused)
{
	struct vfio_pci_core_device *vdev = opaque;
	struct pci_dev *pdev = vdev->pdev;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&vdev->irqlock, flags);

	/*
	 * Unmasking comes from ioctl or config, so again, have the
	 * physical bit follow the virtual even when not using INTx.
	 */
	if (unlikely(!is_intx(vdev))) {
		if (vdev->pci_2_3)
			pci_intx(pdev, 1);
	} else if (vdev->ctx[0].masked && !vdev->virq_disabled) {
		/*
		 * A pending interrupt here would immediately trigger,
		 * but we can avoid that overhead by just re-sending
		 * the interrupt to the user.
		 */
		if (vdev->pci_2_3) {
			if (!pci_check_and_unmask_intx(pdev))
				ret = 1;
		} else
			enable_irq(pdev->irq);

		vdev->ctx[0].masked = (ret > 0);
	}

	spin_unlock_irqrestore(&vdev->irqlock, flags);

	return ret;
}

void vfio_pci_intx_unmask(struct vfio_pci_core_device *vdev)
{
	if (vfio_pci_intx_unmask_handler(vdev, NULL) > 0)
		vfio_send_intx_eventfd(vdev, NULL);
}

static irqreturn_t vfio_intx_handler(int irq, void *dev_id)
{
	struct vfio_pci_core_device *vdev = dev_id;
	unsigned long flags;
	int ret = IRQ_NONE;

	spin_lock_irqsave(&vdev->irqlock, flags);

	if (!vdev->pci_2_3) {
		disable_irq_nosync(vdev->pdev->irq);
		vdev->ctx[0].masked = true;
		ret = IRQ_HANDLED;
	} else if (!vdev->ctx[0].masked &&  /* may be shared */
		   pci_check_and_mask_intx(vdev->pdev)) {
		vdev->ctx[0].masked = true;
		ret = IRQ_HANDLED;
	}

	spin_unlock_irqrestore(&vdev->irqlock, flags);

	if (ret == IRQ_HANDLED)
		vfio_send_intx_eventfd(vdev, NULL);

	return ret;
}
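
/*
 * Allocate the single INTx context and align the physical mask state with
 * the virtual one; the eventfd trigger is attached separately via
 * vfio_intx_set_signal().
 */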
static int vfio_intx_enable(struct vfio_pci_core_device *vdev)
{
	if (!is_irq_none(vdev))
		return -EINVAL;

	if (!vdev->pdev->irq)
		return -ENODEV;

	vdev->ctx = kzalloc(sizeof(struct vfio_pci_irq_ctx), GFP_KERNEL);
	if (!vdev->ctx)
		return -ENOMEM;

	vdev->num_ctx = 1;

	/*
	 * If the virtual interrupt is masked, restore it. Devices
	 * supporting DisINTx can be masked at the hardware level
	 * here, non-PCI-2.3 devices will have to wait until the
	 * interrupt is enabled.
	 */
	vdev->ctx[0].masked = vdev->virq_disabled;
	if (vdev->pci_2_3)
		pci_intx(vdev->pdev, !vdev->ctx[0].masked);

	vdev->irq_type = VFIO_PCI_INTX_IRQ_INDEX;

	return 0;
}
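
/*
 * Replace the INTx eventfd trigger. Any existing trigger and IRQ are torn
 * down first; fd < 0 leaves the interrupt disconnected.
 */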
static int vfio_intx_set_signal(struct vfio_pci_core_device *vdev, int fd)
{
	struct pci_dev *pdev = vdev->pdev;
	unsigned long irqflags = IRQF_SHARED;
	struct eventfd_ctx *trigger;
	unsigned long flags;
	int ret;

	if (vdev->ctx[0].trigger) {
		free_irq(pdev->irq, vdev);
		kfree(vdev->ctx[0].name);
		eventfd_ctx_put(vdev->ctx[0].trigger);
		vdev->ctx[0].trigger = NULL;
	}

	if (fd < 0) /* Disable only */
		return 0;

	vdev->ctx[0].name = kasprintf(GFP_KERNEL, "vfio-intx(%s)",
				      pci_name(pdev));
	if (!vdev->ctx[0].name)
		return -ENOMEM;

	trigger = eventfd_ctx_fdget(fd);
	if (IS_ERR(trigger)) {
		kfree(vdev->ctx[0].name);
		return PTR_ERR(trigger);
	}

	vdev->ctx[0].trigger = trigger;

	if (!vdev->pci_2_3)
		irqflags = 0;

	ret = request_irq(pdev->irq, vfio_intx_handler,
			  irqflags, vdev->ctx[0].name, vdev);
	if (ret) {
		vdev->ctx[0].trigger = NULL;
		kfree(vdev->ctx[0].name);
		eventfd_ctx_put(trigger);
		return ret;
	}

	/*
	 * INTx disable will stick across the new irq setup,
	 * disable_irq won't.
	 */
	spin_lock_irqsave(&vdev->irqlock, flags);
	if (!vdev->pci_2_3 && vdev->ctx[0].masked)
		disable_irq_nosync(pdev->irq);
	spin_unlock_irqrestore(&vdev->irqlock, flags);

	return 0;
}

static void vfio_intx_disable(struct vfio_pci_core_device *vdev)
{
	vfio_virqfd_disable(&vdev->ctx[0].unmask);
	vfio_virqfd_disable(&vdev->ctx[0].mask);
	vfio_intx_set_signal(vdev, -1);
	vdev->irq_type = VFIO_PCI_NUM_IRQS;
	vdev->num_ctx = 0;
	kfree(vdev->ctx);
}
/*
 * MSI/MSI-X
 */
static irqreturn_t vfio_msihandler(int irq, void *arg)
{
	struct eventfd_ctx *trigger = arg;

	eventfd_signal(trigger, 1);
	return IRQ_HANDLED;
}
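
/*
 * Allocate per-vector contexts and enable MSI or MSI-X on the device. If
 * fewer than nvec vectors can be allocated, everything is rolled back and
 * the supported vector count (or an error) is returned.
 */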
static int vfio_msi_enable(struct vfio_pci_core_device *vdev, int nvec, bool msix)
{
	struct pci_dev *pdev = vdev->pdev;
	unsigned int flag = msix ? PCI_IRQ_MSIX : PCI_IRQ_MSI;
	int ret;
	u16 cmd;

	if (!is_irq_none(vdev))
		return -EINVAL;

	vdev->ctx = kcalloc(nvec, sizeof(struct vfio_pci_irq_ctx), GFP_KERNEL);
	if (!vdev->ctx)
		return -ENOMEM;

	/* return the number of supported vectors if we can't get all: */
	cmd = vfio_pci_memory_lock_and_enable(vdev);
	ret = pci_alloc_irq_vectors(pdev, 1, nvec, flag);
	if (ret < nvec) {
		if (ret > 0)
			pci_free_irq_vectors(pdev);
		vfio_pci_memory_unlock_and_restore(vdev, cmd);
		kfree(vdev->ctx);
		return ret;
	}
	vfio_pci_memory_unlock_and_restore(vdev, cmd);

	vdev->num_ctx = nvec;
	vdev->irq_type = msix ? VFIO_PCI_MSIX_IRQ_INDEX :
				VFIO_PCI_MSI_IRQ_INDEX;

	if (!msix) {
		/*
		 * Compute the virtual hardware field for max msi vectors -
		 * it is the log base 2 of the number of vectors.
		 */
		vdev->msi_qmax = fls(nvec * 2 - 1) - 1;
	}

	return 0;
}
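
/*
 * (Re)connect a single MSI/MSI-X vector to an eventfd. Any existing trigger
 * is released first; fd < 0 simply disconnects the vector.
 */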
static int vfio_msi_set_vector_signal(struct vfio_pci_core_device *vdev,
				      int vector, int fd, bool msix)
{
	struct pci_dev *pdev = vdev->pdev;
	struct eventfd_ctx *trigger;
	int irq, ret;
	u16 cmd;

	if (vector < 0 || vector >= vdev->num_ctx)
		return -EINVAL;

	irq = pci_irq_vector(pdev, vector);

	if (vdev->ctx[vector].trigger) {
		irq_bypass_unregister_producer(&vdev->ctx[vector].producer);

		cmd = vfio_pci_memory_lock_and_enable(vdev);
		free_irq(irq, vdev->ctx[vector].trigger);
		vfio_pci_memory_unlock_and_restore(vdev, cmd);

		kfree(vdev->ctx[vector].name);
		eventfd_ctx_put(vdev->ctx[vector].trigger);
		vdev->ctx[vector].trigger = NULL;
	}

	if (fd < 0)
		return 0;

	vdev->ctx[vector].name = kasprintf(GFP_KERNEL, "vfio-msi%s[%d](%s)",
					   msix ? "x" : "", vector,
					   pci_name(pdev));
	if (!vdev->ctx[vector].name)
		return -ENOMEM;

	trigger = eventfd_ctx_fdget(fd);
	if (IS_ERR(trigger)) {
		kfree(vdev->ctx[vector].name);
		return PTR_ERR(trigger);
	}

	/*
	 * The MSIx vector table resides in device memory which may be cleared
	 * via backdoor resets. We don't allow direct access to the vector
	 * table so even if a userspace driver attempts to save/restore around
	 * such a reset it would be unsuccessful. To avoid this, restore the
	 * cached value of the message prior to enabling.
	 */
	cmd = vfio_pci_memory_lock_and_enable(vdev);
	if (msix) {
		struct msi_msg msg;

		get_cached_msi_msg(irq, &msg);
		pci_write_msi_msg(irq, &msg);
	}

	ret = request_irq(irq, vfio_msihandler, 0,
			  vdev->ctx[vector].name, trigger);
	vfio_pci_memory_unlock_and_restore(vdev, cmd);
	if (ret) {
		kfree(vdev->ctx[vector].name);
		eventfd_ctx_put(trigger);
		return ret;
	}

	vdev->ctx[vector].producer.token = trigger;
	vdev->ctx[vector].producer.irq = irq;
	ret = irq_bypass_register_producer(&vdev->ctx[vector].producer);
	if (unlikely(ret)) {
		dev_info(&pdev->dev,
			 "irq bypass producer (token %p) registration fails: %d\n",
			 vdev->ctx[vector].producer.token, ret);

		vdev->ctx[vector].producer.token = NULL;
	}
	vdev->ctx[vector].trigger = trigger;

	return 0;
}
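
/*
 * Apply eventfds to a contiguous range of vectors, unwinding the vectors
 * already configured if any step fails.
 */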
static int vfio_msi_set_block(struct vfio_pci_core_device *vdev, unsigned start,
			      unsigned count, int32_t *fds, bool msix)
{
	int i, j, ret = 0;

	if (start >= vdev->num_ctx || start + count > vdev->num_ctx)
		return -EINVAL;

	for (i = 0, j = start; i < count && !ret; i++, j++) {
		int fd = fds ? fds[i] : -1;
		ret = vfio_msi_set_vector_signal(vdev, j, fd, msix);
	}

	if (ret) {
		for (--j; j >= (int)start; j--)
			vfio_msi_set_vector_signal(vdev, j, -1, msix);
	}

	return ret;
}

static void vfio_msi_disable(struct vfio_pci_core_device *vdev, bool msix)
{
	struct pci_dev *pdev = vdev->pdev;
	int i;
	u16 cmd;

	for (i = 0; i < vdev->num_ctx; i++) {
		vfio_virqfd_disable(&vdev->ctx[i].unmask);
		vfio_virqfd_disable(&vdev->ctx[i].mask);
	}

	vfio_msi_set_block(vdev, 0, vdev->num_ctx, NULL, msix);

	cmd = vfio_pci_memory_lock_and_enable(vdev);
	pci_free_irq_vectors(pdev);
	vfio_pci_memory_unlock_and_restore(vdev, cmd);

	/*
	 * Both disable paths above use pci_intx_for_msi() to clear DisINTx
	 * via their shutdown paths. Restore for NoINTx devices.
	 */
	if (vdev->nointx)
		pci_intx(pdev, 0);

	vdev->irq_type = VFIO_PCI_NUM_IRQS;
	vdev->num_ctx = 0;
	kfree(vdev->ctx);
}
/*
 * IOCTL support
 */
static int vfio_pci_set_intx_unmask(struct vfio_pci_core_device *vdev,
				    unsigned index, unsigned start,
				    unsigned count, uint32_t flags, void *data)
{
	if (!is_intx(vdev) || start != 0 || count != 1)
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_NONE) {
		vfio_pci_intx_unmask(vdev);
	} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
		uint8_t unmask = *(uint8_t *)data;
		if (unmask)
			vfio_pci_intx_unmask(vdev);
	} else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		int32_t fd = *(int32_t *)data;
		if (fd >= 0)
			return vfio_virqfd_enable((void *) vdev,
						  vfio_pci_intx_unmask_handler,
						  vfio_send_intx_eventfd, NULL,
						  &vdev->ctx[0].unmask, fd);
		vfio_virqfd_disable(&vdev->ctx[0].unmask);
	}

	return 0;
}

static int vfio_pci_set_intx_mask(struct vfio_pci_core_device *vdev,
				  unsigned index, unsigned start,
				  unsigned count, uint32_t flags, void *data)
{
	if (!is_intx(vdev) || start != 0 || count != 1)
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_NONE) {
		vfio_pci_intx_mask(vdev);
	} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
		uint8_t mask = *(uint8_t *)data;
		if (mask)
			vfio_pci_intx_mask(vdev);
	} else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		return -ENOTTY; /* XXX implement me */
	}

	return 0;
}

static int vfio_pci_set_intx_trigger(struct vfio_pci_core_device *vdev,
				     unsigned index, unsigned start,
				     unsigned count, uint32_t flags, void *data)
{
	if (is_intx(vdev) && !count && (flags & VFIO_IRQ_SET_DATA_NONE)) {
		vfio_intx_disable(vdev);
		return 0;
	}

	if (!(is_intx(vdev) || is_irq_none(vdev)) || start != 0 || count != 1)
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		int32_t fd = *(int32_t *)data;
		int ret;

		if (is_intx(vdev))
			return vfio_intx_set_signal(vdev, fd);

		ret = vfio_intx_enable(vdev);
		if (ret)
			return ret;

		ret = vfio_intx_set_signal(vdev, fd);
		if (ret)
			vfio_intx_disable(vdev);

		return ret;
	}

	if (!is_intx(vdev))
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_NONE) {
		vfio_send_intx_eventfd(vdev, NULL);
	} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
		uint8_t trigger = *(uint8_t *)data;
		if (trigger)
			vfio_send_intx_eventfd(vdev, NULL);
	}
	return 0;
}

static int vfio_pci_set_msi_trigger(struct vfio_pci_core_device *vdev,
				    unsigned index, unsigned start,
				    unsigned count, uint32_t flags, void *data)
{
	int i;
	bool msix = (index == VFIO_PCI_MSIX_IRQ_INDEX) ? true : false;

	if (irq_is(vdev, index) && !count && (flags & VFIO_IRQ_SET_DATA_NONE)) {
		vfio_msi_disable(vdev, msix);
		return 0;
	}

	if (!(irq_is(vdev, index) || is_irq_none(vdev)))
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		int32_t *fds = data;
		int ret;

		if (vdev->irq_type == index)
			return vfio_msi_set_block(vdev, start, count,
						  fds, msix);

		ret = vfio_msi_enable(vdev, start + count, msix);
		if (ret)
			return ret;

		ret = vfio_msi_set_block(vdev, start, count, fds, msix);
		if (ret)
			vfio_msi_disable(vdev, msix);

		return ret;
	}

	if (!irq_is(vdev, index) || start + count > vdev->num_ctx)
		return -EINVAL;

	for (i = start; i < start + count; i++) {
		if (!vdev->ctx[i].trigger)
			continue;
		if (flags & VFIO_IRQ_SET_DATA_NONE) {
			eventfd_signal(vdev->ctx[i].trigger, 1);
		} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
			uint8_t *bools = data;
			if (bools[i - start])
				eventfd_signal(vdev->ctx[i].trigger, 1);
		}
	}
	return 0;
}

static int vfio_pci_set_ctx_trigger_single(struct eventfd_ctx **ctx,
					   unsigned int count, uint32_t flags,
					   void *data)
{
	/* DATA_NONE/DATA_BOOL enables loopback testing */
	if (flags & VFIO_IRQ_SET_DATA_NONE) {
		if (*ctx) {
			if (count) {
				eventfd_signal(*ctx, 1);
			} else {
				eventfd_ctx_put(*ctx);
				*ctx = NULL;
			}
			return 0;
		}
	} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
		uint8_t trigger;

		if (!count)
			return -EINVAL;

		trigger = *(uint8_t *)data;
		if (trigger && *ctx)
			eventfd_signal(*ctx, 1);

		return 0;
	} else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		int32_t fd;

		if (!count)
			return -EINVAL;

		fd = *(int32_t *)data;
		if (fd == -1) {
			if (*ctx)
				eventfd_ctx_put(*ctx);
			*ctx = NULL;
		} else if (fd >= 0) {
			struct eventfd_ctx *efdctx;

			efdctx = eventfd_ctx_fdget(fd);
			if (IS_ERR(efdctx))
				return PTR_ERR(efdctx);

			if (*ctx)
				eventfd_ctx_put(*ctx);

			*ctx = efdctx;
		}
		return 0;
	}

	return -EINVAL;
}

static int vfio_pci_set_err_trigger(struct vfio_pci_core_device *vdev,
				    unsigned index, unsigned start,
				    unsigned count, uint32_t flags, void *data)
{
	if (index != VFIO_PCI_ERR_IRQ_INDEX || start != 0 || count > 1)
		return -EINVAL;

	return vfio_pci_set_ctx_trigger_single(&vdev->err_trigger,
					       count, flags, data);
}

static int vfio_pci_set_req_trigger(struct vfio_pci_core_device *vdev,
				    unsigned index, unsigned start,
				    unsigned count, uint32_t flags, void *data)
{
	if (index != VFIO_PCI_REQ_IRQ_INDEX || start != 0 || count > 1)
		return -EINVAL;

	return vfio_pci_set_ctx_trigger_single(&vdev->req_trigger,
					       count, flags, data);
}
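
/*
 * Entry point for the VFIO_DEVICE_SET_IRQS ioctl: select a handler based on
 * the IRQ index and the requested action, returning -ENOTTY for unsupported
 * combinations.
 */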
int vfio_pci_set_irqs_ioctl(struct vfio_pci_core_device *vdev, uint32_t flags,
			    unsigned index, unsigned start, unsigned count,
			    void *data)
{
	int (*func)(struct vfio_pci_core_device *vdev, unsigned index,
		    unsigned start, unsigned count, uint32_t flags,
		    void *data) = NULL;

	switch (index) {
	case VFIO_PCI_INTX_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_MASK:
			func = vfio_pci_set_intx_mask;
			break;
		case VFIO_IRQ_SET_ACTION_UNMASK:
			func = vfio_pci_set_intx_unmask;
			break;
		case VFIO_IRQ_SET_ACTION_TRIGGER:
			func = vfio_pci_set_intx_trigger;
			break;
		}
		break;
	case VFIO_PCI_MSI_IRQ_INDEX:
	case VFIO_PCI_MSIX_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_MASK:
		case VFIO_IRQ_SET_ACTION_UNMASK:
			/* XXX Need masking support exported */
			break;
		case VFIO_IRQ_SET_ACTION_TRIGGER:
			func = vfio_pci_set_msi_trigger;
			break;
		}
		break;
	case VFIO_PCI_ERR_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_TRIGGER:
			if (pci_is_pcie(vdev->pdev))
				func = vfio_pci_set_err_trigger;
			break;
		}
		break;
	case VFIO_PCI_REQ_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_TRIGGER:
			func = vfio_pci_set_req_trigger;
			break;
		}
		break;
	}

	if (!func)
		return -ENOTTY;

	return func(vdev, index, start, count, flags, data);
}