irqfd.c 5.3 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * ACRN HSM irqfd: use eventfd objects to inject virtual interrupts
  4. *
  5. * Copyright (C) 2020 Intel Corporation. All rights reserved.
  6. *
  7. * Authors:
  8. * Shuo Liu <[email protected]>
  9. * Yakui Zhao <[email protected]>
  10. */
  11. #include <linux/eventfd.h>
  12. #include <linux/file.h>
  13. #include <linux/poll.h>
  14. #include <linux/slab.h>
  15. #include "acrn_drv.h"
/* NOTE(review): not referenced anywhere in this file — candidate for removal. */
static LIST_HEAD(acrn_irqfd_clients);
/**
 * struct hsm_irqfd - Properties of HSM irqfd
 * @vm:		Associated VM pointer
 * @wait:	Entry of wait-queue; hooked onto the eventfd's wait queue so
 *		hsm_irqfd_wakeup() runs when the eventfd is signaled
 * @shutdown:	Async shutdown work, queued on &acrn_vm.irqfd_wq when the
 *		eventfd side hangs up
 * @eventfd:	Associated eventfd (holds a reference until shutdown)
 * @list:	Entry within &acrn_vm.irqfds of irqfds of a VM
 * @pt:		Structure for select/poll on the associated eventfd
 * @msi:	MSI data injected into the VM on each eventfd signal
 */
struct hsm_irqfd {
	struct acrn_vm *vm;
	wait_queue_entry_t wait;
	struct work_struct shutdown;
	struct eventfd_ctx *eventfd;
	struct list_head list;
	poll_table pt;
	struct acrn_msi_entry msi;
};
  36. static void acrn_irqfd_inject(struct hsm_irqfd *irqfd)
  37. {
  38. struct acrn_vm *vm = irqfd->vm;
  39. acrn_msi_inject(vm, irqfd->msi.msi_addr,
  40. irqfd->msi.msi_data);
  41. }
/*
 * Tear down one irqfd: unlink it from the VM's list, detach its wait entry
 * from the eventfd's wait queue, drop the eventfd reference and free it.
 *
 * Caller must hold &acrn_vm.irqfds_lock. list_del_init() runs first so that
 * a concurrent hsm_irqfd_shutdown_work() sees the entry as already removed
 * (its list_empty() check) and does not double-free.
 */
static void hsm_irqfd_shutdown(struct hsm_irqfd *irqfd)
{
	u64 cnt;

	lockdep_assert_held(&irqfd->vm->irqfds_lock);

	/* remove from wait queue */
	list_del_init(&irqfd->list);
	eventfd_ctx_remove_wait_queue(irqfd->eventfd, &irqfd->wait, &cnt);
	eventfd_ctx_put(irqfd->eventfd);
	kfree(irqfd);
}
  52. static void hsm_irqfd_shutdown_work(struct work_struct *work)
  53. {
  54. struct hsm_irqfd *irqfd;
  55. struct acrn_vm *vm;
  56. irqfd = container_of(work, struct hsm_irqfd, shutdown);
  57. vm = irqfd->vm;
  58. mutex_lock(&vm->irqfds_lock);
  59. if (!list_empty(&irqfd->list))
  60. hsm_irqfd_shutdown(irqfd);
  61. mutex_unlock(&vm->irqfds_lock);
  62. }
  63. /* Called with wqh->lock held and interrupts disabled */
  64. static int hsm_irqfd_wakeup(wait_queue_entry_t *wait, unsigned int mode,
  65. int sync, void *key)
  66. {
  67. unsigned long poll_bits = (unsigned long)key;
  68. struct hsm_irqfd *irqfd;
  69. struct acrn_vm *vm;
  70. irqfd = container_of(wait, struct hsm_irqfd, wait);
  71. vm = irqfd->vm;
  72. if (poll_bits & POLLIN)
  73. /* An event has been signaled, inject an interrupt */
  74. acrn_irqfd_inject(irqfd);
  75. if (poll_bits & POLLHUP)
  76. /* Do shutdown work in thread to hold wqh->lock */
  77. queue_work(vm->irqfd_wq, &irqfd->shutdown);
  78. return 0;
  79. }
  80. static void hsm_irqfd_poll_func(struct file *file, wait_queue_head_t *wqh,
  81. poll_table *pt)
  82. {
  83. struct hsm_irqfd *irqfd;
  84. irqfd = container_of(pt, struct hsm_irqfd, pt);
  85. add_wait_queue(wqh, &irqfd->wait);
  86. }
/*
 * Assign an eventfd to a VM and create a HSM irqfd associated with the
 * eventfd. The properties of the HSM irqfd are built from a &struct
 * acrn_irqfd.
 *
 * Returns 0 on success; -ENOMEM, -EBADF, -EBUSY or the eventfd lookup
 * error on failure. On failure nothing is left registered and the irqfd
 * allocation is freed.
 */
static int acrn_irqfd_assign(struct acrn_vm *vm, struct acrn_irqfd *args)
{
	struct eventfd_ctx *eventfd = NULL;
	struct hsm_irqfd *irqfd, *tmp;
	__poll_t events;
	struct fd f;
	int ret = 0;

	irqfd = kzalloc(sizeof(*irqfd), GFP_KERNEL);
	if (!irqfd)
		return -ENOMEM;

	irqfd->vm = vm;
	memcpy(&irqfd->msi, &args->msi, sizeof(args->msi));
	INIT_LIST_HEAD(&irqfd->list);
	INIT_WORK(&irqfd->shutdown, hsm_irqfd_shutdown_work);

	/* Pin the file for the duration of setup; released on every path. */
	f = fdget(args->fd);
	if (!f.file) {
		ret = -EBADF;
		goto out;
	}

	eventfd = eventfd_ctx_fileget(f.file);
	if (IS_ERR(eventfd)) {
		ret = PTR_ERR(eventfd);
		goto fail;
	}

	irqfd->eventfd = eventfd;

	/*
	 * Install custom wake-up handling to be notified whenever underlying
	 * eventfd is signaled.
	 */
	init_waitqueue_func_entry(&irqfd->wait, hsm_irqfd_wakeup);
	init_poll_funcptr(&irqfd->pt, hsm_irqfd_poll_func);

	mutex_lock(&vm->irqfds_lock);
	/* An eventfd may back at most one irqfd per VM. */
	list_for_each_entry(tmp, &vm->irqfds, list) {
		if (irqfd->eventfd != tmp->eventfd)
			continue;
		ret = -EBUSY;
		mutex_unlock(&vm->irqfds_lock);
		goto fail;
	}
	list_add_tail(&irqfd->list, &vm->irqfds);
	mutex_unlock(&vm->irqfds_lock);

	/* Check the pending event in this stage */
	events = vfs_poll(f.file, &irqfd->pt);
	if (events & EPOLLIN)
		acrn_irqfd_inject(irqfd);

	fdput(f);
	return 0;
fail:
	/* eventfd may be NULL (fdget-only path) or an ERR_PTR here. */
	if (eventfd && !IS_ERR(eventfd))
		eventfd_ctx_put(eventfd);

	fdput(f);
out:
	kfree(irqfd);
	return ret;
}
  147. static int acrn_irqfd_deassign(struct acrn_vm *vm,
  148. struct acrn_irqfd *args)
  149. {
  150. struct hsm_irqfd *irqfd, *tmp;
  151. struct eventfd_ctx *eventfd;
  152. eventfd = eventfd_ctx_fdget(args->fd);
  153. if (IS_ERR(eventfd))
  154. return PTR_ERR(eventfd);
  155. mutex_lock(&vm->irqfds_lock);
  156. list_for_each_entry_safe(irqfd, tmp, &vm->irqfds, list) {
  157. if (irqfd->eventfd == eventfd) {
  158. hsm_irqfd_shutdown(irqfd);
  159. break;
  160. }
  161. }
  162. mutex_unlock(&vm->irqfds_lock);
  163. eventfd_ctx_put(eventfd);
  164. return 0;
  165. }
  166. int acrn_irqfd_config(struct acrn_vm *vm, struct acrn_irqfd *args)
  167. {
  168. int ret;
  169. if (args->flags & ACRN_IRQFD_FLAG_DEASSIGN)
  170. ret = acrn_irqfd_deassign(vm, args);
  171. else
  172. ret = acrn_irqfd_assign(vm, args);
  173. return ret;
  174. }
/*
 * Initialize per-VM irqfd state: the irqfd list, its protecting mutex and
 * the workqueue used for deferred irqfd shutdown.
 *
 * Returns 0 on success, -ENOMEM if the workqueue cannot be allocated.
 * List and mutex are set up before the allocation so they are valid even
 * on the failure path.
 */
int acrn_irqfd_init(struct acrn_vm *vm)
{
	INIT_LIST_HEAD(&vm->irqfds);
	mutex_init(&vm->irqfds_lock);
	vm->irqfd_wq = alloc_workqueue("acrn_irqfd-%u", 0, 0, vm->vmid);
	if (!vm->irqfd_wq)
		return -ENOMEM;

	dev_dbg(acrn_dev.this_device, "VM %u irqfd init.\n", vm->vmid);
	return 0;
}
/*
 * Tear down all irqfd state of a VM.
 *
 * The workqueue is destroyed first: destroy_workqueue() drains pending
 * hsm_irqfd_shutdown_work() items, so no shutdown work can run concurrently
 * with (or after) the list walk below that frees the remaining irqfds.
 */
void acrn_irqfd_deinit(struct acrn_vm *vm)
{
	struct hsm_irqfd *irqfd, *next;

	dev_dbg(acrn_dev.this_device, "VM %u irqfd deinit.\n", vm->vmid);
	destroy_workqueue(vm->irqfd_wq);
	mutex_lock(&vm->irqfds_lock);
	/* _safe variant: hsm_irqfd_shutdown() unlinks and frees each entry. */
	list_for_each_entry_safe(irqfd, next, &vm->irqfds, list)
		hsm_irqfd_shutdown(irqfd);
	mutex_unlock(&vm->irqfds_lock);
}