  1. /* SPDX-License-Identifier: GPL-2.0-only */
  2. /*
  3. * VFIO API definition
  4. *
  5. * Copyright (C) 2012 Red Hat, Inc. All rights reserved.
  6. * Author: Alex Williamson <[email protected]>
  7. */
  8. #ifndef VFIO_H
  9. #define VFIO_H
  10. #include <linux/iommu.h>
  11. #include <linux/mm.h>
  12. #include <linux/workqueue.h>
  13. #include <linux/poll.h>
  14. #include <uapi/linux/vfio.h>
  15. #include <linux/iova_bitmap.h>
  16. struct kvm;
  17. /*
  18. * VFIO devices can be placed in a set, this allows all devices to share this
  19. * structure and the VFIO core will provide a lock that is held around
  20. * open_device()/close_device() for all devices in the set.
  21. */
struct vfio_device_set {
	void *set_id;			/* opaque key; devices passing the same id share a set */
	struct mutex lock;		/* held by the core around open_device()/close_device() */
	struct list_head device_list;	/* members, linked via vfio_device.dev_set_list */
	unsigned int device_count;	/* number of devices on device_list */
};
struct vfio_device {
	struct device *dev;		/* the bus device this vfio_device wraps */
	const struct vfio_device_ops *ops;
	/*
	 * mig_ops/log_ops is a static property of the vfio_device which must
	 * be set prior to registering the vfio_device.
	 */
	const struct vfio_migration_ops *mig_ops;	/* live-migration state callbacks */
	const struct vfio_log_ops *log_ops;		/* DMA dirty-logging callbacks */
	struct vfio_group *group;
	struct vfio_device_set *dev_set;	/* set joined via vfio_assign_device_set() */
	struct list_head dev_set_list;		/* entry on dev_set->device_list */
	unsigned int migration_flags;
	/* Driver must reference the kvm during open_device or never touch it */
	struct kvm *kvm;

	/* Members below here are private, not for driver use */
	unsigned int index;
	struct device device;	/* device.kref covers object life circle */
	refcount_t refcount;	/* user count on registered device */
	unsigned int open_count;	/* open file descriptors; likely guarded by dev_set->lock — TODO confirm */
	struct completion comp;
	struct list_head group_next;
	struct list_head iommu_entry;
};
/**
 * struct vfio_device_ops - VFIO bus driver device callbacks
 *
 * @name: Name of the device driver
 * @init: initialize private fields in device structure
 * @release: Reclaim private fields in device structure
 * @open_device: Called when the first file descriptor is opened for this device
 * @close_device: Opposite of open_device
 * @read: Perform read(2) on device file descriptor
 * @write: Perform write(2) on device file descriptor
 * @ioctl: Perform ioctl(2) on device file descriptor, supporting VFIO_DEVICE_*
 *         operations documented below
 * @mmap: Perform mmap(2) on a region of the device file descriptor
 * @request: Request for the bus driver to release the device
 * @match: Optional device name match callback (return: 0 for no-match, >0 for
 *         match, -errno for abort (ex. match with insufficient or incorrect
 *         additional args)
 * @dma_unmap: Called when userspace unmaps IOVA from the container
 *             this device is attached to.
 * @device_feature: Optional, fill in the VFIO_DEVICE_FEATURE ioctl
 */
struct vfio_device_ops {
	char	*name;
	int	(*init)(struct vfio_device *vdev);
	void	(*release)(struct vfio_device *vdev);
	int	(*open_device)(struct vfio_device *vdev);
	void	(*close_device)(struct vfio_device *vdev);
	ssize_t	(*read)(struct vfio_device *vdev, char __user *buf,
			size_t count, loff_t *ppos);
	ssize_t	(*write)(struct vfio_device *vdev, const char __user *buf,
			 size_t count, loff_t *size);
	long	(*ioctl)(struct vfio_device *vdev, unsigned int cmd,
			 unsigned long arg);
	int	(*mmap)(struct vfio_device *vdev, struct vm_area_struct *vma);
	void	(*request)(struct vfio_device *vdev, unsigned int count);
	int	(*match)(struct vfio_device *vdev, char *buf);
	void	(*dma_unmap)(struct vfio_device *vdev, u64 iova, u64 length);
	int	(*device_feature)(struct vfio_device *device, u32 flags,
				  void __user *arg, size_t argsz);
};
/**
 * struct vfio_migration_ops - VFIO bus driver device migration callbacks
 *
 * @migration_set_state: Optional callback to change the migration state for
 *         devices that support migration. It's mandatory for
 *         VFIO_DEVICE_FEATURE_MIGRATION migration support.
 *         The returned FD is used for data transfer according to the FSM
 *         definition. The driver is responsible to ensure that FD reaches end
 *         of stream or error whenever the migration FSM leaves a data transfer
 *         state or before close_device() returns.
 * @migration_get_state: Optional callback to get the migration state for
 *         devices that support migration. It's mandatory for
 *         VFIO_DEVICE_FEATURE_MIGRATION migration support.
 */
struct vfio_migration_ops {
	struct file *(*migration_set_state)(
		struct vfio_device *device,
		enum vfio_device_mig_state new_state);
	int (*migration_get_state)(struct vfio_device *device,
				   enum vfio_device_mig_state *curr_state);
};
/**
 * struct vfio_log_ops - VFIO bus driver device DMA logging callbacks
 *
 * @log_start: Optional callback to ask the device start DMA logging.
 * @log_stop: Optional callback to ask the device stop DMA logging.
 * @log_read_and_clear: Optional callback to ask the device read
 *         and clear the dirty DMAs in some given range.
 *
 * The vfio core implementation of the DEVICE_FEATURE_DMA_LOGGING_ set
 * of features does not track logging state relative to the device,
 * therefore the device implementation of vfio_log_ops must handle
 * arbitrary user requests. This includes rejecting subsequent calls
 * to log_start without an intervening log_stop, as well as graceful
 * handling of log_stop and log_read_and_clear from invalid states.
 */
struct vfio_log_ops {
	int (*log_start)(struct vfio_device *device,
		struct rb_root_cached *ranges, u32 nnodes, u64 *page_size);
	int (*log_stop)(struct vfio_device *device);
	int (*log_read_and_clear)(struct vfio_device *device,
		unsigned long iova, unsigned long length,
		struct iova_bitmap *dirty);
};
  131. /**
  132. * vfio_check_feature - Validate user input for the VFIO_DEVICE_FEATURE ioctl
  133. * @flags: Arg from the device_feature op
  134. * @argsz: Arg from the device_feature op
  135. * @supported_ops: Combination of VFIO_DEVICE_FEATURE_GET and SET the driver
  136. * supports
  137. * @minsz: Minimum data size the driver accepts
  138. *
  139. * For use in a driver's device_feature op. Checks that the inputs to the
  140. * VFIO_DEVICE_FEATURE ioctl are correct for the driver's feature. Returns 1 if
  141. * the driver should execute the get or set, otherwise the relevant
  142. * value should be returned.
  143. */
  144. static inline int vfio_check_feature(u32 flags, size_t argsz, u32 supported_ops,
  145. size_t minsz)
  146. {
  147. if ((flags & (VFIO_DEVICE_FEATURE_GET | VFIO_DEVICE_FEATURE_SET)) &
  148. ~supported_ops)
  149. return -EINVAL;
  150. if (flags & VFIO_DEVICE_FEATURE_PROBE)
  151. return 0;
  152. /* Without PROBE one of GET or SET must be requested */
  153. if (!(flags & (VFIO_DEVICE_FEATURE_GET | VFIO_DEVICE_FEATURE_SET)))
  154. return -EINVAL;
  155. if (argsz < minsz)
  156. return -EINVAL;
  157. return 1;
  158. }
  159. struct vfio_device *_vfio_alloc_device(size_t size, struct device *dev,
  160. const struct vfio_device_ops *ops);
/*
 * Allocate a driver structure @dev_struct that embeds a struct vfio_device
 * as @member, sized for the whole container.
 *
 * BUILD_BUG_ON_ZERO(offsetof(...)) contributes 0 to the size but breaks the
 * build unless @member sits at offset 0, i.e. is the FIRST field of
 * @dev_struct — presumably so the container_of() cast below is trivial even
 * for error-pointer returns from _vfio_alloc_device(); TODO confirm.
 */
#define vfio_alloc_device(dev_struct, member, dev, ops)				\
	container_of(_vfio_alloc_device(sizeof(struct dev_struct) +		\
					BUILD_BUG_ON_ZERO(offsetof(		\
						struct dev_struct, member)),	\
					dev, ops),				\
		     struct dev_struct, member)
  167. int vfio_init_device(struct vfio_device *device, struct device *dev,
  168. const struct vfio_device_ops *ops);
  169. void vfio_free_device(struct vfio_device *device);
  170. static inline void vfio_put_device(struct vfio_device *device)
  171. {
  172. put_device(&device->device);
  173. }
  174. int vfio_register_group_dev(struct vfio_device *device);
  175. int vfio_register_emulated_iommu_dev(struct vfio_device *device);
  176. void vfio_unregister_group_dev(struct vfio_device *device);
  177. int vfio_assign_device_set(struct vfio_device *device, void *set_id);
  178. unsigned int vfio_device_set_open_count(struct vfio_device_set *dev_set);
  179. int vfio_mig_get_next_state(struct vfio_device *device,
  180. enum vfio_device_mig_state cur_fsm,
  181. enum vfio_device_mig_state new_fsm,
  182. enum vfio_device_mig_state *next_fsm);
  183. /*
  184. * External user API
  185. */
  186. struct iommu_group *vfio_file_iommu_group(struct file *file);
  187. bool vfio_file_is_group(struct file *file);
  188. bool vfio_file_enforced_coherent(struct file *file);
  189. void vfio_file_set_kvm(struct file *file, struct kvm *kvm);
  190. bool vfio_file_has_dev(struct file *file, struct vfio_device *device);
  191. #define VFIO_PIN_PAGES_MAX_ENTRIES (PAGE_SIZE/sizeof(unsigned long))
  192. int vfio_pin_pages(struct vfio_device *device, dma_addr_t iova,
  193. int npage, int prot, struct page **pages);
  194. void vfio_unpin_pages(struct vfio_device *device, dma_addr_t iova, int npage);
  195. int vfio_dma_rw(struct vfio_device *device, dma_addr_t iova,
  196. void *data, size_t len, bool write);
  197. /*
  198. * Sub-module helpers
  199. */
/*
 * Accumulator for a chain of vfio_info_cap_header entries, built via
 * vfio_info_cap_add()/vfio_info_add_capability() below.
 */
struct vfio_info_cap {
	struct vfio_info_cap_header *buf;	/* capability chain storage */
	size_t size;				/* bytes used in @buf — TODO confirm used vs allocated */
};
  204. struct vfio_info_cap_header *vfio_info_cap_add(struct vfio_info_cap *caps,
  205. size_t size, u16 id,
  206. u16 version);
  207. void vfio_info_cap_shift(struct vfio_info_cap *caps, size_t offset);
  208. int vfio_info_add_capability(struct vfio_info_cap *caps,
  209. struct vfio_info_cap_header *cap, size_t size);
  210. int vfio_set_irqs_validate_and_prepare(struct vfio_irq_set *hdr,
  211. int num_irqs, int max_irq_type,
  212. size_t *data_size);
  213. struct pci_dev;
#if IS_ENABLED(CONFIG_VFIO_SPAPR_EEH)
void vfio_spapr_pci_eeh_open(struct pci_dev *pdev);
void vfio_spapr_pci_eeh_release(struct pci_dev *pdev);
long vfio_spapr_iommu_eeh_ioctl(struct iommu_group *group, unsigned int cmd,
				unsigned long arg);
#else
/* No-op stubs so callers need no #ifdefs when SPAPR EEH is compiled out. */
static inline void vfio_spapr_pci_eeh_open(struct pci_dev *pdev)
{
}

static inline void vfio_spapr_pci_eeh_release(struct pci_dev *pdev)
{
}

static inline long vfio_spapr_iommu_eeh_ioctl(struct iommu_group *group,
					      unsigned int cmd,
					      unsigned long arg)
{
	/* Report "unsupported ioctl" when EEH support is not built in. */
	return -ENOTTY;
}
#endif /* CONFIG_VFIO_SPAPR_EEH */
/*
 * IRQfd - generic
 *
 * Connects an eventfd to caller-supplied callbacks; managed through
 * vfio_virqfd_enable()/vfio_virqfd_disable() below.
 */
struct virqfd {
	void *opaque;			/* caller cookie passed to the callbacks — TODO confirm arg order */
	struct eventfd_ctx *eventfd;
	int (*handler)(void *, void *);
	void (*thread)(void *, void *);	/* presumably deferred work when handler defers — TODO confirm */
	void *data;			/* caller data passed to the callbacks */
	struct work_struct inject;
	wait_queue_entry_t wait;	/* entry placed on the eventfd's waitqueue — TODO confirm */
	poll_table pt;
	struct work_struct shutdown;	/* deferred teardown work */
	struct virqfd **pvirqfd;	/* caller's pointer slot, as passed to vfio_virqfd_enable() */
};
  248. int vfio_virqfd_enable(void *opaque, int (*handler)(void *, void *),
  249. void (*thread)(void *, void *), void *data,
  250. struct virqfd **pvirqfd, int fd);
  251. void vfio_virqfd_disable(struct virqfd **pvirqfd);
  252. #endif /* VFIO_H */