
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Linux host-side vring helpers; for when the kernel needs to access
 * someone else's vring.
 *
 * Copyright IBM Corporation, 2013.
 * Parts taken from drivers/vhost/vhost.c Copyright 2009 Red Hat, Inc.
 *
 * Written by: Rusty Russell <[email protected]>
 */
#ifndef _LINUX_VRINGH_H
#define _LINUX_VRINGH_H
#include <uapi/linux/virtio_ring.h>
#include <linux/virtio_byteorder.h>
#include <linux/uio.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#if IS_REACHABLE(CONFIG_VHOST_IOTLB)
#include <linux/dma-direction.h>
#include <linux/vhost_iotlb.h>
#endif
#include <asm/barrier.h>
/* virtio_ring with information needed for host access. */
struct vringh {
        /* Everything is little endian */
        bool little_endian;

        /* Guest publishes used event idx (note: we always do). */
        bool event_indices;

        /* Can we get away with weak barriers? */
        bool weak_barriers;

        /* Last available index we saw (i.e. where we're up to). */
        u16 last_avail_idx;

        /* Last index we used. */
        u16 last_used_idx;

        /* How many descriptors we've completed since last need_notify(). */
        u32 completed;

        /* The vring (note: it may contain user pointers!) */
        struct vring vring;

        /* IOTLB for this vring */
        struct vhost_iotlb *iotlb;

        /* spinlock to synchronize IOTLB accesses */
        spinlock_t *iotlb_lock;

        /* The function to call to notify the guest about added buffers */
        void (*notify)(struct vringh *);
};
/**
 * struct vringh_config_ops - ops for creating a host vring from a virtio driver
 * @find_vrhs: find the host vrings and instantiate them
 *	vdev: the virtio_device
 *	nhvrs: the number of host vrings to find
 *	hvrs: on success, includes new host vrings
 *	callbacks: array of driver callbacks, for each host vring
 *		include a NULL entry for vqs that do not need a callback
 *	Returns 0 on success or error status
 * @del_vrhs: free the host vrings found by find_vrhs().
 */
struct virtio_device;
typedef void vrh_callback_t(struct virtio_device *, struct vringh *);

struct vringh_config_ops {
        int (*find_vrhs)(struct virtio_device *vdev, unsigned nhvrs,
                         struct vringh *vrhs[], vrh_callback_t *callbacks[]);
        void (*del_vrhs)(struct virtio_device *vdev);
};
/* The memory the vring can access, and what offset to apply. */
struct vringh_range {
        u64 start, end_incl;
        u64 offset;
};
/**
 * struct vringh_iov - iovec mangler.
 * @iov: array of iovecs to operate on
 * @consumed: number of bytes consumed within iov[i]
 * @i: index of the current iovec
 * @used: number of iovecs present
 * @max_num: maximum number of iovecs (top bit may be VRINGH_IOV_ALLOCATED)
 *
 * Mangles iovec in place, and restores it.
 * Remaining data is iov + i, of used - i elements.
 */
struct vringh_iov {
        struct iovec *iov;
        size_t consumed; /* Within iov[i] */
        unsigned i, used, max_num;
};
/**
 * struct vringh_kiov - kvec mangler.
 * @iov: array of kvecs to operate on
 * @consumed: number of bytes consumed within iov[i]
 * @i: index of the current kvec
 * @used: number of kvecs present
 * @max_num: maximum number of kvecs (top bit may be VRINGH_IOV_ALLOCATED)
 *
 * Mangles kvec in place, and restores it.
 * Remaining data is iov + i, of used - i elements.
 */
struct vringh_kiov {
        struct kvec *iov;
        size_t consumed; /* Within iov[i] */
        unsigned i, used, max_num;
};
/* Flag on max_num to indicate we're kmalloced. */
#define VRINGH_IOV_ALLOCATED 0x8000000
/* Helpers for userspace vrings. */
int vringh_init_user(struct vringh *vrh, u64 features,
                     unsigned int num, bool weak_barriers,
                     vring_desc_t __user *desc,
                     vring_avail_t __user *avail,
                     vring_used_t __user *used);

static inline void vringh_iov_init(struct vringh_iov *iov,
                                   struct iovec *iovec, unsigned num)
{
        iov->used = iov->i = 0;
        iov->consumed = 0;
        iov->max_num = num;
        iov->iov = iovec;
}

static inline void vringh_iov_reset(struct vringh_iov *iov)
{
        iov->iov[iov->i].iov_len += iov->consumed;
        iov->iov[iov->i].iov_base -= iov->consumed;
        iov->consumed = 0;
        iov->i = 0;
}

static inline void vringh_iov_cleanup(struct vringh_iov *iov)
{
        if (iov->max_num & VRINGH_IOV_ALLOCATED)
                kfree(iov->iov);
        iov->max_num = iov->used = iov->i = iov->consumed = 0;
        iov->iov = NULL;
}
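
/*
 * Example: a minimal, hedged sketch of the iov mangler lifecycle (not part
 * of this API; "iov_lifecycle_sketch" and the caller-owned "stack" array are
 * hypothetical). A partial pull advances iov.i/iov.consumed in place;
 * vringh_iov_reset() rewinds that adjustment so the same iovec array can be
 * walked again, and vringh_iov_cleanup() only kfree()s the array when
 * VRINGH_IOV_ALLOCATED is set. Guarded with #if 0 so it is illustrative
 * only and never compiled.
 */
#if 0
static void iov_lifecycle_sketch(struct vringh *vrh)
{
        struct iovec stack[4];
        struct vringh_iov iov;

        vringh_iov_init(&iov, stack, ARRAY_SIZE(stack));
        /* ... vringh_getdesc_user() populates iov, pulls consume it ... */
        vringh_iov_reset(&iov);   /* rewind to the start of the data */
        vringh_iov_cleanup(&iov); /* no kfree() here: stack-allocated */
}
#endif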
/* Convert a descriptor into iovecs. */
int vringh_getdesc_user(struct vringh *vrh,
                        struct vringh_iov *riov,
                        struct vringh_iov *wiov,
                        bool (*getrange)(struct vringh *vrh,
                                         u64 addr, struct vringh_range *r),
                        u16 *head);
/* Copy bytes from readable vsg, consuming it (and incrementing riov->i). */
ssize_t vringh_iov_pull_user(struct vringh_iov *riov, void *dst, size_t len);

/* Copy bytes into writable vsg, consuming it (and incrementing wiov->i). */
ssize_t vringh_iov_push_user(struct vringh_iov *wiov,
                             const void *src, size_t len);
/* Mark a descriptor as used. */
int vringh_complete_user(struct vringh *vrh, u16 head, u32 len);
int vringh_complete_multi_user(struct vringh *vrh,
                               const struct vring_used_elem used[],
                               unsigned num_used);

/* Pretend we've never seen a descriptor (for easy error handling). */
void vringh_abandon_user(struct vringh *vrh, unsigned int num);

/* Do we need to fire the eventfd to notify the other side? */
int vringh_need_notify_user(struct vringh *vrh);
bool vringh_notify_enable_user(struct vringh *vrh);
void vringh_notify_disable_user(struct vringh *vrh);
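
/*
 * Example: a hedged sketch of a userspace-vring service loop built from the
 * declarations above. "my_getrange", "service_one", the echo semantics and
 * the return-value conventions noted in the comments are assumptions, not
 * part of this header; see drivers/vhost for real users. Guarded with #if 0
 * so it is illustrative only and never compiled.
 */
#if 0
static bool my_getrange(struct vringh *vrh, u64 addr, struct vringh_range *r)
{
        /* Hypothetical: allow the whole address space, with no offset. */
        r->start = 0;
        r->end_incl = -1ULL;
        r->offset = 0;
        return true;
}

static int service_one(struct vringh *vrh)
{
        struct iovec rstack[8], wstack[8];
        struct vringh_iov riov, wiov;
        char buf[64];
        u16 head;
        int err;

        vringh_iov_init(&riov, rstack, ARRAY_SIZE(rstack));
        vringh_iov_init(&wiov, wstack, ARRAY_SIZE(wstack));

        err = vringh_getdesc_user(vrh, &riov, &wiov, my_getrange, &head);
        if (err <= 0)   /* assumed: 0 = ring empty, <0 = error */
                goto out;

        /* Consume the readable payload, then produce a reply in place. */
        vringh_iov_pull_user(&riov, buf, sizeof(buf));
        vringh_iov_push_user(&wiov, buf, sizeof(buf));

        err = vringh_complete_user(vrh, head, sizeof(buf));
        if (!err && vringh_need_notify_user(vrh) > 0)
                vringh_notify(vrh);     /* fires the registered callback */
out:
        vringh_iov_cleanup(&riov);
        vringh_iov_cleanup(&wiov);
        return err;
}
#endif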
/* Helpers for kernelspace vrings. */
int vringh_init_kern(struct vringh *vrh, u64 features,
                     unsigned int num, bool weak_barriers,
                     struct vring_desc *desc,
                     struct vring_avail *avail,
                     struct vring_used *used);

static inline void vringh_kiov_init(struct vringh_kiov *kiov,
                                    struct kvec *kvec, unsigned num)
{
        kiov->used = kiov->i = 0;
        kiov->consumed = 0;
        kiov->max_num = num;
        kiov->iov = kvec;
}

static inline void vringh_kiov_reset(struct vringh_kiov *kiov)
{
        kiov->iov[kiov->i].iov_len += kiov->consumed;
        kiov->iov[kiov->i].iov_base -= kiov->consumed;
        kiov->consumed = 0;
        kiov->i = 0;
}

static inline void vringh_kiov_cleanup(struct vringh_kiov *kiov)
{
        if (kiov->max_num & VRINGH_IOV_ALLOCATED)
                kfree(kiov->iov);
        kiov->max_num = kiov->used = kiov->i = kiov->consumed = 0;
        kiov->iov = NULL;
}
static inline size_t vringh_kiov_length(struct vringh_kiov *kiov)
{
        size_t len = 0;
        int i;

        for (i = kiov->i; i < kiov->used; i++)
                len += kiov->iov[i].iov_len;

        return len;
}

void vringh_kiov_advance(struct vringh_kiov *kiov, size_t len);
int vringh_getdesc_kern(struct vringh *vrh,
                        struct vringh_kiov *riov,
                        struct vringh_kiov *wiov,
                        u16 *head,
                        gfp_t gfp);

ssize_t vringh_iov_pull_kern(struct vringh_kiov *riov, void *dst, size_t len);
ssize_t vringh_iov_push_kern(struct vringh_kiov *wiov,
                             const void *src, size_t len);
void vringh_abandon_kern(struct vringh *vrh, unsigned int num);
int vringh_complete_kern(struct vringh *vrh, u16 head, u32 len);

bool vringh_notify_enable_kern(struct vringh *vrh);
void vringh_notify_disable_kern(struct vringh *vrh);
int vringh_need_notify_kern(struct vringh *vrh);
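
/*
 * Example: a hedged sketch of the equivalent kernelspace loop. No getrange
 * callback is needed because the ring holds kernel addresses; the gfp
 * argument lets vringh_getdesc_kern() allocate a larger kvec array if the
 * stack one is too small. "kern_service_one", the echo semantics and the
 * return-value conventions are assumptions. Guarded with #if 0 so it is
 * illustrative only and never compiled.
 */
#if 0
static int kern_service_one(struct vringh *vrh)
{
        struct kvec rstack[8], wstack[8];
        struct vringh_kiov riov, wiov;
        char buf[64];
        u16 head;
        int err;

        vringh_kiov_init(&riov, rstack, ARRAY_SIZE(rstack));
        vringh_kiov_init(&wiov, wstack, ARRAY_SIZE(wstack));

        err = vringh_getdesc_kern(vrh, &riov, &wiov, &head, GFP_KERNEL);
        if (err <= 0)   /* assumed: 0 = ring empty, <0 = error */
                goto out;

        vringh_iov_pull_kern(&riov, buf, sizeof(buf));
        vringh_iov_push_kern(&wiov, buf, sizeof(buf));

        err = vringh_complete_kern(vrh, head, sizeof(buf));
        if (!err && vringh_need_notify_kern(vrh) > 0)
                vringh_notify(vrh);
out:
        vringh_kiov_cleanup(&riov);
        vringh_kiov_cleanup(&wiov);
        return err;
}
#endif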
/* Notify the guest about buffers added to the used ring */
static inline void vringh_notify(struct vringh *vrh)
{
        if (vrh->notify)
                vrh->notify(vrh);
}

static inline bool vringh_is_little_endian(const struct vringh *vrh)
{
        return vrh->little_endian ||
                virtio_legacy_is_little_endian();
}

static inline u16 vringh16_to_cpu(const struct vringh *vrh, __virtio16 val)
{
        return __virtio16_to_cpu(vringh_is_little_endian(vrh), val);
}

static inline __virtio16 cpu_to_vringh16(const struct vringh *vrh, u16 val)
{
        return __cpu_to_virtio16(vringh_is_little_endian(vrh), val);
}

static inline u32 vringh32_to_cpu(const struct vringh *vrh, __virtio32 val)
{
        return __virtio32_to_cpu(vringh_is_little_endian(vrh), val);
}

static inline __virtio32 cpu_to_vringh32(const struct vringh *vrh, u32 val)
{
        return __cpu_to_virtio32(vringh_is_little_endian(vrh), val);
}

static inline u64 vringh64_to_cpu(const struct vringh *vrh, __virtio64 val)
{
        return __virtio64_to_cpu(vringh_is_little_endian(vrh), val);
}

static inline __virtio64 cpu_to_vringh64(const struct vringh *vrh, u64 val)
{
        return __cpu_to_virtio64(vringh_is_little_endian(vrh), val);
}
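
/*
 * Example: a hedged sketch of using the accessors above to read a ring
 * field in the ring's byte order. "peek_avail_idx" is hypothetical and
 * assumes a kernelspace ring whose vring.avail pointer can be dereferenced
 * directly. Guarded with #if 0 so it is illustrative only and never
 * compiled.
 */
#if 0
static u16 peek_avail_idx(const struct vringh *vrh)
{
        /* The ring stores __virtio16; convert per this ring's endianness. */
        return vringh16_to_cpu(vrh, READ_ONCE(vrh->vring.avail->idx));
}
#endif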
#if IS_REACHABLE(CONFIG_VHOST_IOTLB)
void vringh_set_iotlb(struct vringh *vrh, struct vhost_iotlb *iotlb,
                      spinlock_t *iotlb_lock);

int vringh_init_iotlb(struct vringh *vrh, u64 features,
                      unsigned int num, bool weak_barriers,
                      struct vring_desc *desc,
                      struct vring_avail *avail,
                      struct vring_used *used);

int vringh_getdesc_iotlb(struct vringh *vrh,
                         struct vringh_kiov *riov,
                         struct vringh_kiov *wiov,
                         u16 *head,
                         gfp_t gfp);

ssize_t vringh_iov_pull_iotlb(struct vringh *vrh,
                              struct vringh_kiov *riov,
                              void *dst, size_t len);
ssize_t vringh_iov_push_iotlb(struct vringh *vrh,
                              struct vringh_kiov *wiov,
                              const void *src, size_t len);

void vringh_abandon_iotlb(struct vringh *vrh, unsigned int num);
int vringh_complete_iotlb(struct vringh *vrh, u16 head, u32 len);

bool vringh_notify_enable_iotlb(struct vringh *vrh);
void vringh_notify_disable_iotlb(struct vringh *vrh);
int vringh_need_notify_iotlb(struct vringh *vrh);
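
/*
 * Example: a hedged sketch of wiring an IOTLB into a vringh, loosely
 * modeled on how vDPA simulators use this API. "setup_iotlb_ring" and its
 * parameters are hypothetical, and the set-then-init ordering is an
 * assumption. Guarded with #if 0 so it is illustrative only and never
 * compiled.
 */
#if 0
static int setup_iotlb_ring(struct vringh *vrh, struct vhost_iotlb *iotlb,
                            spinlock_t *lock, u64 features, unsigned int num,
                            struct vring_desc *desc, struct vring_avail *avail,
                            struct vring_used *used)
{
        /* Attach the IOTLB first; ring accesses are then translated. */
        vringh_set_iotlb(vrh, iotlb, lock);
        return vringh_init_iotlb(vrh, features, num,
                                 false /* weak_barriers */,
                                 desc, avail, used);
}
#endif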
#endif /* CONFIG_VHOST_IOTLB */
#endif /* _LINUX_VRINGH_H */