rxe_queue.h

/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */

#ifndef RXE_QUEUE_H
#define RXE_QUEUE_H
/* Implements a simple circular buffer that is shared between the user
 * and the driver and can be resized. The requested element size is
 * rounded up to a power of 2 and the number of elements in the buffer
 * is also rounded up to a power of 2. Since the queue is empty when
 * the producer and consumer indices match, the maximum capacity of the
 * queue is one less than the number of element slots.
 *
 * Notes:
 *   - The driver indices are always masked off to q->index_mask
 *     before storing, so they do not need to be checked on reads.
 *   - The user, whether user space or kernel, is generally
 *     not trusted, so its parameters are masked to make sure
 *     they do not access the queue out of bounds on reads.
 *   - The driver indices for queues must not be written
 *     by the user, so a local copy is used and a shared copy is
 *     stored when the local copy is changed.
 *   - By passing the type in the parameter list separate from q,
 *     the compiler can eliminate the switch statement when the
 *     actual queue type is known at compile time at the point
 *     where the function is called.
 *   - These queues are lock free. The user and driver must protect
 *     changes to their end of the queues with locks if more than one
 *     CPU can be accessing them at the same time.
 */
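
/* Worked example of the sizing rules above (illustrative numbers, not
 * from the original source): a request for 10 elements of 24 bytes
 * each would be rounded up to 16 slots of 32 bytes, giving
 * log2_elem_size = 5, index_mask = 15 and a usable capacity of 15
 * elements, since one slot must stay empty to distinguish a full
 * queue from an empty one.
 */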
/**
 * enum queue_type - type of queue
 * @QUEUE_TYPE_TO_CLIENT:	Queue is written by rxe driver and
 *				read by client which may be a user space
 *				application or a kernel ulp.
 *				Used by rxe internals only.
 * @QUEUE_TYPE_FROM_CLIENT:	Queue is written by client and
 *				read by rxe driver.
 *				Used by rxe internals only.
 * @QUEUE_TYPE_FROM_ULP:	Queue is written by kernel ulp and
 *				read by rxe driver.
 *				Used by kernel verbs APIs only on
 *				behalf of ulps.
 * @QUEUE_TYPE_TO_ULP:		Queue is written by rxe driver and
 *				read by kernel ulp.
 *				Used by kernel verbs APIs only on
 *				behalf of ulps.
 */
enum queue_type {
	QUEUE_TYPE_TO_CLIENT,
	QUEUE_TYPE_FROM_CLIENT,
	QUEUE_TYPE_FROM_ULP,
	QUEUE_TYPE_TO_ULP,
};
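
/* Ownership summary, inferred from the accessors below: the owner of
 * an index reads and advances its own copy directly, while the
 * non-owner must read it with smp_load_acquire() to pair with the
 * owner's smp_store_release().
 *
 *	type			producer index	consumer index
 *	QUEUE_TYPE_TO_CLIENT	rxe driver	client
 *	QUEUE_TYPE_FROM_CLIENT	client		rxe driver
 *	QUEUE_TYPE_TO_ULP	rxe driver	kernel ulp
 *	QUEUE_TYPE_FROM_ULP	kernel ulp	rxe driver
 */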
struct rxe_queue_buf;

struct rxe_queue {
	struct rxe_dev		*rxe;
	struct rxe_queue_buf	*buf;
	struct rxe_mmap_info	*ip;
	size_t			buf_size;
	size_t			elem_size;
	unsigned int		log2_elem_size;
	u32			index_mask;
	enum queue_type		type;
	/* private copy of index for shared queues between
	 * driver and clients. Driver reads and writes
	 * this copy and then replicates to rxe_queue_buf
	 * for read access by clients.
	 */
	u32			index;
};
int do_mmap_info(struct rxe_dev *rxe, struct mminfo __user *outbuf,
		 struct ib_udata *udata, struct rxe_queue_buf *buf,
		 size_t buf_size, struct rxe_mmap_info **ip_p);

void rxe_queue_reset(struct rxe_queue *q);

struct rxe_queue *rxe_queue_init(struct rxe_dev *rxe, int *num_elem,
				 unsigned int elem_size, enum queue_type type);

int rxe_queue_resize(struct rxe_queue *q, unsigned int *num_elem_p,
		     unsigned int elem_size, struct ib_udata *udata,
		     struct mminfo __user *outbuf,
		     spinlock_t *producer_lock, spinlock_t *consumer_lock);

void rxe_queue_cleanup(struct rxe_queue *queue);

static inline u32 queue_next_index(struct rxe_queue *q, int index)
{
	return (index + 1) & q->index_mask;
}
static inline u32 queue_get_producer(const struct rxe_queue *q,
				     enum queue_type type)
{
	u32 prod;

	switch (type) {
	case QUEUE_TYPE_FROM_CLIENT:
		/* used by rxe, client owns the index */
		prod = smp_load_acquire(&q->buf->producer_index);
		break;
	case QUEUE_TYPE_TO_CLIENT:
		/* used by rxe which owns the index */
		prod = q->index;
		break;
	case QUEUE_TYPE_FROM_ULP:
		/* used by ulp which owns the index */
		prod = q->buf->producer_index;
		break;
	case QUEUE_TYPE_TO_ULP:
		/* used by ulp, rxe owns the index */
		prod = smp_load_acquire(&q->buf->producer_index);
		break;
	}

	return prod;
}

static inline u32 queue_get_consumer(const struct rxe_queue *q,
				     enum queue_type type)
{
	u32 cons;

	switch (type) {
	case QUEUE_TYPE_FROM_CLIENT:
		/* used by rxe which owns the index */
		cons = q->index;
		break;
	case QUEUE_TYPE_TO_CLIENT:
		/* used by rxe, client owns the index */
		cons = smp_load_acquire(&q->buf->consumer_index);
		break;
	case QUEUE_TYPE_FROM_ULP:
		/* used by ulp, rxe owns the index */
		cons = smp_load_acquire(&q->buf->consumer_index);
		break;
	case QUEUE_TYPE_TO_ULP:
		/* used by ulp which owns the index */
		cons = q->buf->consumer_index;
		break;
	}

	return cons;
}
static inline int queue_empty(struct rxe_queue *q, enum queue_type type)
{
	u32 prod = queue_get_producer(q, type);
	u32 cons = queue_get_consumer(q, type);

	return ((prod - cons) & q->index_mask) == 0;
}

static inline int queue_full(struct rxe_queue *q, enum queue_type type)
{
	u32 prod = queue_get_producer(q, type);
	u32 cons = queue_get_consumer(q, type);

	return ((prod + 1 - cons) & q->index_mask) == 0;
}

static inline u32 queue_count(const struct rxe_queue *q,
			      enum queue_type type)
{
	u32 prod = queue_get_producer(q, type);
	u32 cons = queue_get_consumer(q, type);

	return (prod - cons) & q->index_mask;
}
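
/* Example of the wrap-around arithmetic above (illustrative values):
 * with index_mask = 15, prod = 1 and cons = 14, queue_count() returns
 * (1 - 14) & 15 = 3, i.e. slots 14, 15 and 0 are occupied, and
 * queue_full() evaluates (1 + 1 - 14) & 15 = 4, which is nonzero, so
 * the queue is not full.
 */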
static inline void queue_advance_producer(struct rxe_queue *q,
					  enum queue_type type)
{
	u32 prod;

	switch (type) {
	case QUEUE_TYPE_FROM_CLIENT:
		/* used by rxe, client owns the index */
		if (WARN_ON(1))
			pr_warn("%s: attempt to advance client index\n",
				__func__);
		break;
	case QUEUE_TYPE_TO_CLIENT:
		/* used by rxe which owns the index */
		prod = q->index;
		prod = (prod + 1) & q->index_mask;
		q->index = prod;
		/* release so client can read it safely */
		smp_store_release(&q->buf->producer_index, prod);
		break;
	case QUEUE_TYPE_FROM_ULP:
		/* used by ulp which owns the index */
		prod = q->buf->producer_index;
		prod = (prod + 1) & q->index_mask;
		/* release so rxe can read it safely */
		smp_store_release(&q->buf->producer_index, prod);
		break;
	case QUEUE_TYPE_TO_ULP:
		/* used by ulp, rxe owns the index */
		if (WARN_ON(1))
			pr_warn("%s: attempt to advance driver index\n",
				__func__);
		break;
	}
}

static inline void queue_advance_consumer(struct rxe_queue *q,
					  enum queue_type type)
{
	u32 cons;

	switch (type) {
	case QUEUE_TYPE_FROM_CLIENT:
		/* used by rxe which owns the index */
		cons = (q->index + 1) & q->index_mask;
		q->index = cons;
		/* release so client can read it safely */
		smp_store_release(&q->buf->consumer_index, cons);
		break;
	case QUEUE_TYPE_TO_CLIENT:
		/* used by rxe, client owns the index */
		if (WARN_ON(1))
			pr_warn("%s: attempt to advance client index\n",
				__func__);
		break;
	case QUEUE_TYPE_FROM_ULP:
		/* used by ulp, rxe owns the index */
		if (WARN_ON(1))
			pr_warn("%s: attempt to advance driver index\n",
				__func__);
		break;
	case QUEUE_TYPE_TO_ULP:
		/* used by ulp which owns the index */
		cons = q->buf->consumer_index;
		cons = (cons + 1) & q->index_mask;
		/* release so rxe can read it safely */
		smp_store_release(&q->buf->consumer_index, cons);
		break;
	}
}
static inline void *queue_producer_addr(struct rxe_queue *q,
					enum queue_type type)
{
	u32 prod = queue_get_producer(q, type);

	return q->buf->data + (prod << q->log2_elem_size);
}

static inline void *queue_consumer_addr(struct rxe_queue *q,
					enum queue_type type)
{
	u32 cons = queue_get_consumer(q, type);

	return q->buf->data + (cons << q->log2_elem_size);
}
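
/* A minimal producer-side sketch (hypothetical caller, not part of
 * this header), assuming a kernel ulp posting an element to a
 * QUEUE_TYPE_FROM_ULP queue under the producer lock; new_elem is a
 * hypothetical local of the queue's element type:
 *
 *	if (queue_full(q, QUEUE_TYPE_FROM_ULP))
 *		return -ENOMEM;
 *	elem = queue_producer_addr(q, QUEUE_TYPE_FROM_ULP);
 *	memcpy(elem, &new_elem, sizeof(new_elem));
 *	queue_advance_producer(q, QUEUE_TYPE_FROM_ULP);
 *
 * The element must be fully written before queue_advance_producer()
 * runs, so that its smp_store_release() publishes a complete entry.
 */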
static inline void *queue_addr_from_index(struct rxe_queue *q, u32 index)
{
	return q->buf->data + ((index & q->index_mask)
				<< q->log2_elem_size);
}

static inline u32 queue_index_from_addr(const struct rxe_queue *q,
					const void *addr)
{
	return (((u8 *)addr - q->buf->data) >> q->log2_elem_size)
			& q->index_mask;
}
static inline void *queue_head(struct rxe_queue *q, enum queue_type type)
{
	return queue_empty(q, type) ? NULL : queue_consumer_addr(q, type);
}
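
/* A matching consumer-side sketch (hypothetical caller, not part of
 * this header), assuming the rxe driver draining a
 * QUEUE_TYPE_FROM_CLIENT queue under the consumer lock; process_elem()
 * is a hypothetical helper:
 *
 *	while ((elem = queue_head(q, QUEUE_TYPE_FROM_CLIENT))) {
 *		process_elem(elem);
 *		queue_advance_consumer(q, QUEUE_TYPE_FROM_CLIENT);
 *	}
 *
 * queue_head() returns NULL once the queue is empty, and each element
 * is consumed before queue_advance_consumer() releases its slot back
 * to the producer.
 */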

#endif /* RXE_QUEUE_H */