dma-ring.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright(c) 2016-2018 Intel Corporation. All rights reserved.
 */
#include <linux/dma-mapping.h>
#include <linux/mei.h>

#include "mei_dev.h"

/**
 * mei_dmam_dscr_alloc() - allocate a managed coherent buffer
 *     for the dma descriptor
 * @dev: mei_device
 * @dscr: dma descriptor
 *
 * Return:
 * * 0       - on success or zero allocation request
 * * -EINVAL - if size is not a power of 2
 * * -ENOMEM - if the allocation has failed
 */
static int mei_dmam_dscr_alloc(struct mei_device *dev,
			       struct mei_dma_dscr *dscr)
{
	if (!dscr->size)
		return 0;

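	/* ring read/write indices are wrapped by masking with (size - 1),
	 * so the backing buffer size must be a power of 2
	 */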
	if (WARN_ON(!is_power_of_2(dscr->size)))
		return -EINVAL;

	if (dscr->vaddr)
		return 0;

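	/* dmam_* allocations are device-managed (devres): the buffer is
	 * released automatically when the device is unbound
	 */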
	dscr->vaddr = dmam_alloc_coherent(dev->dev, dscr->size, &dscr->daddr,
					  GFP_KERNEL);
	if (!dscr->vaddr)
		return -ENOMEM;

	return 0;
}

/**
 * mei_dmam_dscr_free() - free a managed coherent buffer
 *     from the dma descriptor
 * @dev: mei_device
 * @dscr: dma descriptor
 */
static void mei_dmam_dscr_free(struct mei_device *dev,
			       struct mei_dma_dscr *dscr)
{
	if (!dscr->vaddr)
		return;

	dmam_free_coherent(dev->dev, dscr->size, dscr->vaddr, dscr->daddr);
	dscr->vaddr = NULL;
}

/**
 * mei_dmam_ring_free() - free dma ring buffers
 * @dev: mei device
 */
void mei_dmam_ring_free(struct mei_device *dev)
{
	int i;

	for (i = 0; i < DMA_DSCR_NUM; i++)
		mei_dmam_dscr_free(dev, &dev->dr_dscr[i]);
}

/**
 * mei_dmam_ring_alloc() - allocate dma ring buffers
 * @dev: mei device
 *
 * Return: -ENOMEM on allocation failure, 0 otherwise
 */
int mei_dmam_ring_alloc(struct mei_device *dev)
{
	int i;

	for (i = 0; i < DMA_DSCR_NUM; i++)
		if (mei_dmam_dscr_alloc(dev, &dev->dr_dscr[i]))
			goto err;

	return 0;

err:
	mei_dmam_ring_free(dev);
	return -ENOMEM;
}

/**
 * mei_dma_ring_is_allocated() - check if dma ring is allocated
 * @dev: mei device
 *
 * Return: true if dma ring is allocated
 */
bool mei_dma_ring_is_allocated(struct mei_device *dev)
{
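	/* allocation is all-or-nothing, so the host descriptor's vaddr
	 * stands in for the whole ring
	 */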
	return !!dev->dr_dscr[DMA_DSCR_HOST].vaddr;
}

static inline
struct hbm_dma_ring_ctrl *mei_dma_ring_ctrl(struct mei_device *dev)
{
	return (struct hbm_dma_ring_ctrl *)dev->dr_dscr[DMA_DSCR_CTRL].vaddr;
}

/**
 * mei_dma_ring_reset() - reset the dma control block
 * @dev: mei device
 */
void mei_dma_ring_reset(struct mei_device *dev)
{
	struct hbm_dma_ring_ctrl *ctrl = mei_dma_ring_ctrl(dev);

	if (!ctrl)
		return;

	memset(ctrl, 0, sizeof(*ctrl));
}

/**
 * mei_dma_copy_from() - copy from dma ring into buffer
 * @dev: mei device
 * @buf: data buffer
 * @offset: offset in slots.
 * @n: number of slots to copy.
 *
 * Return: number of bytes copied
 */
static size_t mei_dma_copy_from(struct mei_device *dev, unsigned char *buf,
				u32 offset, u32 n)
{
	unsigned char *dbuf = dev->dr_dscr[DMA_DSCR_DEVICE].vaddr;
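	/* a slot is a 4-byte dword, hence the << 2 slot-to-byte conversion */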
	size_t b_offset = offset << 2;
	size_t b_n = n << 2;

	memcpy(buf, dbuf + b_offset, b_n);

	return b_n;
}

/**
 * mei_dma_copy_to() - copy from a buffer to the dma ring
 * @dev: mei device
 * @buf: data buffer
 * @offset: offset in slots.
 * @n: number of slots to copy.
 *
 * Return: number of bytes copied
 */
static size_t mei_dma_copy_to(struct mei_device *dev, unsigned char *buf,
			      u32 offset, u32 n)
{
	unsigned char *hbuf = dev->dr_dscr[DMA_DSCR_HOST].vaddr;
	size_t b_offset = offset << 2;
	size_t b_n = n << 2;

	memcpy(hbuf + b_offset, buf, b_n);

	return b_n;
}

/**
 * mei_dma_ring_read() - read data from the ring
 * @dev: mei device
 * @buf: buffer to read into; may be NULL to drop the data.
 * @len: length to read.
 */
void mei_dma_ring_read(struct mei_device *dev, unsigned char *buf, u32 len)
{
	struct hbm_dma_ring_ctrl *ctrl = mei_dma_ring_ctrl(dev);
	u32 dbuf_depth;
	u32 rd_idx, rem, slots;

	if (WARN_ON(!ctrl))
		return;

	dev_dbg(dev->dev, "reading from dma %u bytes\n", len);

	if (!len)
		return;

	dbuf_depth = dev->dr_dscr[DMA_DSCR_DEVICE].size >> 2;
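	/* the depth is in slots and is a power of 2, so (depth - 1) is a
	 * valid wrap-around mask for the free-running read index
	 */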
	rd_idx = READ_ONCE(ctrl->dbuf_rd_idx) & (dbuf_depth - 1);
	slots = mei_data2slots(len);

	/* if buf is NULL we drop the packet by advancing the pointer. */
	if (!buf)
		goto out;

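	/* wrap-around: copy the tail up to the end of the device buffer
	 * first, then the remainder from the start
	 */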
	if (rd_idx + slots > dbuf_depth) {
		buf += mei_dma_copy_from(dev, buf, rd_idx, dbuf_depth - rd_idx);
		rem = slots - (dbuf_depth - rd_idx);
		rd_idx = 0;
	} else {
		rem = slots;
	}

	mei_dma_copy_from(dev, buf, rd_idx, rem);

out:
	WRITE_ONCE(ctrl->dbuf_rd_idx, ctrl->dbuf_rd_idx + slots);
}

static inline u32 mei_dma_ring_hbuf_depth(struct mei_device *dev)
{
	return dev->dr_dscr[DMA_DSCR_HOST].size >> 2;
}

/**
 * mei_dma_ring_empty_slots() - calculate number of empty slots in dma ring
 * @dev: mei_device
 *
 * Return: number of empty slots
 */
u32 mei_dma_ring_empty_slots(struct mei_device *dev)
{
	struct hbm_dma_ring_ctrl *ctrl = mei_dma_ring_ctrl(dev);
	u32 wr_idx, rd_idx, hbuf_depth, empty;

	if (!mei_dma_ring_is_allocated(dev))
		return 0;

	if (WARN_ON(!ctrl))
		return 0;

	/* easier to work in slots */
	hbuf_depth = mei_dma_ring_hbuf_depth(dev);
	rd_idx = READ_ONCE(ctrl->hbuf_rd_idx);
	wr_idx = READ_ONCE(ctrl->hbuf_wr_idx);
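	/* the indices are free-running counters; the distance from rd_idx
	 * to wr_idx gives the number of used slots
	 */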
	if (rd_idx > wr_idx)
		empty = rd_idx - wr_idx;
	else
		empty = hbuf_depth - (wr_idx - rd_idx);

	return empty;
}

/**
 * mei_dma_ring_write - write data to dma ring host buffer
 *
 * @dev: mei_device
 * @buf: data to be written
 * @len: data length
 */
void mei_dma_ring_write(struct mei_device *dev, unsigned char *buf, u32 len)
{
	struct hbm_dma_ring_ctrl *ctrl = mei_dma_ring_ctrl(dev);
	u32 hbuf_depth;
	u32 wr_idx, rem, slots;

	if (WARN_ON(!ctrl))
		return;

	dev_dbg(dev->dev, "writing to dma %u bytes\n", len);

	hbuf_depth = mei_dma_ring_hbuf_depth(dev);
	wr_idx = READ_ONCE(ctrl->hbuf_wr_idx) & (hbuf_depth - 1);
	slots = mei_data2slots(len);
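	/* wrap-around: fill the host buffer up to its end first, then
	 * write the remainder from the start
	 */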
	if (wr_idx + slots > hbuf_depth) {
		buf += mei_dma_copy_to(dev, buf, wr_idx, hbuf_depth - wr_idx);
		rem = slots - (hbuf_depth - wr_idx);
		wr_idx = 0;
	} else {
		rem = slots;
	}

	mei_dma_copy_to(dev, buf, wr_idx, rem);
	WRITE_ONCE(ctrl->hbuf_wr_idx, ctrl->hbuf_wr_idx + slots);
}
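
/*
 * A rough usage sketch (not part of the original file): the real callers
 * live in the MEI HBM layer; the function below and its call ordering are
 * an assumption for illustration only, hence it is compiled out.
 */
#if 0
static int example_dma_ring_use(struct mei_device *dev)
{
	unsigned char msg[16] = { 0 };

	if (mei_dmam_ring_alloc(dev))	/* ctrl/host/device buffers */
		return -ENOMEM;

	if (!mei_dma_ring_is_allocated(dev))
		return -ENODEV;

	mei_dma_ring_reset(dev);	/* zero the control block indices */

	/* write only when enough slots are free in the host buffer */
	if (mei_dma_ring_empty_slots(dev) >= mei_data2slots(sizeof(msg)))
		mei_dma_ring_write(dev, msg, sizeof(msg));

	/* a NULL buffer drops incoming data by advancing the read index */
	mei_dma_ring_read(dev, NULL, sizeof(msg));

	mei_dmam_ring_free(dev);
	return 0;
}
#endif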