// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2018-2020 Broadcom.
 */

#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/pgtable.h>
#include <linux/vmalloc.h>
#include <asm/page.h>
#include <asm/unaligned.h>

#include <uapi/linux/misc/bcm_vk.h>

#include "bcm_vk.h"
#include "bcm_vk_msg.h"
#include "bcm_vk_sg.h"

/*
 * Valkyrie has a hardware limitation of 16M transfer size.
 * So limit the SGL chunks to 16M.
 */
#define BCM_VK_MAX_SGL_CHUNK SZ_16M

static int bcm_vk_dma_alloc(struct device *dev,
			    struct bcm_vk_dma *dma,
			    int dir,
			    struct _vk_data *vkdata);
static int bcm_vk_dma_free(struct device *dev, struct bcm_vk_dma *dma);

/* Uncomment to dump SGLIST */
/* #define BCM_VK_DUMP_SGLIST */
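
/*
 * Layout of the scatter-gather list built by bcm_vk_dma_alloc() in the
 * coherent buffer dma->sglist (indices are u32 words):
 *
 *   [SGLIST_NUM_SG]        number of _vk_data entries that follow
 *   [SGLIST_TOTALSIZE]     total transfer size in bytes
 *   [SGLIST_VKDATA_START]  packed _vk_data entries, each holding a bus
 *                          address and a chunk size of at most
 *                          BCM_VK_MAX_SGL_CHUNK
 */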

static int bcm_vk_dma_alloc(struct device *dev,
			    struct bcm_vk_dma *dma,
			    int direction,
			    struct _vk_data *vkdata)
{
	dma_addr_t addr, sg_addr;
	int err;
	int i;
	int offset;
	u32 size;
	u32 remaining_size;
	u32 transfer_size;
	u64 data;
	unsigned long first, last;
	struct _vk_data *sgdata;

	/* Get 64-bit user address */
	data = get_unaligned(&vkdata->address);

	/* offset into first page */
	offset = offset_in_page(data);

	/* Calculate number of pages */
	first = (data & PAGE_MASK) >> PAGE_SHIFT;
	last = ((data + vkdata->size - 1) & PAGE_MASK) >> PAGE_SHIFT;
	dma->nr_pages = last - first + 1;
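	/*
	 * Example (assuming 4 KiB pages): a 9 KiB buffer that starts 1 KiB
	 * into its first page ends at byte offset 1 KiB + 9 KiB - 1, which
	 * falls in the third page, so nr_pages = 3 (3 KiB + 4 KiB + 2 KiB).
	 */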

	/* Allocate DMA pages */
	dma->pages = kmalloc_array(dma->nr_pages,
				   sizeof(struct page *),
				   GFP_KERNEL);
	if (!dma->pages)
		return -ENOMEM;

	dev_dbg(dev, "Alloc DMA Pages [0x%llx+0x%x => %d pages]\n",
		data, vkdata->size, dma->nr_pages);

	dma->direction = direction;

	/* Get user pages into memory */
	err = get_user_pages_fast(data & PAGE_MASK,
				  dma->nr_pages,
				  direction == DMA_FROM_DEVICE,
				  dma->pages);
	if (err != dma->nr_pages) {
		dma->nr_pages = (err >= 0) ? err : 0;
		dev_err(dev, "get_user_pages_fast, err=%d [%d]\n",
			err, dma->nr_pages);
		return err < 0 ? err : -EINVAL;
	}
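
	/*
	 * All nr_pages user pages are now pinned; they stay pinned until
	 * bcm_vk_dma_free() drops the references with put_page().
	 */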

	/* Max size of sg list is 1 per mapped page + fields at start */
	dma->sglen = (dma->nr_pages * sizeof(*sgdata)) +
		     (sizeof(u32) * SGLIST_VKDATA_START);

	/* Allocate sglist */
	dma->sglist = dma_alloc_coherent(dev,
					 dma->sglen,
					 &dma->handle,
					 GFP_KERNEL);
	if (!dma->sglist)
		return -ENOMEM;

	dma->sglist[SGLIST_NUM_SG] = 0;
	dma->sglist[SGLIST_TOTALSIZE] = vkdata->size;
	remaining_size = vkdata->size;
	sgdata = (struct _vk_data *)&dma->sglist[SGLIST_VKDATA_START];
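
	/*
	 * dma->sglen is the worst case (one entry per pinned page); the
	 * number of entries actually written is tracked in
	 * dma->sglist[SGLIST_NUM_SG] as contiguous pages are coalesced below.
	 */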

	/* Map all pages into DMA */
	size = min_t(size_t, PAGE_SIZE - offset, remaining_size);
	remaining_size -= size;
	sg_addr = dma_map_page(dev,
			       dma->pages[0],
			       offset,
			       size,
			       dma->direction);
	transfer_size = size;
	if (unlikely(dma_mapping_error(dev, sg_addr))) {
		__free_page(dma->pages[0]);
		return -EIO;
	}
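
	/*
	 * The first page is mapped from 'offset' to the end of the page (or
	 * less for a short transfer); sg_addr/transfer_size seed the entry
	 * that the loop below extends while pages remain bus-contiguous.
	 */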

	for (i = 1; i < dma->nr_pages; i++) {
		size = min_t(size_t, PAGE_SIZE, remaining_size);
		remaining_size -= size;
		addr = dma_map_page(dev,
				    dma->pages[i],
				    0,
				    size,
				    dma->direction);
		if (unlikely(dma_mapping_error(dev, addr))) {
			__free_page(dma->pages[i]);
			return -EIO;
		}

		/*
		 * Compress the SG list entry when pages are contiguous
		 * and the combined transfer size is at most
		 * BCM_VK_MAX_SGL_CHUNK.
		 */
		if ((addr == (sg_addr + transfer_size)) &&
		    ((transfer_size + size) <= BCM_VK_MAX_SGL_CHUNK)) {
			/* pages are contiguous, add to same sg entry */
			transfer_size += size;
		} else {
			/* pages are not contiguous, write sg entry */
			sgdata->size = transfer_size;
			put_unaligned(sg_addr, (u64 *)&sgdata->address);
			dma->sglist[SGLIST_NUM_SG]++;

			/* start new sg entry */
			sgdata++;
			sg_addr = addr;
			transfer_size = size;
		}
	}
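
	/*
	 * The loop only emits an entry when contiguity breaks, so the chunk
	 * still accumulated in sg_addr/transfer_size must be flushed here.
	 */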
	/* Write last sg list entry */
	sgdata->size = transfer_size;
	put_unaligned(sg_addr, (u64 *)&sgdata->address);
	dma->sglist[SGLIST_NUM_SG]++;

	/* Update pointers and size field to point to sglist */
	put_unaligned((u64)dma->handle, &vkdata->address);
	vkdata->size = (dma->sglist[SGLIST_NUM_SG] * sizeof(*sgdata)) +
		       (sizeof(u32) * SGLIST_VKDATA_START);
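
	/*
	 * vkdata now describes the sglist itself (bus address via dma->handle
	 * plus its used length in bytes) rather than the original user
	 * buffer; presumably the message layer hands this descriptor to the
	 * card in place of the raw user address.
	 */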

#ifdef BCM_VK_DUMP_SGLIST
	dev_dbg(dev,
		"sgl 0x%llx handle 0x%llx, sglen: 0x%x sgsize: 0x%x\n",
		(u64)dma->sglist,
		dma->handle,
		dma->sglen,
		vkdata->size);
	for (i = 0; i < vkdata->size / sizeof(u32); i++)
		dev_dbg(dev, "i:0x%x 0x%x\n", i, dma->sglist[i]);
#endif

	return 0;
}

int bcm_vk_sg_alloc(struct device *dev,
		    struct bcm_vk_dma *dma,
		    int dir,
		    struct _vk_data *vkdata,
		    int num)
{
	int i;
	int rc = -EINVAL;

	/* Convert user addresses to DMA SG List */
	for (i = 0; i < num; i++) {
		if (vkdata[i].size && vkdata[i].address) {
			/*
			 * If both size and address are non-zero
			 * then DMA alloc.
			 */
			rc = bcm_vk_dma_alloc(dev,
					      &dma[i],
					      dir,
					      &vkdata[i]);
		} else if (vkdata[i].size ||
			   vkdata[i].address) {
			/*
			 * If only one of size and address is zero
			 * there is a problem.
			 */
			dev_err(dev,
				"Invalid vkdata %x 0x%x 0x%llx\n",
				i, vkdata[i].size, vkdata[i].address);
			rc = -EINVAL;
		} else {
			/*
			 * If size and address are both zero
			 * don't convert, but return success.
			 */
			rc = 0;
		}

		if (rc)
			goto fail_alloc;
	}
	return rc;
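
	/*
	 * On failure, undo only the entries converted so far: walk back and
	 * free each dma[] slot that already has an sglist attached.
	 */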
fail_alloc:
	while (i > 0) {
		i--;
		if (dma[i].sglist)
			bcm_vk_dma_free(dev, &dma[i]);
	}
	return rc;
}

static int bcm_vk_dma_free(struct device *dev, struct bcm_vk_dma *dma)
{
	dma_addr_t addr;
	int i;
	int num_sg;
	u32 size;
	struct _vk_data *vkdata;
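
	/*
	 * Tear down in the reverse order of bcm_vk_dma_alloc(): unmap each
	 * sg entry, free the coherent sglist, unpin the user pages, then
	 * free the page pointer array.
	 */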
	dev_dbg(dev, "free sglist=%p sglen=0x%x\n", dma->sglist, dma->sglen);

	/* Unmap all pages in the sglist */
	num_sg = dma->sglist[SGLIST_NUM_SG];
	vkdata = (struct _vk_data *)&dma->sglist[SGLIST_VKDATA_START];
	for (i = 0; i < num_sg; i++) {
		size = vkdata[i].size;
		addr = get_unaligned(&vkdata[i].address);
		dma_unmap_page(dev, addr, size, dma->direction);
	}

	/* Free allocated sglist */
	dma_free_coherent(dev, dma->sglen, dma->sglist, dma->handle);

	/* Release lock on all pages */
	for (i = 0; i < dma->nr_pages; i++)
		put_page(dma->pages[i]);

	/* Free allocated dma pages */
	kfree(dma->pages);

	dma->sglist = NULL;

	return 0;
}

int bcm_vk_sg_free(struct device *dev, struct bcm_vk_dma *dma, int num,
		   int *proc_cnt)
{
	int i;

	*proc_cnt = 0;
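	/*
	 * proc_cnt reports how many of the num dma[] entries actually had an
	 * sglist to release.
	 */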
	/* Unmap and free all pages and sglists */
	for (i = 0; i < num; i++) {
		if (dma[i].sglist) {
			bcm_vk_dma_free(dev, &dma[i]);
			*proc_cnt += 1;
		}
	}

	return 0;
}