ivtv-udma.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
    User DMA

    Copyright (C) 2003-2004  Kevin Thayer <nufan_wfk at yahoo.com>
    Copyright (C) 2004  Chris Kennedy <[email protected]>
    Copyright (C) 2005-2007  Hans Verkuil <[email protected]>
 */

#include "ivtv-driver.h"
#include "ivtv-udma.h"
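
/*
 * ivtv_udma_get_page_info() translates a user buffer (start address plus
 * size) into page terms: the first and last page frame touched, the offset
 * into the first page, and how many bytes of the last page are used (the
 * "tail").  As a worked example with 4 KiB pages: a buffer at offset 0x100
 * of 5000 bytes uses 3840 bytes of its first page and 1160 of the second,
 * so page_count is 2 and tail is 1160.  For a single-page buffer the tail
 * is reduced by the starting offset so that offset + tail equals the size.
 */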
void ivtv_udma_get_page_info(struct ivtv_dma_page_info *dma_page, unsigned long first, unsigned long size)
{
	dma_page->uaddr = first & PAGE_MASK;
	dma_page->offset = first & ~PAGE_MASK;
	dma_page->tail = 1 + ((first+size-1) & ~PAGE_MASK);
	dma_page->first = (first & PAGE_MASK) >> PAGE_SHIFT;
	dma_page->last = ((first+size-1) & PAGE_MASK) >> PAGE_SHIFT;
	dma_page->page_count = dma_page->last - dma_page->first + 1;
	if (dma_page->page_count == 1)
		dma_page->tail -= dma_page->offset;
}
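
/*
 * Pages that live in highmem have no permanent kernel mapping, so
 * ivtv_udma_fill_sg_list() copies their data into a lowmem bounce page
 * (mapped briefly with kmap_atomic()) and points the scatterlist entry at
 * the bounce page instead.  Bounce pages are allocated lazily, kept in
 * dma->bouncemap[] for reuse, and only released in ivtv_udma_free().
 */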
int ivtv_udma_fill_sg_list(struct ivtv_user_dma *dma, struct ivtv_dma_page_info *dma_page, int map_offset)
{
	int i, offset;
	unsigned long flags;

	if (map_offset < 0)
		return map_offset;

	offset = dma_page->offset;

	/* Fill SG Array with new values */
	for (i = 0; i < dma_page->page_count; i++) {
		unsigned int len = (i == dma_page->page_count - 1) ?
			dma_page->tail : PAGE_SIZE - offset;

		if (PageHighMem(dma->map[map_offset])) {
			void *src;

			if (dma->bouncemap[map_offset] == NULL)
				dma->bouncemap[map_offset] = alloc_page(GFP_KERNEL);
			if (dma->bouncemap[map_offset] == NULL)
				return -1;
			local_irq_save(flags);
			src = kmap_atomic(dma->map[map_offset]) + offset;
			memcpy(page_address(dma->bouncemap[map_offset]) + offset, src, len);
			kunmap_atomic(src);
			local_irq_restore(flags);
			sg_set_page(&dma->SGlist[map_offset], dma->bouncemap[map_offset], len, offset);
		}
		else {
			sg_set_page(&dma->SGlist[map_offset], dma->map[map_offset], len, offset);
		}
		offset = 0;
		map_offset++;
	}
	return map_offset;
}
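
/*
 * ivtv_udma_fill_sg_array() mirrors the DMA-mapped scatterlist into the
 * SGarray descriptors (size, source bus address, destination offset, all
 * little-endian), presumably the layout the card's DMA engine consumes.
 * buffer_offset_2 and split appear to let the destination wrap to a second
 * on-card buffer once 'split' bytes have been described; the only caller
 * in this file passes split == -1, i.e. no wrap.
 */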
void ivtv_udma_fill_sg_array(struct ivtv_user_dma *dma, u32 buffer_offset, u32 buffer_offset_2, u32 split)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(dma->SGlist, sg, dma->SG_length, i) {
		dma->SGarray[i].size = cpu_to_le32(sg_dma_len(sg));
		dma->SGarray[i].src = cpu_to_le32(sg_dma_address(sg));
		dma->SGarray[i].dst = cpu_to_le32(buffer_offset);
		buffer_offset += sg_dma_len(sg);

		split -= sg_dma_len(sg);
		if (split == 0)
			buffer_offset = buffer_offset_2;
	}
}
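
/*
 * The SGarray descriptor block itself must be reachable by the device, so
 * it is streaming-mapped once here; the resulting SG_handle is what
 * ivtv_udma_start() writes to the card, and it stays mapped until
 * ivtv_udma_free().
 */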
/* User DMA Buffers */
void ivtv_udma_alloc(struct ivtv *itv)
{
	if (itv->udma.SG_handle == 0) {
		/* Map DMA Page Array Buffer */
		itv->udma.SG_handle = dma_map_single(&itv->pdev->dev,
						     itv->udma.SGarray,
						     sizeof(itv->udma.SGarray),
						     DMA_TO_DEVICE);
		ivtv_udma_sync_for_cpu(itv);
	}
}
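
/*
 * A rough, hypothetical sketch of how a caller (the real ones live outside
 * this file, e.g. the ivtvfb OSD code) is expected to drive a transfer;
 * the wait for completion is handled by the interrupt code, not here:
 *
 *	ret = ivtv_udma_setup(itv, card_dest_addr, user_ptr, size);
 *	if (ret <= 0)
 *		return ret ? ret : -EIO;
 *	ivtv_udma_prepare(itv);		// starts now, or marks it pending
 *	// ... wait for the DMA-complete interrupt; the IRQ path unmaps ...
 *
 * ivtv_udma_setup() pins the user pages, builds and maps the scatterlist
 * (bouncing any highmem pages), fills SGarray and sets the high bit of the
 * last descriptor's size so the card raises an interrupt when done.
 */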
int ivtv_udma_setup(struct ivtv *itv, unsigned long ivtv_dest_addr,
		    void __user *userbuf, int size_in_bytes)
{
	struct ivtv_dma_page_info user_dma;
	struct ivtv_user_dma *dma = &itv->udma;
	int err;

	IVTV_DEBUG_DMA("ivtv_udma_setup, dst: 0x%08x\n", (unsigned int)ivtv_dest_addr);

	/* Still in USE */
	if (dma->SG_length || dma->page_count) {
		IVTV_DEBUG_WARN("ivtv_udma_setup: SG_length %d page_count %d still full?\n",
				dma->SG_length, dma->page_count);
		return -EBUSY;
	}

	ivtv_udma_get_page_info(&user_dma, (unsigned long)userbuf, size_in_bytes);

	if (user_dma.page_count <= 0) {
		IVTV_DEBUG_WARN("ivtv_udma_setup: Error %d page_count from %d bytes %d offset\n",
				user_dma.page_count, size_in_bytes, user_dma.offset);
		return -EINVAL;
	}

	/* Pin user pages for DMA Xfer */
	err = pin_user_pages_unlocked(user_dma.uaddr, user_dma.page_count,
				      dma->map, FOLL_FORCE);

	if (user_dma.page_count != err) {
		IVTV_DEBUG_WARN("failed to map user pages, returned %d instead of %d\n",
				err, user_dma.page_count);
		if (err >= 0) {
			unpin_user_pages(dma->map, err);
			return -EINVAL;
		}
		return err;
	}

	dma->page_count = user_dma.page_count;

	/* Fill SG List with new values */
	if (ivtv_udma_fill_sg_list(dma, &user_dma, 0) < 0) {
		unpin_user_pages(dma->map, dma->page_count);
		dma->page_count = 0;
		return -ENOMEM;
	}

	/* Map SG List */
	dma->SG_length = dma_map_sg(&itv->pdev->dev, dma->SGlist,
				    dma->page_count, DMA_TO_DEVICE);
	if (!dma->SG_length) {
		/* Guard against a failed mapping: without this check the
		   interrupt-bit write below would index SGarray[-1]. */
		IVTV_DEBUG_WARN("%s: DMA map error, SG_length is 0\n", __func__);
		unpin_user_pages(dma->map, dma->page_count);
		dma->page_count = 0;
		return -EINVAL;
	}

	/* Fill SG Array with new values */
	ivtv_udma_fill_sg_array(dma, ivtv_dest_addr, 0, -1);

	/* Tag SG Array with Interrupt Bit */
	dma->SGarray[dma->SG_length - 1].size |= cpu_to_le32(0x80000000);

	ivtv_udma_sync_for_device(itv);

	return dma->page_count;
}
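
/*
 * Teardown mirrors setup in reverse: unmap the scatterlist, sync the
 * descriptor block back for the CPU, then unpin the user pages.  Since
 * SG_length and page_count double as "in use" markers, both are cleared
 * here, which is what lets ivtv_udma_setup() accept the next transfer.
 */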
void ivtv_udma_unmap(struct ivtv *itv)
{
	struct ivtv_user_dma *dma = &itv->udma;

	IVTV_DEBUG_INFO("ivtv_unmap_user_dma\n");

	/* Nothing to free */
	if (dma->page_count == 0)
		return;

	/* Unmap Scatterlist */
	if (dma->SG_length) {
		dma_unmap_sg(&itv->pdev->dev, dma->SGlist, dma->page_count,
			     DMA_TO_DEVICE);
		dma->SG_length = 0;
	}

	/* sync DMA */
	ivtv_udma_sync_for_cpu(itv);

	unpin_user_pages(dma->map, dma->page_count);
	dma->page_count = 0;
}
void ivtv_udma_free(struct ivtv *itv)
{
	int i;

	/* Unmap SG Array */
	if (itv->udma.SG_handle) {
		dma_unmap_single(&itv->pdev->dev, itv->udma.SG_handle,
				 sizeof(itv->udma.SGarray), DMA_TO_DEVICE);
	}

	/* Unmap Scatterlist */
	if (itv->udma.SG_length) {
		dma_unmap_sg(&itv->pdev->dev, itv->udma.SGlist,
			     itv->udma.page_count, DMA_TO_DEVICE);
	}

	for (i = 0; i < IVTV_DMA_SG_OSD_ENT; i++) {
		if (itv->udma.bouncemap[i])
			__free_page(itv->udma.bouncemap[i]);
	}
}
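
/*
 * ivtv_udma_start() hands the bus address of the descriptor block to the
 * card via IVTV_REG_DECDMAADDR and then, presumably, kicks the transfer by
 * setting bit 0 of IVTV_REG_DMAXFER.  The IVTV_F_I_DMA / IVTV_F_I_UDMA
 * flags appear to let the interrupt handler distinguish a user DMA from
 * the driver's other DMA activity.
 */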
void ivtv_udma_start(struct ivtv *itv)
{
	IVTV_DEBUG_DMA("start UDMA\n");
	write_reg(itv->udma.SG_handle, IVTV_REG_DECDMAADDR);
	write_reg_sync(read_reg(IVTV_REG_DMAXFER) | 0x01, IVTV_REG_DMAXFER);
	set_bit(IVTV_F_I_DMA, &itv->i_flags);
	set_bit(IVTV_F_I_UDMA, &itv->i_flags);
	clear_bit(IVTV_F_I_UDMA_PENDING, &itv->i_flags);
}
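
/*
 * ivtv_udma_prepare() serialises against other DMA under dma_reg_lock: if
 * nothing is in flight the user DMA starts immediately, otherwise it is
 * flagged as pending and is expected to be started from the interrupt path
 * once the current transfer completes.
 */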
void ivtv_udma_prepare(struct ivtv *itv)
{
	unsigned long flags;

	spin_lock_irqsave(&itv->dma_reg_lock, flags);
	if (!test_bit(IVTV_F_I_DMA, &itv->i_flags))
		ivtv_udma_start(itv);
	else
		set_bit(IVTV_F_I_UDMA_PENDING, &itv->i_flags);
	spin_unlock_irqrestore(&itv->dma_reg_lock, flags);
}