// SPDX-License-Identifier: GPL-2.0-only
/*
 * copy offload engine support
 *
 * Copyright © 2006, Intel Corporation.
 *
 * Dan Williams <[email protected]>
 *
 * with architecture considerations by:
 * Neil Brown <[email protected]>
 * Jeff Garzik <[email protected]>
 */
#include <linux/kernel.h>
#include <linux/highmem.h>	/* kmap_atomic()/kunmap_atomic() for the sync fallback */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>	/* dma_map_page() and DMA direction constants */
#include <linux/async_tx.h>	/* async_tx_find_channel() and submit helpers */
/**
 * async_memcpy - attempt to copy memory with a dma engine.
 * @dest: destination page
 * @src: src page
 * @dest_offset: offset into 'dest' to start transaction
 * @src_offset: offset into 'src' to start transaction
 * @len: length in bytes
 * @submit: submission / completion modifiers
 *
 * honored flags: ASYNC_TX_ACK
 *
 * Returns the descriptor of the offloaded (async) copy, or NULL when the
 * copy was performed synchronously on the CPU (no capable channel, unmap
 * data unavailable, or the engine rejected the preparation).
 */
struct dma_async_tx_descriptor *
async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
	     unsigned int src_offset, size_t len,
	     struct async_submit_ctl *submit)
{
	/* find a channel advertising the DMA_MEMCPY capability */
	struct dma_chan *chan = async_tx_find_channel(submit, DMA_MEMCPY,
						      &dest, 1, &src, 1, len);
	struct dma_device *device = chan ? chan->device : NULL;
	struct dma_async_tx_descriptor *tx = NULL;
	struct dmaengine_unmap_data *unmap = NULL;

	if (device)
		/* GFP_NOWAIT: failure just drops us to the sync path below */
		unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOWAIT);

	/* offload only if the engine can handle these offsets and length */
	if (unmap && is_dma_copy_aligned(device, src_offset, dest_offset, len)) {
		unsigned long dma_prep_flags = 0;

		/* request a completion interrupt only when there is a callback */
		if (submit->cb_fn)
			dma_prep_flags |= DMA_PREP_INTERRUPT;
		if (submit->flags & ASYNC_TX_FENCE)
			dma_prep_flags |= DMA_PREP_FENCE;

		/*
		 * Map src for device reads and dest for device writes.
		 * NOTE(review): neither mapping is checked with
		 * dma_mapping_error() before use — confirm against the
		 * DMA-API expectations for this legacy path.
		 */
		unmap->to_cnt = 1;
		unmap->addr[0] = dma_map_page(device->dev, src, src_offset, len,
					      DMA_TO_DEVICE);
		unmap->from_cnt = 1;
		unmap->addr[1] = dma_map_page(device->dev, dest, dest_offset, len,
					      DMA_FROM_DEVICE);
		unmap->len = len;

		tx = device->device_prep_dma_memcpy(chan, unmap->addr[1],
						    unmap->addr[0], len,
						    dma_prep_flags);
	}

	if (tx) {
		pr_debug("%s: (async) len: %zu\n", __func__, len);

		/* tie the unmap data to the descriptor's completion */
		dma_set_unmap(tx, unmap);
		async_tx_submit(chan, tx, submit);
	} else {
		void *dest_buf, *src_buf;

		pr_debug("%s: (sync) len: %zu\n", __func__, len);

		/* wait for any prerequisite operations */
		async_tx_quiesce(&submit->depend_tx);

		dest_buf = kmap_atomic(dest) + dest_offset;
		src_buf = kmap_atomic(src) + src_offset;

		memcpy(dest_buf, src_buf, len);

		/* unmap in reverse order of the kmap_atomic() calls above */
		kunmap_atomic(src_buf);
		kunmap_atomic(dest_buf);

		/* run the completion callback synchronously */
		async_tx_sync_epilog(submit);
	}

	/* drop our reference; dmaengine_unmap_put() tolerates NULL */
	dmaengine_unmap_put(unmap);

	return tx;
}
/* exported for GPL-compatible users of the async_tx copy-offload API */
EXPORT_SYMBOL_GPL(async_memcpy);

MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("asynchronous memcpy api");
MODULE_LICENSE("GPL");