/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2011 Google, Inc.
 * Copyright (C) 2019, 2020 Linaro Ltd.
 *
 * Portions based off of Andrew Davis' SRAM heap:
 * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
 * Andrew F. Davis <[email protected]>
 *
 * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#ifndef _QCOM_SG_OPS_H
#define _QCOM_SG_OPS_H

#include <linux/scatterlist.h>
#include <linux/dma-heap.h>
#include <linux/device.h>

#include "deferred-free-helper.h"
#include "qcom_dma_heap_priv.h"
/* Per-buffer state for heaps that back dma-bufs with a scatter-gather table. */
struct qcom_sg_buffer {
	struct dma_heap *heap;		/* heap this buffer was allocated from */
	struct list_head attachments;	/* list of struct dma_heap_attachment */
	struct mutex lock;		/* protects attachments and vmap state */
	unsigned long len;		/* buffer size in bytes */
	struct sg_table sg_table;	/* pages backing the buffer */
	int vmap_cnt;			/* kernel vmap reference count */
	void *vaddr;			/* kernel mapping while vmap_cnt > 0 */
	bool uncached;			/* CPU mappings are non-cacheable */
	struct mem_buf_vmperm *vmperm;	/* mem_buf VM permission state */
	struct deferred_freelist_item deferred_free;	/* deferred-free helper entry */
	void (*free)(struct qcom_sg_buffer *buffer);	/* heap-specific release callback */
};
/* Per-device attachment state, tracked on qcom_sg_buffer::attachments. */
struct dma_heap_attachment {
	struct device *dev;	/* attaching device */
	struct sg_table *table;	/* attachment-private copy of the buffer's sg_table */
	struct list_head list;	/* node on the buffer's attachments list */
	bool mapped;		/* table is currently DMA-mapped */
};
int qcom_sg_attach(struct dma_buf *dmabuf,
		   struct dma_buf_attachment *attachment);
void qcom_sg_detach(struct dma_buf *dmabuf,
		    struct dma_buf_attachment *attachment);
struct sg_table *qcom_sg_map_dma_buf(struct dma_buf_attachment *attachment,
				     enum dma_data_direction direction);
void qcom_sg_unmap_dma_buf(struct dma_buf_attachment *attachment,
			   struct sg_table *table,
			   enum dma_data_direction direction);
int qcom_sg_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
				     enum dma_data_direction direction);
int qcom_sg_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
				   enum dma_data_direction direction);
int qcom_sg_dma_buf_begin_cpu_access_partial(struct dma_buf *dmabuf,
					     enum dma_data_direction dir,
					     unsigned int offset,
					     unsigned int len);
int qcom_sg_dma_buf_end_cpu_access_partial(struct dma_buf *dmabuf,
					   enum dma_data_direction direction,
					   unsigned int offset,
					   unsigned int len);
int qcom_sg_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma);
void *qcom_sg_do_vmap(struct qcom_sg_buffer *buffer);
int qcom_sg_vmap(struct dma_buf *dmabuf, struct iosys_map *map);
void qcom_sg_vunmap(struct dma_buf *dmabuf, struct iosys_map *map);
void qcom_sg_release(struct dma_buf *dmabuf);
struct mem_buf_vmperm *qcom_sg_lookup_vmperm(struct dma_buf *dmabuf);

extern struct mem_buf_dma_buf_ops qcom_sg_buf_ops;

#endif /* _QCOM_SG_OPS_H */
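
For orientation, the callback prototypes above line up one-for-one with the core dma-buf interface. The sketch below is an assumption for illustration only, not the driver's actual export path (the driver hands these callbacks out through qcom_sg_buf_ops, a struct mem_buf_dma_buf_ops): it shows how the same functions could populate a standard upstream struct dma_buf_ops table in a hypothetical heap .c file. The *_partial helpers, qcom_sg_do_vmap(), and qcom_sg_lookup_vmperm() fall outside that core table and are omitted.

/*
 * Hypothetical usage sketch -- not part of the header above. Assumes the
 * standard upstream struct dma_buf_ops layout from <linux/dma-buf.h>.
 */
#include <linux/dma-buf.h>

#include "qcom_sg_ops.h"

static const struct dma_buf_ops example_qcom_sg_dma_buf_ops = {
	.attach = qcom_sg_attach,
	.detach = qcom_sg_detach,
	.map_dma_buf = qcom_sg_map_dma_buf,
	.unmap_dma_buf = qcom_sg_unmap_dma_buf,
	.begin_cpu_access = qcom_sg_dma_buf_begin_cpu_access,
	.end_cpu_access = qcom_sg_dma_buf_end_cpu_access,
	.mmap = qcom_sg_mmap,
	.vmap = qcom_sg_vmap,
	.vunmap = qcom_sg_vunmap,
	.release = qcom_sg_release,
};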