/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
 * Copyright (c) 2007 Cisco Systems. All rights reserved.
 * Copyright (c) 2020 Intel Corporation. All rights reserved.
 */

#ifndef IB_UMEM_H
#define IB_UMEM_H

#include <linux/list.h>
#include <linux/scatterlist.h>
#include <linux/workqueue.h>
#include <rdma/ib_verbs.h>

struct ib_ucontext;
struct ib_umem_odp;
struct dma_buf_attach_ops;

struct ib_umem {
	struct ib_device *ibdev;
	struct mm_struct *owning_mm;
	u64 iova;
	size_t length;
	unsigned long address;
	u32 writable : 1;
	u32 is_odp : 1;
	u32 is_dmabuf : 1;
	struct work_struct work;
	struct sg_append_table sgt_append;
};
struct ib_umem_dmabuf {
	struct ib_umem umem;
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;
	struct scatterlist *first_sg;
	struct scatterlist *last_sg;
	unsigned long first_sg_offset;
	unsigned long last_sg_trim;
	void *private;
	u8 pinned : 1;
};

static inline struct ib_umem_dmabuf *to_ib_umem_dmabuf(struct ib_umem *umem)
{
	return container_of(umem, struct ib_umem_dmabuf, umem);
}
/* Returns the offset of the umem start relative to the first page. */
static inline int ib_umem_offset(struct ib_umem *umem)
{
	return umem->address & ~PAGE_MASK;
}

static inline unsigned long ib_umem_dma_offset(struct ib_umem *umem,
					       unsigned long pgsz)
{
	return (sg_dma_address(umem->sgt_append.sgt.sgl) + ib_umem_offset(umem)) &
	       (pgsz - 1);
}

static inline size_t ib_umem_num_dma_blocks(struct ib_umem *umem,
					    unsigned long pgsz)
{
	return (size_t)((ALIGN(umem->iova + umem->length, pgsz) -
			 ALIGN_DOWN(umem->iova, pgsz))) /
	       pgsz;
}

static inline size_t ib_umem_num_pages(struct ib_umem *umem)
{
	return ib_umem_num_dma_blocks(umem, PAGE_SIZE);
}
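
/*
 * Worked example (illustrative only, values are hypothetical): with
 * iova = 0x1200, length = 0x3000 and pgsz = 0x1000, the covered range runs
 * from ALIGN_DOWN(0x1200, 0x1000) = 0x1000 up to ALIGN(0x4200, 0x1000) =
 * 0x5000, so ib_umem_num_dma_blocks() returns (0x5000 - 0x1000) / 0x1000 = 4.
 * Although the length is only three pages' worth of bytes, the misaligned
 * start means four page-sized DMA blocks are required to cover it.
 */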
static inline void __rdma_umem_block_iter_start(struct ib_block_iter *biter,
						struct ib_umem *umem,
						unsigned long pgsz)
{
	__rdma_block_iter_start(biter, umem->sgt_append.sgt.sgl,
				umem->sgt_append.sgt.nents, pgsz);
	biter->__sg_advance = ib_umem_offset(umem) & ~(pgsz - 1);
	biter->__sg_numblocks = ib_umem_num_dma_blocks(umem, pgsz);
}

static inline bool __rdma_umem_block_iter_next(struct ib_block_iter *biter)
{
	return __rdma_block_iter_next(biter) && biter->__sg_numblocks--;
}

/**
 * rdma_umem_for_each_dma_block - iterate over contiguous DMA blocks of the umem
 * @umem: umem to iterate over
 * @biter: block iterator holding the iteration state
 * @pgsz: Page size to split the list into
 *
 * pgsz must be <= PAGE_SIZE or computed by ib_umem_find_best_pgsz(). The
 * returned DMA blocks will be aligned to pgsz and span the range:
 * ALIGN_DOWN(umem->address, pgsz) to ALIGN(umem->address + umem->length, pgsz)
 *
 * Performs exactly ib_umem_num_dma_blocks() iterations.
 */
#define rdma_umem_for_each_dma_block(umem, biter, pgsz)                        \
	for (__rdma_umem_block_iter_start(biter, umem, pgsz);                  \
	     __rdma_umem_block_iter_next(biter);)

#ifdef CONFIG_INFINIBAND_USER_MEM

struct ib_umem *ib_umem_get(struct ib_device *device, unsigned long addr,
			    size_t size, int access);
void ib_umem_release(struct ib_umem *umem);
int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset,
		      size_t length);
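
/*
 * Typical usage (a sketch; "start", "length" and the error handling are
 * driver-specific): pin the user VA range while registering an MR and drop
 * the pin when the MR is destroyed.
 *
 *	struct ib_umem *umem;
 *
 *	umem = ib_umem_get(ibdev, start, length, IB_ACCESS_LOCAL_WRITE);
 *	if (IS_ERR(umem))
 *		return PTR_ERR(umem);
 *	...
 *	ib_umem_release(umem);
 */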
unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
				     unsigned long pgsz_bitmap,
				     unsigned long virt);

/**
 * ib_umem_find_best_pgoff - Find best HW page size
 *
 * @umem: umem struct
 * @pgsz_bitmap: bitmap of HW supported page sizes
 * @pgoff_bitmask: Mask of bits that can be represented with an offset
 *
 * This is very similar to ib_umem_find_best_pgsz() except instead of accepting
 * an IOVA it accepts a bitmask specifying what address bits can be represented
 * with a page offset.
 *
 * For instance if the HW has multiple page sizes, requires 64 byte alignment,
 * and can support aligned offsets up to 4032 then pgoff_bitmask would be
 * "111111000000".
 *
 * If the pgoff_bitmask requires either alignment in the low bit or an
 * unavailable page size for the high bits, this function returns 0.
 */
static inline unsigned long ib_umem_find_best_pgoff(struct ib_umem *umem,
						    unsigned long pgsz_bitmap,
						    u64 pgoff_bitmask)
{
	struct scatterlist *sg = umem->sgt_append.sgt.sgl;
	dma_addr_t dma_addr;

	dma_addr = sg_dma_address(sg) + (umem->address & ~PAGE_MASK);
	return ib_umem_find_best_pgsz(umem, pgsz_bitmap,
				      dma_addr & pgoff_bitmask);
}
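
/*
 * Example (a sketch): for the HW described in the comment above, which can
 * express a 64-byte aligned offset of up to 4032 bytes, pgoff_bitmask is
 * 0b111111000000 = 0xfc0:
 *
 *	unsigned long pgsz;
 *
 *	pgsz = ib_umem_find_best_pgoff(umem, pgsz_bitmap, 0xfc0);
 *	if (!pgsz)
 *		return -EINVAL;
 */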

struct ib_umem_dmabuf *ib_umem_dmabuf_get(struct ib_device *device,
					  unsigned long offset, size_t size,
					  int fd, int access,
					  const struct dma_buf_attach_ops *ops);
struct ib_umem_dmabuf *ib_umem_dmabuf_get_pinned(struct ib_device *device,
						 unsigned long offset,
						 size_t size, int fd,
						 int access);
int ib_umem_dmabuf_map_pages(struct ib_umem_dmabuf *umem_dmabuf);
void ib_umem_dmabuf_unmap_pages(struct ib_umem_dmabuf *umem_dmabuf);
void ib_umem_dmabuf_release(struct ib_umem_dmabuf *umem_dmabuf);
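
/*
 * Example of the pinned dma-buf flow (a sketch; "offset", "size" and "fd"
 * come from the caller). The embedded umem is released like any other umem:
 *
 *	struct ib_umem_dmabuf *umem_dmabuf;
 *
 *	umem_dmabuf = ib_umem_dmabuf_get_pinned(ibdev, offset, size, fd,
 *						IB_ACCESS_LOCAL_WRITE);
 *	if (IS_ERR(umem_dmabuf))
 *		return PTR_ERR(umem_dmabuf);
 *	...
 *	ib_umem_release(&umem_dmabuf->umem);
 */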

#else /* CONFIG_INFINIBAND_USER_MEM */

#include <linux/err.h>

static inline struct ib_umem *ib_umem_get(struct ib_device *device,
					  unsigned long addr, size_t size,
					  int access)
{
	return ERR_PTR(-EOPNOTSUPP);
}
static inline void ib_umem_release(struct ib_umem *umem) { }
static inline int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset,
				    size_t length)
{
	return -EOPNOTSUPP;
}
static inline unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
						   unsigned long pgsz_bitmap,
						   unsigned long virt)
{
	return 0;
}
static inline unsigned long ib_umem_find_best_pgoff(struct ib_umem *umem,
						    unsigned long pgsz_bitmap,
						    u64 pgoff_bitmask)
{
	return 0;
}
static inline
struct ib_umem_dmabuf *ib_umem_dmabuf_get(struct ib_device *device,
					  unsigned long offset,
					  size_t size, int fd,
					  int access,
					  const struct dma_buf_attach_ops *ops)
{
	return ERR_PTR(-EOPNOTSUPP);
}
static inline struct ib_umem_dmabuf *
ib_umem_dmabuf_get_pinned(struct ib_device *device, unsigned long offset,
			  size_t size, int fd, int access)
{
	return ERR_PTR(-EOPNOTSUPP);
}
static inline int ib_umem_dmabuf_map_pages(struct ib_umem_dmabuf *umem_dmabuf)
{
	return -EOPNOTSUPP;
}
static inline void ib_umem_dmabuf_unmap_pages(struct ib_umem_dmabuf *umem_dmabuf) { }
static inline void ib_umem_dmabuf_release(struct ib_umem_dmabuf *umem_dmabuf) { }

#endif /* CONFIG_INFINIBAND_USER_MEM */
#endif /* IB_UMEM_H */