  1. /* SPDX-License-Identifier: GPL-2.0 */
  2. #ifndef __LINUX_SWIOTLB_H
  3. #define __LINUX_SWIOTLB_H
#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/limits.h>
#include <linux/spinlock.h>
#include <linux/types.h>
  10. struct device;
  11. struct page;
  12. struct scatterlist;
  13. #define SWIOTLB_VERBOSE (1 << 0) /* verbose initialization */
  14. #define SWIOTLB_FORCE (1 << 1) /* force bounce buffering */
  15. #define SWIOTLB_ANY (1 << 2) /* allow any memory for the buffer */
  16. /*
  17. * Maximum allowable number of contiguous slabs to map,
  18. * must be a power of 2. What is the appropriate value ?
  19. * The complexity of {map,unmap}_single is linearly dependent on this value.
  20. */
  21. #define IO_TLB_SEGSIZE 128
  22. /*
  23. * log of the size of each IO TLB slab. The number of slabs is command line
  24. * controllable.
  25. */
  26. #define IO_TLB_SHIFT 11
  27. #define IO_TLB_SIZE (1 << IO_TLB_SHIFT)
  28. /* default to 64MB */
  29. #define IO_TLB_DEFAULT_SIZE (64UL<<20)
  30. unsigned long swiotlb_size_or_default(void);
  31. void __init swiotlb_init_remap(bool addressing_limit, unsigned int flags,
  32. int (*remap)(void *tlb, unsigned long nslabs));
  33. int swiotlb_init_late(size_t size, gfp_t gfp_mask,
  34. int (*remap)(void *tlb, unsigned long nslabs));
  35. extern void __init swiotlb_update_mem_attributes(void);
  36. phys_addr_t swiotlb_tbl_map_single(struct device *hwdev, phys_addr_t phys,
  37. size_t mapping_size, size_t alloc_size,
  38. unsigned int alloc_aligned_mask, enum dma_data_direction dir,
  39. unsigned long attrs);
  40. extern void swiotlb_tbl_unmap_single(struct device *hwdev,
  41. phys_addr_t tlb_addr,
  42. size_t mapping_size,
  43. enum dma_data_direction dir,
  44. unsigned long attrs);
  45. void swiotlb_sync_single_for_device(struct device *dev, phys_addr_t tlb_addr,
  46. size_t size, enum dma_data_direction dir);
  47. void swiotlb_sync_single_for_cpu(struct device *dev, phys_addr_t tlb_addr,
  48. size_t size, enum dma_data_direction dir);
  49. dma_addr_t swiotlb_map(struct device *dev, phys_addr_t phys,
  50. size_t size, enum dma_data_direction dir, unsigned long attrs);
  51. #ifdef CONFIG_SWIOTLB
/**
 * struct io_tlb_mem - IO TLB Memory Pool Descriptor
 *
 * @start:	The start address of the swiotlb memory pool. Used to do a quick
 *		range check to see if the memory was in fact allocated by this
 *		API.
 * @end:	The end address of the swiotlb memory pool. Used to do a quick
 *		range check to see if the memory was in fact allocated by this
 *		API.
 * @vaddr:	The vaddr of the swiotlb memory pool. The swiotlb memory pool
 *		may be remapped in the memory encrypted case and store virtual
 *		address for bounce buffer operation.
 * @nslabs:	The number of IO TLB blocks (in groups of 64) between @start and
 *		@end. For default swiotlb, this is command line adjustable via
 *		setup_io_tlb_npages.
 * @used:	The number of used IO TLB blocks.
 * @debugfs:	The dentry to debugfs.
 * @late_alloc:	%true if allocated using the page allocator
 * @force_bounce: %true if swiotlb bouncing is forced
 * @for_alloc:	%true if the pool is used for memory allocation
 * @nareas:	The area number in the pool.
 * @area_nslabs: The slot number in the area.
 * @areas:	Per-area bookkeeping array (struct io_tlb_area is opaque here;
 *		presumably @nareas entries — verify against swiotlb.c).
 * @slots:	Per-slot metadata array (struct io_tlb_slot is opaque here;
 *		the per-slot free list, original address and allocation size
 *		presumably live there — verify against swiotlb.c).
 */
struct io_tlb_mem {
	phys_addr_t start;
	phys_addr_t end;
	void *vaddr;
	unsigned long nslabs;
	unsigned long used;
	struct dentry *debugfs;
	bool late_alloc;
	bool force_bounce;
	bool for_alloc;
	unsigned int nareas;
	unsigned int area_nslabs;
	struct io_tlb_area *areas;
	struct io_tlb_slot *slots;
};
  94. extern struct io_tlb_mem io_tlb_default_mem;
  95. static inline bool is_swiotlb_buffer(struct device *dev, phys_addr_t paddr)
  96. {
  97. struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
  98. return mem && paddr >= mem->start && paddr < mem->end;
  99. }
  100. static inline bool is_swiotlb_force_bounce(struct device *dev)
  101. {
  102. struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
  103. return mem && mem->force_bounce;
  104. }
  105. void swiotlb_init(bool addressing_limited, unsigned int flags);
  106. void __init swiotlb_exit(void);
  107. unsigned int swiotlb_max_segment(void);
  108. size_t swiotlb_max_mapping_size(struct device *dev);
  109. bool is_swiotlb_active(struct device *dev);
  110. void __init swiotlb_adjust_size(unsigned long size);
  111. #ifdef CONFIG_SWIOTLB_NONLINEAR
  112. int swiotlb_late_init_with_tblpaddr(char *tlb,
  113. phys_addr_t tlb_paddr, unsigned long nslabs);
  114. #else
  115. static inline int swiotlb_late_init_with_tblpaddr(char *tlb,
  116. phys_addr_t tlb_paddr, unsigned long nslabs)
  117. {
  118. return -EINVAL;
  119. }
  120. #endif
  121. #else
  122. static inline void swiotlb_init(bool addressing_limited, unsigned int flags)
  123. {
  124. }
  125. static inline bool is_swiotlb_buffer(struct device *dev, phys_addr_t paddr)
  126. {
  127. return false;
  128. }
  129. static inline bool is_swiotlb_force_bounce(struct device *dev)
  130. {
  131. return false;
  132. }
  133. static inline void swiotlb_exit(void)
  134. {
  135. }
  136. static inline unsigned int swiotlb_max_segment(void)
  137. {
  138. return 0;
  139. }
  140. static inline size_t swiotlb_max_mapping_size(struct device *dev)
  141. {
  142. return SIZE_MAX;
  143. }
  144. static inline bool is_swiotlb_active(struct device *dev)
  145. {
  146. return false;
  147. }
  148. static inline void swiotlb_adjust_size(unsigned long size)
  149. {
  150. }
  151. static inline int swiotlb_late_init_with_tblpaddr(char *tlb,
  152. phys_addr_t tlb_paddr, unsigned long nslabs)
  153. {
  154. return -EINVAL;
  155. }
  156. #endif /* CONFIG_SWIOTLB */
  157. extern void swiotlb_print_info(void);
  158. #ifdef CONFIG_DMA_RESTRICTED_POOL
  159. struct page *swiotlb_alloc(struct device *dev, size_t size);
  160. bool swiotlb_free(struct device *dev, struct page *page, size_t size);
  161. static inline bool is_swiotlb_for_alloc(struct device *dev)
  162. {
  163. return dev->dma_io_tlb_mem->for_alloc;
  164. }
  165. #else
  166. static inline struct page *swiotlb_alloc(struct device *dev, size_t size)
  167. {
  168. return NULL;
  169. }
  170. static inline bool swiotlb_free(struct device *dev, struct page *page,
  171. size_t size)
  172. {
  173. return false;
  174. }
  175. static inline bool is_swiotlb_for_alloc(struct device *dev)
  176. {
  177. return false;
  178. }
  179. #endif /* CONFIG_DMA_RESTRICTED_POOL */
  180. extern phys_addr_t swiotlb_unencrypted_base;
  181. #endif /* __LINUX_SWIOTLB_H */