dma-mapping.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_DMA_MAPPING_H
#define _LINUX_DMA_MAPPING_H

#include <linux/sizes.h>
#include <linux/string.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/dma-direction.h>
#include <linux/scatterlist.h>
#include <linux/bug.h>
#include <linux/mem_encrypt.h>
/**
 * List of possible attributes associated with a DMA mapping. The semantics
 * of each attribute should be defined in Documentation/core-api/dma-attributes.rst.
 */

/*
 * DMA_ATTR_WEAK_ORDERING: Specifies that reads and writes to the mapping
 * may be weakly ordered, that is that reads and writes may pass each other.
 */
#define DMA_ATTR_WEAK_ORDERING (1UL << 1)

/*
 * DMA_ATTR_WRITE_COMBINE: Specifies that writes to the mapping may be
 * buffered to improve performance.
 */
#define DMA_ATTR_WRITE_COMBINE (1UL << 2)

/*
 * DMA_ATTR_NO_KERNEL_MAPPING: Lets the platform avoid creating a kernel
 * virtual mapping for the allocated buffer.
 */
#define DMA_ATTR_NO_KERNEL_MAPPING (1UL << 4)

/*
 * DMA_ATTR_SKIP_CPU_SYNC: Allows platform code to skip synchronization of
 * the CPU cache for the given buffer, assuming that it has already been
 * transferred to the 'device' domain.
 */
#define DMA_ATTR_SKIP_CPU_SYNC (1UL << 5)

/*
 * DMA_ATTR_FORCE_CONTIGUOUS: Forces contiguous allocation of the buffer
 * in physical memory.
 */
#define DMA_ATTR_FORCE_CONTIGUOUS (1UL << 6)

/*
 * DMA_ATTR_ALLOC_SINGLE_PAGES: This is a hint to the DMA-mapping subsystem
 * that it's probably not worth the time to try to allocate memory in a way
 * that gives better TLB efficiency.
 */
#define DMA_ATTR_ALLOC_SINGLE_PAGES (1UL << 7)

/*
 * DMA_ATTR_NO_WARN: This tells the DMA-mapping subsystem to suppress
 * allocation failure reports (similarly to __GFP_NOWARN).
 */
#define DMA_ATTR_NO_WARN (1UL << 8)
/*
 * DMA_ATTR_PRIVILEGED: used to indicate that the buffer is fully
 * accessible at an elevated privilege level (and ideally inaccessible or
 * at least read-only at lesser-privileged levels).
 */
#define DMA_ATTR_PRIVILEGED (1UL << 9)

/*
 * DMA_ATTR_SYS_CACHE: used to indicate that the buffer should be mapped with
 * the correct memory attributes so that it can be cached in the system or last
 * level cache. This is useful for buffers that are being mapped for devices
 * that are non-coherent, but can use the system cache.
 */
#define DMA_ATTR_SYS_CACHE (1UL << 10)

/*
 * DMA_ATTR_SYS_CACHE_NWA: used to indicate that the buffer should be mapped
 * with the correct memory attributes so that it can be cached in the system or
 * last level cache, with a no write allocate cache policy. This is useful for
 * buffers that are being mapped for devices that are non-coherent, but can use
 * the system cache.
 */
#define DMA_ATTR_SYS_CACHE_NWA (1UL << 11)
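
/*
 * Example (illustrative sketch, not part of the upstream header): attributes
 * are ORed together and passed to the *_attrs variants of the allocation and
 * mapping calls. The device pointer and buffer size below are hypothetical.
 *
 *	unsigned long attrs = DMA_ATTR_WRITE_COMBINE | DMA_ATTR_NO_WARN;
 *	dma_addr_t dma_handle;
 *	void *buf = dma_alloc_attrs(dev, SZ_64K, &dma_handle, GFP_KERNEL, attrs);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	dma_free_attrs(dev, SZ_64K, buf, dma_handle, attrs);
 */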

/*
 * A dma_addr_t can hold any valid DMA or bus address for the platform. It can
 * be given to a device to use as a DMA source or target. It is specific to a
 * given device and there may be a translation between the CPU physical address
 * space and the bus address space.
 *
 * DMA_MAPPING_ERROR is the magic error code if a mapping failed. It should not
 * be used directly in drivers, but checked for using dma_mapping_error()
 * instead.
 */
#define DMA_MAPPING_ERROR (~(dma_addr_t)0)

#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))

#ifdef CONFIG_DMA_API_DEBUG
void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
void debug_dma_map_single(struct device *dev, const void *addr,
        unsigned long len);
#else
static inline void debug_dma_mapping_error(struct device *dev,
        dma_addr_t dma_addr)
{
}
static inline void debug_dma_map_single(struct device *dev, const void *addr,
        unsigned long len)
{
}
#endif /* CONFIG_DMA_API_DEBUG */

#ifdef CONFIG_HAS_DMA

static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
        debug_dma_mapping_error(dev, dma_addr);

        if (unlikely(dma_addr == DMA_MAPPING_ERROR))
                return -ENOMEM;
        return 0;
}
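
/*
 * Example (illustrative sketch): every streaming mapping must be checked with
 * dma_mapping_error() before the address is handed to the device. The names
 * dev, skb and len are hypothetical driver state.
 *
 *	dma_addr_t dma = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 */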

dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page,
        size_t offset, size_t size, enum dma_data_direction dir,
        unsigned long attrs);
void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, size_t size,
        enum dma_data_direction dir, unsigned long attrs);
unsigned int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
        int nents, enum dma_data_direction dir, unsigned long attrs);
void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
        int nents, enum dma_data_direction dir,
        unsigned long attrs);
int dma_map_sgtable(struct device *dev, struct sg_table *sgt,
        enum dma_data_direction dir, unsigned long attrs);
dma_addr_t dma_map_resource(struct device *dev, phys_addr_t phys_addr,
        size_t size, enum dma_data_direction dir, unsigned long attrs);
void dma_unmap_resource(struct device *dev, dma_addr_t addr, size_t size,
        enum dma_data_direction dir, unsigned long attrs);
void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
        enum dma_data_direction dir);
void dma_sync_single_for_device(struct device *dev, dma_addr_t addr,
        size_t size, enum dma_data_direction dir);
void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
        int nelems, enum dma_data_direction dir);
void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
        int nelems, enum dma_data_direction dir);
void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
        gfp_t flag, unsigned long attrs);
void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
        dma_addr_t dma_handle, unsigned long attrs);
void *dmam_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
        gfp_t gfp, unsigned long attrs);
void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
        dma_addr_t dma_handle);
int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
        void *cpu_addr, dma_addr_t dma_addr, size_t size,
        unsigned long attrs);
int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
        void *cpu_addr, dma_addr_t dma_addr, size_t size,
        unsigned long attrs);
bool dma_can_mmap(struct device *dev);
bool dma_pci_p2pdma_supported(struct device *dev);
int dma_set_mask(struct device *dev, u64 mask);
int dma_set_coherent_mask(struct device *dev, u64 mask);
u64 dma_get_required_mask(struct device *dev);
size_t dma_max_mapping_size(struct device *dev);
size_t dma_opt_mapping_size(struct device *dev);
bool dma_need_sync(struct device *dev, dma_addr_t dma_addr);
unsigned long dma_get_merge_boundary(struct device *dev);
struct sg_table *dma_alloc_noncontiguous(struct device *dev, size_t size,
        enum dma_data_direction dir, gfp_t gfp, unsigned long attrs);
void dma_free_noncontiguous(struct device *dev, size_t size,
        struct sg_table *sgt, enum dma_data_direction dir);
void *dma_vmap_noncontiguous(struct device *dev, size_t size,
        struct sg_table *sgt);
void dma_vunmap_noncontiguous(struct device *dev, void *vaddr);
int dma_mmap_noncontiguous(struct device *dev, struct vm_area_struct *vma,
        size_t size, struct sg_table *sgt);
#else /* CONFIG_HAS_DMA */
static inline dma_addr_t dma_map_page_attrs(struct device *dev,
        struct page *page, size_t offset, size_t size,
        enum dma_data_direction dir, unsigned long attrs)
{
        return DMA_MAPPING_ERROR;
}
static inline void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr,
        size_t size, enum dma_data_direction dir, unsigned long attrs)
{
}
static inline unsigned int dma_map_sg_attrs(struct device *dev,
        struct scatterlist *sg, int nents, enum dma_data_direction dir,
        unsigned long attrs)
{
        return 0;
}
static inline void dma_unmap_sg_attrs(struct device *dev,
        struct scatterlist *sg, int nents, enum dma_data_direction dir,
        unsigned long attrs)
{
}
static inline int dma_map_sgtable(struct device *dev, struct sg_table *sgt,
        enum dma_data_direction dir, unsigned long attrs)
{
        return -EOPNOTSUPP;
}
static inline dma_addr_t dma_map_resource(struct device *dev,
        phys_addr_t phys_addr, size_t size, enum dma_data_direction dir,
        unsigned long attrs)
{
        return DMA_MAPPING_ERROR;
}
static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr,
        size_t size, enum dma_data_direction dir, unsigned long attrs)
{
}
static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
        size_t size, enum dma_data_direction dir)
{
}
static inline void dma_sync_single_for_device(struct device *dev,
        dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
}
static inline void dma_sync_sg_for_cpu(struct device *dev,
        struct scatterlist *sg, int nelems, enum dma_data_direction dir)
{
}
static inline void dma_sync_sg_for_device(struct device *dev,
        struct scatterlist *sg, int nelems, enum dma_data_direction dir)
{
}
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
        return -ENOMEM;
}
static inline void *dma_alloc_attrs(struct device *dev, size_t size,
        dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs)
{
        return NULL;
}
static inline void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
        dma_addr_t dma_handle, unsigned long attrs)
{
}
static inline void *dmam_alloc_attrs(struct device *dev, size_t size,
        dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
        return NULL;
}
static inline void dmam_free_coherent(struct device *dev, size_t size,
        void *vaddr, dma_addr_t dma_handle)
{
}
static inline int dma_get_sgtable_attrs(struct device *dev,
        struct sg_table *sgt, void *cpu_addr, dma_addr_t dma_addr,
        size_t size, unsigned long attrs)
{
        return -ENXIO;
}
static inline int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
        void *cpu_addr, dma_addr_t dma_addr, size_t size,
        unsigned long attrs)
{
        return -ENXIO;
}
static inline bool dma_can_mmap(struct device *dev)
{
        return false;
}
static inline bool dma_pci_p2pdma_supported(struct device *dev)
{
        return false;
}
static inline int dma_set_mask(struct device *dev, u64 mask)
{
        return -EIO;
}
static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
{
        return -EIO;
}
static inline u64 dma_get_required_mask(struct device *dev)
{
        return 0;
}
static inline size_t dma_max_mapping_size(struct device *dev)
{
        return 0;
}
static inline size_t dma_opt_mapping_size(struct device *dev)
{
        return 0;
}
static inline bool dma_need_sync(struct device *dev, dma_addr_t dma_addr)
{
        return false;
}
static inline unsigned long dma_get_merge_boundary(struct device *dev)
{
        return 0;
}
static inline struct sg_table *dma_alloc_noncontiguous(struct device *dev,
        size_t size, enum dma_data_direction dir, gfp_t gfp,
        unsigned long attrs)
{
        return NULL;
}
static inline void dma_free_noncontiguous(struct device *dev, size_t size,
        struct sg_table *sgt, enum dma_data_direction dir)
{
}
static inline void *dma_vmap_noncontiguous(struct device *dev, size_t size,
        struct sg_table *sgt)
{
        return NULL;
}
static inline void dma_vunmap_noncontiguous(struct device *dev, void *vaddr)
{
}
static inline int dma_mmap_noncontiguous(struct device *dev,
        struct vm_area_struct *vma, size_t size, struct sg_table *sgt)
{
        return -EINVAL;
}
#endif /* CONFIG_HAS_DMA */

struct page *dma_alloc_pages(struct device *dev, size_t size,
        dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp);
void dma_free_pages(struct device *dev, size_t size, struct page *page,
        dma_addr_t dma_handle, enum dma_data_direction dir);
int dma_mmap_pages(struct device *dev, struct vm_area_struct *vma,
        size_t size, struct page *page);

static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
        dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
{
        struct page *page = dma_alloc_pages(dev, size, dma_handle, dir, gfp);
        return page ? page_address(page) : NULL;
}

static inline void dma_free_noncoherent(struct device *dev, size_t size,
        void *vaddr, dma_addr_t dma_handle, enum dma_data_direction dir)
{
        dma_free_pages(dev, size, virt_to_page(vaddr), dma_handle, dir);
}
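
/*
 * Example (illustrative sketch): non-coherent allocations are kernel-mapped
 * but still follow the streaming ownership rules, so the buffer must be
 * synced around device access. dev, ring_size and dma are hypothetical.
 *
 *	dma_addr_t dma;
 *	void *ring = dma_alloc_noncoherent(dev, ring_size, &dma,
 *					   DMA_BIDIRECTIONAL, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	...
 *	dma_sync_single_for_device(dev, dma, ring_size, DMA_BIDIRECTIONAL);
 *	... the device now owns the ring ...
 *	dma_sync_single_for_cpu(dev, dma, ring_size, DMA_BIDIRECTIONAL);
 *	...
 *	dma_free_noncoherent(dev, ring_size, ring, dma, DMA_BIDIRECTIONAL);
 */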

static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
        size_t size, enum dma_data_direction dir, unsigned long attrs)
{
        /* DMA must never operate on areas that might be remapped. */
        if (dev_WARN_ONCE(dev, is_vmalloc_addr(ptr),
                          "rejecting DMA map of vmalloc memory\n"))
                return DMA_MAPPING_ERROR;
        debug_dma_map_single(dev, ptr, size);
        return dma_map_page_attrs(dev, virt_to_page(ptr), offset_in_page(ptr),
                size, dir, attrs);
}

static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
        size_t size, enum dma_data_direction dir, unsigned long attrs)
{
        return dma_unmap_page_attrs(dev, addr, size, dir, attrs);
}

static inline void dma_sync_single_range_for_cpu(struct device *dev,
        dma_addr_t addr, unsigned long offset, size_t size,
        enum dma_data_direction dir)
{
        return dma_sync_single_for_cpu(dev, addr + offset, size, dir);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
        dma_addr_t addr, unsigned long offset, size_t size,
        enum dma_data_direction dir)
{
        return dma_sync_single_for_device(dev, addr + offset, size, dir);
}
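
/*
 * Example (illustrative sketch): only the portion of a mapping that the CPU
 * actually touches needs to be handed back and forth. The mapping (dma, buf),
 * the 64-byte header and parse_header() are all hypothetical.
 *
 *	dma_sync_single_range_for_cpu(dev, dma, 0, 64, DMA_FROM_DEVICE);
 *	parse_header(buf);
 *	dma_sync_single_range_for_device(dev, dma, 0, 64, DMA_FROM_DEVICE);
 */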

/**
 * dma_unmap_sgtable - Unmap the given buffer for DMA
 * @dev: The device for which to perform the DMA operation
 * @sgt: The sg_table object describing the buffer
 * @dir: DMA direction
 * @attrs: Optional DMA attributes for the unmap operation
 *
 * Unmaps a buffer described by a scatterlist stored in the given sg_table
 * object for the @dir DMA operation by the @dev device. After this function
 * the ownership of the buffer is transferred back to the CPU domain.
 */
static inline void dma_unmap_sgtable(struct device *dev, struct sg_table *sgt,
        enum dma_data_direction dir, unsigned long attrs)
{
        dma_unmap_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, attrs);
}
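
/*
 * Example (illustrative sketch): the sgtable helpers pair a mapped sg_table
 * with its unmap and sync calls so the nents bookkeeping cannot go wrong.
 * The already-built sg_table (sgt) is hypothetical.
 *
 *	ret = dma_map_sgtable(dev, &sgt, DMA_TO_DEVICE, 0);
 *	if (ret)
 *		return ret;
 *	... program the device using sgt.sgl and sgt.nents ...
 *	dma_unmap_sgtable(dev, &sgt, DMA_TO_DEVICE, 0);
 */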

/**
 * dma_sync_sgtable_for_cpu - Synchronize the given buffer for CPU access
 * @dev: The device for which to perform the DMA operation
 * @sgt: The sg_table object describing the buffer
 * @dir: DMA direction
 *
 * Performs the needed cache synchronization and moves the ownership of the
 * buffer back to the CPU domain, so it is safe to perform any access to it
 * by the CPU. Before doing any further DMA operations, one has to transfer
 * the ownership of the buffer back to the DMA domain by calling
 * dma_sync_sgtable_for_device().
 */
static inline void dma_sync_sgtable_for_cpu(struct device *dev,
        struct sg_table *sgt, enum dma_data_direction dir)
{
        dma_sync_sg_for_cpu(dev, sgt->sgl, sgt->orig_nents, dir);
}

/**
 * dma_sync_sgtable_for_device - Synchronize the given buffer for DMA
 * @dev: The device for which to perform the DMA operation
 * @sgt: The sg_table object describing the buffer
 * @dir: DMA direction
 *
 * Performs the needed cache synchronization and moves the ownership of the
 * buffer back to the DMA domain, so it is safe to perform the DMA operation.
 * Once finished, one has to call dma_sync_sgtable_for_cpu() or
 * dma_unmap_sgtable().
 */
static inline void dma_sync_sgtable_for_device(struct device *dev,
        struct sg_table *sgt, enum dma_data_direction dir)
{
        dma_sync_sg_for_device(dev, sgt->sgl, sgt->orig_nents, dir);
}
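
/*
 * Example (illustrative sketch): a long-lived sgtable mapping that the CPU
 * inspects between device runs, without unmapping it. sgt is assumed to be
 * already mapped with dma_map_sgtable(); inspect_results() is hypothetical.
 *
 *	dma_sync_sgtable_for_cpu(dev, &sgt, DMA_FROM_DEVICE);
 *	inspect_results(&sgt);
 *	dma_sync_sgtable_for_device(dev, &sgt, DMA_FROM_DEVICE);
 */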

#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, 0)
#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, 0)
#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, 0)
#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, 0)
#define dma_map_page(d, p, o, s, r) dma_map_page_attrs(d, p, o, s, r, 0)
#define dma_unmap_page(d, a, s, r) dma_unmap_page_attrs(d, a, s, r, 0)
#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0)
#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0)

static inline void *dma_alloc_coherent(struct device *dev, size_t size,
        dma_addr_t *dma_handle, gfp_t gfp)
{
        return dma_alloc_attrs(dev, size, dma_handle, gfp,
                (gfp & __GFP_NOWARN) ? DMA_ATTR_NO_WARN : 0);
}

static inline void dma_free_coherent(struct device *dev, size_t size,
        void *cpu_addr, dma_addr_t dma_handle)
{
        return dma_free_attrs(dev, size, cpu_addr, dma_handle, 0);
}
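
/*
 * Example (illustrative sketch): coherent memory is typically used for
 * descriptor rings that both the CPU and the device access without explicit
 * syncs. struct my_desc, NUM_DESC and dev are hypothetical.
 *
 *	struct my_desc *ring;
 *	dma_addr_t ring_dma;
 *
 *	ring = dma_alloc_coherent(dev, NUM_DESC * sizeof(*ring),
 *				  &ring_dma, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(dev, NUM_DESC * sizeof(*ring), ring, ring_dma);
 */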

static inline u64 dma_get_mask(struct device *dev)
{
        if (dev->dma_mask && *dev->dma_mask)
                return *dev->dma_mask;
        return DMA_BIT_MASK(32);
}

/*
 * Set both the DMA mask and the coherent DMA mask to the same thing.
 * Note that we don't check the return value from dma_set_coherent_mask()
 * as the DMA API guarantees that the coherent DMA mask can be set to
 * the same or smaller than the streaming DMA mask.
 */
static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask)
{
        int rc = dma_set_mask(dev, mask);
        if (rc == 0)
                dma_set_coherent_mask(dev, mask);
        return rc;
}
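
/*
 * Example (illustrative sketch): a probe routine asking for 64-bit DMA and
 * falling back to 32-bit addressing if the platform rejects it. pdev is a
 * hypothetical PCI device.
 *
 *	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
 *	if (ret)
 *		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
 *	if (ret)
 *		return ret;
 */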

/*
 * Similar to the above, except it deals with the case where the device
 * does not have dev->dma_mask appropriately set up.
 */
static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
{
        dev->dma_mask = &dev->coherent_dma_mask;
        return dma_set_mask_and_coherent(dev, mask);
}

/**
 * dma_addressing_limited - return if the device is addressing limited
 * @dev: device to check
 *
 * Return %true if the device's DMA mask is too small to address all memory in
 * the system, else %false. Lack of addressing bits is the prime reason for
 * bounce buffering, but might not be the only one.
 */
static inline bool dma_addressing_limited(struct device *dev)
{
        return min_not_zero(dma_get_mask(dev), dev->bus_dma_limit) <
                dma_get_required_mask(dev);
}
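
/*
 * Example (illustrative sketch): a driver disabling an optional zero-copy
 * fast path when the device cannot reach all of system memory. The
 * priv->use_zero_copy flag is hypothetical driver state.
 *
 *	if (dma_addressing_limited(dev))
 *		priv->use_zero_copy = false;
 */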

static inline unsigned int dma_get_max_seg_size(struct device *dev)
{
        if (dev->dma_parms && dev->dma_parms->max_segment_size)
                return dev->dma_parms->max_segment_size;
        return SZ_64K;
}

static inline int dma_set_max_seg_size(struct device *dev, unsigned int size)
{
        if (dev->dma_parms) {
                dev->dma_parms->max_segment_size = size;
                return 0;
        }
        return -EIO;
}

static inline unsigned long dma_get_seg_boundary(struct device *dev)
{
        if (dev->dma_parms && dev->dma_parms->segment_boundary_mask)
                return dev->dma_parms->segment_boundary_mask;
        return ULONG_MAX;
}

/**
 * dma_get_seg_boundary_nr_pages - return the segment boundary in "page" units
 * @dev: device to query the boundary for
 * @page_shift: ilog2() of the IOMMU page size
 *
 * Return the segment boundary in IOMMU page units (which may be different from
 * the CPU page size) for the passed in device.
 *
 * If @dev is NULL a boundary of U32_MAX is assumed, this case is just for
 * non-DMA API callers.
 */
static inline unsigned long dma_get_seg_boundary_nr_pages(struct device *dev,
        unsigned int page_shift)
{
        if (!dev)
                return (U32_MAX >> page_shift) + 1;
        return (dma_get_seg_boundary(dev) >> page_shift) + 1;
}

static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask)
{
        if (dev->dma_parms) {
                dev->dma_parms->segment_boundary_mask = mask;
                return 0;
        }
        return -EIO;
}

static inline unsigned int dma_get_min_align_mask(struct device *dev)
{
        if (dev->dma_parms)
                return dev->dma_parms->min_align_mask;
        return 0;
}

static inline int dma_set_min_align_mask(struct device *dev,
        unsigned int min_align_mask)
{
        if (WARN_ON_ONCE(!dev->dma_parms))
                return -EIO;
        dev->dma_parms->min_align_mask = min_align_mask;
        return 0;
}

static inline int dma_get_cache_alignment(void)
{
#ifdef ARCH_DMA_MINALIGN
        return ARCH_DMA_MINALIGN;
#endif
        return 1;
}

static inline void *dmam_alloc_coherent(struct device *dev, size_t size,
        dma_addr_t *dma_handle, gfp_t gfp)
{
        return dmam_alloc_attrs(dev, size, dma_handle, gfp,
                (gfp & __GFP_NOWARN) ? DMA_ATTR_NO_WARN : 0);
}

static inline void *dma_alloc_wc(struct device *dev, size_t size,
        dma_addr_t *dma_addr, gfp_t gfp)
{
        unsigned long attrs = DMA_ATTR_WRITE_COMBINE;

        if (gfp & __GFP_NOWARN)
                attrs |= DMA_ATTR_NO_WARN;

        return dma_alloc_attrs(dev, size, dma_addr, gfp, attrs);
}

static inline void dma_free_wc(struct device *dev, size_t size,
        void *cpu_addr, dma_addr_t dma_addr)
{
        return dma_free_attrs(dev, size, cpu_addr, dma_addr,
                              DMA_ATTR_WRITE_COMBINE);
}

static inline int dma_mmap_wc(struct device *dev,
                              struct vm_area_struct *vma,
                              void *cpu_addr, dma_addr_t dma_addr,
                              size_t size)
{
        return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size,
                              DMA_ATTR_WRITE_COMBINE);
}
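
/*
 * Example (illustrative sketch): write-combined allocations suit buffers the
 * CPU mostly streams into, such as a simple framebuffer. fb_size, fb_dma and
 * the surrounding mmap handler are hypothetical.
 *
 *	void *fb = dma_alloc_wc(dev, fb_size, &fb_dma, GFP_KERNEL);
 *	if (!fb)
 *		return -ENOMEM;
 *	...
 *	ret = dma_mmap_wc(dev, vma, fb, fb_dma, fb_size);
 *	...
 *	dma_free_wc(dev, fb_size, fb, fb_dma);
 */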

#ifdef CONFIG_NEED_DMA_MAP_STATE
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)        dma_addr_t ADDR_NAME
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)          __u32 LEN_NAME
#define dma_unmap_addr(PTR, ADDR_NAME)          ((PTR)->ADDR_NAME)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL) (((PTR)->ADDR_NAME) = (VAL))
#define dma_unmap_len(PTR, LEN_NAME)            ((PTR)->LEN_NAME)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)   (((PTR)->LEN_NAME) = (VAL))
#else
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)
#define dma_unmap_addr(PTR, ADDR_NAME)          (0)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL) do { } while (0)
#define dma_unmap_len(PTR, LEN_NAME)            (0)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)   do { } while (0)
#endif
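
/*
 * Example (illustrative sketch): these macros let a driver store unmap state
 * only on configurations that actually need it. The tx_buf structure and the
 * buf, dma and size names are hypothetical.
 *
 *	struct tx_buf {
 *		struct sk_buff *skb;
 *		DEFINE_DMA_UNMAP_ADDR(mapping);
 *		DEFINE_DMA_UNMAP_LEN(len);
 *	};
 *
 *	dma_unmap_addr_set(buf, mapping, dma);
 *	dma_unmap_len_set(buf, len, size);
 *	...
 *	dma_unmap_single(dev, dma_unmap_addr(buf, mapping),
 *			 dma_unmap_len(buf, len), DMA_TO_DEVICE);
 */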

#endif /* _LINUX_DMA_MAPPING_H */