// SPDX-License-Identifier: GPL-2.0
/*
 * arch-independent dma-mapping routines
 *
 * Copyright (c) 2006 SUSE Linux Products GmbH
 * Copyright (c) 2006 Tejun Heo <[email protected]>
 */
#include <linux/memblock.h> /* for max_pfn */
#include <linux/acpi.h>
#include <linux/dma-map-ops.h>
#include <linux/export.h>
#include <linux/gfp.h>
#include <linux/kmsan.h>
#include <linux/of_device.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "debug.h"
#include "direct.h"

bool dma_default_coherent;

/*
 * Managed DMA API
 */
struct dma_devres {
        size_t size;
        void *vaddr;
        dma_addr_t dma_handle;
        unsigned long attrs;
};

static void dmam_release(struct device *dev, void *res)
{
        struct dma_devres *this = res;

        dma_free_attrs(dev, this->size, this->vaddr, this->dma_handle,
                       this->attrs);
}

static int dmam_match(struct device *dev, void *res, void *match_data)
{
        struct dma_devres *this = res, *match = match_data;

        if (this->vaddr == match->vaddr) {
                WARN_ON(this->size != match->size ||
                        this->dma_handle != match->dma_handle);
                return 1;
        }
        return 0;
}

/**
 * dmam_free_coherent - Managed dma_free_coherent()
 * @dev: Device to free coherent memory for
 * @size: Size of allocation
 * @vaddr: Virtual address of the memory to free
 * @dma_handle: DMA handle of the memory to free
 *
 * Managed dma_free_coherent().
 */
void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
                        dma_addr_t dma_handle)
{
        struct dma_devres match_data = { size, vaddr, dma_handle };

        dma_free_coherent(dev, size, vaddr, dma_handle);
        WARN_ON(devres_destroy(dev, dmam_release, dmam_match, &match_data));
}
EXPORT_SYMBOL(dmam_free_coherent);

/**
 * dmam_alloc_attrs - Managed dma_alloc_attrs()
 * @dev: Device to allocate non_coherent memory for
 * @size: Size of allocation
 * @dma_handle: Out argument for allocated DMA handle
 * @gfp: Allocation flags
 * @attrs: Flags in the DMA_ATTR_* namespace.
 *
 * Managed dma_alloc_attrs(). Memory allocated using this function will be
 * automatically released on driver detach.
 *
 * RETURNS:
 * Pointer to allocated memory on success, NULL on failure.
 */
void *dmam_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
                       gfp_t gfp, unsigned long attrs)
{
        struct dma_devres *dr;
        void *vaddr;

        dr = devres_alloc(dmam_release, sizeof(*dr), gfp);
        if (!dr)
                return NULL;

        vaddr = dma_alloc_attrs(dev, size, dma_handle, gfp, attrs);
        if (!vaddr) {
                devres_free(dr);
                return NULL;
        }

        dr->vaddr = vaddr;
        dr->dma_handle = *dma_handle;
        dr->size = size;
        dr->attrs = attrs;

        devres_add(dev, dr);

        return vaddr;
}
EXPORT_SYMBOL(dmam_alloc_attrs);
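
/*
 * Example (illustrative sketch): a typical caller allocates from its probe
 * routine and relies on devres to release the buffer on driver detach.  The
 * foo_* names, RING_BYTES and the private structure layout are hypothetical,
 * not part of this API.
 *
 *        static int foo_probe(struct platform_device *pdev)
 *        {
 *                struct foo_priv *priv;
 *
 *                priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
 *                if (!priv)
 *                        return -ENOMEM;
 *
 *                priv->ring = dmam_alloc_attrs(&pdev->dev, RING_BYTES,
 *                                              &priv->ring_dma, GFP_KERNEL, 0);
 *                if (!priv->ring)
 *                        return -ENOMEM;
 *
 *                // No explicit free in the error/remove paths:
 *                // dmam_release() runs automatically on detach.
 *                return 0;
 *        }
 */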
static bool dma_go_direct(struct device *dev, dma_addr_t mask,
                const struct dma_map_ops *ops)
{
        if (likely(!ops))
                return true;

#ifdef CONFIG_DMA_OPS_BYPASS
        if (dev->dma_ops_bypass)
                return min_not_zero(mask, dev->bus_dma_limit) >=
                        dma_direct_get_required_mask(dev);
#endif
        return false;
}

/*
 * Check if the device uses a direct mapping for streaming DMA operations.
 * This allows IOMMU drivers to set a bypass mode if the DMA mask is large
 * enough.
 */
static inline bool dma_alloc_direct(struct device *dev,
                const struct dma_map_ops *ops)
{
        return dma_go_direct(dev, dev->coherent_dma_mask, ops);
}

static inline bool dma_map_direct(struct device *dev,
                const struct dma_map_ops *ops)
{
        return dma_go_direct(dev, *dev->dma_mask, ops);
}

dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page,
                size_t offset, size_t size, enum dma_data_direction dir,
                unsigned long attrs)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);
        dma_addr_t addr;

        BUG_ON(!valid_dma_direction(dir));

        if (WARN_ON_ONCE(!dev->dma_mask))
                return DMA_MAPPING_ERROR;

        if (dma_map_direct(dev, ops) ||
            arch_dma_map_page_direct(dev, page_to_phys(page) + offset + size))
                addr = dma_direct_map_page(dev, page, offset, size, dir, attrs);
        else
                addr = ops->map_page(dev, page, offset, size, dir, attrs);
        kmsan_handle_dma(page, offset, size, dir);
        debug_dma_map_page(dev, page, offset, size, dir, addr, attrs);

        return addr;
}
EXPORT_SYMBOL(dma_map_page_attrs);

void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, size_t size,
                enum dma_data_direction dir, unsigned long attrs)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(dir));
        if (dma_map_direct(dev, ops) ||
            arch_dma_unmap_page_direct(dev, addr + size))
                dma_direct_unmap_page(dev, addr, size, dir, attrs);
        else if (ops->unmap_page)
                ops->unmap_page(dev, addr, size, dir, attrs);
        debug_dma_unmap_page(dev, addr, size, dir);
}
EXPORT_SYMBOL(dma_unmap_page_attrs);

static int __dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
                int nents, enum dma_data_direction dir, unsigned long attrs)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);
        int ents;

        BUG_ON(!valid_dma_direction(dir));

        if (WARN_ON_ONCE(!dev->dma_mask))
                return 0;

        if (dma_map_direct(dev, ops) ||
            arch_dma_map_sg_direct(dev, sg, nents))
                ents = dma_direct_map_sg(dev, sg, nents, dir, attrs);
        else
                ents = ops->map_sg(dev, sg, nents, dir, attrs);

        if (ents > 0) {
                kmsan_handle_dma_sg(sg, nents, dir);
                debug_dma_map_sg(dev, sg, nents, ents, dir, attrs);
        } else if (WARN_ON_ONCE(ents != -EINVAL && ents != -ENOMEM &&
                                ents != -EIO && ents != -EREMOTEIO)) {
                return -EIO;
        }

        return ents;
}

/**
 * dma_map_sg_attrs - Map the given buffer for DMA
 * @dev: The device for which to perform the DMA operation
 * @sg: The sg_table object describing the buffer
 * @nents: Number of entries to map
 * @dir: DMA direction
 * @attrs: Optional DMA attributes for the map operation
 *
 * Maps a buffer described by a scatterlist passed in the sg argument with
 * nents segments for the @dir DMA operation by the @dev device.
 *
 * Returns the number of mapped entries (which can be less than nents)
 * on success. Zero is returned for any error.
 *
 * dma_unmap_sg_attrs() should be used to unmap the buffer with the
 * original sg and original nents (not the value returned by this function).
 */
unsigned int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
                int nents, enum dma_data_direction dir, unsigned long attrs)
{
        int ret;

        ret = __dma_map_sg_attrs(dev, sg, nents, dir, attrs);
        if (ret < 0)
                return 0;
        return ret;
}
EXPORT_SYMBOL(dma_map_sg_attrs);
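
/*
 * Example (illustrative sketch): map a scatterlist, program the hardware with
 * the *returned* number of entries, and unmap later with the original nents.
 * foo_hw_program() and the local variables are hypothetical.
 *
 *        int count, i;
 *        struct scatterlist *s;
 *
 *        count = dma_map_sg_attrs(dev, sgl, nents, DMA_FROM_DEVICE, 0);
 *        if (!count)
 *                return -ENOMEM;
 *        for_each_sg(sgl, s, count, i)
 *                foo_hw_program(hw, i, sg_dma_address(s), sg_dma_len(s));
 *
 *        dma_unmap_sg_attrs(dev, sgl, nents, DMA_FROM_DEVICE, 0);
 */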
/**
 * dma_map_sgtable - Map the given buffer for DMA
 * @dev: The device for which to perform the DMA operation
 * @sgt: The sg_table object describing the buffer
 * @dir: DMA direction
 * @attrs: Optional DMA attributes for the map operation
 *
 * Maps a buffer described by a scatterlist stored in the given sg_table
 * object for the @dir DMA operation by the @dev device. After success, the
 * ownership for the buffer is transferred to the DMA domain. One has to
 * call dma_sync_sgtable_for_cpu() or dma_unmap_sgtable() to move the
 * ownership of the buffer back to the CPU domain before touching the
 * buffer by the CPU.
 *
 * Returns 0 on success or a negative error code on error. The following
 * error codes are supported with the given meaning:
 *
 *   -EINVAL     An invalid argument, unaligned access or other error
 *               in usage. Will not succeed if retried.
 *   -ENOMEM     Insufficient resources (like memory or IOVA space) to
 *               complete the mapping. Should succeed if retried later.
 *   -EIO        Legacy error code with an unknown meaning, e.g. this is
 *               returned if a lower level call returned
 *               DMA_MAPPING_ERROR.
 *   -EREMOTEIO  The DMA device cannot access P2PDMA memory specified
 *               in the sg_table. This will not succeed if retried.
 */
int dma_map_sgtable(struct device *dev, struct sg_table *sgt,
                enum dma_data_direction dir, unsigned long attrs)
{
        int nents;

        nents = __dma_map_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, attrs);
        if (nents < 0)
                return nents;
        sgt->nents = nents;
        return 0;
}
EXPORT_SYMBOL_GPL(dma_map_sgtable);
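
/*
 * Example (illustrative sketch): the sg_table variant returns 0 or a negative
 * errno rather than an entry count, so callers can propagate the error
 * directly.  The surrounding sg_table setup is assumed to exist elsewhere.
 *
 *        ret = dma_map_sgtable(dev, sgt, DMA_TO_DEVICE, 0);
 *        if (ret)
 *                return ret;  // -EINVAL/-ENOMEM/-EIO/-EREMOTEIO as listed above
 *
 *        // device owns the buffer; sgt->nents mapped entries are valid here
 *
 *        dma_unmap_sgtable(dev, sgt, DMA_TO_DEVICE, 0);
 */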
void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
                int nents, enum dma_data_direction dir,
                unsigned long attrs)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(dir));
        debug_dma_unmap_sg(dev, sg, nents, dir);
        if (dma_map_direct(dev, ops) ||
            arch_dma_unmap_sg_direct(dev, sg, nents))
                dma_direct_unmap_sg(dev, sg, nents, dir, attrs);
        else if (ops->unmap_sg)
                ops->unmap_sg(dev, sg, nents, dir, attrs);
}
EXPORT_SYMBOL(dma_unmap_sg_attrs);

dma_addr_t dma_map_resource(struct device *dev, phys_addr_t phys_addr,
                size_t size, enum dma_data_direction dir, unsigned long attrs)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);
        dma_addr_t addr = DMA_MAPPING_ERROR;

        BUG_ON(!valid_dma_direction(dir));

        if (WARN_ON_ONCE(!dev->dma_mask))
                return DMA_MAPPING_ERROR;

        if (dma_map_direct(dev, ops))
                addr = dma_direct_map_resource(dev, phys_addr, size, dir, attrs);
        else if (ops->map_resource)
                addr = ops->map_resource(dev, phys_addr, size, dir, attrs);

        debug_dma_map_resource(dev, phys_addr, size, dir, addr, attrs);
        return addr;
}
EXPORT_SYMBOL(dma_map_resource);

void dma_unmap_resource(struct device *dev, dma_addr_t addr, size_t size,
                enum dma_data_direction dir, unsigned long attrs)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(dir));
        if (!dma_map_direct(dev, ops) && ops->unmap_resource)
                ops->unmap_resource(dev, addr, size, dir, attrs);
        debug_dma_unmap_resource(dev, addr, size, dir);
}
EXPORT_SYMBOL(dma_unmap_resource);

void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
                enum dma_data_direction dir)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(dir));
        if (dma_map_direct(dev, ops))
                dma_direct_sync_single_for_cpu(dev, addr, size, dir);
        else if (ops->sync_single_for_cpu)
                ops->sync_single_for_cpu(dev, addr, size, dir);
        debug_dma_sync_single_for_cpu(dev, addr, size, dir);
}
EXPORT_SYMBOL(dma_sync_single_for_cpu);

void dma_sync_single_for_device(struct device *dev, dma_addr_t addr,
                size_t size, enum dma_data_direction dir)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(dir));
        if (dma_map_direct(dev, ops))
                dma_direct_sync_single_for_device(dev, addr, size, dir);
        else if (ops->sync_single_for_device)
                ops->sync_single_for_device(dev, addr, size, dir);
        debug_dma_sync_single_for_device(dev, addr, size, dir);
}
EXPORT_SYMBOL(dma_sync_single_for_device);

void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
                int nelems, enum dma_data_direction dir)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(dir));
        if (dma_map_direct(dev, ops))
                dma_direct_sync_sg_for_cpu(dev, sg, nelems, dir);
        else if (ops->sync_sg_for_cpu)
                ops->sync_sg_for_cpu(dev, sg, nelems, dir);
        debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
}
EXPORT_SYMBOL(dma_sync_sg_for_cpu);

void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
                int nelems, enum dma_data_direction dir)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(dir));
        if (dma_map_direct(dev, ops))
                dma_direct_sync_sg_for_device(dev, sg, nelems, dir);
        else if (ops->sync_sg_for_device)
                ops->sync_sg_for_device(dev, sg, nelems, dir);
        debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
}
EXPORT_SYMBOL(dma_sync_sg_for_device);

/*
 * The whole dma_get_sgtable() idea is fundamentally unsafe - it seems
 * that the intention is to allow exporting memory allocated via the
 * coherent DMA APIs through the dma_buf API, which only accepts a
 * scattertable. This presents a couple of problems:
 * 1. Not all memory allocated via the coherent DMA APIs is backed by
 *    a struct page
 * 2. Passing coherent DMA memory into the streaming APIs is not allowed
 *    as we will try to flush the memory through a different alias to that
 *    actually being used (and the flushes are redundant.)
 */
int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
                void *cpu_addr, dma_addr_t dma_addr, size_t size,
                unsigned long attrs)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        if (dma_alloc_direct(dev, ops))
                return dma_direct_get_sgtable(dev, sgt, cpu_addr, dma_addr,
                                size, attrs);
        if (!ops->get_sgtable)
                return -ENXIO;
        return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size, attrs);
}
EXPORT_SYMBOL(dma_get_sgtable_attrs);

#ifdef CONFIG_MMU
/*
 * Return the page attributes used for mapping dma_alloc_* memory, either in
 * kernel space if remapping is needed, or to userspace through dma_mmap_*.
 */
pgprot_t dma_pgprot(struct device *dev, pgprot_t prot, unsigned long attrs)
{
        if (dev_is_dma_coherent(dev))
                return prot;
#ifdef CONFIG_ARCH_HAS_DMA_WRITE_COMBINE
        if (attrs & DMA_ATTR_WRITE_COMBINE)
                return pgprot_writecombine(prot);
#endif
        if (attrs & DMA_ATTR_SYS_CACHE ||
            attrs & DMA_ATTR_SYS_CACHE_NWA)
                return pgprot_syscached(prot);
        return pgprot_dmacoherent(prot);
}
#endif /* CONFIG_MMU */

/**
 * dma_can_mmap - check if a given device supports dma_mmap_*
 * @dev: device to check
 *
 * Returns %true if @dev supports dma_mmap_coherent() and dma_mmap_attrs() to
 * map DMA allocations to userspace.
 */
bool dma_can_mmap(struct device *dev)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        if (dma_alloc_direct(dev, ops))
                return dma_direct_can_mmap(dev);
        return ops->mmap != NULL;
}
EXPORT_SYMBOL_GPL(dma_can_mmap);
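
/*
 * Example (illustrative sketch): a driver that exports coherent buffers to
 * user space can check dma_can_mmap() once, e.g. at probe time, and fail or
 * fall back when the device cannot support the mapping.  The foo_* names are
 * hypothetical.
 *
 *        if (!dma_can_mmap(&pdev->dev))
 *                return -EOPNOTSUPP;  // foo driver requires user mappings
 */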
/**
 * dma_mmap_attrs - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
 * @dma_addr: device-view address returned from dma_alloc_attrs
 * @size: size of memory originally requested in dma_alloc_attrs
 * @attrs: attributes of mapping properties requested in dma_alloc_attrs
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_attrs into user
 * space. The coherent DMA buffer must not be freed by the driver until the
 * user space mapping has been released.
 */
int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
                void *cpu_addr, dma_addr_t dma_addr, size_t size,
                unsigned long attrs)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        if (dma_alloc_direct(dev, ops))
                return dma_direct_mmap(dev, vma, cpu_addr, dma_addr, size,
                                attrs);
        if (!ops->mmap)
                return -ENXIO;
        return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
}
EXPORT_SYMBOL(dma_mmap_attrs);
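
/*
 * Example (illustrative sketch): a hypothetical .mmap file operation that
 * forwards to dma_mmap_attrs() for a buffer previously returned by
 * dma_alloc_attrs().  foo_priv and its fields are assumptions.
 *
 *        static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *        {
 *                struct foo_priv *priv = file->private_data;
 *
 *                return dma_mmap_attrs(priv->dev, vma, priv->cpu_addr,
 *                                      priv->dma_addr, priv->size, 0);
 *        }
 */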
u64 dma_get_required_mask(struct device *dev)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        if (dma_alloc_direct(dev, ops))
                return dma_direct_get_required_mask(dev);
        if (ops->get_required_mask)
                return ops->get_required_mask(dev);

        /*
         * We require every DMA ops implementation to at least support a 32-bit
         * DMA mask (and use bounce buffering if that isn't supported in
         * hardware). As the direct mapping code has its own routine to
         * actually report an optimal mask we default to 32-bit here as that
         * is the right thing for most IOMMUs, and at least not actively
         * harmful in general.
         */
        return DMA_BIT_MASK(32);
}
EXPORT_SYMBOL_GPL(dma_get_required_mask);
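
/*
 * Example (illustrative sketch): a driver that pays a cost for 64-bit
 * descriptors can consult dma_get_required_mask() before deciding which
 * addressing mode to enable.  use_64bit_dma is a hypothetical flag.
 *
 *        if (dma_get_required_mask(dev) > DMA_BIT_MASK(32) &&
 *            !dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
 *                use_64bit_dma = true;
 *        else if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)))
 *                return -EIO;
 */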
void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
                gfp_t flag, unsigned long attrs)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);
        void *cpu_addr;

        WARN_ON_ONCE(!dev->coherent_dma_mask);

        if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &cpu_addr))
                return cpu_addr;

        /* let the implementation decide on the zone to allocate from: */
        flag &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);

        if (dma_alloc_direct(dev, ops))
                cpu_addr = dma_direct_alloc(dev, size, dma_handle, flag, attrs);
        else if (ops->alloc)
                cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
        else
                return NULL;

        debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr, attrs);
        return cpu_addr;
}
EXPORT_SYMBOL(dma_alloc_attrs);

void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
                dma_addr_t dma_handle, unsigned long attrs)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        if (dma_release_from_dev_coherent(dev, get_order(size), cpu_addr))
                return;
        /*
         * On non-coherent platforms which implement DMA-coherent buffers via
         * non-cacheable remaps, ops->free() may call vunmap(). Thus getting
         * this far in IRQ context is a) at risk of a BUG_ON() or trying to
         * sleep on some machines, and b) an indication that the driver is
         * probably misusing the coherent API anyway.
         */
        WARN_ON(irqs_disabled());

        if (!cpu_addr)
                return;

        debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
        if (dma_alloc_direct(dev, ops))
                dma_direct_free(dev, size, cpu_addr, dma_handle, attrs);
        else if (ops->free)
                ops->free(dev, size, cpu_addr, dma_handle, attrs);
}
EXPORT_SYMBOL(dma_free_attrs);
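
/*
 * Example (illustrative sketch): coherent allocations should be made and freed
 * from sleepable context (see the irqs_disabled() warning above).  A typical
 * pairing, with hypothetical names:
 *
 *        desc = dma_alloc_attrs(dev, DESC_BYTES, &desc_dma, GFP_KERNEL, 0);
 *        if (!desc)
 *                return -ENOMEM;
 *
 *        // hardware reads/writes the descriptors through desc_dma
 *
 *        dma_free_attrs(dev, DESC_BYTES, desc, desc_dma, 0);
 */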
static struct page *__dma_alloc_pages(struct device *dev, size_t size,
                dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        if (WARN_ON_ONCE(!dev->coherent_dma_mask))
                return NULL;
        if (WARN_ON_ONCE(gfp & (__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM)))
                return NULL;

        size = PAGE_ALIGN(size);
        if (dma_alloc_direct(dev, ops))
                return dma_direct_alloc_pages(dev, size, dma_handle, dir, gfp);
        if (!ops->alloc_pages)
                return NULL;
        return ops->alloc_pages(dev, size, dma_handle, dir, gfp);
}

struct page *dma_alloc_pages(struct device *dev, size_t size,
                dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
{
        struct page *page = __dma_alloc_pages(dev, size, dma_handle, dir, gfp);

        if (page)
                debug_dma_map_page(dev, page, 0, size, dir, *dma_handle, 0);
        return page;
}
EXPORT_SYMBOL_GPL(dma_alloc_pages);

static void __dma_free_pages(struct device *dev, size_t size, struct page *page,
                dma_addr_t dma_handle, enum dma_data_direction dir)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        size = PAGE_ALIGN(size);
        if (dma_alloc_direct(dev, ops))
                dma_direct_free_pages(dev, size, page, dma_handle, dir);
        else if (ops->free_pages)
                ops->free_pages(dev, size, page, dma_handle, dir);
}

void dma_free_pages(struct device *dev, size_t size, struct page *page,
                dma_addr_t dma_handle, enum dma_data_direction dir)
{
        debug_dma_unmap_page(dev, dma_handle, size, dir);
        __dma_free_pages(dev, size, page, dma_handle, dir);
}
EXPORT_SYMBOL_GPL(dma_free_pages);

int dma_mmap_pages(struct device *dev, struct vm_area_struct *vma,
                size_t size, struct page *page)
{
        unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;

        if (vma->vm_pgoff >= count || vma_pages(vma) > count - vma->vm_pgoff)
                return -ENXIO;
        return remap_pfn_range(vma, vma->vm_start,
                               page_to_pfn(page) + vma->vm_pgoff,
                               vma_pages(vma) << PAGE_SHIFT, vma->vm_page_prot);
}
EXPORT_SYMBOL_GPL(dma_mmap_pages);

static struct sg_table *alloc_single_sgt(struct device *dev, size_t size,
                enum dma_data_direction dir, gfp_t gfp)
{
        struct sg_table *sgt;
        struct page *page;

        sgt = kmalloc(sizeof(*sgt), gfp);
        if (!sgt)
                return NULL;
        if (sg_alloc_table(sgt, 1, gfp))
                goto out_free_sgt;
        page = __dma_alloc_pages(dev, size, &sgt->sgl->dma_address, dir, gfp);
        if (!page)
                goto out_free_table;
        sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
        sg_dma_len(sgt->sgl) = sgt->sgl->length;
        return sgt;
out_free_table:
        sg_free_table(sgt);
out_free_sgt:
        kfree(sgt);
        return NULL;
}

struct sg_table *dma_alloc_noncontiguous(struct device *dev, size_t size,
                enum dma_data_direction dir, gfp_t gfp, unsigned long attrs)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);
        struct sg_table *sgt;

        if (WARN_ON_ONCE(attrs & ~DMA_ATTR_ALLOC_SINGLE_PAGES))
                return NULL;

        if (ops && ops->alloc_noncontiguous)
                sgt = ops->alloc_noncontiguous(dev, size, dir, gfp, attrs);
        else
                sgt = alloc_single_sgt(dev, size, dir, gfp);

        if (sgt) {
                sgt->nents = 1;
                debug_dma_map_sg(dev, sgt->sgl, sgt->orig_nents, 1, dir, attrs);
        }
        return sgt;
}
EXPORT_SYMBOL_GPL(dma_alloc_noncontiguous);

static void free_single_sgt(struct device *dev, size_t size,
                struct sg_table *sgt, enum dma_data_direction dir)
{
        __dma_free_pages(dev, size, sg_page(sgt->sgl), sgt->sgl->dma_address,
                         dir);
        sg_free_table(sgt);
        kfree(sgt);
}

void dma_free_noncontiguous(struct device *dev, size_t size,
                struct sg_table *sgt, enum dma_data_direction dir)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        debug_dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir);
        if (ops && ops->free_noncontiguous)
                ops->free_noncontiguous(dev, size, sgt, dir);
        else
                free_single_sgt(dev, size, sgt, dir);
}
EXPORT_SYMBOL_GPL(dma_free_noncontiguous);

void *dma_vmap_noncontiguous(struct device *dev, size_t size,
                struct sg_table *sgt)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);
        unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;

        if (ops && ops->alloc_noncontiguous)
                return vmap(sgt_handle(sgt)->pages, count, VM_MAP, PAGE_KERNEL);
        return page_address(sg_page(sgt->sgl));
}
EXPORT_SYMBOL_GPL(dma_vmap_noncontiguous);

void dma_vunmap_noncontiguous(struct device *dev, void *vaddr)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        if (ops && ops->alloc_noncontiguous)
                vunmap(vaddr);
}
EXPORT_SYMBOL_GPL(dma_vunmap_noncontiguous);

int dma_mmap_noncontiguous(struct device *dev, struct vm_area_struct *vma,
                size_t size, struct sg_table *sgt)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        if (ops && ops->alloc_noncontiguous) {
                unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;

                if (vma->vm_pgoff >= count ||
                    vma_pages(vma) > count - vma->vm_pgoff)
                        return -ENXIO;
                return vm_map_pages(vma, sgt_handle(sgt)->pages, count);
        }
        return dma_mmap_pages(dev, vma, size, sg_page(sgt->sgl));
}
EXPORT_SYMBOL_GPL(dma_mmap_noncontiguous);

static int dma_supported(struct device *dev, u64 mask)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        /*
         * ->dma_supported sets the bypass flag, so we must always call
         * into the method here unless the device is truly direct mapped.
         */
        if (!ops)
                return dma_direct_supported(dev, mask);
        if (!ops->dma_supported)
                return 1;
        return ops->dma_supported(dev, mask);
}

bool dma_pci_p2pdma_supported(struct device *dev)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        /* if ops is not set, dma direct will be used which supports P2PDMA */
        if (!ops)
                return true;

        /*
         * Note: dma_ops_bypass is not checked here because P2PDMA should
         * not be used with dma mapping ops that do not have support even
         * if the specific device is bypassing them.
         */
        return ops->flags & DMA_F_PCI_P2PDMA_SUPPORTED;
}
EXPORT_SYMBOL_GPL(dma_pci_p2pdma_supported);

#ifdef CONFIG_ARCH_HAS_DMA_SET_MASK
void arch_dma_set_mask(struct device *dev, u64 mask);
#else
#define arch_dma_set_mask(dev, mask) do { } while (0)
#endif

int dma_set_mask(struct device *dev, u64 mask)
{
        /*
         * Truncate the mask to the actually supported dma_addr_t width to
         * avoid generating unsupportable addresses.
         */
        mask = (dma_addr_t)mask;

        if (!dev->dma_mask || !dma_supported(dev, mask))
                return -EIO;

        arch_dma_set_mask(dev, mask);
        *dev->dma_mask = mask;
        return 0;
}
EXPORT_SYMBOL(dma_set_mask);

int dma_set_coherent_mask(struct device *dev, u64 mask)
{
        /*
         * Truncate the mask to the actually supported dma_addr_t width to
         * avoid generating unsupportable addresses.
         */
        mask = (dma_addr_t)mask;

        if (!dma_supported(dev, mask))
                return -EIO;

        dev->coherent_dma_mask = mask;
        return 0;
}
EXPORT_SYMBOL(dma_set_coherent_mask);
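
/*
 * Example (illustrative sketch): a device that can stream to 64-bit addresses
 * but keeps its descriptor rings below 4 GiB would set the two masks
 * separately; failure of either means the device cannot work on this system.
 *
 *        if (dma_set_mask(dev, DMA_BIT_MASK(64)) ||
 *            dma_set_coherent_mask(dev, DMA_BIT_MASK(32)))
 *                return -EIO;
 */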
size_t dma_max_mapping_size(struct device *dev)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);
        size_t size = SIZE_MAX;

        if (dma_map_direct(dev, ops))
                size = dma_direct_max_mapping_size(dev);
        else if (ops && ops->max_mapping_size)
                size = ops->max_mapping_size(dev);

        return size;
}
EXPORT_SYMBOL_GPL(dma_max_mapping_size);

size_t dma_opt_mapping_size(struct device *dev)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);
        size_t size = SIZE_MAX;

        if (ops && ops->opt_mapping_size)
                size = ops->opt_mapping_size();

        return min(dma_max_mapping_size(dev), size);
}
EXPORT_SYMBOL_GPL(dma_opt_mapping_size);

bool dma_need_sync(struct device *dev, dma_addr_t dma_addr)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        if (dma_map_direct(dev, ops))
                return dma_direct_need_sync(dev, dma_addr);
        return ops->sync_single_for_cpu || ops->sync_single_for_device;
}
EXPORT_SYMBOL_GPL(dma_need_sync);

unsigned long dma_get_merge_boundary(struct device *dev)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        if (!ops || !ops->get_merge_boundary)
                return 0;       /* can't merge */

        return ops->get_merge_boundary(dev);
}
EXPORT_SYMBOL_GPL(dma_get_merge_boundary);