memalloc.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  Copyright (c) by Jaroslav Kysela <[email protected]>
 *                   Takashi Iwai <[email protected]>
 *
 *  Generic memory allocators
 */

#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/dma-map-ops.h>
#include <linux/genalloc.h>
#include <linux/highmem.h>
#include <linux/vmalloc.h>
#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif
#include <sound/memalloc.h>
#include "memalloc_local.h"

#define DEFAULT_GFP \
        (GFP_KERNEL | \
         __GFP_COMP |    /* compound page lets parts be mapped */ \
         __GFP_RETRY_MAYFAIL | /* don't trigger OOM-killer */ \
         __GFP_NOWARN)   /* no stack trace print - this call is non-critical */

static const struct snd_malloc_ops *snd_dma_get_ops(struct snd_dma_buffer *dmab);

#ifdef CONFIG_SND_DMA_SGBUF
static void *snd_dma_sg_fallback_alloc(struct snd_dma_buffer *dmab, size_t size);
#endif

static void *__snd_dma_alloc_pages(struct snd_dma_buffer *dmab, size_t size)
{
        const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

        if (WARN_ON_ONCE(!ops || !ops->alloc))
                return NULL;
        return ops->alloc(dmab, size);
}

/**
 * snd_dma_alloc_dir_pages - allocate the buffer area according to the given
 *	type and direction
 * @type: the DMA buffer type
 * @device: the device pointer
 * @dir: DMA direction
 * @size: the buffer size to allocate
 * @dmab: buffer allocation record to store the allocated data
 *
 * Calls the memory-allocator function for the corresponding
 * buffer type.
 *
 * Return: Zero if the buffer with the given size is allocated successfully,
 * otherwise a negative value on error.
 */
int snd_dma_alloc_dir_pages(int type, struct device *device,
                            enum dma_data_direction dir, size_t size,
                            struct snd_dma_buffer *dmab)
{
        if (WARN_ON(!size))
                return -ENXIO;
        if (WARN_ON(!dmab))
                return -ENXIO;

        size = PAGE_ALIGN(size);
        dmab->dev.type = type;
        dmab->dev.dev = device;
        dmab->dev.dir = dir;
        dmab->bytes = 0;
        dmab->addr = 0;
        dmab->private_data = NULL;
        dmab->area = __snd_dma_alloc_pages(dmab, size);
        if (!dmab->area)
                return -ENOMEM;
        dmab->bytes = size;
        return 0;
}
EXPORT_SYMBOL(snd_dma_alloc_dir_pages);

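/*
 * A minimal usage sketch for the allocator above, assuming a hypothetical
 * driver-side device pointer "chip->dev"; the buffer is released with
 * snd_dma_free_pages() defined further below:
 *
 *	struct snd_dma_buffer buf;
 *	int err;
 *
 *	err = snd_dma_alloc_dir_pages(SNDRV_DMA_TYPE_DEV, chip->dev,
 *				      DMA_BIDIRECTIONAL, 64 * 1024, &buf);
 *	if (err < 0)
 *		return err;
 *	// ... program buf.addr into the hardware, access buf.area ...
 *	snd_dma_free_pages(&buf);
 */
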
/**
 * snd_dma_alloc_pages_fallback - allocate the buffer area according to the given type with fallback
 * @type: the DMA buffer type
 * @device: the device pointer
 * @size: the buffer size to allocate
 * @dmab: buffer allocation record to store the allocated data
 *
 * Calls the memory-allocator function for the corresponding
 * buffer type. When no space is left, this function reduces the requested
 * size and tries to allocate again. The size actually allocated is stored
 * in dmab->bytes.
 *
 * Return: Zero if the buffer with the given size is allocated successfully,
 * otherwise a negative value on error.
 */
int snd_dma_alloc_pages_fallback(int type, struct device *device, size_t size,
                                 struct snd_dma_buffer *dmab)
{
        int err;

        while ((err = snd_dma_alloc_pages(type, device, size, dmab)) < 0) {
                if (err != -ENOMEM)
                        return err;
                if (size <= PAGE_SIZE)
                        return -ENOMEM;
                size >>= 1;
                size = PAGE_SIZE << get_order(size);
        }
        if (!dmab->area)
                return -ENOMEM;
        return 0;
}
EXPORT_SYMBOL(snd_dma_alloc_pages_fallback);

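/*
 * A sketch of the fallback behavior above, assuming a hypothetical
 * "chip->dev": if 256 kB cannot be allocated, the request shrinks
 * (256k -> 128k -> ...) down to PAGE_SIZE before giving up, and the size
 * actually obtained is reported back in dmab->bytes:
 *
 *	struct snd_dma_buffer buf;
 *
 *	if (!snd_dma_alloc_pages_fallback(SNDRV_DMA_TYPE_DEV, chip->dev,
 *					  256 * 1024, &buf))
 *		dev_dbg(chip->dev, "got %zu bytes\n", buf.bytes);
 */
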
/**
 * snd_dma_free_pages - release the allocated buffer
 * @dmab: the buffer allocation record to release
 *
 * Releases the buffer previously allocated via snd_dma_alloc_pages().
 */
void snd_dma_free_pages(struct snd_dma_buffer *dmab)
{
        const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

        if (ops && ops->free)
                ops->free(dmab);
}
EXPORT_SYMBOL(snd_dma_free_pages);

/* called by devres */
static void __snd_release_pages(struct device *dev, void *res)
{
        snd_dma_free_pages(res);
}

/**
 * snd_devm_alloc_dir_pages - allocate the buffer and manage with devres
 * @dev: the device pointer
 * @type: the DMA buffer type
 * @dir: DMA direction
 * @size: the buffer size to allocate
 *
 * Allocate buffer pages depending on the given type and manage using devres.
 * The pages will be released automatically at the device removal.
 *
 * Unlike snd_dma_alloc_pages(), this function requires the real device pointer,
 * hence it can't work with SNDRV_DMA_TYPE_CONTINUOUS or
 * SNDRV_DMA_TYPE_VMALLOC type.
 *
 * Return: the snd_dma_buffer object on success, or NULL if failed
 */
struct snd_dma_buffer *
snd_devm_alloc_dir_pages(struct device *dev, int type,
                         enum dma_data_direction dir, size_t size)
{
        struct snd_dma_buffer *dmab;
        int err;

        if (WARN_ON(type == SNDRV_DMA_TYPE_CONTINUOUS ||
                    type == SNDRV_DMA_TYPE_VMALLOC))
                return NULL;

        dmab = devres_alloc(__snd_release_pages, sizeof(*dmab), GFP_KERNEL);
        if (!dmab)
                return NULL;

        err = snd_dma_alloc_dir_pages(type, dev, dir, size, dmab);
        if (err < 0) {
                devres_free(dmab);
                return NULL;
        }

        devres_add(dev, dmab);
        return dmab;
}
EXPORT_SYMBOL_GPL(snd_devm_alloc_dir_pages);

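/*
 * Devres-managed allocation in use, as a sketch (hypothetical "pdev" platform
 * device); the buffer is freed automatically when the device is unbound, so
 * no explicit snd_dma_free_pages() call is needed:
 *
 *	struct snd_dma_buffer *buf;
 *
 *	buf = snd_devm_alloc_dir_pages(&pdev->dev, SNDRV_DMA_TYPE_DEV,
 *				       DMA_TO_DEVICE, 32 * 1024);
 *	if (!buf)
 *		return -ENOMEM;
 */
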
/**
 * snd_dma_buffer_mmap - perform mmap of the given DMA buffer
 * @dmab: buffer allocation information
 * @area: VM area information
 *
 * Return: zero if successful, or a negative error code
 */
int snd_dma_buffer_mmap(struct snd_dma_buffer *dmab,
                        struct vm_area_struct *area)
{
        const struct snd_malloc_ops *ops;

        if (!dmab)
                return -ENOENT;
        ops = snd_dma_get_ops(dmab);
        if (ops && ops->mmap)
                return ops->mmap(dmab, area);
        else
                return -ENOENT;
}
EXPORT_SYMBOL(snd_dma_buffer_mmap);

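/*
 * snd_dma_buffer_mmap() is typically called from a driver's own mmap
 * callback; a sketch with a hypothetical handler and context struct:
 *
 *	static int my_mmap(struct my_ctx *ctx, struct vm_area_struct *area)
 *	{
 *		return snd_dma_buffer_mmap(ctx->dmab, area);
 *	}
 */
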
#ifdef CONFIG_HAS_DMA
/**
 * snd_dma_buffer_sync - sync DMA buffer between CPU and device
 * @dmab: buffer allocation information
 * @mode: sync mode
 */
void snd_dma_buffer_sync(struct snd_dma_buffer *dmab,
                         enum snd_dma_sync_mode mode)
{
        const struct snd_malloc_ops *ops;

        if (!dmab || !dmab->dev.need_sync)
                return;
        ops = snd_dma_get_ops(dmab);
        if (ops && ops->sync)
                ops->sync(dmab, mode);
}
EXPORT_SYMBOL_GPL(snd_dma_buffer_sync);
#endif /* CONFIG_HAS_DMA */

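/*
 * Usage sketch for the sync helper above (hypothetical "dmab"): after the CPU
 * writes samples into a playback buffer, sync toward the device; before the
 * CPU reads freshly captured data, sync toward the CPU.  Both calls are
 * no-ops when dmab->dev.need_sync is false:
 *
 *	snd_dma_buffer_sync(dmab, SNDRV_DMA_SYNC_DEVICE);  // after CPU writes
 *	snd_dma_buffer_sync(dmab, SNDRV_DMA_SYNC_CPU);     // before CPU reads
 */
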
/**
 * snd_sgbuf_get_addr - return the physical address at the corresponding offset
 * @dmab: buffer allocation information
 * @offset: offset in the ring buffer
 *
 * Return: the physical address
 */
dma_addr_t snd_sgbuf_get_addr(struct snd_dma_buffer *dmab, size_t offset)
{
        const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

        if (ops && ops->get_addr)
                return ops->get_addr(dmab, offset);
        else
                return dmab->addr + offset;
}
EXPORT_SYMBOL(snd_sgbuf_get_addr);

/**
 * snd_sgbuf_get_page - return the physical page at the corresponding offset
 * @dmab: buffer allocation information
 * @offset: offset in the ring buffer
 *
 * Return: the page pointer
 */
struct page *snd_sgbuf_get_page(struct snd_dma_buffer *dmab, size_t offset)
{
        const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

        if (ops && ops->get_page)
                return ops->get_page(dmab, offset);
        else
                return virt_to_page(dmab->area + offset);
}
EXPORT_SYMBOL(snd_sgbuf_get_page);

/**
 * snd_sgbuf_get_chunk_size - compute the max chunk size with continuous pages
 *	on sg-buffer
 * @dmab: buffer allocation information
 * @ofs: offset in the ring buffer
 * @size: the requested size
 *
 * Return: the chunk size
 */
unsigned int snd_sgbuf_get_chunk_size(struct snd_dma_buffer *dmab,
                                      unsigned int ofs, unsigned int size)
{
        const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

        if (ops && ops->get_chunk_size)
                return ops->get_chunk_size(dmab, ofs, size);
        else
                return size;
}
EXPORT_SYMBOL(snd_sgbuf_get_chunk_size);

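/*
 * The three helpers above combine naturally when a buffer has to be described
 * to hardware as a list of physically contiguous chunks.  A sketch, walking a
 * hypothetical "dmab":
 *
 *	size_t ofs = 0;
 *
 *	while (ofs < dmab->bytes) {
 *		dma_addr_t addr = snd_sgbuf_get_addr(dmab, ofs);
 *		unsigned int len = snd_sgbuf_get_chunk_size(dmab, ofs,
 *							    dmab->bytes - ofs);
 *		// ... program one descriptor with (addr, len) ...
 *		ofs += len;
 *	}
 */
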
/*
 * Continuous pages allocator
 */
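/*
 * do_alloc_pages() backs the continuous allocator below, the x86
 * write-combined allocator and the SG fallback allocator: it grabs physically
 * contiguous pages and, when the result does not fit into the device's
 * coherent DMA mask, retries the allocation with GFP_DMA32 and then GFP_DMA.
 */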
static void *do_alloc_pages(struct device *dev, size_t size, dma_addr_t *addr,
                            bool wc)
{
        void *p;
        gfp_t gfp = GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN;

 again:
        p = alloc_pages_exact(size, gfp);
        if (!p)
                return NULL;
        *addr = page_to_phys(virt_to_page(p));
        if (!dev)
                return p;
        if ((*addr + size - 1) & ~dev->coherent_dma_mask) {
                if (IS_ENABLED(CONFIG_ZONE_DMA32) && !(gfp & GFP_DMA32)) {
                        gfp |= GFP_DMA32;
                        goto again;
                }
                if (IS_ENABLED(CONFIG_ZONE_DMA) && !(gfp & GFP_DMA)) {
                        gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
                        goto again;
                }
        }
#ifdef CONFIG_X86
        if (wc)
                set_memory_wc((unsigned long)(p), size >> PAGE_SHIFT);
#endif
        return p;
}

static void do_free_pages(void *p, size_t size, bool wc)
{
#ifdef CONFIG_X86
        if (wc)
                set_memory_wb((unsigned long)(p), size >> PAGE_SHIFT);
#endif
        free_pages_exact(p, size);
}

static void *snd_dma_continuous_alloc(struct snd_dma_buffer *dmab, size_t size)
{
        return do_alloc_pages(dmab->dev.dev, size, &dmab->addr, false);
}

static void snd_dma_continuous_free(struct snd_dma_buffer *dmab)
{
        do_free_pages(dmab->area, dmab->bytes, false);
}

static int snd_dma_continuous_mmap(struct snd_dma_buffer *dmab,
                                   struct vm_area_struct *area)
{
        return remap_pfn_range(area, area->vm_start,
                               dmab->addr >> PAGE_SHIFT,
                               area->vm_end - area->vm_start,
                               area->vm_page_prot);
}

static const struct snd_malloc_ops snd_dma_continuous_ops = {
        .alloc = snd_dma_continuous_alloc,
        .free = snd_dma_continuous_free,
        .mmap = snd_dma_continuous_mmap,
};

/*
 * VMALLOC allocator
 */
static void *snd_dma_vmalloc_alloc(struct snd_dma_buffer *dmab, size_t size)
{
        return vmalloc(size);
}

static void snd_dma_vmalloc_free(struct snd_dma_buffer *dmab)
{
        vfree(dmab->area);
}

static int snd_dma_vmalloc_mmap(struct snd_dma_buffer *dmab,
                                struct vm_area_struct *area)
{
        return remap_vmalloc_range(area, dmab->area, 0);
}

#define get_vmalloc_page_addr(dmab, offset) \
        page_to_phys(vmalloc_to_page((dmab)->area + (offset)))

static dma_addr_t snd_dma_vmalloc_get_addr(struct snd_dma_buffer *dmab,
                                           size_t offset)
{
        return get_vmalloc_page_addr(dmab, offset) + offset % PAGE_SIZE;
}

static struct page *snd_dma_vmalloc_get_page(struct snd_dma_buffer *dmab,
                                             size_t offset)
{
        return vmalloc_to_page(dmab->area + offset);
}

static unsigned int
snd_dma_vmalloc_get_chunk_size(struct snd_dma_buffer *dmab,
                               unsigned int ofs, unsigned int size)
{
        unsigned int start, end;
        unsigned long addr;

        start = ALIGN_DOWN(ofs, PAGE_SIZE);
        end = ofs + size - 1; /* the last byte address */
        /* check page continuity */
        addr = get_vmalloc_page_addr(dmab, start);
        for (;;) {
                start += PAGE_SIZE;
                if (start > end)
                        break;
                addr += PAGE_SIZE;
                if (get_vmalloc_page_addr(dmab, start) != addr)
                        return start - ofs;
        }
        /* ok, all on continuous pages */
        return size;
}

static const struct snd_malloc_ops snd_dma_vmalloc_ops = {
        .alloc = snd_dma_vmalloc_alloc,
        .free = snd_dma_vmalloc_free,
        .mmap = snd_dma_vmalloc_mmap,
        .get_addr = snd_dma_vmalloc_get_addr,
        .get_page = snd_dma_vmalloc_get_page,
        .get_chunk_size = snd_dma_vmalloc_get_chunk_size,
};

#ifdef CONFIG_HAS_DMA
/*
 * IRAM allocator
 */
#ifdef CONFIG_GENERIC_ALLOCATOR
static void *snd_dma_iram_alloc(struct snd_dma_buffer *dmab, size_t size)
{
        struct device *dev = dmab->dev.dev;
        struct gen_pool *pool;
        void *p;

        if (dev->of_node) {
                pool = of_gen_pool_get(dev->of_node, "iram", 0);
                /* Assign the pool into private_data field */
                dmab->private_data = pool;

                p = gen_pool_dma_alloc_align(pool, size, &dmab->addr, PAGE_SIZE);
                if (p)
                        return p;
        }

        /* Internal memory might be too small or have no space left, so if
         * the pool allocation fails, fall back to the normal device page
         * allocation below.
         */
        dmab->dev.type = SNDRV_DMA_TYPE_DEV;
        return __snd_dma_alloc_pages(dmab, size);
}

static void snd_dma_iram_free(struct snd_dma_buffer *dmab)
{
        struct gen_pool *pool = dmab->private_data;

        if (pool && dmab->area)
                gen_pool_free(pool, (unsigned long)dmab->area, dmab->bytes);
}

static int snd_dma_iram_mmap(struct snd_dma_buffer *dmab,
                             struct vm_area_struct *area)
{
        area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
        return remap_pfn_range(area, area->vm_start,
                               dmab->addr >> PAGE_SHIFT,
                               area->vm_end - area->vm_start,
                               area->vm_page_prot);
}

static const struct snd_malloc_ops snd_dma_iram_ops = {
        .alloc = snd_dma_iram_alloc,
        .free = snd_dma_iram_free,
        .mmap = snd_dma_iram_mmap,
};
#endif /* CONFIG_GENERIC_ALLOCATOR */

/*
 * Coherent device pages allocator
 */
static void *snd_dma_dev_alloc(struct snd_dma_buffer *dmab, size_t size)
{
        return dma_alloc_coherent(dmab->dev.dev, size, &dmab->addr, DEFAULT_GFP);
}

static void snd_dma_dev_free(struct snd_dma_buffer *dmab)
{
        dma_free_coherent(dmab->dev.dev, dmab->bytes, dmab->area, dmab->addr);
}

static int snd_dma_dev_mmap(struct snd_dma_buffer *dmab,
                            struct vm_area_struct *area)
{
        return dma_mmap_coherent(dmab->dev.dev, area,
                                 dmab->area, dmab->addr, dmab->bytes);
}

static const struct snd_malloc_ops snd_dma_dev_ops = {
        .alloc = snd_dma_dev_alloc,
        .free = snd_dma_dev_free,
        .mmap = snd_dma_dev_mmap,
};

/*
 * Write-combined pages
 */
/* x86-specific allocations */
#ifdef CONFIG_SND_DMA_SGBUF
static void *snd_dma_wc_alloc(struct snd_dma_buffer *dmab, size_t size)
{
        return do_alloc_pages(dmab->dev.dev, size, &dmab->addr, true);
}

static void snd_dma_wc_free(struct snd_dma_buffer *dmab)
{
        do_free_pages(dmab->area, dmab->bytes, true);
}

static int snd_dma_wc_mmap(struct snd_dma_buffer *dmab,
                           struct vm_area_struct *area)
{
        area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
        return snd_dma_continuous_mmap(dmab, area);
}
#else
static void *snd_dma_wc_alloc(struct snd_dma_buffer *dmab, size_t size)
{
        return dma_alloc_wc(dmab->dev.dev, size, &dmab->addr, DEFAULT_GFP);
}

static void snd_dma_wc_free(struct snd_dma_buffer *dmab)
{
        dma_free_wc(dmab->dev.dev, dmab->bytes, dmab->area, dmab->addr);
}

static int snd_dma_wc_mmap(struct snd_dma_buffer *dmab,
                           struct vm_area_struct *area)
{
        return dma_mmap_wc(dmab->dev.dev, area,
                           dmab->area, dmab->addr, dmab->bytes);
}
#endif /* CONFIG_SND_DMA_SGBUF */

static const struct snd_malloc_ops snd_dma_wc_ops = {
        .alloc = snd_dma_wc_alloc,
        .free = snd_dma_wc_free,
        .mmap = snd_dma_wc_mmap,
};

/*
 * Non-contiguous pages allocator
 */
static void *snd_dma_noncontig_alloc(struct snd_dma_buffer *dmab, size_t size)
{
        struct sg_table *sgt;
        void *p;

#ifdef CONFIG_SND_DMA_SGBUF
        if (cpu_feature_enabled(X86_FEATURE_XENPV))
                return snd_dma_sg_fallback_alloc(dmab, size);
#endif
        sgt = dma_alloc_noncontiguous(dmab->dev.dev, size, dmab->dev.dir,
                                      DEFAULT_GFP, 0);
#ifdef CONFIG_SND_DMA_SGBUF
        if (!sgt && !get_dma_ops(dmab->dev.dev))
                return snd_dma_sg_fallback_alloc(dmab, size);
#endif
        if (!sgt)
                return NULL;

        dmab->dev.need_sync = dma_need_sync(dmab->dev.dev,
                                            sg_dma_address(sgt->sgl));
        p = dma_vmap_noncontiguous(dmab->dev.dev, size, sgt);
        if (p) {
                dmab->private_data = sgt;
                /* store the first page address for convenience */
                dmab->addr = snd_sgbuf_get_addr(dmab, 0);
        } else {
                dma_free_noncontiguous(dmab->dev.dev, size, sgt, dmab->dev.dir);
        }
        return p;
}

static void snd_dma_noncontig_free(struct snd_dma_buffer *dmab)
{
        dma_vunmap_noncontiguous(dmab->dev.dev, dmab->area);
        dma_free_noncontiguous(dmab->dev.dev, dmab->bytes, dmab->private_data,
                               dmab->dev.dir);
}

static int snd_dma_noncontig_mmap(struct snd_dma_buffer *dmab,
                                  struct vm_area_struct *area)
{
        return dma_mmap_noncontiguous(dmab->dev.dev, area,
                                      dmab->bytes, dmab->private_data);
}

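/*
 * Sync for the non-contiguous buffer: a sync toward the CPU invalidates the
 * vmap range and syncs the sg-table for CPU access (skipped for
 * DMA_TO_DEVICE, i.e. playback-only, buffers); a sync toward the device
 * flushes the vmap range first and is skipped for DMA_FROM_DEVICE buffers.
 */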
static void snd_dma_noncontig_sync(struct snd_dma_buffer *dmab,
                                   enum snd_dma_sync_mode mode)
{
        if (mode == SNDRV_DMA_SYNC_CPU) {
                if (dmab->dev.dir == DMA_TO_DEVICE)
                        return;
                invalidate_kernel_vmap_range(dmab->area, dmab->bytes);
                dma_sync_sgtable_for_cpu(dmab->dev.dev, dmab->private_data,
                                         dmab->dev.dir);
        } else {
                if (dmab->dev.dir == DMA_FROM_DEVICE)
                        return;
                flush_kernel_vmap_range(dmab->area, dmab->bytes);
                dma_sync_sgtable_for_device(dmab->dev.dev, dmab->private_data,
                                            dmab->dev.dir);
        }
}

static inline void snd_dma_noncontig_iter_set(struct snd_dma_buffer *dmab,
                                              struct sg_page_iter *piter,
                                              size_t offset)
{
        struct sg_table *sgt = dmab->private_data;

        __sg_page_iter_start(piter, sgt->sgl, sgt->orig_nents,
                             offset >> PAGE_SHIFT);
}

static dma_addr_t snd_dma_noncontig_get_addr(struct snd_dma_buffer *dmab,
                                             size_t offset)
{
        struct sg_dma_page_iter iter;

        snd_dma_noncontig_iter_set(dmab, &iter.base, offset);
        __sg_page_iter_dma_next(&iter);
        return sg_page_iter_dma_address(&iter) + offset % PAGE_SIZE;
}

static struct page *snd_dma_noncontig_get_page(struct snd_dma_buffer *dmab,
                                               size_t offset)
{
        struct sg_page_iter iter;

        snd_dma_noncontig_iter_set(dmab, &iter, offset);
        __sg_page_iter_next(&iter);
        return sg_page_iter_page(&iter);
}

static unsigned int
snd_dma_noncontig_get_chunk_size(struct snd_dma_buffer *dmab,
                                 unsigned int ofs, unsigned int size)
{
        struct sg_dma_page_iter iter;
        unsigned int start, end;
        unsigned long addr;

        start = ALIGN_DOWN(ofs, PAGE_SIZE);
        end = ofs + size - 1; /* the last byte address */
        snd_dma_noncontig_iter_set(dmab, &iter.base, start);
        if (!__sg_page_iter_dma_next(&iter))
                return 0;
        /* check page continuity */
        addr = sg_page_iter_dma_address(&iter);
        for (;;) {
                start += PAGE_SIZE;
                if (start > end)
                        break;
                addr += PAGE_SIZE;
                if (!__sg_page_iter_dma_next(&iter) ||
                    sg_page_iter_dma_address(&iter) != addr)
                        return start - ofs;
        }
        /* ok, all on continuous pages */
        return size;
}

static const struct snd_malloc_ops snd_dma_noncontig_ops = {
        .alloc = snd_dma_noncontig_alloc,
        .free = snd_dma_noncontig_free,
        .mmap = snd_dma_noncontig_mmap,
        .sync = snd_dma_noncontig_sync,
        .get_addr = snd_dma_noncontig_get_addr,
        .get_page = snd_dma_noncontig_get_page,
        .get_chunk_size = snd_dma_noncontig_get_chunk_size,
};

/* x86-specific SG-buffer with WC pages */
#ifdef CONFIG_SND_DMA_SGBUF
#define sg_wc_address(it) ((unsigned long)page_address(sg_page_iter_page(it)))

static void *snd_dma_sg_wc_alloc(struct snd_dma_buffer *dmab, size_t size)
{
        void *p = snd_dma_noncontig_alloc(dmab, size);
        struct sg_table *sgt = dmab->private_data;
        struct sg_page_iter iter;

        if (!p)
                return NULL;
        if (dmab->dev.type != SNDRV_DMA_TYPE_DEV_WC_SG)
                return p;
        for_each_sgtable_page(sgt, &iter, 0)
                set_memory_wc(sg_wc_address(&iter), 1);
        return p;
}

static void snd_dma_sg_wc_free(struct snd_dma_buffer *dmab)
{
        struct sg_table *sgt = dmab->private_data;
        struct sg_page_iter iter;

        for_each_sgtable_page(sgt, &iter, 0)
                set_memory_wb(sg_wc_address(&iter), 1);
        snd_dma_noncontig_free(dmab);
}

static int snd_dma_sg_wc_mmap(struct snd_dma_buffer *dmab,
                              struct vm_area_struct *area)
{
        area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
        return dma_mmap_noncontiguous(dmab->dev.dev, area,
                                      dmab->bytes, dmab->private_data);
}

static const struct snd_malloc_ops snd_dma_sg_wc_ops = {
        .alloc = snd_dma_sg_wc_alloc,
        .free = snd_dma_sg_wc_free,
        .mmap = snd_dma_sg_wc_mmap,
        .sync = snd_dma_noncontig_sync,
        .get_addr = snd_dma_noncontig_get_addr,
        .get_page = snd_dma_noncontig_get_page,
        .get_chunk_size = snd_dma_noncontig_get_chunk_size,
};

/* Fallback SG-buffer allocations for x86 */
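/*
 * The fallback allocator builds the buffer from multiple smaller physically
 * contiguous chunks (halving the chunk size whenever an allocation fails) and
 * stitches them together with vmap() into one virtually contiguous area; the
 * per-page DMA addresses are kept in the addrs[] array so that
 * snd_dma_sg_fallback_get_addr() still works.
 */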
struct snd_dma_sg_fallback {
        bool use_dma_alloc_coherent;
        size_t count;
        struct page **pages;
        /* DMA address array; the first page contains #pages in ~PAGE_MASK */
        dma_addr_t *addrs;
};

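/*
 * Packing sketch for addrs[] above, assuming 4 kB pages: for a 16 kB chunk
 * (4 pages) starting at the hypothetical DMA address 0x1234000, the first of
 * its four entries stores 0x1234000 | 4 (page count in the low bits) and the
 * following entries store 0x1235000, 0x1236000 and 0x1237000 with zero low
 * bits, so the chunk boundaries can be recovered at free time.
 */
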
static void __snd_dma_sg_fallback_free(struct snd_dma_buffer *dmab,
                                       struct snd_dma_sg_fallback *sgbuf)
{
        size_t i, size;

        if (sgbuf->pages && sgbuf->addrs) {
                i = 0;
                while (i < sgbuf->count) {
                        if (!sgbuf->pages[i] || !sgbuf->addrs[i])
                                break;
                        size = sgbuf->addrs[i] & ~PAGE_MASK;
                        if (WARN_ON(!size))
                                break;
                        if (sgbuf->use_dma_alloc_coherent)
                                dma_free_coherent(dmab->dev.dev, size << PAGE_SHIFT,
                                                  page_address(sgbuf->pages[i]),
                                                  sgbuf->addrs[i] & PAGE_MASK);
                        else
                                do_free_pages(page_address(sgbuf->pages[i]),
                                              size << PAGE_SHIFT, false);
                        i += size;
                }
        }
        kvfree(sgbuf->pages);
        kvfree(sgbuf->addrs);
        kfree(sgbuf);
}

static void *snd_dma_sg_fallback_alloc(struct snd_dma_buffer *dmab, size_t size)
{
        struct snd_dma_sg_fallback *sgbuf;
        struct page **pagep, *curp;
        size_t chunk, npages;
        dma_addr_t *addrp;
        dma_addr_t addr;
        void *p;

        /* correct the type */
        if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_SG)
                dmab->dev.type = SNDRV_DMA_TYPE_DEV_SG_FALLBACK;
        else if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG)
                dmab->dev.type = SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK;

        sgbuf = kzalloc(sizeof(*sgbuf), GFP_KERNEL);
        if (!sgbuf)
                return NULL;
        sgbuf->use_dma_alloc_coherent = cpu_feature_enabled(X86_FEATURE_XENPV);
        size = PAGE_ALIGN(size);
        sgbuf->count = size >> PAGE_SHIFT;
        sgbuf->pages = kvcalloc(sgbuf->count, sizeof(*sgbuf->pages), GFP_KERNEL);
        sgbuf->addrs = kvcalloc(sgbuf->count, sizeof(*sgbuf->addrs), GFP_KERNEL);
        if (!sgbuf->pages || !sgbuf->addrs)
                goto error;

        pagep = sgbuf->pages;
        addrp = sgbuf->addrs;
        chunk = (PAGE_SIZE - 1) << PAGE_SHIFT; /* to fit in low bits in addrs */
        while (size > 0) {
                chunk = min(size, chunk);
                if (sgbuf->use_dma_alloc_coherent)
                        p = dma_alloc_coherent(dmab->dev.dev, chunk, &addr, DEFAULT_GFP);
                else
                        p = do_alloc_pages(dmab->dev.dev, chunk, &addr, false);
                if (!p) {
                        if (chunk <= PAGE_SIZE)
                                goto error;
                        chunk >>= 1;
                        chunk = PAGE_SIZE << get_order(chunk);
                        continue;
                }

                size -= chunk;
                /* fill pages */
                npages = chunk >> PAGE_SHIFT;
                *addrp = npages; /* store in lower bits */
                curp = virt_to_page(p);
                while (npages--) {
                        *pagep++ = curp++;
                        *addrp++ |= addr;
                        addr += PAGE_SIZE;
                }
        }

        p = vmap(sgbuf->pages, sgbuf->count, VM_MAP, PAGE_KERNEL);
        if (!p)
                goto error;

        if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK)
                set_pages_array_wc(sgbuf->pages, sgbuf->count);

        dmab->private_data = sgbuf;
        /* store the first page address for convenience */
        dmab->addr = sgbuf->addrs[0] & PAGE_MASK;
        return p;

 error:
        __snd_dma_sg_fallback_free(dmab, sgbuf);
        return NULL;
}

static void snd_dma_sg_fallback_free(struct snd_dma_buffer *dmab)
{
        struct snd_dma_sg_fallback *sgbuf = dmab->private_data;

        if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK)
                set_pages_array_wb(sgbuf->pages, sgbuf->count);
        vunmap(dmab->area);
        __snd_dma_sg_fallback_free(dmab, dmab->private_data);
}

static dma_addr_t snd_dma_sg_fallback_get_addr(struct snd_dma_buffer *dmab,
                                               size_t offset)
{
        struct snd_dma_sg_fallback *sgbuf = dmab->private_data;
        size_t index = offset >> PAGE_SHIFT;

        return (sgbuf->addrs[index] & PAGE_MASK) | (offset & ~PAGE_MASK);
}

static int snd_dma_sg_fallback_mmap(struct snd_dma_buffer *dmab,
                                    struct vm_area_struct *area)
{
        struct snd_dma_sg_fallback *sgbuf = dmab->private_data;

        if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK)
                area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
        return vm_map_pages(area, sgbuf->pages, sgbuf->count);
}

static const struct snd_malloc_ops snd_dma_sg_fallback_ops = {
        .alloc = snd_dma_sg_fallback_alloc,
        .free = snd_dma_sg_fallback_free,
        .mmap = snd_dma_sg_fallback_mmap,
        .get_addr = snd_dma_sg_fallback_get_addr,
        /* reuse vmalloc helpers */
        .get_page = snd_dma_vmalloc_get_page,
        .get_chunk_size = snd_dma_vmalloc_get_chunk_size,
};
#endif /* CONFIG_SND_DMA_SGBUF */

/*
 * Non-coherent pages allocator
 */
static void *snd_dma_noncoherent_alloc(struct snd_dma_buffer *dmab, size_t size)
{
        void *p;

        p = dma_alloc_noncoherent(dmab->dev.dev, size, &dmab->addr,
                                  dmab->dev.dir, DEFAULT_GFP);
        if (p)
                dmab->dev.need_sync = dma_need_sync(dmab->dev.dev, dmab->addr);
        return p;
}

static void snd_dma_noncoherent_free(struct snd_dma_buffer *dmab)
{
        dma_free_noncoherent(dmab->dev.dev, dmab->bytes, dmab->area,
                             dmab->addr, dmab->dev.dir);
}

static int snd_dma_noncoherent_mmap(struct snd_dma_buffer *dmab,
                                    struct vm_area_struct *area)
{
        area->vm_page_prot = vm_get_page_prot(area->vm_flags);
        return dma_mmap_pages(dmab->dev.dev, area,
                              area->vm_end - area->vm_start,
                              virt_to_page(dmab->area));
}

static void snd_dma_noncoherent_sync(struct snd_dma_buffer *dmab,
                                     enum snd_dma_sync_mode mode)
{
        if (mode == SNDRV_DMA_SYNC_CPU) {
                if (dmab->dev.dir != DMA_TO_DEVICE)
                        dma_sync_single_for_cpu(dmab->dev.dev, dmab->addr,
                                                dmab->bytes, dmab->dev.dir);
        } else {
                if (dmab->dev.dir != DMA_FROM_DEVICE)
                        dma_sync_single_for_device(dmab->dev.dev, dmab->addr,
                                                   dmab->bytes, dmab->dev.dir);
        }
}

static const struct snd_malloc_ops snd_dma_noncoherent_ops = {
        .alloc = snd_dma_noncoherent_alloc,
        .free = snd_dma_noncoherent_free,
        .mmap = snd_dma_noncoherent_mmap,
        .sync = snd_dma_noncoherent_sync,
};
#endif /* CONFIG_HAS_DMA */

/*
 * Entry points
 */
static const struct snd_malloc_ops *snd_dma_ops[] = {
        [SNDRV_DMA_TYPE_CONTINUOUS] = &snd_dma_continuous_ops,
        [SNDRV_DMA_TYPE_VMALLOC] = &snd_dma_vmalloc_ops,
#ifdef CONFIG_HAS_DMA
        [SNDRV_DMA_TYPE_DEV] = &snd_dma_dev_ops,
        [SNDRV_DMA_TYPE_DEV_WC] = &snd_dma_wc_ops,
        [SNDRV_DMA_TYPE_NONCONTIG] = &snd_dma_noncontig_ops,
        [SNDRV_DMA_TYPE_NONCOHERENT] = &snd_dma_noncoherent_ops,
#ifdef CONFIG_SND_DMA_SGBUF
        [SNDRV_DMA_TYPE_DEV_WC_SG] = &snd_dma_sg_wc_ops,
#endif
#ifdef CONFIG_GENERIC_ALLOCATOR
        [SNDRV_DMA_TYPE_DEV_IRAM] = &snd_dma_iram_ops,
#endif /* CONFIG_GENERIC_ALLOCATOR */
#ifdef CONFIG_SND_DMA_SGBUF
        [SNDRV_DMA_TYPE_DEV_SG_FALLBACK] = &snd_dma_sg_fallback_ops,
        [SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK] = &snd_dma_sg_fallback_ops,
#endif
#endif /* CONFIG_HAS_DMA */
};

static const struct snd_malloc_ops *snd_dma_get_ops(struct snd_dma_buffer *dmab)
{
        if (WARN_ON_ONCE(!dmab))
                return NULL;
        if (WARN_ON_ONCE(dmab->dev.type <= SNDRV_DMA_TYPE_UNKNOWN ||
                         dmab->dev.type >= ARRAY_SIZE(snd_dma_ops)))
                return NULL;
        return snd_dma_ops[dmab->dev.type];
}