// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
 */

/*
 * This code implements the DMA subsystem. It provides a HW-neutral interface
 * for other kernel code to use asynchronous memory copy capabilities,
 * if present, and allows different HW DMA drivers to register as providing
 * this capability.
 *
 * Because we are accelerating what is already a relatively fast operation,
 * the code goes to great lengths to avoid additional overhead, such as
 * locking.
 *
 * LOCKING:
 *
 * The subsystem keeps a global list of dma_device structs; it is protected by
 * a mutex, dma_list_mutex.
 *
 * A subsystem can get access to a channel by calling dmaengine_get() followed
 * by dma_find_channel(), or, if it needs an exclusive channel, it can call
 * dma_request_channel(). Once a channel is allocated, a reference is taken
 * against its corresponding driver to disable removal.
 *
 * Each device has a channels list, which runs unlocked but is never modified
 * once the device is registered; it is just set up by the driver.
 *
 * See Documentation/driver-api/dmaengine for more details.
 */
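/*
 * Illustrative sketch (editorial addition, not part of the driver): a rough
 * outline of how a client might use the opportunistic memcpy interface
 * described above. Error handling and the descriptor preparation details are
 * simplified; see Documentation/driver-api/dmaengine for the full flow.
 *
 *	dmaengine_get();			// register interest in channels
 *	chan = dma_find_channel(DMA_MEMCPY);	// per-CPU lookup, may be NULL
 *	if (chan) {
 *		// build a descriptor via chan->device->device_prep_dma_memcpy(),
 *		// submit it, then kick the hardware:
 *		dma_async_issue_pending(chan);
 *	}
 *	dmaengine_put();			// drop the client reference
 */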
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/hardirq.h>
#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/mutex.h>
#include <linux/jiffies.h>
#include <linux/rculist.h>
#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/acpi_dma.h>
#include <linux/of_dma.h>
#include <linux/mempool.h>
#include <linux/numa.h>

#include "dmaengine.h"
static DEFINE_MUTEX(dma_list_mutex);
static DEFINE_IDA(dma_ida);
static LIST_HEAD(dma_device_list);
static long dmaengine_ref_count;

/* --- debugfs implementation --- */
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>

static struct dentry *rootdir;

static void dmaengine_debug_register(struct dma_device *dma_dev)
{
	dma_dev->dbg_dev_root = debugfs_create_dir(dev_name(dma_dev->dev),
						   rootdir);
	if (IS_ERR(dma_dev->dbg_dev_root))
		dma_dev->dbg_dev_root = NULL;
}

static void dmaengine_debug_unregister(struct dma_device *dma_dev)
{
	debugfs_remove_recursive(dma_dev->dbg_dev_root);
	dma_dev->dbg_dev_root = NULL;
}

static void dmaengine_dbg_summary_show(struct seq_file *s,
				       struct dma_device *dma_dev)
{
	struct dma_chan *chan;

	list_for_each_entry(chan, &dma_dev->channels, device_node) {
		if (chan->client_count) {
			seq_printf(s, " %-13s| %s", dma_chan_name(chan),
				   chan->dbg_client_name ?: "in-use");

			if (chan->router)
				seq_printf(s, " (via router: %s)\n",
					   dev_name(chan->router->dev));
			else
				seq_puts(s, "\n");
		}
	}
}

static int dmaengine_summary_show(struct seq_file *s, void *data)
{
	struct dma_device *dma_dev = NULL;

	mutex_lock(&dma_list_mutex);
	list_for_each_entry(dma_dev, &dma_device_list, global_node) {
		seq_printf(s, "dma%d (%s): number of channels: %u\n",
			   dma_dev->dev_id, dev_name(dma_dev->dev),
			   dma_dev->chancnt);

		if (dma_dev->dbg_summary_show)
			dma_dev->dbg_summary_show(s, dma_dev);
		else
			dmaengine_dbg_summary_show(s, dma_dev);

		if (!list_is_last(&dma_dev->global_node, &dma_device_list))
			seq_puts(s, "\n");
	}
	mutex_unlock(&dma_list_mutex);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(dmaengine_summary);

static void __init dmaengine_debugfs_init(void)
{
	rootdir = debugfs_create_dir("dmaengine", NULL);

	/* /sys/kernel/debug/dmaengine/summary */
	debugfs_create_file("summary", 0444, rootdir, NULL,
			    &dmaengine_summary_fops);
}
#else
static inline void dmaengine_debugfs_init(void) { }
static inline int dmaengine_debug_register(struct dma_device *dma_dev)
{
	return 0;
}

static inline void dmaengine_debug_unregister(struct dma_device *dma_dev) { }
#endif	/* DEBUG_FS */
/* --- sysfs implementation --- */

#define DMA_SLAVE_NAME	"slave"

/**
 * dev_to_dma_chan - convert a device pointer to its sysfs container object
 * @dev: device node
 *
 * Must be called under dma_list_mutex.
 */
static struct dma_chan *dev_to_dma_chan(struct device *dev)
{
	struct dma_chan_dev *chan_dev;

	chan_dev = container_of(dev, typeof(*chan_dev), device);
	return chan_dev->chan;
}

static ssize_t memcpy_count_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct dma_chan *chan;
	unsigned long count = 0;
	int i;
	int err;

	mutex_lock(&dma_list_mutex);
	chan = dev_to_dma_chan(dev);
	if (chan) {
		for_each_possible_cpu(i)
			count += per_cpu_ptr(chan->local, i)->memcpy_count;
		err = sprintf(buf, "%lu\n", count);
	} else
		err = -ENODEV;
	mutex_unlock(&dma_list_mutex);

	return err;
}
static DEVICE_ATTR_RO(memcpy_count);

static ssize_t bytes_transferred_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct dma_chan *chan;
	unsigned long count = 0;
	int i;
	int err;

	mutex_lock(&dma_list_mutex);
	chan = dev_to_dma_chan(dev);
	if (chan) {
		for_each_possible_cpu(i)
			count += per_cpu_ptr(chan->local, i)->bytes_transferred;
		err = sprintf(buf, "%lu\n", count);
	} else
		err = -ENODEV;
	mutex_unlock(&dma_list_mutex);

	return err;
}
static DEVICE_ATTR_RO(bytes_transferred);

static ssize_t in_use_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct dma_chan *chan;
	int err;

	mutex_lock(&dma_list_mutex);
	chan = dev_to_dma_chan(dev);
	if (chan)
		err = sprintf(buf, "%d\n", chan->client_count);
	else
		err = -ENODEV;
	mutex_unlock(&dma_list_mutex);

	return err;
}
static DEVICE_ATTR_RO(in_use);

static struct attribute *dma_dev_attrs[] = {
	&dev_attr_memcpy_count.attr,
	&dev_attr_bytes_transferred.attr,
	&dev_attr_in_use.attr,
	NULL,
};
ATTRIBUTE_GROUPS(dma_dev);

static void chan_dev_release(struct device *dev)
{
	struct dma_chan_dev *chan_dev;

	chan_dev = container_of(dev, typeof(*chan_dev), device);
	kfree(chan_dev);
}

static struct class dma_devclass = {
	.name		= "dma",
	.dev_groups	= dma_dev_groups,
	.dev_release	= chan_dev_release,
};
/* --- client and device registration --- */

/* enable iteration over all operation types */
static dma_cap_mask_t dma_cap_mask_all;

/**
 * struct dma_chan_tbl_ent - tracks channel allocations per core/operation
 * @chan: associated channel for this entry
 */
struct dma_chan_tbl_ent {
	struct dma_chan *chan;
};

/* percpu lookup table for memory-to-memory offload providers */
static struct dma_chan_tbl_ent __percpu *channel_table[DMA_TX_TYPE_END];

static int __init dma_channel_table_init(void)
{
	enum dma_transaction_type cap;
	int err = 0;

	bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END);

	/* 'interrupt', 'private', and 'slave' are channel capabilities,
	 * but are not associated with an operation so they do not need
	 * an entry in the channel_table
	 */
	clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits);
	clear_bit(DMA_PRIVATE, dma_cap_mask_all.bits);
	clear_bit(DMA_SLAVE, dma_cap_mask_all.bits);

	for_each_dma_cap_mask(cap, dma_cap_mask_all) {
		channel_table[cap] = alloc_percpu(struct dma_chan_tbl_ent);
		if (!channel_table[cap]) {
			err = -ENOMEM;
			break;
		}
	}

	if (err) {
		pr_err("dmaengine dma_channel_table_init failure: %d\n", err);
		for_each_dma_cap_mask(cap, dma_cap_mask_all)
			free_percpu(channel_table[cap]);
	}

	return err;
}
arch_initcall(dma_channel_table_init);
/**
 * dma_chan_is_local - checks if the channel is in the same NUMA-node as the CPU
 * @chan: DMA channel to test
 * @cpu: CPU index which the channel should be close to
 *
 * Returns true if the channel is in the same NUMA-node as the CPU.
 */
static bool dma_chan_is_local(struct dma_chan *chan, int cpu)
{
	int node = dev_to_node(chan->device->dev);

	return node == NUMA_NO_NODE ||
		cpumask_test_cpu(cpu, cpumask_of_node(node));
}

/**
 * min_chan - finds the channel with min count and in the same NUMA-node as the CPU
 * @cap: capability to match
 * @cpu: CPU index which the channel should be close to
 *
 * If some channels are close to the given CPU, the one with the lowest
 * reference count is returned. Otherwise, CPU is ignored and only the
 * reference count is taken into account.
 *
 * Must be called under dma_list_mutex.
 */
static struct dma_chan *min_chan(enum dma_transaction_type cap, int cpu)
{
	struct dma_device *device;
	struct dma_chan *chan;
	struct dma_chan *min = NULL;
	struct dma_chan *localmin = NULL;

	list_for_each_entry(device, &dma_device_list, global_node) {
		if (!dma_has_cap(cap, device->cap_mask) ||
		    dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node) {
			if (!chan->client_count)
				continue;
			if (!min || chan->table_count < min->table_count)
				min = chan;

			if (dma_chan_is_local(chan, cpu))
				if (!localmin ||
				    chan->table_count < localmin->table_count)
					localmin = chan;
		}
	}

	chan = localmin ? localmin : min;

	if (chan)
		chan->table_count++;

	return chan;
}

/**
 * dma_channel_rebalance - redistribute the available channels
 *
 * Optimize for CPU isolation (each CPU gets a dedicated channel for an
 * operation type) in the SMP case, and operation isolation (avoid
 * multi-tasking channels) in the non-SMP case.
 *
 * Must be called under dma_list_mutex.
 */
static void dma_channel_rebalance(void)
{
	struct dma_chan *chan;
	struct dma_device *device;
	int cpu;
	int cap;

	/* undo the last distribution */
	for_each_dma_cap_mask(cap, dma_cap_mask_all)
		for_each_possible_cpu(cpu)
			per_cpu_ptr(channel_table[cap], cpu)->chan = NULL;

	list_for_each_entry(device, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node)
			chan->table_count = 0;
	}

	/* don't populate the channel_table if no clients are available */
	if (!dmaengine_ref_count)
		return;

	/* redistribute available channels */
	for_each_dma_cap_mask(cap, dma_cap_mask_all)
		for_each_online_cpu(cpu) {
			chan = min_chan(cap, cpu);
			per_cpu_ptr(channel_table[cap], cpu)->chan = chan;
		}
}

static int dma_device_satisfies_mask(struct dma_device *device,
				     const dma_cap_mask_t *want)
{
	dma_cap_mask_t has;

	bitmap_and(has.bits, want->bits, device->cap_mask.bits,
		   DMA_TX_TYPE_END);
	return bitmap_equal(want->bits, has.bits, DMA_TX_TYPE_END);
}
static struct module *dma_chan_to_owner(struct dma_chan *chan)
{
	return chan->device->owner;
}

/**
 * balance_ref_count - catch up the channel reference count
 * @chan: channel to balance ->client_count versus dmaengine_ref_count
 *
 * Must be called under dma_list_mutex.
 */
static void balance_ref_count(struct dma_chan *chan)
{
	struct module *owner = dma_chan_to_owner(chan);

	while (chan->client_count < dmaengine_ref_count) {
		__module_get(owner);
		chan->client_count++;
	}
}

static void dma_device_release(struct kref *ref)
{
	struct dma_device *device = container_of(ref, struct dma_device, ref);

	list_del_rcu(&device->global_node);
	dma_channel_rebalance();

	if (device->device_release)
		device->device_release(device);
}

static void dma_device_put(struct dma_device *device)
{
	lockdep_assert_held(&dma_list_mutex);
	kref_put(&device->ref, dma_device_release);
}

/**
 * dma_chan_get - try to grab a DMA channel's parent driver module
 * @chan: channel to grab
 *
 * Must be called under dma_list_mutex.
 */
static int dma_chan_get(struct dma_chan *chan)
{
	struct module *owner = dma_chan_to_owner(chan);
	int ret;

	/* The channel is already in use, update client count */
	if (chan->client_count) {
		__module_get(owner);
		chan->client_count++;
		return 0;
	}

	if (!try_module_get(owner))
		return -ENODEV;

	ret = kref_get_unless_zero(&chan->device->ref);
	if (!ret) {
		ret = -ENODEV;
		goto module_put_out;
	}

	/* allocate upon first client reference */
	if (chan->device->device_alloc_chan_resources) {
		ret = chan->device->device_alloc_chan_resources(chan);
		if (ret < 0)
			goto err_out;
	}

	chan->client_count++;

	if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask))
		balance_ref_count(chan);

	return 0;

err_out:
	dma_device_put(chan->device);
module_put_out:
	module_put(owner);

	return ret;
}

/**
 * dma_chan_put - drop a reference to a DMA channel's parent driver module
 * @chan: channel to release
 *
 * Must be called under dma_list_mutex.
 */
static void dma_chan_put(struct dma_chan *chan)
{
	/* This channel is not in use, bail out */
	if (!chan->client_count)
		return;

	chan->client_count--;

	/* This channel is not in use anymore, free it */
	if (!chan->client_count && chan->device->device_free_chan_resources) {
		/* Make sure all operations have completed */
		dmaengine_synchronize(chan);
		chan->device->device_free_chan_resources(chan);
	}

	/* If the channel is used via a DMA request router, free the mapping */
	if (chan->router && chan->router->route_free) {
		chan->router->route_free(chan->router->dev, chan->route_data);
		chan->router = NULL;
		chan->route_data = NULL;
	}

	dma_device_put(chan->device);
	module_put(dma_chan_to_owner(chan));
}
enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
{
	enum dma_status status;
	unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);

	dma_async_issue_pending(chan);
	do {
		status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
			dev_err(chan->device->dev, "%s: timeout!\n", __func__);
			return DMA_ERROR;
		}
		if (status != DMA_IN_PROGRESS)
			break;
		cpu_relax();
	} while (1);

	return status;
}
EXPORT_SYMBOL(dma_sync_wait);
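/*
 * Illustrative sketch (editorial addition): how a caller that has already
 * prepared and submitted a descriptor might busy-wait on it with the helper
 * above. The tx and chan variables come from the caller's own setup.
 *
 *	cookie = dmaengine_submit(tx);
 *	if (dma_submit_error(cookie))
 *		return -EIO;
 *	if (dma_sync_wait(chan, cookie) != DMA_COMPLETE)
 *		return -EIO;
 */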
/**
 * dma_find_channel - find a channel to carry out the operation
 * @tx_type: transaction type
 */
struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
{
	return this_cpu_read(channel_table[tx_type]->chan);
}
EXPORT_SYMBOL(dma_find_channel);

/**
 * dma_issue_pending_all - flush all pending operations across all channels
 */
void dma_issue_pending_all(void)
{
	struct dma_device *device;
	struct dma_chan *chan;

	rcu_read_lock();
	list_for_each_entry_rcu(device, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node)
			if (chan->client_count)
				device->device_issue_pending(chan);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(dma_issue_pending_all);

int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps)
{
	struct dma_device *device;

	if (!chan || !caps)
		return -EINVAL;

	device = chan->device;

	/* check if the channel supports slave transactions */
	if (!(test_bit(DMA_SLAVE, device->cap_mask.bits) ||
	      test_bit(DMA_CYCLIC, device->cap_mask.bits)))
		return -ENXIO;

	/*
	 * Check whether it reports it uses the generic slave
	 * capabilities, if not, that means it doesn't support any
	 * kind of slave capabilities reporting.
	 */
	if (!device->directions)
		return -ENXIO;

	caps->src_addr_widths = device->src_addr_widths;
	caps->dst_addr_widths = device->dst_addr_widths;
	caps->directions = device->directions;
	caps->min_burst = device->min_burst;
	caps->max_burst = device->max_burst;
	caps->max_sg_burst = device->max_sg_burst;
	caps->residue_granularity = device->residue_granularity;
	caps->descriptor_reuse = device->descriptor_reuse;
	caps->cmd_pause = !!device->device_pause;
	caps->cmd_resume = !!device->device_resume;
	caps->cmd_terminate = !!device->device_terminate_all;

	/*
	 * DMA engine device might be configured with non-uniformly
	 * distributed slave capabilities per device channels. In this
	 * case the corresponding driver may provide the device_caps
	 * callback to override the generic capabilities with
	 * channel-specific ones.
	 */
	if (device->device_caps)
		device->device_caps(chan, caps);

	return 0;
}
EXPORT_SYMBOL_GPL(dma_get_slave_caps);
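/*
 * Illustrative sketch (editorial addition): a slave client can use the helper
 * above to discover what its channel supports before programming it. The
 * direction and bus width checked here are only an example.
 *
 *	struct dma_slave_caps caps;
 *
 *	if (!dma_get_slave_caps(chan, &caps) &&
 *	    (caps.directions & BIT(DMA_DEV_TO_MEM)) &&
 *	    (caps.src_addr_widths & BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)))
 *		;	// safe to set up 32-bit device-to-memory transfers
 */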
static struct dma_chan *private_candidate(const dma_cap_mask_t *mask,
					  struct dma_device *dev,
					  dma_filter_fn fn, void *fn_param)
{
	struct dma_chan *chan;

	if (mask && !dma_device_satisfies_mask(dev, mask)) {
		dev_dbg(dev->dev, "%s: wrong capabilities\n", __func__);
		return NULL;
	}
	/* devices with multiple channels need special handling as we need to
	 * ensure that all channels are either private or public.
	 */
	if (dev->chancnt > 1 && !dma_has_cap(DMA_PRIVATE, dev->cap_mask))
		list_for_each_entry(chan, &dev->channels, device_node) {
			/* some channels are already publicly allocated */
			if (chan->client_count)
				return NULL;
		}

	list_for_each_entry(chan, &dev->channels, device_node) {
		if (chan->client_count) {
			dev_dbg(dev->dev, "%s: %s busy\n",
				__func__, dma_chan_name(chan));
			continue;
		}
		if (fn && !fn(chan, fn_param)) {
			dev_dbg(dev->dev, "%s: %s filter said false\n",
				__func__, dma_chan_name(chan));
			continue;
		}
		return chan;
	}

	return NULL;
}

static struct dma_chan *find_candidate(struct dma_device *device,
				       const dma_cap_mask_t *mask,
				       dma_filter_fn fn, void *fn_param)
{
	struct dma_chan *chan = private_candidate(mask, device, fn, fn_param);
	int err;

	if (chan) {
		/* Found a suitable channel, try to grab, prep, and return it.
		 * We first set DMA_PRIVATE to disable balance_ref_count as this
		 * channel will not be published in the general-purpose
		 * allocator
		 */
		dma_cap_set(DMA_PRIVATE, device->cap_mask);
		device->privatecnt++;
		err = dma_chan_get(chan);

		if (err) {
			if (err == -ENODEV) {
				dev_dbg(device->dev, "%s: %s module removed\n",
					__func__, dma_chan_name(chan));
				list_del_rcu(&device->global_node);
			} else
				dev_dbg(device->dev,
					"%s: failed to get %s: (%d)\n",
					__func__, dma_chan_name(chan), err);

			if (--device->privatecnt == 0)
				dma_cap_clear(DMA_PRIVATE, device->cap_mask);

			chan = ERR_PTR(err);
		}
	}

	return chan ? chan : ERR_PTR(-EPROBE_DEFER);
}

/**
 * dma_get_slave_channel - try to get specific channel exclusively
 * @chan: target channel
 */
struct dma_chan *dma_get_slave_channel(struct dma_chan *chan)
{
	/* lock against __dma_request_channel */
	mutex_lock(&dma_list_mutex);

	if (chan->client_count == 0) {
		struct dma_device *device = chan->device;
		int err;

		dma_cap_set(DMA_PRIVATE, device->cap_mask);
		device->privatecnt++;
		err = dma_chan_get(chan);
		if (err) {
			dev_dbg(chan->device->dev,
				"%s: failed to get %s: (%d)\n",
				__func__, dma_chan_name(chan), err);
			chan = NULL;
			if (--device->privatecnt == 0)
				dma_cap_clear(DMA_PRIVATE, device->cap_mask);
		}
	} else
		chan = NULL;

	mutex_unlock(&dma_list_mutex);

	return chan;
}
EXPORT_SYMBOL_GPL(dma_get_slave_channel);

struct dma_chan *dma_get_any_slave_channel(struct dma_device *device)
{
	dma_cap_mask_t mask;
	struct dma_chan *chan;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* lock against __dma_request_channel */
	mutex_lock(&dma_list_mutex);

	chan = find_candidate(device, &mask, NULL, NULL);

	mutex_unlock(&dma_list_mutex);

	return IS_ERR(chan) ? NULL : chan;
}
EXPORT_SYMBOL_GPL(dma_get_any_slave_channel);

/**
 * __dma_request_channel - try to allocate an exclusive channel
 * @mask: capabilities that the channel must satisfy
 * @fn: optional callback to disposition available channels
 * @fn_param: opaque parameter to pass to dma_filter_fn()
 * @np: device node to look for DMA channels
 *
 * Returns pointer to appropriate DMA channel on success or NULL.
 */
struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
				       dma_filter_fn fn, void *fn_param,
				       struct device_node *np)
{
	struct dma_device *device, *_d;
	struct dma_chan *chan = NULL;

	/* Find a channel */
	mutex_lock(&dma_list_mutex);
	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
		/* Finds a DMA controller with matching device node */
		if (np && device->dev->of_node && np != device->dev->of_node)
			continue;

		chan = find_candidate(device, mask, fn, fn_param);
		if (!IS_ERR(chan))
			break;

		chan = NULL;
	}
	mutex_unlock(&dma_list_mutex);

	pr_debug("%s: %s (%s)\n",
		 __func__,
		 chan ? "success" : "fail",
		 chan ? dma_chan_name(chan) : NULL);

	return chan;
}
EXPORT_SYMBOL_GPL(__dma_request_channel);

static const struct dma_slave_map *dma_filter_match(struct dma_device *device,
						    const char *name,
						    struct device *dev)
{
	int i;

	if (!device->filter.mapcnt)
		return NULL;

	for (i = 0; i < device->filter.mapcnt; i++) {
		const struct dma_slave_map *map = &device->filter.map[i];

		if (!strcmp(map->devname, dev_name(dev)) &&
		    !strcmp(map->slave, name))
			return map;
	}

	return NULL;
}
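/*
 * Illustrative sketch (editorial addition): the filter map matched above is
 * typically supplied by the DMA controller driver at probe time. The board
 * device names, the "rx"/"tx" slave names, the FOO_DMA_REQ_* values and
 * foo_dma_filter_fn are hypothetical.
 *
 *	static const struct dma_slave_map foo_slave_map[] = {
 *		{ "soc-uart.0", "rx", (void *)FOO_DMA_REQ_UART0_RX },
 *		{ "soc-uart.0", "tx", (void *)FOO_DMA_REQ_UART0_TX },
 *	};
 *
 *	dma_dev->filter.map = foo_slave_map;
 *	dma_dev->filter.mapcnt = ARRAY_SIZE(foo_slave_map);
 *	dma_dev->filter.fn = foo_dma_filter_fn;
 */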
/**
 * dma_request_chan - try to allocate an exclusive slave channel
 * @dev: pointer to client device structure
 * @name: slave channel name
 *
 * Returns pointer to appropriate DMA channel on success or an error pointer.
 */
struct dma_chan *dma_request_chan(struct device *dev, const char *name)
{
	struct dma_device *d, *_d;
	struct dma_chan *chan = NULL;

	/* If device-tree is present get slave info from here */
	if (dev->of_node)
		chan = of_dma_request_slave_channel(dev->of_node, name);

	/* If device was enumerated by ACPI get slave info from here */
	if (has_acpi_companion(dev) && !chan)
		chan = acpi_dma_request_slave_chan_by_name(dev, name);

	if (PTR_ERR(chan) == -EPROBE_DEFER)
		return chan;

	if (!IS_ERR_OR_NULL(chan))
		goto found;

	/* Try to find the channel via the DMA filter map(s) */
	mutex_lock(&dma_list_mutex);
	list_for_each_entry_safe(d, _d, &dma_device_list, global_node) {
		dma_cap_mask_t mask;
		const struct dma_slave_map *map = dma_filter_match(d, name, dev);

		if (!map)
			continue;

		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);

		chan = find_candidate(d, &mask, d->filter.fn, map->param);
		if (!IS_ERR(chan))
			break;
	}
	mutex_unlock(&dma_list_mutex);

	if (IS_ERR(chan))
		return chan;
	if (!chan)
		return ERR_PTR(-EPROBE_DEFER);

found:
#ifdef CONFIG_DEBUG_FS
	chan->dbg_client_name = kasprintf(GFP_KERNEL, "%s:%s", dev_name(dev),
					  name);
#endif

	chan->name = kasprintf(GFP_KERNEL, "dma:%s", name);
	if (!chan->name)
		return chan;
	chan->slave = dev;

	if (sysfs_create_link(&chan->dev->device.kobj, &dev->kobj,
			      DMA_SLAVE_NAME))
		dev_warn(dev, "Cannot create DMA %s symlink\n", DMA_SLAVE_NAME);
	if (sysfs_create_link(&dev->kobj, &chan->dev->device.kobj, chan->name))
		dev_warn(dev, "Cannot create DMA %s symlink\n", chan->name);

	return chan;
}
EXPORT_SYMBOL_GPL(dma_request_chan);
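/*
 * Illustrative sketch (editorial addition): typical client usage of the
 * lookup above, paired with dma_release_channel() on teardown. The "rx"
 * channel name is an example and must match the client's DT/ACPI binding or
 * a filter map entry.
 *
 *	chan = dma_request_chan(&pdev->dev, "rx");
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);	// may be -EPROBE_DEFER
 *	...
 *	dma_release_channel(chan);
 */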
/**
 * dma_request_chan_by_mask - allocate a channel satisfying certain capabilities
 * @mask: capabilities that the channel must satisfy
 *
 * Returns pointer to appropriate DMA channel on success or an error pointer.
 */
struct dma_chan *dma_request_chan_by_mask(const dma_cap_mask_t *mask)
{
	struct dma_chan *chan;

	if (!mask)
		return ERR_PTR(-ENODEV);

	chan = __dma_request_channel(mask, NULL, NULL, NULL);
	if (!chan) {
		mutex_lock(&dma_list_mutex);
		if (list_empty(&dma_device_list))
			chan = ERR_PTR(-EPROBE_DEFER);
		else
			chan = ERR_PTR(-ENODEV);
		mutex_unlock(&dma_list_mutex);
	}

	return chan;
}
EXPORT_SYMBOL_GPL(dma_request_chan_by_mask);
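/*
 * Illustrative sketch (editorial addition): requesting any channel capable of
 * plain memory-to-memory copies, regardless of which controller provides it.
 *
 *	dma_cap_mask_t mask;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_chan_by_mask(&mask);
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);
 */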
void dma_release_channel(struct dma_chan *chan)
{
	mutex_lock(&dma_list_mutex);
	WARN_ONCE(chan->client_count != 1,
		  "chan reference count %d != 1\n", chan->client_count);
	dma_chan_put(chan);
	/* drop PRIVATE cap enabled by __dma_request_channel() */
	if (--chan->device->privatecnt == 0)
		dma_cap_clear(DMA_PRIVATE, chan->device->cap_mask);

	if (chan->slave) {
		sysfs_remove_link(&chan->dev->device.kobj, DMA_SLAVE_NAME);
		sysfs_remove_link(&chan->slave->kobj, chan->name);
		kfree(chan->name);
		chan->name = NULL;
		chan->slave = NULL;
	}

#ifdef CONFIG_DEBUG_FS
	kfree(chan->dbg_client_name);
	chan->dbg_client_name = NULL;
#endif
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL_GPL(dma_release_channel);
/**
 * dmaengine_get - register interest in dma_channels
 */
void dmaengine_get(void)
{
	struct dma_device *device, *_d;
	struct dma_chan *chan;
	int err;

	mutex_lock(&dma_list_mutex);
	dmaengine_ref_count++;

	/* try to grab channels */
	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node) {
			err = dma_chan_get(chan);
			if (err == -ENODEV) {
				/* module removed before we could use it */
				list_del_rcu(&device->global_node);
				break;
			} else if (err)
				dev_dbg(chan->device->dev,
					"%s: failed to get %s: (%d)\n",
					__func__, dma_chan_name(chan), err);
		}
	}

	/* if this is the first reference and there were channels
	 * waiting we need to rebalance to get those channels
	 * incorporated into the channel table
	 */
	if (dmaengine_ref_count == 1)
		dma_channel_rebalance();
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dmaengine_get);

/**
 * dmaengine_put - let DMA drivers be removed when ref_count == 0
 */
void dmaengine_put(void)
{
	struct dma_device *device, *_d;
	struct dma_chan *chan;

	mutex_lock(&dma_list_mutex);
	dmaengine_ref_count--;
	BUG_ON(dmaengine_ref_count < 0);
	/* drop channel references */
	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node)
			dma_chan_put(chan);
	}
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dmaengine_put);
static bool device_has_all_tx_types(struct dma_device *device)
{
	/* A device that satisfies this test has channels that will never cause
	 * an async_tx channel switch event as all possible operation types can
	 * be handled.
	 */
	#ifdef CONFIG_ASYNC_TX_DMA
	if (!dma_has_cap(DMA_INTERRUPT, device->cap_mask))
		return false;
	#endif

	#if IS_ENABLED(CONFIG_ASYNC_MEMCPY)
	if (!dma_has_cap(DMA_MEMCPY, device->cap_mask))
		return false;
	#endif

	#if IS_ENABLED(CONFIG_ASYNC_XOR)
	if (!dma_has_cap(DMA_XOR, device->cap_mask))
		return false;

	#ifndef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA
	if (!dma_has_cap(DMA_XOR_VAL, device->cap_mask))
		return false;
	#endif
	#endif

	#if IS_ENABLED(CONFIG_ASYNC_PQ)
	if (!dma_has_cap(DMA_PQ, device->cap_mask))
		return false;

	#ifndef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
	if (!dma_has_cap(DMA_PQ_VAL, device->cap_mask))
		return false;
	#endif
	#endif

	return true;
}

static int get_dma_id(struct dma_device *device)
{
	int rc = ida_alloc(&dma_ida, GFP_KERNEL);

	if (rc < 0)
		return rc;
	device->dev_id = rc;
	return 0;
}
static int __dma_async_device_channel_register(struct dma_device *device,
					       struct dma_chan *chan)
{
	int rc;

	chan->local = alloc_percpu(typeof(*chan->local));
	if (!chan->local)
		return -ENOMEM;
	chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL);
	if (!chan->dev) {
		rc = -ENOMEM;
		goto err_free_local;
	}

	/*
	 * When the chan_id is a negative value, we are dynamically adding
	 * the channel. Otherwise we are statically enumerating.
	 */
	chan->chan_id = ida_alloc(&device->chan_ida, GFP_KERNEL);
	if (chan->chan_id < 0) {
		pr_err("%s: unable to alloc ida for chan: %d\n",
		       __func__, chan->chan_id);
		rc = chan->chan_id;
		goto err_free_dev;
	}

	chan->dev->device.class = &dma_devclass;
	chan->dev->device.parent = device->dev;
	chan->dev->chan = chan;
	chan->dev->dev_id = device->dev_id;
	dev_set_name(&chan->dev->device, "dma%dchan%d",
		     device->dev_id, chan->chan_id);
	rc = device_register(&chan->dev->device);
	if (rc)
		goto err_out_ida;
	chan->client_count = 0;
	device->chancnt++;

	return 0;

err_out_ida:
	ida_free(&device->chan_ida, chan->chan_id);
err_free_dev:
	kfree(chan->dev);
err_free_local:
	free_percpu(chan->local);
	chan->local = NULL;
	return rc;
}

int dma_async_device_channel_register(struct dma_device *device,
				      struct dma_chan *chan)
{
	int rc;

	rc = __dma_async_device_channel_register(device, chan);
	if (rc < 0)
		return rc;

	dma_channel_rebalance();
	return 0;
}
EXPORT_SYMBOL_GPL(dma_async_device_channel_register);

static void __dma_async_device_channel_unregister(struct dma_device *device,
						  struct dma_chan *chan)
{
	WARN_ONCE(!device->device_release && chan->client_count,
		  "%s called while %d clients hold a reference\n",
		  __func__, chan->client_count);
	mutex_lock(&dma_list_mutex);
	device->chancnt--;
	chan->dev->chan = NULL;
	mutex_unlock(&dma_list_mutex);
	ida_free(&device->chan_ida, chan->chan_id);
	device_unregister(&chan->dev->device);
	free_percpu(chan->local);
}

void dma_async_device_channel_unregister(struct dma_device *device,
					 struct dma_chan *chan)
{
	__dma_async_device_channel_unregister(device, chan);
	dma_channel_rebalance();
}
EXPORT_SYMBOL_GPL(dma_async_device_channel_unregister);
/**
 * dma_async_device_register - registers DMA devices found
 * @device: pointer to &struct dma_device
 *
 * After calling this routine the structure should not be freed except in the
 * device_release() callback which will be called after
 * dma_async_device_unregister() is called and no further references are taken.
 */
int dma_async_device_register(struct dma_device *device)
{
	int rc;
	struct dma_chan *chan;

	if (!device)
		return -ENODEV;

	/* validate device routines */
	if (!device->dev) {
		pr_err("DMA device must have dev\n");
		return -EIO;
	}

	device->owner = device->dev->driver->owner;

	if (dma_has_cap(DMA_MEMCPY, device->cap_mask) && !device->device_prep_dma_memcpy) {
		dev_err(device->dev,
			"Device claims capability %s, but op is not defined\n",
			"DMA_MEMCPY");
		return -EIO;
	}

	if (dma_has_cap(DMA_XOR, device->cap_mask) && !device->device_prep_dma_xor) {
		dev_err(device->dev,
			"Device claims capability %s, but op is not defined\n",
			"DMA_XOR");
		return -EIO;
	}

	if (dma_has_cap(DMA_XOR_VAL, device->cap_mask) && !device->device_prep_dma_xor_val) {
		dev_err(device->dev,
			"Device claims capability %s, but op is not defined\n",
			"DMA_XOR_VAL");
		return -EIO;
	}

	if (dma_has_cap(DMA_PQ, device->cap_mask) && !device->device_prep_dma_pq) {
		dev_err(device->dev,
			"Device claims capability %s, but op is not defined\n",
			"DMA_PQ");
		return -EIO;
	}

	if (dma_has_cap(DMA_PQ_VAL, device->cap_mask) && !device->device_prep_dma_pq_val) {
		dev_err(device->dev,
			"Device claims capability %s, but op is not defined\n",
			"DMA_PQ_VAL");
		return -EIO;
	}

	if (dma_has_cap(DMA_MEMSET, device->cap_mask) && !device->device_prep_dma_memset) {
		dev_err(device->dev,
			"Device claims capability %s, but op is not defined\n",
			"DMA_MEMSET");
		return -EIO;
	}

	if (dma_has_cap(DMA_INTERRUPT, device->cap_mask) && !device->device_prep_dma_interrupt) {
		dev_err(device->dev,
			"Device claims capability %s, but op is not defined\n",
			"DMA_INTERRUPT");
		return -EIO;
	}

	if (dma_has_cap(DMA_CYCLIC, device->cap_mask) && !device->device_prep_dma_cyclic) {
		dev_err(device->dev,
			"Device claims capability %s, but op is not defined\n",
			"DMA_CYCLIC");
		return -EIO;
	}

	if (dma_has_cap(DMA_INTERLEAVE, device->cap_mask) && !device->device_prep_interleaved_dma) {
		dev_err(device->dev,
			"Device claims capability %s, but op is not defined\n",
			"DMA_INTERLEAVE");
		return -EIO;
	}

	if (!device->device_tx_status) {
		dev_err(device->dev, "Device tx_status is not defined\n");
		return -EIO;
	}

	if (!device->device_issue_pending) {
		dev_err(device->dev, "Device issue_pending is not defined\n");
		return -EIO;
	}

	if (!device->device_release)
		dev_dbg(device->dev,
			"WARN: Device release is not defined so it is not safe to unbind this driver while in use\n");

	kref_init(&device->ref);

	/* note: this only matters in the
	 * CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=n case
	 */
	if (device_has_all_tx_types(device))
		dma_cap_set(DMA_ASYNC_TX, device->cap_mask);

	rc = get_dma_id(device);
	if (rc != 0)
		return rc;

	ida_init(&device->chan_ida);

	/* represent channels in sysfs. Probably want devs too */
	list_for_each_entry(chan, &device->channels, device_node) {
		rc = __dma_async_device_channel_register(device, chan);
		if (rc < 0)
			goto err_out;
	}

	mutex_lock(&dma_list_mutex);
	/* take references on public channels */
	if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask))
		list_for_each_entry(chan, &device->channels, device_node) {
			/* if clients are already waiting for channels we need
			 * to take references on their behalf
			 */
			if (dma_chan_get(chan) == -ENODEV) {
				/* note we can only get here for the first
				 * channel as the remaining channels are
				 * guaranteed to get a reference
				 */
				rc = -ENODEV;
				mutex_unlock(&dma_list_mutex);
				goto err_out;
			}
		}
	list_add_tail_rcu(&device->global_node, &dma_device_list);
	if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
		device->privatecnt++;	/* Always private */
	dma_channel_rebalance();
	mutex_unlock(&dma_list_mutex);

	dmaengine_debug_register(device);

	return 0;

err_out:
	/* if we never registered a channel just release the idr */
	if (!device->chancnt) {
		ida_free(&dma_ida, device->dev_id);
		return rc;
	}

	list_for_each_entry(chan, &device->channels, device_node) {
		if (chan->local == NULL)
			continue;
		mutex_lock(&dma_list_mutex);
		chan->dev->chan = NULL;
		mutex_unlock(&dma_list_mutex);
		device_unregister(&chan->dev->device);
		free_percpu(chan->local);
	}
	return rc;
}
EXPORT_SYMBOL(dma_async_device_register);
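/*
 * Illustrative sketch (editorial addition): roughly what a provider driver
 * fills in before calling the registration routine above. The foo_* callbacks
 * and the single foo_chan channel are hypothetical; real drivers usually set
 * many more fields.
 *
 *	dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
 *	dma_dev->dev = &pdev->dev;
 *	dma_dev->device_prep_dma_memcpy = foo_prep_dma_memcpy;
 *	dma_dev->device_tx_status = foo_tx_status;
 *	dma_dev->device_issue_pending = foo_issue_pending;
 *	dma_dev->device_release = foo_release;
 *	INIT_LIST_HEAD(&dma_dev->channels);
 *	foo_chan->device = dma_dev;
 *	list_add_tail(&foo_chan->device_node, &dma_dev->channels);
 *	ret = dma_async_device_register(dma_dev);
 */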
/**
 * dma_async_device_unregister - unregister a DMA device
 * @device: pointer to &struct dma_device
 *
 * This routine is called by dma driver exit routines; dmaengine holds module
 * references to prevent it being called while channels are in use.
 */
void dma_async_device_unregister(struct dma_device *device)
{
	struct dma_chan *chan, *n;

	dmaengine_debug_unregister(device);

	list_for_each_entry_safe(chan, n, &device->channels, device_node)
		__dma_async_device_channel_unregister(device, chan);

	mutex_lock(&dma_list_mutex);
	/*
	 * setting DMA_PRIVATE ensures the device being torn down will not
	 * be used in the channel_table
	 */
	dma_cap_set(DMA_PRIVATE, device->cap_mask);
	dma_channel_rebalance();
	ida_free(&dma_ida, device->dev_id);
	dma_device_put(device);
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dma_async_device_unregister);
static void dmam_device_release(struct device *dev, void *res)
{
	struct dma_device *device;

	device = *(struct dma_device **)res;
	dma_async_device_unregister(device);
}

/**
 * dmaenginem_async_device_register - registers DMA devices found
 * @device: pointer to &struct dma_device
 *
 * The operation is managed and will be undone on driver detach.
 */
int dmaenginem_async_device_register(struct dma_device *device)
{
	void *p;
	int ret;

	p = devres_alloc(dmam_device_release, sizeof(void *), GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	ret = dma_async_device_register(device);
	if (!ret) {
		*(struct dma_device **)p = device;
		devres_add(device->dev, p);
	} else {
		devres_free(p);
	}

	return ret;
}
EXPORT_SYMBOL(dmaenginem_async_device_register);
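/*
 * Illustrative sketch (editorial addition): with the managed variant above a
 * provider's probe routine needs no explicit unregister call in its error
 * paths or in remove(); driver detach undoes the registration.
 *
 *	ret = dmaenginem_async_device_register(dma_dev);
 *	if (ret)
 *		return dev_err_probe(&pdev->dev, ret, "failed to register DMA device\n");
 */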
struct dmaengine_unmap_pool {
	struct kmem_cache *cache;
	const char *name;
	mempool_t *pool;
	size_t size;
};

#define __UNMAP_POOL(x) { .size = x, .name = "dmaengine-unmap-" __stringify(x) }
static struct dmaengine_unmap_pool unmap_pool[] = {
	__UNMAP_POOL(2),
#if IS_ENABLED(CONFIG_DMA_ENGINE_RAID)
	__UNMAP_POOL(16),
	__UNMAP_POOL(128),
	__UNMAP_POOL(256),
#endif
};

static struct dmaengine_unmap_pool *__get_unmap_pool(int nr)
{
	int order = get_count_order(nr);

	switch (order) {
	case 0 ... 1:
		return &unmap_pool[0];
#if IS_ENABLED(CONFIG_DMA_ENGINE_RAID)
	case 2 ... 4:
		return &unmap_pool[1];
	case 5 ... 7:
		return &unmap_pool[2];
	case 8:
		return &unmap_pool[3];
#endif
	default:
		BUG();
		return NULL;
	}
}

static void dmaengine_unmap(struct kref *kref)
{
	struct dmaengine_unmap_data *unmap = container_of(kref, typeof(*unmap), kref);
	struct device *dev = unmap->dev;
	int cnt, i;

	cnt = unmap->to_cnt;
	for (i = 0; i < cnt; i++)
		dma_unmap_page(dev, unmap->addr[i], unmap->len,
			       DMA_TO_DEVICE);
	cnt += unmap->from_cnt;
	for (; i < cnt; i++)
		dma_unmap_page(dev, unmap->addr[i], unmap->len,
			       DMA_FROM_DEVICE);
	cnt += unmap->bidi_cnt;
	for (; i < cnt; i++) {
		if (unmap->addr[i] == 0)
			continue;
		dma_unmap_page(dev, unmap->addr[i], unmap->len,
			       DMA_BIDIRECTIONAL);
	}
	cnt = unmap->map_cnt;
	mempool_free(unmap, __get_unmap_pool(cnt)->pool);
}

void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap)
{
	if (unmap)
		kref_put(&unmap->kref, dmaengine_unmap);
}
EXPORT_SYMBOL_GPL(dmaengine_unmap_put);

static void dmaengine_destroy_unmap_pool(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) {
		struct dmaengine_unmap_pool *p = &unmap_pool[i];

		mempool_destroy(p->pool);
		p->pool = NULL;
		kmem_cache_destroy(p->cache);
		p->cache = NULL;
	}
}

static int __init dmaengine_init_unmap_pool(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) {
		struct dmaengine_unmap_pool *p = &unmap_pool[i];
		size_t size;

		size = sizeof(struct dmaengine_unmap_data) +
		       sizeof(dma_addr_t) * p->size;

		p->cache = kmem_cache_create(p->name, size, 0,
					     SLAB_HWCACHE_ALIGN, NULL);
		if (!p->cache)
			break;
		p->pool = mempool_create_slab_pool(1, p->cache);
		if (!p->pool)
			break;
	}

	if (i == ARRAY_SIZE(unmap_pool))
		return 0;

	dmaengine_destroy_unmap_pool();
	return -ENOMEM;
}

struct dmaengine_unmap_data *
dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags)
{
	struct dmaengine_unmap_data *unmap;

	unmap = mempool_alloc(__get_unmap_pool(nr)->pool, flags);
	if (!unmap)
		return NULL;

	memset(unmap, 0, sizeof(*unmap));
	kref_init(&unmap->kref);
	unmap->dev = dev;
	unmap->map_cnt = nr;

	return unmap;
}
EXPORT_SYMBOL(dmaengine_get_unmap_data);
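/*
 * Illustrative sketch (editorial addition): how an offload user might carry
 * DMA mappings with a descriptor so they are released when the last reference
 * is dropped. Error handling is omitted; the pages, offsets and length come
 * from the caller.
 *
 *	unmap = dmaengine_get_unmap_data(dev, 2, GFP_NOWAIT);
 *	unmap->to_cnt = 1;
 *	unmap->from_cnt = 1;
 *	unmap->len = len;
 *	unmap->addr[0] = dma_map_page(dev, src_page, src_off, len, DMA_TO_DEVICE);
 *	unmap->addr[1] = dma_map_page(dev, dst_page, dst_off, len, DMA_FROM_DEVICE);
 *	...
 *	dmaengine_unmap_put(unmap);
 */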
void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
				  struct dma_chan *chan)
{
	tx->chan = chan;
#ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
	spin_lock_init(&tx->lock);
#endif
}
EXPORT_SYMBOL(dma_async_tx_descriptor_init);

static inline int desc_check_and_set_metadata_mode(
	struct dma_async_tx_descriptor *desc, enum dma_desc_metadata_mode mode)
{
	/* Make sure that the metadata mode is not mixed */
	if (!desc->desc_metadata_mode) {
		if (dmaengine_is_metadata_mode_supported(desc->chan, mode))
			desc->desc_metadata_mode = mode;
		else
			return -ENOTSUPP;
	} else if (desc->desc_metadata_mode != mode) {
		return -EINVAL;
	}

	return 0;
}

int dmaengine_desc_attach_metadata(struct dma_async_tx_descriptor *desc,
				   void *data, size_t len)
{
	int ret;

	if (!desc)
		return -EINVAL;

	ret = desc_check_and_set_metadata_mode(desc, DESC_METADATA_CLIENT);
	if (ret)
		return ret;

	if (!desc->metadata_ops || !desc->metadata_ops->attach)
		return -ENOTSUPP;

	return desc->metadata_ops->attach(desc, data, len);
}
EXPORT_SYMBOL_GPL(dmaengine_desc_attach_metadata);

void *dmaengine_desc_get_metadata_ptr(struct dma_async_tx_descriptor *desc,
				      size_t *payload_len, size_t *max_len)
{
	int ret;

	if (!desc)
		return ERR_PTR(-EINVAL);

	ret = desc_check_and_set_metadata_mode(desc, DESC_METADATA_ENGINE);
	if (ret)
		return ERR_PTR(ret);

	if (!desc->metadata_ops || !desc->metadata_ops->get_ptr)
		return ERR_PTR(-ENOTSUPP);

	return desc->metadata_ops->get_ptr(desc, payload_len, max_len);
}
EXPORT_SYMBOL_GPL(dmaengine_desc_get_metadata_ptr);

int dmaengine_desc_set_metadata_len(struct dma_async_tx_descriptor *desc,
				    size_t payload_len)
{
	int ret;

	if (!desc)
		return -EINVAL;

	ret = desc_check_and_set_metadata_mode(desc, DESC_METADATA_ENGINE);
	if (ret)
		return ret;

	if (!desc->metadata_ops || !desc->metadata_ops->set_len)
		return -ENOTSUPP;

	return desc->metadata_ops->set_len(desc, payload_len);
}
EXPORT_SYMBOL_GPL(dmaengine_desc_set_metadata_len);
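/*
 * Illustrative sketch (editorial addition): DESC_METADATA_CLIENT usage, where
 * the client owns the metadata buffer and attaches it before submission. The
 * buffer, its length and the transfer parameters are the caller's.
 *
 *	desc = dmaengine_prep_slave_single(chan, buf, len, DMA_MEM_TO_DEV, flags);
 *	ret = dmaengine_desc_attach_metadata(desc, md_buf, md_len);
 *	if (!ret)
 *		dmaengine_submit(desc);
 */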
/**
 * dma_wait_for_async_tx - spin wait for a transaction to complete
 * @tx: in-flight transaction to wait on
 */
enum dma_status
dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
{
	unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);

	if (!tx)
		return DMA_COMPLETE;

	while (tx->cookie == -EBUSY) {
		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
			dev_err(tx->chan->device->dev,
				"%s timeout waiting for descriptor submission\n",
				__func__);
			return DMA_ERROR;
		}
		cpu_relax();
	}
	return dma_sync_wait(tx->chan, tx->cookie);
}
EXPORT_SYMBOL_GPL(dma_wait_for_async_tx);

/**
 * dma_run_dependencies - process dependent operations on the target channel
 * @tx: transaction with dependencies
 *
 * Helper routine for DMA drivers to process (start) dependent operations
 * on their target channel.
 */
void dma_run_dependencies(struct dma_async_tx_descriptor *tx)
{
	struct dma_async_tx_descriptor *dep = txd_next(tx);
	struct dma_async_tx_descriptor *dep_next;
	struct dma_chan *chan;

	if (!dep)
		return;

	/* we'll submit tx->next now, so clear the link */
	txd_clear_next(tx);
	chan = dep->chan;

	/* keep submitting up until a channel switch is detected
	 * in that case we will be called again as a result of
	 * processing the interrupt from async_tx_channel_switch
	 */
	for (; dep; dep = dep_next) {
		txd_lock(dep);
		txd_clear_parent(dep);
		dep_next = txd_next(dep);
		if (dep_next && dep_next->chan == chan)
			txd_clear_next(dep); /* ->next will be submitted */
		else
			dep_next = NULL; /* submit current dep and terminate */
		txd_unlock(dep);

		dep->tx_submit(dep);
	}

	chan->device->device_issue_pending(chan);
}
EXPORT_SYMBOL_GPL(dma_run_dependencies);

static int __init dma_bus_init(void)
{
	int err = dmaengine_init_unmap_pool();

	if (err)
		return err;

	err = class_register(&dma_devclass);
	if (!err)
		dmaengine_debugfs_init();

	return err;
}
arch_initcall(dma_bus_init);