// SPDX-License-Identifier: GPL-2.0-only
/*
 * Driver for the Analog Devices AXI-DMAC core
 *
 * Copyright 2013-2019 Analog Devices Inc.
 * Author: Lars-Peter Clausen <[email protected]>
 */

#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/fpga/adi-axi-common.h>

#include <dt-bindings/dma/axi-dmac.h>

#include "dmaengine.h"
#include "virt-dma.h"

/*
 * The AXI-DMAC is a soft IP core that is used in FPGA designs. The core has
 * various instantiation parameters which decide the exact feature set
 * supported by the core.
 *
 * Each channel of the core has a source interface and a destination interface.
 * The number of channels and the type of the channel interfaces is selected at
 * configuration time. An interface can either be connected to a central memory
 * interconnect, which allows access to system memory, or it can be connected to
 * a dedicated bus which is directly connected to a data port on a peripheral.
 * Given that those are configuration options of the core that are selected when
 * it is instantiated, they cannot be changed by software at runtime. By
 * extension this means that each channel is uni-directional. It can either be
 * device to memory or memory to device, but not both. Also, since the device
 * side is a dedicated data bus only connected to a single peripheral, there is
 * no address that can or needs to be configured for the device side.
 */
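
/*
 * Illustrative only (not part of the driver): a minimal sketch of how a
 * dmaengine client might drive a DMA_DEV_TO_MEM channel provided by this
 * controller. The "rx" channel name, the buffer variables and the
 * my_period_done() callback are assumptions made up for the example; the
 * dmaengine calls themselves are the standard client API.
 *
 *	struct dma_chan *chan = dma_request_chan(dev, "rx");
 *	struct dma_async_tx_descriptor *desc;
 *
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);
 *	desc = dmaengine_prep_dma_cyclic(chan, buf_phys, buf_len, period_len,
 *					 DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
 *	if (!desc)
 *		return -ENOMEM;
 *	desc->callback = my_period_done;	// hypothetical per-period callback
 *	dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 */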

#define AXI_DMAC_REG_INTERFACE_DESC 0x10
#define AXI_DMAC_DMA_SRC_TYPE_MSK GENMASK(13, 12)
#define AXI_DMAC_DMA_SRC_TYPE_GET(x) FIELD_GET(AXI_DMAC_DMA_SRC_TYPE_MSK, x)
#define AXI_DMAC_DMA_SRC_WIDTH_MSK GENMASK(11, 8)
#define AXI_DMAC_DMA_SRC_WIDTH_GET(x) FIELD_GET(AXI_DMAC_DMA_SRC_WIDTH_MSK, x)
#define AXI_DMAC_DMA_DST_TYPE_MSK GENMASK(5, 4)
#define AXI_DMAC_DMA_DST_TYPE_GET(x) FIELD_GET(AXI_DMAC_DMA_DST_TYPE_MSK, x)
#define AXI_DMAC_DMA_DST_WIDTH_MSK GENMASK(3, 0)
#define AXI_DMAC_DMA_DST_WIDTH_GET(x) FIELD_GET(AXI_DMAC_DMA_DST_WIDTH_MSK, x)

#define AXI_DMAC_REG_COHERENCY_DESC 0x14
#define AXI_DMAC_DST_COHERENT_MSK BIT(0)
#define AXI_DMAC_DST_COHERENT_GET(x) FIELD_GET(AXI_DMAC_DST_COHERENT_MSK, x)

#define AXI_DMAC_REG_IRQ_MASK 0x80
#define AXI_DMAC_REG_IRQ_PENDING 0x84
#define AXI_DMAC_REG_IRQ_SOURCE 0x88

#define AXI_DMAC_REG_CTRL 0x400
#define AXI_DMAC_REG_TRANSFER_ID 0x404
#define AXI_DMAC_REG_START_TRANSFER 0x408
#define AXI_DMAC_REG_FLAGS 0x40c
#define AXI_DMAC_REG_DEST_ADDRESS 0x410
#define AXI_DMAC_REG_SRC_ADDRESS 0x414
#define AXI_DMAC_REG_X_LENGTH 0x418
#define AXI_DMAC_REG_Y_LENGTH 0x41c
#define AXI_DMAC_REG_DEST_STRIDE 0x420
#define AXI_DMAC_REG_SRC_STRIDE 0x424
#define AXI_DMAC_REG_TRANSFER_DONE 0x428
#define AXI_DMAC_REG_ACTIVE_TRANSFER_ID 0x42c
#define AXI_DMAC_REG_STATUS 0x430
#define AXI_DMAC_REG_CURRENT_SRC_ADDR 0x434
#define AXI_DMAC_REG_CURRENT_DEST_ADDR 0x438
#define AXI_DMAC_REG_PARTIAL_XFER_LEN 0x44c
#define AXI_DMAC_REG_PARTIAL_XFER_ID 0x450

#define AXI_DMAC_CTRL_ENABLE BIT(0)
#define AXI_DMAC_CTRL_PAUSE BIT(1)

#define AXI_DMAC_IRQ_SOT BIT(0)
#define AXI_DMAC_IRQ_EOT BIT(1)

#define AXI_DMAC_FLAG_CYCLIC BIT(0)
#define AXI_DMAC_FLAG_LAST BIT(1)
#define AXI_DMAC_FLAG_PARTIAL_REPORT BIT(2)

#define AXI_DMAC_FLAG_PARTIAL_XFER_DONE BIT(31)

/* The maximum ID allocated by the hardware is 31 */
#define AXI_DMAC_SG_UNUSED 32U

struct axi_dmac_sg {
	dma_addr_t src_addr;
	dma_addr_t dest_addr;
	unsigned int x_len;
	unsigned int y_len;
	unsigned int dest_stride;
	unsigned int src_stride;
	unsigned int id;
	unsigned int partial_len;
	bool schedule_when_free;
};

struct axi_dmac_desc {
	struct virt_dma_desc vdesc;
	bool cyclic;
	bool have_partial_xfer;

	unsigned int num_submitted;
	unsigned int num_completed;
	unsigned int num_sgs;
	struct axi_dmac_sg sg[];
};

struct axi_dmac_chan {
	struct virt_dma_chan vchan;

	struct axi_dmac_desc *next_desc;
	struct list_head active_descs;
	enum dma_transfer_direction direction;

	unsigned int src_width;
	unsigned int dest_width;
	unsigned int src_type;
	unsigned int dest_type;

	unsigned int max_length;
	unsigned int address_align_mask;
	unsigned int length_align_mask;

	bool hw_partial_xfer;
	bool hw_cyclic;
	bool hw_2d;
};

struct axi_dmac {
	void __iomem *base;
	int irq;

	struct clk *clk;

	struct dma_device dma_dev;
	struct axi_dmac_chan chan;
};

static struct axi_dmac *chan_to_axi_dmac(struct axi_dmac_chan *chan)
{
	return container_of(chan->vchan.chan.device, struct axi_dmac,
		dma_dev);
}

static struct axi_dmac_chan *to_axi_dmac_chan(struct dma_chan *c)
{
	return container_of(c, struct axi_dmac_chan, vchan.chan);
}

static struct axi_dmac_desc *to_axi_dmac_desc(struct virt_dma_desc *vdesc)
{
	return container_of(vdesc, struct axi_dmac_desc, vdesc);
}

static void axi_dmac_write(struct axi_dmac *axi_dmac, unsigned int reg,
	unsigned int val)
{
	writel(val, axi_dmac->base + reg);
}

static int axi_dmac_read(struct axi_dmac *axi_dmac, unsigned int reg)
{
	return readl(axi_dmac->base + reg);
}

static int axi_dmac_src_is_mem(struct axi_dmac_chan *chan)
{
	return chan->src_type == AXI_DMAC_BUS_TYPE_AXI_MM;
}

static int axi_dmac_dest_is_mem(struct axi_dmac_chan *chan)
{
	return chan->dest_type == AXI_DMAC_BUS_TYPE_AXI_MM;
}

static bool axi_dmac_check_len(struct axi_dmac_chan *chan, unsigned int len)
{
	if (len == 0)
		return false;
	if ((len & chan->length_align_mask) != 0) /* Not aligned */
		return false;
	return true;
}

static bool axi_dmac_check_addr(struct axi_dmac_chan *chan, dma_addr_t addr)
{
	if ((addr & chan->address_align_mask) != 0) /* Not aligned */
		return false;
	return true;
}
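
/*
 * Program the next scatter-gather segment into the hardware and queue it.
 * The core exposes a small transfer queue: a non-zero read of
 * REG_START_TRANSFER means the queue is full and the next start-of-transfer
 * interrupt has to be waited for. Otherwise the segment is tagged with the ID
 * returned by REG_TRANSFER_ID, the memory-mapped side(s) get their address and
 * stride programmed, and writing 1 to REG_START_TRANSFER queues the transfer.
 */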
static void axi_dmac_start_transfer(struct axi_dmac_chan *chan)
{
	struct axi_dmac *dmac = chan_to_axi_dmac(chan);
	struct virt_dma_desc *vdesc;
	struct axi_dmac_desc *desc;
	struct axi_dmac_sg *sg;
	unsigned int flags = 0;
	unsigned int val;

	val = axi_dmac_read(dmac, AXI_DMAC_REG_START_TRANSFER);
	if (val) /* Queue is full, wait for the next SOT IRQ */
		return;

	desc = chan->next_desc;

	if (!desc) {
		vdesc = vchan_next_desc(&chan->vchan);
		if (!vdesc)
			return;
		list_move_tail(&vdesc->node, &chan->active_descs);
		desc = to_axi_dmac_desc(vdesc);
	}
	sg = &desc->sg[desc->num_submitted];

	/* Already queued in cyclic mode. Wait for it to finish */
	if (sg->id != AXI_DMAC_SG_UNUSED) {
		sg->schedule_when_free = true;
		return;
	}

	desc->num_submitted++;
	if (desc->num_submitted == desc->num_sgs ||
	    desc->have_partial_xfer) {
		if (desc->cyclic)
			desc->num_submitted = 0; /* Start again */
		else
			chan->next_desc = NULL;
		flags |= AXI_DMAC_FLAG_LAST;
	} else {
		chan->next_desc = desc;
	}

	sg->id = axi_dmac_read(dmac, AXI_DMAC_REG_TRANSFER_ID);

	if (axi_dmac_dest_is_mem(chan)) {
		axi_dmac_write(dmac, AXI_DMAC_REG_DEST_ADDRESS, sg->dest_addr);
		axi_dmac_write(dmac, AXI_DMAC_REG_DEST_STRIDE, sg->dest_stride);
	}

	if (axi_dmac_src_is_mem(chan)) {
		axi_dmac_write(dmac, AXI_DMAC_REG_SRC_ADDRESS, sg->src_addr);
		axi_dmac_write(dmac, AXI_DMAC_REG_SRC_STRIDE, sg->src_stride);
	}

	/*
	 * If the hardware supports cyclic transfers and there is no callback to
	 * call and only a single segment, enable hw cyclic mode to avoid
	 * unnecessary interrupts.
	 */
	if (chan->hw_cyclic && desc->cyclic && !desc->vdesc.tx.callback &&
	    desc->num_sgs == 1)
		flags |= AXI_DMAC_FLAG_CYCLIC;

	if (chan->hw_partial_xfer)
		flags |= AXI_DMAC_FLAG_PARTIAL_REPORT;

	axi_dmac_write(dmac, AXI_DMAC_REG_X_LENGTH, sg->x_len - 1);
	axi_dmac_write(dmac, AXI_DMAC_REG_Y_LENGTH, sg->y_len - 1);
	axi_dmac_write(dmac, AXI_DMAC_REG_FLAGS, flags);
	axi_dmac_write(dmac, AXI_DMAC_REG_START_TRANSFER, 1);
}

static struct axi_dmac_desc *axi_dmac_active_desc(struct axi_dmac_chan *chan)
{
	return list_first_entry_or_null(&chan->active_descs,
		struct axi_dmac_desc, vdesc.node);
}

static inline unsigned int axi_dmac_total_sg_bytes(struct axi_dmac_chan *chan,
	struct axi_dmac_sg *sg)
{
	if (chan->hw_2d)
		return sg->x_len * sg->y_len;
	else
		return sg->x_len;
}
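
/*
 * Drain the partial-transfer reporting of the core: each entry gives the ID of
 * a segment that ended early and the number of bytes that were actually
 * transferred. Match the ID against the submitted segments of the active
 * descriptors and record the partial length so the residue can be reported
 * later. Keep reading as long as REG_TRANSFER_DONE still has the
 * PARTIAL_XFER_DONE bit set.
 */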
static void axi_dmac_dequeue_partial_xfers(struct axi_dmac_chan *chan)
{
	struct axi_dmac *dmac = chan_to_axi_dmac(chan);
	struct axi_dmac_desc *desc;
	struct axi_dmac_sg *sg;
	u32 xfer_done, len, id, i;
	bool found_sg;

	do {
		len = axi_dmac_read(dmac, AXI_DMAC_REG_PARTIAL_XFER_LEN);
		id = axi_dmac_read(dmac, AXI_DMAC_REG_PARTIAL_XFER_ID);

		found_sg = false;
		list_for_each_entry(desc, &chan->active_descs, vdesc.node) {
			for (i = 0; i < desc->num_sgs; i++) {
				sg = &desc->sg[i];
				if (sg->id == AXI_DMAC_SG_UNUSED)
					continue;
				if (sg->id == id) {
					desc->have_partial_xfer = true;
					sg->partial_len = len;
					found_sg = true;
					break;
				}
			}
			if (found_sg)
				break;
		}

		if (found_sg) {
			dev_dbg(dmac->dma_dev.dev,
				"Found partial segment id=%u, len=%u\n",
				id, len);
		} else {
			dev_warn(dmac->dma_dev.dev,
				"Partial segment not found, id=%u, len=%u\n",
				id, len);
		}

		/* Check if we have any more partial transfers */
		xfer_done = axi_dmac_read(dmac, AXI_DMAC_REG_TRANSFER_DONE);
		xfer_done = !(xfer_done & AXI_DMAC_FLAG_PARTIAL_XFER_DONE);

	} while (!xfer_done);
}

static void axi_dmac_compute_residue(struct axi_dmac_chan *chan,
	struct axi_dmac_desc *active)
{
	struct dmaengine_result *rslt = &active->vdesc.tx_result;
	unsigned int start = active->num_completed - 1;
	struct axi_dmac_sg *sg;
	unsigned int i, total;

	rslt->result = DMA_TRANS_NOERROR;
	rslt->residue = 0;

	/*
	 * We get here if the last completed segment is partial, which
	 * means we can compute the residue from that segment onwards
	 */
	for (i = start; i < active->num_sgs; i++) {
		sg = &active->sg[i];
		total = axi_dmac_total_sg_bytes(chan, sg);
		rslt->residue += (total - sg->partial_len);
	}
}
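
/*
 * Process the completion bitmap read from REG_TRANSFER_DONE: walk the active
 * descriptor's segments in submission order, retire every segment whose ID bit
 * is set, fire cyclic callbacks, complete descriptors and report the residue
 * for partial transfers. Returns true if a segment that was waiting for a free
 * hardware slot should now be scheduled by the caller.
 */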
static bool axi_dmac_transfer_done(struct axi_dmac_chan *chan,
	unsigned int completed_transfers)
{
	struct axi_dmac_desc *active;
	struct axi_dmac_sg *sg;
	bool start_next = false;

	active = axi_dmac_active_desc(chan);
	if (!active)
		return false;

	if (chan->hw_partial_xfer &&
	    (completed_transfers & AXI_DMAC_FLAG_PARTIAL_XFER_DONE))
		axi_dmac_dequeue_partial_xfers(chan);

	do {
		sg = &active->sg[active->num_completed];
		if (sg->id == AXI_DMAC_SG_UNUSED) /* Not yet submitted */
			break;
		if (!(BIT(sg->id) & completed_transfers))
			break;
		active->num_completed++;
		sg->id = AXI_DMAC_SG_UNUSED;
		if (sg->schedule_when_free) {
			sg->schedule_when_free = false;
			start_next = true;
		}

		if (sg->partial_len)
			axi_dmac_compute_residue(chan, active);

		if (active->cyclic)
			vchan_cyclic_callback(&active->vdesc);

		if (active->num_completed == active->num_sgs ||
		    sg->partial_len) {
			if (active->cyclic) {
				active->num_completed = 0; /* wrap around */
			} else {
				list_del(&active->vdesc.node);
				vchan_cookie_complete(&active->vdesc);
				active = axi_dmac_active_desc(chan);
			}
		}
	} while (active);

	return start_next;
}
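
/*
 * Interrupt handler: SOT (start-of-transfer) signals that a slot in the
 * hardware transfer queue has become free, EOT (end-of-transfer) signals that
 * one or more queued transfers have completed. Either condition may require
 * queueing the next segment.
 */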
static irqreturn_t axi_dmac_interrupt_handler(int irq, void *devid)
{
	struct axi_dmac *dmac = devid;
	unsigned int pending;
	bool start_next = false;

	pending = axi_dmac_read(dmac, AXI_DMAC_REG_IRQ_PENDING);
	if (!pending)
		return IRQ_NONE;

	axi_dmac_write(dmac, AXI_DMAC_REG_IRQ_PENDING, pending);

	spin_lock(&dmac->chan.vchan.lock);
	/* One or more transfers have finished */
	if (pending & AXI_DMAC_IRQ_EOT) {
		unsigned int completed;

		completed = axi_dmac_read(dmac, AXI_DMAC_REG_TRANSFER_DONE);
		start_next = axi_dmac_transfer_done(&dmac->chan, completed);
	}
	/* Space has become available in the descriptor queue */
	if ((pending & AXI_DMAC_IRQ_SOT) || start_next)
		axi_dmac_start_transfer(&dmac->chan);
	spin_unlock(&dmac->chan.vchan.lock);

	return IRQ_HANDLED;
}

static int axi_dmac_terminate_all(struct dma_chan *c)
{
	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
	struct axi_dmac *dmac = chan_to_axi_dmac(chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&chan->vchan.lock, flags);
	axi_dmac_write(dmac, AXI_DMAC_REG_CTRL, 0);
	chan->next_desc = NULL;
	vchan_get_all_descriptors(&chan->vchan, &head);
	list_splice_tail_init(&chan->active_descs, &head);
	spin_unlock_irqrestore(&chan->vchan.lock, flags);

	vchan_dma_desc_free_list(&chan->vchan, &head);

	return 0;
}

static void axi_dmac_synchronize(struct dma_chan *c)
{
	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);

	vchan_synchronize(&chan->vchan);
}

static void axi_dmac_issue_pending(struct dma_chan *c)
{
	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
	struct axi_dmac *dmac = chan_to_axi_dmac(chan);
	unsigned long flags;

	axi_dmac_write(dmac, AXI_DMAC_REG_CTRL, AXI_DMAC_CTRL_ENABLE);

	spin_lock_irqsave(&chan->vchan.lock, flags);
	if (vchan_issue_pending(&chan->vchan))
		axi_dmac_start_transfer(chan);
	spin_unlock_irqrestore(&chan->vchan.lock, flags);
}

static struct axi_dmac_desc *axi_dmac_alloc_desc(unsigned int num_sgs)
{
	struct axi_dmac_desc *desc;
	unsigned int i;

	desc = kzalloc(struct_size(desc, sg, num_sgs), GFP_NOWAIT);
	if (!desc)
		return NULL;

	for (i = 0; i < num_sgs; i++)
		desc->sg[i].id = AXI_DMAC_SG_UNUSED;

	desc->num_sgs = num_sgs;

	return desc;
}
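
/*
 * Split one or more periods of a linear buffer into hardware segments. Each
 * period is cut into DIV_ROUND_UP(period_len, max_length) roughly equal,
 * alignment-rounded segments rather than max_length-sized chunks plus a small
 * remainder. For example (illustrative numbers, assuming an 8 byte length
 * alignment): a 6000 byte period with a 4096 byte max_length becomes two
 * 3000 byte segments instead of 4096 + 1904.
 */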
static struct axi_dmac_sg *axi_dmac_fill_linear_sg(struct axi_dmac_chan *chan,
	enum dma_transfer_direction direction, dma_addr_t addr,
	unsigned int num_periods, unsigned int period_len,
	struct axi_dmac_sg *sg)
{
	unsigned int num_segments, i;
	unsigned int segment_size;
	unsigned int len;

	/* Split into multiple equally sized segments if necessary */
	num_segments = DIV_ROUND_UP(period_len, chan->max_length);
	segment_size = DIV_ROUND_UP(period_len, num_segments);
	/* Take care of alignment */
	segment_size = ((segment_size - 1) | chan->length_align_mask) + 1;

	for (i = 0; i < num_periods; i++) {
		len = period_len;

		while (len > segment_size) {
			if (direction == DMA_DEV_TO_MEM)
				sg->dest_addr = addr;
			else
				sg->src_addr = addr;
			sg->x_len = segment_size;
			sg->y_len = 1;
			sg++;
			addr += segment_size;
			len -= segment_size;
		}

		if (direction == DMA_DEV_TO_MEM)
			sg->dest_addr = addr;
		else
			sg->src_addr = addr;
		sg->x_len = len;
		sg->y_len = 1;
		sg++;
		addr += len;
	}

	return sg;
}
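
/*
 * Prepare a slave scatter-gather transfer. Each scatterlist entry is checked
 * against the channel's address and length alignment and may itself be split
 * into several hardware segments if it exceeds max_length.
 */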
static struct dma_async_tx_descriptor *axi_dmac_prep_slave_sg(
	struct dma_chan *c, struct scatterlist *sgl,
	unsigned int sg_len, enum dma_transfer_direction direction,
	unsigned long flags, void *context)
{
	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
	struct axi_dmac_desc *desc;
	struct axi_dmac_sg *dsg;
	struct scatterlist *sg;
	unsigned int num_sgs;
	unsigned int i;

	if (direction != chan->direction)
		return NULL;

	num_sgs = 0;
	for_each_sg(sgl, sg, sg_len, i)
		num_sgs += DIV_ROUND_UP(sg_dma_len(sg), chan->max_length);

	desc = axi_dmac_alloc_desc(num_sgs);
	if (!desc)
		return NULL;

	dsg = desc->sg;

	for_each_sg(sgl, sg, sg_len, i) {
		if (!axi_dmac_check_addr(chan, sg_dma_address(sg)) ||
		    !axi_dmac_check_len(chan, sg_dma_len(sg))) {
			kfree(desc);
			return NULL;
		}

		dsg = axi_dmac_fill_linear_sg(chan, direction, sg_dma_address(sg), 1,
			sg_dma_len(sg), dsg);
	}

	desc->cyclic = false;

	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
}

static struct dma_async_tx_descriptor *axi_dmac_prep_dma_cyclic(
	struct dma_chan *c, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction direction,
	unsigned long flags)
{
	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
	struct axi_dmac_desc *desc;
	unsigned int num_periods, num_segments;

	if (direction != chan->direction)
		return NULL;

	if (!axi_dmac_check_len(chan, buf_len) ||
	    !axi_dmac_check_addr(chan, buf_addr))
		return NULL;

	if (period_len == 0 || buf_len % period_len)
		return NULL;

	num_periods = buf_len / period_len;
	num_segments = DIV_ROUND_UP(period_len, chan->max_length);

	desc = axi_dmac_alloc_desc(num_periods * num_segments);
	if (!desc)
		return NULL;

	axi_dmac_fill_linear_sg(chan, direction, buf_addr, num_periods,
		period_len, desc->sg);

	desc->cyclic = true;

	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
}
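
/*
 * Prepare an interleaved transfer. Only templates with a single chunk per
 * frame (frame_size == 1) are accepted. On cores with 2D support the chunk
 * size maps to the X length and numf to the Y length, with the strides
 * covering the chunk size plus the inter-chunk gap. On 1D-only cores the gaps
 * must be zero and the transfer is collapsed into one linear segment of
 * size * numf bytes.
 */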
static struct dma_async_tx_descriptor *axi_dmac_prep_interleaved(
	struct dma_chan *c, struct dma_interleaved_template *xt,
	unsigned long flags)
{
	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
	struct axi_dmac_desc *desc;
	size_t dst_icg, src_icg;

	if (xt->frame_size != 1)
		return NULL;

	if (xt->dir != chan->direction)
		return NULL;

	if (axi_dmac_src_is_mem(chan)) {
		if (!xt->src_inc || !axi_dmac_check_addr(chan, xt->src_start))
			return NULL;
	}

	if (axi_dmac_dest_is_mem(chan)) {
		if (!xt->dst_inc || !axi_dmac_check_addr(chan, xt->dst_start))
			return NULL;
	}

	dst_icg = dmaengine_get_dst_icg(xt, &xt->sgl[0]);
	src_icg = dmaengine_get_src_icg(xt, &xt->sgl[0]);

	if (chan->hw_2d) {
		if (!axi_dmac_check_len(chan, xt->sgl[0].size) ||
		    xt->numf == 0)
			return NULL;
		if (xt->sgl[0].size + dst_icg > chan->max_length ||
		    xt->sgl[0].size + src_icg > chan->max_length)
			return NULL;
	} else {
		if (dst_icg != 0 || src_icg != 0)
			return NULL;
		if (chan->max_length / xt->sgl[0].size < xt->numf)
			return NULL;
		if (!axi_dmac_check_len(chan, xt->sgl[0].size * xt->numf))
			return NULL;
	}

	desc = axi_dmac_alloc_desc(1);
	if (!desc)
		return NULL;

	if (axi_dmac_src_is_mem(chan)) {
		desc->sg[0].src_addr = xt->src_start;
		desc->sg[0].src_stride = xt->sgl[0].size + src_icg;
	}

	if (axi_dmac_dest_is_mem(chan)) {
		desc->sg[0].dest_addr = xt->dst_start;
		desc->sg[0].dest_stride = xt->sgl[0].size + dst_icg;
	}

	if (chan->hw_2d) {
		desc->sg[0].x_len = xt->sgl[0].size;
		desc->sg[0].y_len = xt->numf;
	} else {
		desc->sg[0].x_len = xt->sgl[0].size * xt->numf;
		desc->sg[0].y_len = 1;
	}

	if (flags & DMA_CYCLIC)
		desc->cyclic = true;

	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
}

static void axi_dmac_free_chan_resources(struct dma_chan *c)
{
	vchan_free_chan_resources(to_virt_chan(c));
}

static void axi_dmac_desc_free(struct virt_dma_desc *vdesc)
{
	kfree(container_of(vdesc, struct axi_dmac_desc, vdesc));
}

static bool axi_dmac_regmap_rdwr(struct device *dev, unsigned int reg)
{
	switch (reg) {
	case AXI_DMAC_REG_IRQ_MASK:
	case AXI_DMAC_REG_IRQ_SOURCE:
	case AXI_DMAC_REG_IRQ_PENDING:
	case AXI_DMAC_REG_CTRL:
	case AXI_DMAC_REG_TRANSFER_ID:
	case AXI_DMAC_REG_START_TRANSFER:
	case AXI_DMAC_REG_FLAGS:
	case AXI_DMAC_REG_DEST_ADDRESS:
	case AXI_DMAC_REG_SRC_ADDRESS:
	case AXI_DMAC_REG_X_LENGTH:
	case AXI_DMAC_REG_Y_LENGTH:
	case AXI_DMAC_REG_DEST_STRIDE:
	case AXI_DMAC_REG_SRC_STRIDE:
	case AXI_DMAC_REG_TRANSFER_DONE:
	case AXI_DMAC_REG_ACTIVE_TRANSFER_ID:
	case AXI_DMAC_REG_STATUS:
	case AXI_DMAC_REG_CURRENT_SRC_ADDR:
	case AXI_DMAC_REG_CURRENT_DEST_ADDR:
	case AXI_DMAC_REG_PARTIAL_XFER_LEN:
	case AXI_DMAC_REG_PARTIAL_XFER_ID:
		return true;
	default:
		return false;
	}
}

static const struct regmap_config axi_dmac_regmap_config = {
	.reg_bits = 32,
	.val_bits = 32,
	.reg_stride = 4,
	.max_register = AXI_DMAC_REG_PARTIAL_XFER_ID,
	.readable_reg = axi_dmac_regmap_rdwr,
	.writeable_reg = axi_dmac_regmap_rdwr,
};

static void axi_dmac_adjust_chan_params(struct axi_dmac_chan *chan)
{
	chan->address_align_mask = max(chan->dest_width, chan->src_width) - 1;

	if (axi_dmac_dest_is_mem(chan) && axi_dmac_src_is_mem(chan))
		chan->direction = DMA_MEM_TO_MEM;
	else if (!axi_dmac_dest_is_mem(chan) && axi_dmac_src_is_mem(chan))
		chan->direction = DMA_MEM_TO_DEV;
	else if (axi_dmac_dest_is_mem(chan) && !axi_dmac_src_is_mem(chan))
		chan->direction = DMA_DEV_TO_MEM;
	else
		chan->direction = DMA_DEV_TO_DEV;
}

/*
 * The configuration stored in the devicetree matches the configuration
 * parameters of the peripheral instance and allows the driver to know which
 * features are implemented and how it should behave.
 */
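
/*
 * Illustrative devicetree fragment for a single channel, inferred from the
 * properties parsed below (an assumption for documentation purposes, not
 * copied from the binding document):
 *
 *	adi,channels {
 *		#address-cells = <1>;
 *		#size-cells = <0>;
 *
 *		dma-channel@0 {
 *			reg = <0>;
 *			adi,source-bus-type = <AXI_DMAC_BUS_TYPE_AXI_MM>;
 *			adi,source-bus-width = <64>;
 *			adi,destination-bus-type = <AXI_DMAC_BUS_TYPE_FIFO>;
 *			adi,destination-bus-width = <64>;
 *		};
 *	};
 */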
static int axi_dmac_parse_chan_dt(struct device_node *of_chan,
	struct axi_dmac_chan *chan)
{
	u32 val;
	int ret;

	ret = of_property_read_u32(of_chan, "reg", &val);
	if (ret)
		return ret;

	/* We only support 1 channel for now */
	if (val != 0)
		return -EINVAL;

	ret = of_property_read_u32(of_chan, "adi,source-bus-type", &val);
	if (ret)
		return ret;
	if (val > AXI_DMAC_BUS_TYPE_FIFO)
		return -EINVAL;
	chan->src_type = val;

	ret = of_property_read_u32(of_chan, "adi,destination-bus-type", &val);
	if (ret)
		return ret;
	if (val > AXI_DMAC_BUS_TYPE_FIFO)
		return -EINVAL;
	chan->dest_type = val;

	ret = of_property_read_u32(of_chan, "adi,source-bus-width", &val);
	if (ret)
		return ret;
	chan->src_width = val / 8;

	ret = of_property_read_u32(of_chan, "adi,destination-bus-width", &val);
	if (ret)
		return ret;
	chan->dest_width = val / 8;

	axi_dmac_adjust_chan_params(chan);

	return 0;
}

static int axi_dmac_parse_dt(struct device *dev, struct axi_dmac *dmac)
{
	struct device_node *of_channels, *of_chan;
	int ret;

	of_channels = of_get_child_by_name(dev->of_node, "adi,channels");
	if (of_channels == NULL)
		return -ENODEV;

	for_each_child_of_node(of_channels, of_chan) {
		ret = axi_dmac_parse_chan_dt(of_chan, &dmac->chan);
		if (ret) {
			of_node_put(of_chan);
			of_node_put(of_channels);
			return -EINVAL;
		}
	}
	of_node_put(of_channels);

	return 0;
}

static int axi_dmac_read_chan_config(struct device *dev, struct axi_dmac *dmac)
{
	struct axi_dmac_chan *chan = &dmac->chan;
	unsigned int val, desc;

	desc = axi_dmac_read(dmac, AXI_DMAC_REG_INTERFACE_DESC);
	if (desc == 0) {
		dev_err(dev, "DMA interface register reads zero\n");
		return -EFAULT;
	}

	val = AXI_DMAC_DMA_SRC_TYPE_GET(desc);
	if (val > AXI_DMAC_BUS_TYPE_FIFO) {
		dev_err(dev, "Invalid source bus type read: %d\n", val);
		return -EINVAL;
	}
	chan->src_type = val;

	val = AXI_DMAC_DMA_DST_TYPE_GET(desc);
	if (val > AXI_DMAC_BUS_TYPE_FIFO) {
		dev_err(dev, "Invalid destination bus type read: %d\n", val);
		return -EINVAL;
	}
	chan->dest_type = val;

	val = AXI_DMAC_DMA_SRC_WIDTH_GET(desc);
	if (val == 0) {
		dev_err(dev, "Source bus width is zero\n");
		return -EINVAL;
	}
	/* widths are stored in log2 */
	chan->src_width = 1 << val;

	val = AXI_DMAC_DMA_DST_WIDTH_GET(desc);
	if (val == 0) {
		dev_err(dev, "Destination bus width is zero\n");
		return -EINVAL;
	}
	chan->dest_width = 1 << val;

	axi_dmac_adjust_chan_params(chan);

	return 0;
}
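
/*
 * Probe optional hardware features by writing a register and checking whether
 * the value sticks: cyclic support via REG_FLAGS, 2D support via REG_Y_LENGTH,
 * the maximum transfer length via REG_X_LENGTH, and whether the memory-mapped
 * address registers are actually implemented. Partial-transfer support is
 * assumed from the core version, and on newer cores the length alignment is
 * read back from REG_X_LENGTH after writing zero to it.
 */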
static int axi_dmac_detect_caps(struct axi_dmac *dmac, unsigned int version)
{
	struct axi_dmac_chan *chan = &dmac->chan;

	axi_dmac_write(dmac, AXI_DMAC_REG_FLAGS, AXI_DMAC_FLAG_CYCLIC);
	if (axi_dmac_read(dmac, AXI_DMAC_REG_FLAGS) == AXI_DMAC_FLAG_CYCLIC)
		chan->hw_cyclic = true;

	axi_dmac_write(dmac, AXI_DMAC_REG_Y_LENGTH, 1);
	if (axi_dmac_read(dmac, AXI_DMAC_REG_Y_LENGTH) == 1)
		chan->hw_2d = true;

	axi_dmac_write(dmac, AXI_DMAC_REG_X_LENGTH, 0xffffffff);
	chan->max_length = axi_dmac_read(dmac, AXI_DMAC_REG_X_LENGTH);
	if (chan->max_length != UINT_MAX)
		chan->max_length++;

	axi_dmac_write(dmac, AXI_DMAC_REG_DEST_ADDRESS, 0xffffffff);
	if (axi_dmac_read(dmac, AXI_DMAC_REG_DEST_ADDRESS) == 0 &&
	    chan->dest_type == AXI_DMAC_BUS_TYPE_AXI_MM) {
		dev_err(dmac->dma_dev.dev,
			"Destination memory-mapped interface not supported.");
		return -ENODEV;
	}

	axi_dmac_write(dmac, AXI_DMAC_REG_SRC_ADDRESS, 0xffffffff);
	if (axi_dmac_read(dmac, AXI_DMAC_REG_SRC_ADDRESS) == 0 &&
	    chan->src_type == AXI_DMAC_BUS_TYPE_AXI_MM) {
		dev_err(dmac->dma_dev.dev,
			"Source memory-mapped interface not supported.");
		return -ENODEV;
	}

	if (version >= ADI_AXI_PCORE_VER(4, 2, 'a'))
		chan->hw_partial_xfer = true;

	if (version >= ADI_AXI_PCORE_VER(4, 1, 'a')) {
		axi_dmac_write(dmac, AXI_DMAC_REG_X_LENGTH, 0x00);
		chan->length_align_mask =
			axi_dmac_read(dmac, AXI_DMAC_REG_X_LENGTH);
	} else {
		chan->length_align_mask = chan->address_align_mask;
	}

	return 0;
}

static int axi_dmac_probe(struct platform_device *pdev)
{
	struct dma_device *dma_dev;
	struct axi_dmac *dmac;
	struct resource *res;
	struct regmap *regmap;
	unsigned int version;
	int ret;

	dmac = devm_kzalloc(&pdev->dev, sizeof(*dmac), GFP_KERNEL);
	if (!dmac)
		return -ENOMEM;

	dmac->irq = platform_get_irq(pdev, 0);
	if (dmac->irq < 0)
		return dmac->irq;
	if (dmac->irq == 0)
		return -EINVAL;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	dmac->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(dmac->base))
		return PTR_ERR(dmac->base);

	dmac->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(dmac->clk))
		return PTR_ERR(dmac->clk);

	ret = clk_prepare_enable(dmac->clk);
	if (ret < 0)
		return ret;

	version = axi_dmac_read(dmac, ADI_AXI_REG_VERSION);

	if (version >= ADI_AXI_PCORE_VER(4, 3, 'a'))
		ret = axi_dmac_read_chan_config(&pdev->dev, dmac);
	else
		ret = axi_dmac_parse_dt(&pdev->dev, dmac);

	if (ret < 0)
		goto err_clk_disable;

	INIT_LIST_HEAD(&dmac->chan.active_descs);

	dma_set_max_seg_size(&pdev->dev, UINT_MAX);

	dma_dev = &dmac->dma_dev;
	dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
	dma_cap_set(DMA_CYCLIC, dma_dev->cap_mask);
	dma_cap_set(DMA_INTERLEAVE, dma_dev->cap_mask);
	dma_dev->device_free_chan_resources = axi_dmac_free_chan_resources;
	dma_dev->device_tx_status = dma_cookie_status;
	dma_dev->device_issue_pending = axi_dmac_issue_pending;
	dma_dev->device_prep_slave_sg = axi_dmac_prep_slave_sg;
	dma_dev->device_prep_dma_cyclic = axi_dmac_prep_dma_cyclic;
	dma_dev->device_prep_interleaved_dma = axi_dmac_prep_interleaved;
	dma_dev->device_terminate_all = axi_dmac_terminate_all;
	dma_dev->device_synchronize = axi_dmac_synchronize;
	dma_dev->dev = &pdev->dev;
	dma_dev->chancnt = 1;
	dma_dev->src_addr_widths = BIT(dmac->chan.src_width);
	dma_dev->dst_addr_widths = BIT(dmac->chan.dest_width);
	dma_dev->directions = BIT(dmac->chan.direction);
	dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
	INIT_LIST_HEAD(&dma_dev->channels);

	dmac->chan.vchan.desc_free = axi_dmac_desc_free;
	vchan_init(&dmac->chan.vchan, dma_dev);

	ret = axi_dmac_detect_caps(dmac, version);
	if (ret)
		goto err_clk_disable;

	dma_dev->copy_align = (dmac->chan.address_align_mask + 1);

	axi_dmac_write(dmac, AXI_DMAC_REG_IRQ_MASK, 0x00);

	if (of_dma_is_coherent(pdev->dev.of_node)) {
		ret = axi_dmac_read(dmac, AXI_DMAC_REG_COHERENCY_DESC);

		if (version < ADI_AXI_PCORE_VER(4, 4, 'a') ||
		    !AXI_DMAC_DST_COHERENT_GET(ret)) {
			dev_err(dmac->dma_dev.dev,
				"Coherent DMA not supported in hardware");
			ret = -EINVAL;
			goto err_clk_disable;
		}
	}

	ret = dma_async_device_register(dma_dev);
	if (ret)
		goto err_clk_disable;

	ret = of_dma_controller_register(pdev->dev.of_node,
		of_dma_xlate_by_chan_id, dma_dev);
	if (ret)
		goto err_unregister_device;

	ret = request_irq(dmac->irq, axi_dmac_interrupt_handler, IRQF_SHARED,
		dev_name(&pdev->dev), dmac);
	if (ret)
		goto err_unregister_of;

	platform_set_drvdata(pdev, dmac);

	regmap = devm_regmap_init_mmio(&pdev->dev, dmac->base,
		&axi_dmac_regmap_config);
	if (IS_ERR(regmap)) {
		ret = PTR_ERR(regmap);
		goto err_free_irq;
	}

	return 0;

err_free_irq:
	free_irq(dmac->irq, dmac);
err_unregister_of:
	of_dma_controller_free(pdev->dev.of_node);
err_unregister_device:
	dma_async_device_unregister(&dmac->dma_dev);
err_clk_disable:
	clk_disable_unprepare(dmac->clk);

	return ret;
}

static int axi_dmac_remove(struct platform_device *pdev)
{
	struct axi_dmac *dmac = platform_get_drvdata(pdev);

	of_dma_controller_free(pdev->dev.of_node);
	free_irq(dmac->irq, dmac);
	tasklet_kill(&dmac->chan.vchan.task);
	dma_async_device_unregister(&dmac->dma_dev);
	clk_disable_unprepare(dmac->clk);

	return 0;
}

static const struct of_device_id axi_dmac_of_match_table[] = {
	{ .compatible = "adi,axi-dmac-1.00.a" },
	{ },
};
MODULE_DEVICE_TABLE(of, axi_dmac_of_match_table);

static struct platform_driver axi_dmac_driver = {
	.driver = {
		.name = "dma-axi-dmac",
		.of_match_table = axi_dmac_of_match_table,
	},
	.probe = axi_dmac_probe,
	.remove = axi_dmac_remove,
};
module_platform_driver(axi_dmac_driver);

MODULE_AUTHOR("Lars-Peter Clausen <[email protected]>");
MODULE_DESCRIPTION("DMA controller driver for the AXI-DMAC controller");
MODULE_LICENSE("GPL v2");