fsl-edma-common.c

// SPDX-License-Identifier: GPL-2.0+
//
// Copyright (c) 2013-2014 Freescale Semiconductor, Inc
// Copyright (c) 2017 Sysam, Angelo Dureghello <[email protected]>

#include <linux/dmapool.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>

#include "fsl-edma-common.h"

#define EDMA_CR			0x00
#define EDMA_ES			0x04
#define EDMA_ERQ		0x0C
#define EDMA_EEI		0x14
#define EDMA_SERQ		0x1B
#define EDMA_CERQ		0x1A
#define EDMA_SEEI		0x19
#define EDMA_CEEI		0x18
#define EDMA_CINT		0x1F
#define EDMA_CERR		0x1E
#define EDMA_SSRT		0x1D
#define EDMA_CDNE		0x1C
#define EDMA_INTR		0x24
#define EDMA_ERR		0x2C

#define EDMA64_ERQH		0x08
#define EDMA64_EEIH		0x10
#define EDMA64_SERQ		0x18
#define EDMA64_CERQ		0x19
#define EDMA64_SEEI		0x1a
#define EDMA64_CEEI		0x1b
#define EDMA64_CINT		0x1c
#define EDMA64_CERR		0x1d
#define EDMA64_SSRT		0x1e
#define EDMA64_CDNE		0x1f
#define EDMA64_INTH		0x20
#define EDMA64_INTL		0x24
#define EDMA64_ERRH		0x28
#define EDMA64_ERRL		0x2c

#define EDMA_TCD		0x1000

static void fsl_edma_enable_request(struct fsl_edma_chan *fsl_chan)
{
	struct edma_regs *regs = &fsl_chan->edma->regs;
	u32 ch = fsl_chan->vchan.chan.chan_id;

	if (fsl_chan->edma->drvdata->version == v1) {
		edma_writeb(fsl_chan->edma, EDMA_SEEI_SEEI(ch), regs->seei);
		edma_writeb(fsl_chan->edma, ch, regs->serq);
	} else {
		/* ColdFire is big endian, and accesses natively
		 * big endian I/O peripherals
		 */
		iowrite8(EDMA_SEEI_SEEI(ch), regs->seei);
		iowrite8(ch, regs->serq);
	}
}

void fsl_edma_disable_request(struct fsl_edma_chan *fsl_chan)
{
	struct edma_regs *regs = &fsl_chan->edma->regs;
	u32 ch = fsl_chan->vchan.chan.chan_id;

	if (fsl_chan->edma->drvdata->version == v1) {
		edma_writeb(fsl_chan->edma, ch, regs->cerq);
		edma_writeb(fsl_chan->edma, EDMA_CEEI_CEEI(ch), regs->ceei);
	} else {
		/* ColdFire is big endian, and accesses natively
		 * big endian I/O peripherals
		 */
		iowrite8(ch, regs->cerq);
		iowrite8(EDMA_CEEI_CEEI(ch), regs->ceei);
	}
}
EXPORT_SYMBOL_GPL(fsl_edma_disable_request);

static void mux_configure8(struct fsl_edma_chan *fsl_chan, void __iomem *addr,
			   u32 off, u32 slot, bool enable)
{
	u8 val8;

	if (enable)
		val8 = EDMAMUX_CHCFG_ENBL | slot;
	else
		val8 = EDMAMUX_CHCFG_DIS;

	iowrite8(val8, addr + off);
}

static void mux_configure32(struct fsl_edma_chan *fsl_chan, void __iomem *addr,
			    u32 off, u32 slot, bool enable)
{
	u32 val;

	if (enable)
		val = EDMAMUX_CHCFG_ENBL << 24 | slot;
	else
		val = EDMAMUX_CHCFG_DIS;

	iowrite32(val, addr + off * 4);
}

void fsl_edma_chan_mux(struct fsl_edma_chan *fsl_chan,
		       unsigned int slot, bool enable)
{
	u32 ch = fsl_chan->vchan.chan.chan_id;
	void __iomem *muxaddr;
	unsigned int chans_per_mux, ch_off;
	int endian_diff[4] = {3, 1, -1, -3};
	u32 dmamux_nr = fsl_chan->edma->drvdata->dmamuxs;

	chans_per_mux = fsl_chan->edma->n_chans / dmamux_nr;
	ch_off = fsl_chan->vchan.chan.chan_id % chans_per_mux;

	if (fsl_chan->edma->drvdata->mux_swap)
		ch_off += endian_diff[ch_off % 4];

	muxaddr = fsl_chan->edma->muxbase[ch / chans_per_mux];
	slot = EDMAMUX_CHCFG_SOURCE(slot);

	if (fsl_chan->edma->drvdata->version == v3)
		mux_configure32(fsl_chan, muxaddr, ch_off, slot, enable);
	else
		mux_configure8(fsl_chan, muxaddr, ch_off, slot, enable);
}
EXPORT_SYMBOL_GPL(fsl_edma_chan_mux);

static unsigned int fsl_edma_get_tcd_attr(enum dma_slave_buswidth addr_width)
{
	switch (addr_width) {
	case 1:
		return EDMA_TCD_ATTR_SSIZE_8BIT | EDMA_TCD_ATTR_DSIZE_8BIT;
	case 2:
		return EDMA_TCD_ATTR_SSIZE_16BIT | EDMA_TCD_ATTR_DSIZE_16BIT;
	case 4:
		return EDMA_TCD_ATTR_SSIZE_32BIT | EDMA_TCD_ATTR_DSIZE_32BIT;
	case 8:
		return EDMA_TCD_ATTR_SSIZE_64BIT | EDMA_TCD_ATTR_DSIZE_64BIT;
	default:
		return EDMA_TCD_ATTR_SSIZE_32BIT | EDMA_TCD_ATTR_DSIZE_32BIT;
	}
}

void fsl_edma_free_desc(struct virt_dma_desc *vdesc)
{
	struct fsl_edma_desc *fsl_desc;
	int i;

	fsl_desc = to_fsl_edma_desc(vdesc);
	for (i = 0; i < fsl_desc->n_tcds; i++)
		dma_pool_free(fsl_desc->echan->tcd_pool, fsl_desc->tcd[i].vtcd,
			      fsl_desc->tcd[i].ptcd);
	kfree(fsl_desc);
}
EXPORT_SYMBOL_GPL(fsl_edma_free_desc);

int fsl_edma_terminate_all(struct dma_chan *chan)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
	fsl_edma_disable_request(fsl_chan);
	fsl_chan->edesc = NULL;
	fsl_chan->idle = true;
	vchan_get_all_descriptors(&fsl_chan->vchan, &head);
	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
	vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
	return 0;
}
EXPORT_SYMBOL_GPL(fsl_edma_terminate_all);

int fsl_edma_pause(struct dma_chan *chan)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
	if (fsl_chan->edesc) {
		fsl_edma_disable_request(fsl_chan);
		fsl_chan->status = DMA_PAUSED;
		fsl_chan->idle = true;
	}
	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(fsl_edma_pause);

int fsl_edma_resume(struct dma_chan *chan)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
	if (fsl_chan->edesc) {
		fsl_edma_enable_request(fsl_chan);
		fsl_chan->status = DMA_IN_PROGRESS;
		fsl_chan->idle = false;
	}
	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(fsl_edma_resume);

static void fsl_edma_unprep_slave_dma(struct fsl_edma_chan *fsl_chan)
{
	if (fsl_chan->dma_dir != DMA_NONE)
		dma_unmap_resource(fsl_chan->vchan.chan.device->dev,
				   fsl_chan->dma_dev_addr,
				   fsl_chan->dma_dev_size,
				   fsl_chan->dma_dir, 0);
	fsl_chan->dma_dir = DMA_NONE;
}

static bool fsl_edma_prep_slave_dma(struct fsl_edma_chan *fsl_chan,
				    enum dma_transfer_direction dir)
{
	struct device *dev = fsl_chan->vchan.chan.device->dev;
	enum dma_data_direction dma_dir;
	phys_addr_t addr = 0;
	u32 size = 0;

	switch (dir) {
	case DMA_MEM_TO_DEV:
		dma_dir = DMA_FROM_DEVICE;
		addr = fsl_chan->cfg.dst_addr;
		size = fsl_chan->cfg.dst_maxburst;
		break;
	case DMA_DEV_TO_MEM:
		dma_dir = DMA_TO_DEVICE;
		addr = fsl_chan->cfg.src_addr;
		size = fsl_chan->cfg.src_maxburst;
		break;
	default:
		dma_dir = DMA_NONE;
		break;
	}

	/* Already mapped for this config? */
	if (fsl_chan->dma_dir == dma_dir)
		return true;

	fsl_edma_unprep_slave_dma(fsl_chan);

	fsl_chan->dma_dev_addr = dma_map_resource(dev, addr, size, dma_dir, 0);
	if (dma_mapping_error(dev, fsl_chan->dma_dev_addr))
		return false;
	fsl_chan->dma_dev_size = size;
	fsl_chan->dma_dir = dma_dir;

	return true;
}

int fsl_edma_slave_config(struct dma_chan *chan,
			  struct dma_slave_config *cfg)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);

	memcpy(&fsl_chan->cfg, cfg, sizeof(*cfg));
	fsl_edma_unprep_slave_dma(fsl_chan);

	return 0;
}
EXPORT_SYMBOL_GPL(fsl_edma_slave_config);

static size_t fsl_edma_desc_residue(struct fsl_edma_chan *fsl_chan,
				    struct virt_dma_desc *vdesc, bool in_progress)
{
	struct fsl_edma_desc *edesc = fsl_chan->edesc;
	struct edma_regs *regs = &fsl_chan->edma->regs;
	u32 ch = fsl_chan->vchan.chan.chan_id;
	enum dma_transfer_direction dir = edesc->dirn;
	dma_addr_t cur_addr, dma_addr;
	size_t len, size;
	int i;

	/* calculate the total size in this desc */
	for (len = i = 0; i < fsl_chan->edesc->n_tcds; i++)
		len += le32_to_cpu(edesc->tcd[i].vtcd->nbytes)
			* le16_to_cpu(edesc->tcd[i].vtcd->biter);

	if (!in_progress)
		return len;

	if (dir == DMA_MEM_TO_DEV)
		cur_addr = edma_readl(fsl_chan->edma, &regs->tcd[ch].saddr);
	else
		cur_addr = edma_readl(fsl_chan->edma, &regs->tcd[ch].daddr);

	/* figure out the finished and calculate the residue */
	for (i = 0; i < fsl_chan->edesc->n_tcds; i++) {
		size = le32_to_cpu(edesc->tcd[i].vtcd->nbytes)
			* le16_to_cpu(edesc->tcd[i].vtcd->biter);
		if (dir == DMA_MEM_TO_DEV)
			dma_addr = le32_to_cpu(edesc->tcd[i].vtcd->saddr);
		else
			dma_addr = le32_to_cpu(edesc->tcd[i].vtcd->daddr);

		len -= size;
		if (cur_addr >= dma_addr && cur_addr < dma_addr + size) {
			len += dma_addr + size - cur_addr;
			break;
		}
	}

	return len;
}

enum dma_status fsl_edma_tx_status(struct dma_chan *chan,
				   dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	struct virt_dma_desc *vdesc;
	enum dma_status status;
	unsigned long flags;

	status = dma_cookie_status(chan, cookie, txstate);
	if (status == DMA_COMPLETE)
		return status;

	if (!txstate)
		return fsl_chan->status;

	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
	vdesc = vchan_find_desc(&fsl_chan->vchan, cookie);
	if (fsl_chan->edesc && cookie == fsl_chan->edesc->vdesc.tx.cookie)
		txstate->residue =
			fsl_edma_desc_residue(fsl_chan, vdesc, true);
	else if (vdesc)
		txstate->residue =
			fsl_edma_desc_residue(fsl_chan, vdesc, false);
	else
		txstate->residue = 0;

	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);

	return fsl_chan->status;
}
EXPORT_SYMBOL_GPL(fsl_edma_tx_status);

static void fsl_edma_set_tcd_regs(struct fsl_edma_chan *fsl_chan,
				  struct fsl_edma_hw_tcd *tcd)
{
	struct fsl_edma_engine *edma = fsl_chan->edma;
	struct edma_regs *regs = &fsl_chan->edma->regs;
	u32 ch = fsl_chan->vchan.chan.chan_id;
	u16 csr = 0;

	/*
	 * TCD parameters are stored in struct fsl_edma_hw_tcd in little
	 * endian format. However, we need to load the TCD registers in
	 * big- or little-endian obeying the eDMA engine model endian,
	 * and this is performed from specific edma_write functions
	 */
	edma_writew(edma, 0, &regs->tcd[ch].csr);

	edma_writel(edma, (s32)tcd->saddr, &regs->tcd[ch].saddr);
	edma_writel(edma, (s32)tcd->daddr, &regs->tcd[ch].daddr);

	edma_writew(edma, (s16)tcd->attr, &regs->tcd[ch].attr);
	edma_writew(edma, tcd->soff, &regs->tcd[ch].soff);

	edma_writel(edma, (s32)tcd->nbytes, &regs->tcd[ch].nbytes);
	edma_writel(edma, (s32)tcd->slast, &regs->tcd[ch].slast);

	edma_writew(edma, (s16)tcd->citer, &regs->tcd[ch].citer);
	edma_writew(edma, (s16)tcd->biter, &regs->tcd[ch].biter);
	edma_writew(edma, (s16)tcd->doff, &regs->tcd[ch].doff);

	edma_writel(edma, (s32)tcd->dlast_sga,
			&regs->tcd[ch].dlast_sga);

	if (fsl_chan->is_sw) {
		csr = le16_to_cpu(tcd->csr);
		csr |= EDMA_TCD_CSR_START;
		tcd->csr = cpu_to_le16(csr);
	}

	edma_writew(edma, (s16)tcd->csr, &regs->tcd[ch].csr);
}

static inline
void fsl_edma_fill_tcd(struct fsl_edma_hw_tcd *tcd, u32 src, u32 dst,
		       u16 attr, u16 soff, u32 nbytes, u32 slast, u16 citer,
		       u16 biter, u16 doff, u32 dlast_sga, bool major_int,
		       bool disable_req, bool enable_sg)
{
	u16 csr = 0;

	/*
	 * eDMA hardware SGs require the TCDs to be stored in little
	 * endian format irrespective of the register endian model.
	 * So we put the value in little endian in memory, waiting
	 * for fsl_edma_set_tcd_regs doing the swap.
	 */
	tcd->saddr = cpu_to_le32(src);
	tcd->daddr = cpu_to_le32(dst);

	tcd->attr = cpu_to_le16(attr);
	tcd->soff = cpu_to_le16(soff);

	tcd->nbytes = cpu_to_le32(nbytes);
	tcd->slast = cpu_to_le32(slast);

	tcd->citer = cpu_to_le16(EDMA_TCD_CITER_CITER(citer));
	tcd->doff = cpu_to_le16(doff);

	tcd->dlast_sga = cpu_to_le32(dlast_sga);

	tcd->biter = cpu_to_le16(EDMA_TCD_BITER_BITER(biter));

	if (major_int)
		csr |= EDMA_TCD_CSR_INT_MAJOR;

	if (disable_req)
		csr |= EDMA_TCD_CSR_D_REQ;

	if (enable_sg)
		csr |= EDMA_TCD_CSR_E_SG;

	tcd->csr = cpu_to_le16(csr);
}

static struct fsl_edma_desc *fsl_edma_alloc_desc(struct fsl_edma_chan *fsl_chan,
						 int sg_len)
{
	struct fsl_edma_desc *fsl_desc;
	int i;

	fsl_desc = kzalloc(struct_size(fsl_desc, tcd, sg_len), GFP_NOWAIT);
	if (!fsl_desc)
		return NULL;

	fsl_desc->echan = fsl_chan;
	fsl_desc->n_tcds = sg_len;
	for (i = 0; i < sg_len; i++) {
		fsl_desc->tcd[i].vtcd = dma_pool_alloc(fsl_chan->tcd_pool,
					GFP_NOWAIT, &fsl_desc->tcd[i].ptcd);
		if (!fsl_desc->tcd[i].vtcd)
			goto err;
	}
	return fsl_desc;

err:
	while (--i >= 0)
		dma_pool_free(fsl_chan->tcd_pool, fsl_desc->tcd[i].vtcd,
			      fsl_desc->tcd[i].ptcd);
	kfree(fsl_desc);
	return NULL;
}

struct dma_async_tx_descriptor *fsl_edma_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction direction,
		unsigned long flags)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	struct fsl_edma_desc *fsl_desc;
	dma_addr_t dma_buf_next;
	int sg_len, i;
	u32 src_addr, dst_addr, last_sg, nbytes;
	u16 soff, doff, iter;

	if (!is_slave_direction(direction))
		return NULL;

	if (!fsl_edma_prep_slave_dma(fsl_chan, direction))
		return NULL;

	sg_len = buf_len / period_len;

	fsl_desc = fsl_edma_alloc_desc(fsl_chan, sg_len);
	if (!fsl_desc)
		return NULL;
	fsl_desc->iscyclic = true;
	fsl_desc->dirn = direction;

	dma_buf_next = dma_addr;
	if (direction == DMA_MEM_TO_DEV) {
		fsl_chan->attr =
			fsl_edma_get_tcd_attr(fsl_chan->cfg.dst_addr_width);
		nbytes = fsl_chan->cfg.dst_addr_width *
			fsl_chan->cfg.dst_maxburst;
	} else {
		fsl_chan->attr =
			fsl_edma_get_tcd_attr(fsl_chan->cfg.src_addr_width);
		nbytes = fsl_chan->cfg.src_addr_width *
			fsl_chan->cfg.src_maxburst;
	}

	iter = period_len / nbytes;

	for (i = 0; i < sg_len; i++) {
		if (dma_buf_next >= dma_addr + buf_len)
			dma_buf_next = dma_addr;

		/* get next sg's physical address */
		last_sg = fsl_desc->tcd[(i + 1) % sg_len].ptcd;

		if (direction == DMA_MEM_TO_DEV) {
			src_addr = dma_buf_next;
			dst_addr = fsl_chan->dma_dev_addr;
			soff = fsl_chan->cfg.dst_addr_width;
			doff = 0;
		} else {
			src_addr = fsl_chan->dma_dev_addr;
			dst_addr = dma_buf_next;
			soff = 0;
			doff = fsl_chan->cfg.src_addr_width;
		}

		fsl_edma_fill_tcd(fsl_desc->tcd[i].vtcd, src_addr, dst_addr,
				  fsl_chan->attr, soff, nbytes, 0, iter,
				  iter, doff, last_sg, true, false, true);
		dma_buf_next += period_len;
	}

	return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
}
EXPORT_SYMBOL_GPL(fsl_edma_prep_dma_cyclic);
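
/*
 * A hedged client-side sketch (not code from this file): a peripheral driver
 * reaches fsl_edma_prep_dma_cyclic() through the generic dmaengine API,
 * roughly as below. fifo_phys, buf_dma, buf_len and period_len are
 * placeholder values the client must supply.
 *
 *	struct dma_slave_config cfg = {
 *		.direction	= DMA_DEV_TO_MEM,
 *		.src_addr	= fifo_phys,
 *		.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.src_maxburst	= 4,
 *	};
 *
 *	dmaengine_slave_config(chan, &cfg);
 *	desc = dmaengine_prep_dma_cyclic(chan, buf_dma, buf_len, period_len,
 *					 DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
 *	dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 */
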
struct dma_async_tx_descriptor *fsl_edma_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	struct fsl_edma_desc *fsl_desc;
	struct scatterlist *sg;
	u32 src_addr, dst_addr, last_sg, nbytes;
	u16 soff, doff, iter;
	int i;

	if (!is_slave_direction(direction))
		return NULL;

	if (!fsl_edma_prep_slave_dma(fsl_chan, direction))
		return NULL;

	fsl_desc = fsl_edma_alloc_desc(fsl_chan, sg_len);
	if (!fsl_desc)
		return NULL;
	fsl_desc->iscyclic = false;
	fsl_desc->dirn = direction;

	if (direction == DMA_MEM_TO_DEV) {
		fsl_chan->attr =
			fsl_edma_get_tcd_attr(fsl_chan->cfg.dst_addr_width);
		nbytes = fsl_chan->cfg.dst_addr_width *
			fsl_chan->cfg.dst_maxburst;
	} else {
		fsl_chan->attr =
			fsl_edma_get_tcd_attr(fsl_chan->cfg.src_addr_width);
		nbytes = fsl_chan->cfg.src_addr_width *
			fsl_chan->cfg.src_maxburst;
	}

	for_each_sg(sgl, sg, sg_len, i) {
		if (direction == DMA_MEM_TO_DEV) {
			src_addr = sg_dma_address(sg);
			dst_addr = fsl_chan->dma_dev_addr;
			soff = fsl_chan->cfg.dst_addr_width;
			doff = 0;
		} else {
			src_addr = fsl_chan->dma_dev_addr;
			dst_addr = sg_dma_address(sg);
			soff = 0;
			doff = fsl_chan->cfg.src_addr_width;
		}

		iter = sg_dma_len(sg) / nbytes;
		if (i < sg_len - 1) {
			last_sg = fsl_desc->tcd[(i + 1)].ptcd;
			fsl_edma_fill_tcd(fsl_desc->tcd[i].vtcd, src_addr,
					  dst_addr, fsl_chan->attr, soff,
					  nbytes, 0, iter, iter, doff, last_sg,
					  false, false, true);
		} else {
			last_sg = 0;
			fsl_edma_fill_tcd(fsl_desc->tcd[i].vtcd, src_addr,
					  dst_addr, fsl_chan->attr, soff,
					  nbytes, 0, iter, iter, doff, last_sg,
					  true, true, false);
		}
	}

	return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
}
EXPORT_SYMBOL_GPL(fsl_edma_prep_slave_sg);

struct dma_async_tx_descriptor *fsl_edma_prep_memcpy(struct dma_chan *chan,
		dma_addr_t dma_dst, dma_addr_t dma_src,
		size_t len, unsigned long flags)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	struct fsl_edma_desc *fsl_desc;

	fsl_desc = fsl_edma_alloc_desc(fsl_chan, 1);
	if (!fsl_desc)
		return NULL;
	fsl_desc->iscyclic = false;

	fsl_chan->is_sw = true;

	/* To match with copy_align and max_seg_size so 1 tcd is enough */
	fsl_edma_fill_tcd(fsl_desc->tcd[0].vtcd, dma_src, dma_dst,
			EDMA_TCD_ATTR_SSIZE_32BYTE | EDMA_TCD_ATTR_DSIZE_32BYTE,
			32, len, 0, 1, 1, 32, 0, true, true, false);

	return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
}
EXPORT_SYMBOL_GPL(fsl_edma_prep_memcpy);
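
/*
 * A hedged usage sketch (an assumption, not code from this file): memcpy
 * offload arrives here via dmaengine_prep_dma_memcpy(). dst, src and len
 * are placeholder DMA addresses/length obtained by the client.
 *
 *	desc = dmaengine_prep_dma_memcpy(chan, dst, src, len,
 *					 DMA_PREP_INTERRUPT);
 *	if (desc) {
 *		dmaengine_submit(desc);
 *		dma_async_issue_pending(chan);
 *	}
 */
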
void fsl_edma_xfer_desc(struct fsl_edma_chan *fsl_chan)
{
	struct virt_dma_desc *vdesc;

	lockdep_assert_held(&fsl_chan->vchan.lock);

	vdesc = vchan_next_desc(&fsl_chan->vchan);
	if (!vdesc)
		return;
	fsl_chan->edesc = to_fsl_edma_desc(vdesc);
	fsl_edma_set_tcd_regs(fsl_chan, fsl_chan->edesc->tcd[0].vtcd);
	fsl_edma_enable_request(fsl_chan);
	fsl_chan->status = DMA_IN_PROGRESS;
	fsl_chan->idle = false;
}
EXPORT_SYMBOL_GPL(fsl_edma_xfer_desc);

void fsl_edma_issue_pending(struct dma_chan *chan)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);

	if (unlikely(fsl_chan->pm_state != RUNNING)) {
		spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
		/* cannot submit due to suspend */
		return;
	}

	if (vchan_issue_pending(&fsl_chan->vchan) && !fsl_chan->edesc)
		fsl_edma_xfer_desc(fsl_chan);

	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
}
EXPORT_SYMBOL_GPL(fsl_edma_issue_pending);

int fsl_edma_alloc_chan_resources(struct dma_chan *chan)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);

	fsl_chan->tcd_pool = dma_pool_create("tcd_pool", chan->device->dev,
				sizeof(struct fsl_edma_hw_tcd),
				32, 0);
	return 0;
}
EXPORT_SYMBOL_GPL(fsl_edma_alloc_chan_resources);

void fsl_edma_free_chan_resources(struct dma_chan *chan)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	struct fsl_edma_engine *edma = fsl_chan->edma;
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
	fsl_edma_disable_request(fsl_chan);
	if (edma->drvdata->dmamuxs)
		fsl_edma_chan_mux(fsl_chan, 0, false);
	fsl_chan->edesc = NULL;
	vchan_get_all_descriptors(&fsl_chan->vchan, &head);
	fsl_edma_unprep_slave_dma(fsl_chan);
	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);

	vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
	dma_pool_destroy(fsl_chan->tcd_pool);
	fsl_chan->tcd_pool = NULL;
	fsl_chan->is_sw = false;
}
EXPORT_SYMBOL_GPL(fsl_edma_free_chan_resources);

void fsl_edma_cleanup_vchan(struct dma_device *dmadev)
{
	struct fsl_edma_chan *chan, *_chan;

	list_for_each_entry_safe(chan, _chan,
				 &dmadev->channels, vchan.chan.device_node) {
		list_del(&chan->vchan.chan.device_node);
		tasklet_kill(&chan->vchan.task);
	}
}
EXPORT_SYMBOL_GPL(fsl_edma_cleanup_vchan);

/*
 * On the 32 channels Vybrid/mpc577x edma version (here called "v1"),
 * register offsets are different compared to ColdFire mcf5441x 64 channels
 * edma (here called "v2").
 *
 * This function sets up the register offsets according to the declared
 * eDMA version, so it must be called in xxx_edma_probe() just after setting
 * the edma "version" and "membase" appropriately.
 */
void fsl_edma_setup_regs(struct fsl_edma_engine *edma)
{
	edma->regs.cr = edma->membase + EDMA_CR;
	edma->regs.es = edma->membase + EDMA_ES;
	edma->regs.erql = edma->membase + EDMA_ERQ;
	edma->regs.eeil = edma->membase + EDMA_EEI;

	edma->regs.serq = edma->membase + ((edma->drvdata->version == v2) ?
			EDMA64_SERQ : EDMA_SERQ);
	edma->regs.cerq = edma->membase + ((edma->drvdata->version == v2) ?
			EDMA64_CERQ : EDMA_CERQ);
	edma->regs.seei = edma->membase + ((edma->drvdata->version == v2) ?
			EDMA64_SEEI : EDMA_SEEI);
	edma->regs.ceei = edma->membase + ((edma->drvdata->version == v2) ?
			EDMA64_CEEI : EDMA_CEEI);
	edma->regs.cint = edma->membase + ((edma->drvdata->version == v2) ?
			EDMA64_CINT : EDMA_CINT);
	edma->regs.cerr = edma->membase + ((edma->drvdata->version == v2) ?
			EDMA64_CERR : EDMA_CERR);
	edma->regs.ssrt = edma->membase + ((edma->drvdata->version == v2) ?
			EDMA64_SSRT : EDMA_SSRT);
	edma->regs.cdne = edma->membase + ((edma->drvdata->version == v2) ?
			EDMA64_CDNE : EDMA_CDNE);
	edma->regs.intl = edma->membase + ((edma->drvdata->version == v2) ?
			EDMA64_INTL : EDMA_INTR);
	edma->regs.errl = edma->membase + ((edma->drvdata->version == v2) ?
			EDMA64_ERRL : EDMA_ERR);

	if (edma->drvdata->version == v2) {
		edma->regs.erqh = edma->membase + EDMA64_ERQH;
		edma->regs.eeih = edma->membase + EDMA64_EEIH;
		edma->regs.errh = edma->membase + EDMA64_ERRH;
		edma->regs.inth = edma->membase + EDMA64_INTH;
	}

	edma->regs.tcd = edma->membase + EDMA_TCD;
}
EXPORT_SYMBOL_GPL(fsl_edma_setup_regs);
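
/*
 * A minimal probe-time sketch (an assumption, not taken from this driver):
 * the platform probe maps the controller registers, picks its drvdata and
 * only then calls fsl_edma_setup_regs(), e.g.:
 *
 *	fsl_edma->membase = devm_platform_ioremap_resource(pdev, 0);
 *	fsl_edma->drvdata = of_device_get_match_data(&pdev->dev);
 *	fsl_edma_setup_regs(fsl_edma);
 */
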
MODULE_LICENSE("GPL v2");