mcf-edma.c

// SPDX-License-Identifier: GPL-2.0+
//
// Copyright (c) 2013-2014 Freescale Semiconductor, Inc
// Copyright (c) 2017 Sysam, Angelo Dureghello <[email protected]>

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/platform_device.h>
#include <linux/platform_data/dma-mcf-edma.h>

#include "fsl-edma-common.h"

#define EDMA_CHANNELS		64
#define EDMA_MASK_CH(x)		((x) & GENMASK(5, 0))

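/*
 * Transfer-complete interrupt handler: builds a 64-bit pending map from
 * the INTH/INTL registers, then for each pending channel acknowledges the
 * interrupt, completes the current descriptor (or runs the cyclic
 * callback) and starts the next queued descriptor, if any.
 */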
static irqreturn_t mcf_edma_tx_handler(int irq, void *dev_id)
{
	struct fsl_edma_engine *mcf_edma = dev_id;
	struct edma_regs *regs = &mcf_edma->regs;
	unsigned int ch;
	struct fsl_edma_chan *mcf_chan;
	u64 intmap;

	intmap = ioread32(regs->inth);
	intmap <<= 32;
	intmap |= ioread32(regs->intl);
	if (!intmap)
		return IRQ_NONE;

	for (ch = 0; ch < mcf_edma->n_chans; ch++) {
		if (intmap & BIT(ch)) {
			iowrite8(EDMA_MASK_CH(ch), regs->cint);

			mcf_chan = &mcf_edma->chans[ch];

			spin_lock(&mcf_chan->vchan.lock);

			if (!mcf_chan->edesc) {
				/* terminate_all called before */
				spin_unlock(&mcf_chan->vchan.lock);
				continue;
			}

			if (!mcf_chan->edesc->iscyclic) {
				list_del(&mcf_chan->edesc->vdesc.node);
				vchan_cookie_complete(&mcf_chan->edesc->vdesc);
				mcf_chan->edesc = NULL;
				mcf_chan->status = DMA_COMPLETE;
				mcf_chan->idle = true;
			} else {
				vchan_cyclic_callback(&mcf_chan->edesc->vdesc);
			}

			if (!mcf_chan->edesc)
				fsl_edma_xfer_desc(mcf_chan);

			spin_unlock(&mcf_chan->vchan.lock);
		}
	}

	return IRQ_HANDLED;
}

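/*
 * Error interrupt handler: scans the ERRL/ERRH error status registers
 * and, for each faulting channel, disables its hardware request, clears
 * the error flag (CERR) and marks the channel as errored and idle.
 */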
static irqreturn_t mcf_edma_err_handler(int irq, void *dev_id)
{
	struct fsl_edma_engine *mcf_edma = dev_id;
	struct edma_regs *regs = &mcf_edma->regs;
	unsigned int err, ch;

	err = ioread32(regs->errl);
	if (!err)
		return IRQ_NONE;

	for (ch = 0; ch < (EDMA_CHANNELS / 2); ch++) {
		if (err & BIT(ch)) {
			fsl_edma_disable_request(&mcf_edma->chans[ch]);
			iowrite8(EDMA_CERR_CERR(ch), regs->cerr);
			mcf_edma->chans[ch].status = DMA_ERROR;
			mcf_edma->chans[ch].idle = true;
		}
	}

	err = ioread32(regs->errh);
	if (!err)
		return IRQ_NONE;

	for (ch = (EDMA_CHANNELS / 2); ch < EDMA_CHANNELS; ch++) {
		if (err & (BIT(ch - (EDMA_CHANNELS / 2)))) {
			fsl_edma_disable_request(&mcf_edma->chans[ch]);
			iowrite8(EDMA_CERR_CERR(ch), regs->cerr);
			mcf_edma->chans[ch].status = DMA_ERROR;
			mcf_edma->chans[ch].idle = true;
		}
	}

	return IRQ_HANDLED;
}

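/*
 * IRQ setup, invoked from probe through drvdata->setup_irq: the ColdFire
 * platform code exposes the transfer-complete interrupts as the
 * "edma-tx-00-15", "edma-tx-16-55" and "edma-tx-56-63" resources plus a
 * single "edma-err" line; each one is wired to the handlers above.
 */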
static int mcf_edma_irq_init(struct platform_device *pdev,
				struct fsl_edma_engine *mcf_edma)
{
	int ret = 0, i;
	struct resource *res;

	res = platform_get_resource_byname(pdev,
				IORESOURCE_IRQ, "edma-tx-00-15");
	if (!res)
		return -1;

	for (ret = 0, i = res->start; i <= res->end; ++i)
		ret |= request_irq(i, mcf_edma_tx_handler, 0, "eDMA", mcf_edma);
	if (ret)
		return ret;

	res = platform_get_resource_byname(pdev,
			IORESOURCE_IRQ, "edma-tx-16-55");
	if (!res)
		return -1;

	for (ret = 0, i = res->start; i <= res->end; ++i)
		ret |= request_irq(i, mcf_edma_tx_handler, 0, "eDMA", mcf_edma);
	if (ret)
		return ret;

	ret = platform_get_irq_byname(pdev, "edma-tx-56-63");
	if (ret != -ENXIO) {
		ret = request_irq(ret, mcf_edma_tx_handler,
				  0, "eDMA", mcf_edma);
		if (ret)
			return ret;
	}

	ret = platform_get_irq_byname(pdev, "edma-err");
	if (ret != -ENXIO) {
		ret = request_irq(ret, mcf_edma_err_handler,
				  0, "eDMA", mcf_edma);
		if (ret)
			return ret;
	}

	return 0;
}

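/*
 * Counterpart of mcf_edma_irq_init(): releases every interrupt line that
 * was requested, looked up again by resource name.
 */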
static void mcf_edma_irq_free(struct platform_device *pdev,
				struct fsl_edma_engine *mcf_edma)
{
	int irq;
	struct resource *res;

	res = platform_get_resource_byname(pdev,
			IORESOURCE_IRQ, "edma-tx-00-15");
	if (res) {
		for (irq = res->start; irq <= res->end; irq++)
			free_irq(irq, mcf_edma);
	}

	res = platform_get_resource_byname(pdev,
			IORESOURCE_IRQ, "edma-tx-16-55");
	if (res) {
		for (irq = res->start; irq <= res->end; irq++)
			free_irq(irq, mcf_edma);
	}

	irq = platform_get_irq_byname(pdev, "edma-tx-56-63");
	if (irq != -ENXIO)
		free_irq(irq, mcf_edma);

	irq = platform_get_irq_byname(pdev, "edma-err");
	if (irq != -ENXIO)
		free_irq(irq, mcf_edma);
}

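/*
 * ColdFire-specific driver data consumed by the shared fsl-edma-common
 * code: v2 identifies the ColdFire variant of the eDMA block, and
 * setup_irq points at the IRQ wiring above.
 */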
static struct fsl_edma_drvdata mcf_data = {
	.version = v2,
	.setup_irq = mcf_edma_irq_init,
};

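/*
 * Probe: allocates the engine plus one fsl_edma_chan per channel from
 * platform data, maps the controller registers, initializes every
 * virt-dma channel, installs the interrupt handlers and registers the
 * dmaengine device. Round-robin group/channel arbitration is enabled
 * last, once the engine is ready to accept transfers.
 */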
static int mcf_edma_probe(struct platform_device *pdev)
{
	struct mcf_edma_platform_data *pdata;
	struct fsl_edma_engine *mcf_edma;
	struct fsl_edma_chan *mcf_chan;
	struct edma_regs *regs;
	struct resource *res;
	int ret, i, len, chans;

	pdata = dev_get_platdata(&pdev->dev);
	if (!pdata) {
		dev_err(&pdev->dev, "no platform data supplied\n");
		return -EINVAL;
	}

	if (!pdata->dma_channels) {
		dev_info(&pdev->dev, "setting default channel number to 64");
		chans = 64;
	} else {
		chans = pdata->dma_channels;
	}

	len = sizeof(*mcf_edma) + sizeof(*mcf_chan) * chans;
	mcf_edma = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
	if (!mcf_edma)
		return -ENOMEM;

	mcf_edma->n_chans = chans;

	/* Set up drvdata for ColdFire edma */
	mcf_edma->drvdata = &mcf_data;
	mcf_edma->big_endian = 1;

	mutex_init(&mcf_edma->fsl_edma_mutex);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	mcf_edma->membase = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(mcf_edma->membase))
		return PTR_ERR(mcf_edma->membase);

	fsl_edma_setup_regs(mcf_edma);
	regs = &mcf_edma->regs;

	INIT_LIST_HEAD(&mcf_edma->dma_dev.channels);
	for (i = 0; i < mcf_edma->n_chans; i++) {
		struct fsl_edma_chan *mcf_chan = &mcf_edma->chans[i];

		mcf_chan->edma = mcf_edma;
		mcf_chan->slave_id = i;
		mcf_chan->idle = true;
		mcf_chan->dma_dir = DMA_NONE;
		mcf_chan->vchan.desc_free = fsl_edma_free_desc;
		vchan_init(&mcf_chan->vchan, &mcf_edma->dma_dev);
		iowrite32(0x0, &regs->tcd[i].csr);
	}

	iowrite32(~0, regs->inth);
	iowrite32(~0, regs->intl);

	ret = mcf_edma->drvdata->setup_irq(pdev, mcf_edma);
	if (ret)
		return ret;

	dma_cap_set(DMA_PRIVATE, mcf_edma->dma_dev.cap_mask);
	dma_cap_set(DMA_SLAVE, mcf_edma->dma_dev.cap_mask);
	dma_cap_set(DMA_CYCLIC, mcf_edma->dma_dev.cap_mask);

	mcf_edma->dma_dev.dev = &pdev->dev;
	mcf_edma->dma_dev.device_alloc_chan_resources =
			fsl_edma_alloc_chan_resources;
	mcf_edma->dma_dev.device_free_chan_resources =
			fsl_edma_free_chan_resources;
	mcf_edma->dma_dev.device_config = fsl_edma_slave_config;
	mcf_edma->dma_dev.device_prep_dma_cyclic =
			fsl_edma_prep_dma_cyclic;
	mcf_edma->dma_dev.device_prep_slave_sg = fsl_edma_prep_slave_sg;
	mcf_edma->dma_dev.device_tx_status = fsl_edma_tx_status;
	mcf_edma->dma_dev.device_pause = fsl_edma_pause;
	mcf_edma->dma_dev.device_resume = fsl_edma_resume;
	mcf_edma->dma_dev.device_terminate_all = fsl_edma_terminate_all;
	mcf_edma->dma_dev.device_issue_pending = fsl_edma_issue_pending;

	mcf_edma->dma_dev.src_addr_widths = FSL_EDMA_BUSWIDTHS;
	mcf_edma->dma_dev.dst_addr_widths = FSL_EDMA_BUSWIDTHS;
	mcf_edma->dma_dev.directions =
			BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);

	mcf_edma->dma_dev.filter.fn = mcf_edma_filter_fn;
	mcf_edma->dma_dev.filter.map = pdata->slave_map;
	mcf_edma->dma_dev.filter.mapcnt = pdata->slavecnt;

	platform_set_drvdata(pdev, mcf_edma);

	ret = dma_async_device_register(&mcf_edma->dma_dev);
	if (ret) {
		dev_err(&pdev->dev,
			"Can't register Freescale eDMA engine. (%d)\n", ret);
		return ret;
	}

	/* Enable round robin arbitration */
	iowrite32(EDMA_CR_ERGA | EDMA_CR_ERCA, regs->cr);

	return 0;
}

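/*
 * Remove: frees the interrupt lines, cleans up the virt-dma channels and
 * unregisters the device from the dmaengine core.
 */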
static int mcf_edma_remove(struct platform_device *pdev)
{
	struct fsl_edma_engine *mcf_edma = platform_get_drvdata(pdev);

	mcf_edma_irq_free(pdev, mcf_edma);
	fsl_edma_cleanup_vchan(&mcf_edma->dma_dev);
	dma_async_device_unregister(&mcf_edma->dma_dev);

	return 0;
}

static struct platform_driver mcf_edma_driver = {
	.driver		= {
		.name	= "mcf-edma",
	},
	.probe		= mcf_edma_probe,
	.remove		= mcf_edma_remove,
};

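/*
 * Channel filter for dma_request_channel(): matches only channels owned
 * by this driver whose slave_id equals the requested id. An illustrative
 * sketch of a caller (not taken from this file, names are hypothetical):
 *
 *	dma_cap_mask_t mask;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, mcf_edma_filter_fn,
 *				   (void *)(uintptr_t)slave_id);
 */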
bool mcf_edma_filter_fn(struct dma_chan *chan, void *param)
{
	if (chan->device->dev->driver == &mcf_edma_driver.driver) {
		struct fsl_edma_chan *mcf_chan = to_fsl_edma_chan(chan);

		return (mcf_chan->slave_id == (uintptr_t)param);
	}

	return false;
}
EXPORT_SYMBOL(mcf_edma_filter_fn);

static int __init mcf_edma_init(void)
{
	return platform_driver_register(&mcf_edma_driver);
}
subsys_initcall(mcf_edma_init);

static void __exit mcf_edma_exit(void)
{
	platform_driver_unregister(&mcf_edma_driver);
}
module_exit(mcf_edma_exit);

MODULE_ALIAS("platform:mcf-edma");
MODULE_DESCRIPTION("Freescale eDMA engine driver, ColdFire family");
MODULE_LICENSE("GPL v2");