stm32-dmamux.c 10 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. *
  4. * Copyright (C) STMicroelectronics SA 2017
  5. * Author(s): M'boumba Cedric Madianga <[email protected]>
  6. * Pierre-Yves Mordret <[email protected]>
  7. *
  8. * DMA Router driver for STM32 DMA MUX
  9. *
  10. * Based on TI DMA Crossbar driver
  11. */
  12. #include <linux/clk.h>
  13. #include <linux/delay.h>
  14. #include <linux/err.h>
  15. #include <linux/init.h>
  16. #include <linux/module.h>
  17. #include <linux/of_device.h>
  18. #include <linux/of_dma.h>
  19. #include <linux/pm_runtime.h>
  20. #include <linux/reset.h>
  21. #include <linux/slab.h>
  22. #include <linux/spinlock.h>
/* Per-channel configuration register: one 32-bit CCR per mux output channel */
#define STM32_DMAMUX_CCR(x)		(0x4 * (x))
/* Hardware limit of mux output channels (routes toward the DMA masters) */
#define STM32_DMAMUX_MAX_DMA_REQUESTS	32
/* Default number of mux inputs (peripheral request lines) when DT is silent */
#define STM32_DMAMUX_MAX_REQUESTS	255

/* One allocated route: mux input "request" wired to channel "chan_id" of DMA "master" */
struct stm32_dmamux {
	u32 master;
	u32 request;
	u32 chan_id;
};
/* Driver state shared by the router callbacks and the PM hooks */
struct stm32_dmamux_data {
	struct dma_router dmarouter;
	struct clk *clk;
	void __iomem *iomem;
	u32 dma_requests; /* Number of DMA requests connected to DMAMUX */
	u32 dmamux_requests; /* Number of DMA requests routed toward DMAs */
	spinlock_t lock; /* Protects register access */
	DECLARE_BITMAP(dma_inuse, STM32_DMAMUX_MAX_DMA_REQUESTS); /* Used DMA channel */
	u32 ccr[STM32_DMAMUX_MAX_DMA_REQUESTS]; /* Used to backup CCR register
						 * in suspend
						 */
	u32 dma_reqs[]; /* Number of DMA Request per DMA masters.
			 *  [0] holds number of DMA Masters.
			 *  To be kept at very end of this structure
			 */
};
  47. static inline u32 stm32_dmamux_read(void __iomem *iomem, u32 reg)
  48. {
  49. return readl_relaxed(iomem + reg);
  50. }
  51. static inline void stm32_dmamux_write(void __iomem *iomem, u32 reg, u32 val)
  52. {
  53. writel_relaxed(val, iomem + reg);
  54. }
  55. static void stm32_dmamux_free(struct device *dev, void *route_data)
  56. {
  57. struct stm32_dmamux_data *dmamux = dev_get_drvdata(dev);
  58. struct stm32_dmamux *mux = route_data;
  59. unsigned long flags;
  60. /* Clear dma request */
  61. spin_lock_irqsave(&dmamux->lock, flags);
  62. stm32_dmamux_write(dmamux->iomem, STM32_DMAMUX_CCR(mux->chan_id), 0);
  63. clear_bit(mux->chan_id, dmamux->dma_inuse);
  64. pm_runtime_put_sync(dev);
  65. spin_unlock_irqrestore(&dmamux->lock, flags);
  66. dev_dbg(dev, "Unmapping DMAMUX(%u) to DMA%u(%u)\n",
  67. mux->request, mux->master, mux->chan_id);
  68. kfree(mux);
  69. }
/*
 * Router .route_allocate callback: translate a 3-cell DMAMUX dma-spec
 * (args[0] = mux request ID, args[1] = channel config, args[2] = features)
 * into a 4-cell dma-spec aimed at the DMA master that owns the chosen mux
 * output channel, and program the mux channel accordingly.
 *
 * Returns the allocated struct stm32_dmamux (released later through
 * stm32_dmamux_free()) or an ERR_PTR() on failure.
 */
static void *stm32_dmamux_route_allocate(struct of_phandle_args *dma_spec,
					 struct of_dma *ofdma)
{
	struct platform_device *pdev = of_find_device_by_node(ofdma->of_node);
	struct stm32_dmamux_data *dmamux = platform_get_drvdata(pdev);
	struct stm32_dmamux *mux;
	u32 i, min, max;
	int ret;
	unsigned long flags;

	if (dma_spec->args_count != 3) {
		dev_err(&pdev->dev, "invalid number of dma mux args\n");
		return ERR_PTR(-EINVAL);
	}

	if (dma_spec->args[0] > dmamux->dmamux_requests) {
		dev_err(&pdev->dev, "invalid mux request number: %d\n",
			dma_spec->args[0]);
		return ERR_PTR(-EINVAL);
	}

	mux = kzalloc(sizeof(*mux), GFP_KERNEL);
	if (!mux)
		return ERR_PTR(-ENOMEM);

	/* Grab the first free mux output channel under the lock */
	spin_lock_irqsave(&dmamux->lock, flags);
	mux->chan_id = find_first_zero_bit(dmamux->dma_inuse,
					   dmamux->dma_requests);
	if (mux->chan_id == dmamux->dma_requests) {
		spin_unlock_irqrestore(&dmamux->lock, flags);
		dev_err(&pdev->dev, "Run out of free DMA requests\n");
		ret = -ENOMEM;
		goto error_chan_id;
	}
	set_bit(mux->chan_id, dmamux->dma_inuse);
	spin_unlock_irqrestore(&dmamux->lock, flags);

	/*
	 * Look for DMA Master: walk the per-master request counts
	 * (dma_reqs[1..N], with dma_reqs[0] == N) until the [min, max)
	 * window containing chan_id is found; i - 1 is the master index.
	 */
	for (i = 1, min = 0, max = dmamux->dma_reqs[i];
	     i <= dmamux->dma_reqs[0];
	     min += dmamux->dma_reqs[i], max += dmamux->dma_reqs[++i])
		if (mux->chan_id < max)
			break;
	mux->master = i - 1;

	/* The of_node_put() will be done in of_dma_router_xlate function */
	dma_spec->np = of_parse_phandle(ofdma->of_node, "dma-masters", i - 1);
	if (!dma_spec->np) {
		dev_err(&pdev->dev, "can't get dma master\n");
		ret = -EINVAL;
		goto error;
	}

	/* Set dma request */
	spin_lock_irqsave(&dmamux->lock, flags);
	ret = pm_runtime_resume_and_get(&pdev->dev);
	if (ret < 0) {
		spin_unlock_irqrestore(&dmamux->lock, flags);
		goto error;
	}
	spin_unlock_irqrestore(&dmamux->lock, flags);

	mux->request = dma_spec->args[0];

	/*
	 * craft DMA spec for the master: master-local channel number,
	 * 0, original config, original features | chan_id << 16.
	 */
	dma_spec->args[3] = dma_spec->args[2] | mux->chan_id << 16;
	dma_spec->args[2] = dma_spec->args[1];
	dma_spec->args[1] = 0;
	dma_spec->args[0] = mux->chan_id - min;
	dma_spec->args_count = 4;

	/* Route the peripheral request line onto the chosen mux channel */
	stm32_dmamux_write(dmamux->iomem, STM32_DMAMUX_CCR(mux->chan_id),
			   mux->request);
	dev_dbg(&pdev->dev, "Mapping DMAMUX(%u) to DMA%u(%u)\n",
		mux->request, mux->master, mux->chan_id);

	return mux;

error:
	clear_bit(mux->chan_id, dmamux->dma_inuse);

error_chan_id:
	kfree(mux);
	return ERR_PTR(ret);
}
/* DT compatibles accepted as DMA masters sitting behind this mux */
static const struct of_device_id stm32_stm32dma_master_match[] __maybe_unused = {
	{ .compatible = "st,stm32-dma", },
	{},
};
  146. static int stm32_dmamux_probe(struct platform_device *pdev)
  147. {
  148. struct device_node *node = pdev->dev.of_node;
  149. const struct of_device_id *match;
  150. struct device_node *dma_node;
  151. struct stm32_dmamux_data *stm32_dmamux;
  152. struct resource *res;
  153. void __iomem *iomem;
  154. struct reset_control *rst;
  155. int i, count, ret;
  156. u32 dma_req;
  157. if (!node)
  158. return -ENODEV;
  159. count = device_property_count_u32(&pdev->dev, "dma-masters");
  160. if (count < 0) {
  161. dev_err(&pdev->dev, "Can't get DMA master(s) node\n");
  162. return -ENODEV;
  163. }
  164. stm32_dmamux = devm_kzalloc(&pdev->dev, sizeof(*stm32_dmamux) +
  165. sizeof(u32) * (count + 1), GFP_KERNEL);
  166. if (!stm32_dmamux)
  167. return -ENOMEM;
  168. dma_req = 0;
  169. for (i = 1; i <= count; i++) {
  170. dma_node = of_parse_phandle(node, "dma-masters", i - 1);
  171. match = of_match_node(stm32_stm32dma_master_match, dma_node);
  172. if (!match) {
  173. dev_err(&pdev->dev, "DMA master is not supported\n");
  174. of_node_put(dma_node);
  175. return -EINVAL;
  176. }
  177. if (of_property_read_u32(dma_node, "dma-requests",
  178. &stm32_dmamux->dma_reqs[i])) {
  179. dev_info(&pdev->dev,
  180. "Missing MUX output information, using %u.\n",
  181. STM32_DMAMUX_MAX_DMA_REQUESTS);
  182. stm32_dmamux->dma_reqs[i] =
  183. STM32_DMAMUX_MAX_DMA_REQUESTS;
  184. }
  185. dma_req += stm32_dmamux->dma_reqs[i];
  186. of_node_put(dma_node);
  187. }
  188. if (dma_req > STM32_DMAMUX_MAX_DMA_REQUESTS) {
  189. dev_err(&pdev->dev, "Too many DMA Master Requests to manage\n");
  190. return -ENODEV;
  191. }
  192. stm32_dmamux->dma_requests = dma_req;
  193. stm32_dmamux->dma_reqs[0] = count;
  194. if (device_property_read_u32(&pdev->dev, "dma-requests",
  195. &stm32_dmamux->dmamux_requests)) {
  196. stm32_dmamux->dmamux_requests = STM32_DMAMUX_MAX_REQUESTS;
  197. dev_warn(&pdev->dev, "DMAMUX defaulting on %u requests\n",
  198. stm32_dmamux->dmamux_requests);
  199. }
  200. pm_runtime_get_noresume(&pdev->dev);
  201. res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
  202. iomem = devm_ioremap_resource(&pdev->dev, res);
  203. if (IS_ERR(iomem))
  204. return PTR_ERR(iomem);
  205. spin_lock_init(&stm32_dmamux->lock);
  206. stm32_dmamux->clk = devm_clk_get(&pdev->dev, NULL);
  207. if (IS_ERR(stm32_dmamux->clk))
  208. return dev_err_probe(&pdev->dev, PTR_ERR(stm32_dmamux->clk),
  209. "Missing clock controller\n");
  210. ret = clk_prepare_enable(stm32_dmamux->clk);
  211. if (ret < 0) {
  212. dev_err(&pdev->dev, "clk_prep_enable error: %d\n", ret);
  213. return ret;
  214. }
  215. rst = devm_reset_control_get(&pdev->dev, NULL);
  216. if (IS_ERR(rst)) {
  217. ret = PTR_ERR(rst);
  218. if (ret == -EPROBE_DEFER)
  219. goto err_clk;
  220. } else if (count > 1) { /* Don't reset if there is only one dma-master */
  221. reset_control_assert(rst);
  222. udelay(2);
  223. reset_control_deassert(rst);
  224. }
  225. stm32_dmamux->iomem = iomem;
  226. stm32_dmamux->dmarouter.dev = &pdev->dev;
  227. stm32_dmamux->dmarouter.route_free = stm32_dmamux_free;
  228. platform_set_drvdata(pdev, stm32_dmamux);
  229. pm_runtime_set_active(&pdev->dev);
  230. pm_runtime_enable(&pdev->dev);
  231. pm_runtime_get_noresume(&pdev->dev);
  232. /* Reset the dmamux */
  233. for (i = 0; i < stm32_dmamux->dma_requests; i++)
  234. stm32_dmamux_write(stm32_dmamux->iomem, STM32_DMAMUX_CCR(i), 0);
  235. pm_runtime_put(&pdev->dev);
  236. ret = of_dma_router_register(node, stm32_dmamux_route_allocate,
  237. &stm32_dmamux->dmarouter);
  238. if (ret)
  239. goto pm_disable;
  240. return 0;
  241. pm_disable:
  242. pm_runtime_disable(&pdev->dev);
  243. err_clk:
  244. clk_disable_unprepare(stm32_dmamux->clk);
  245. return ret;
  246. }
  247. #ifdef CONFIG_PM
  248. static int stm32_dmamux_runtime_suspend(struct device *dev)
  249. {
  250. struct platform_device *pdev = to_platform_device(dev);
  251. struct stm32_dmamux_data *stm32_dmamux = platform_get_drvdata(pdev);
  252. clk_disable_unprepare(stm32_dmamux->clk);
  253. return 0;
  254. }
  255. static int stm32_dmamux_runtime_resume(struct device *dev)
  256. {
  257. struct platform_device *pdev = to_platform_device(dev);
  258. struct stm32_dmamux_data *stm32_dmamux = platform_get_drvdata(pdev);
  259. int ret;
  260. ret = clk_prepare_enable(stm32_dmamux->clk);
  261. if (ret) {
  262. dev_err(&pdev->dev, "failed to prepare_enable clock\n");
  263. return ret;
  264. }
  265. return 0;
  266. }
  267. #endif
  268. #ifdef CONFIG_PM_SLEEP
  269. static int stm32_dmamux_suspend(struct device *dev)
  270. {
  271. struct platform_device *pdev = to_platform_device(dev);
  272. struct stm32_dmamux_data *stm32_dmamux = platform_get_drvdata(pdev);
  273. int i, ret;
  274. ret = pm_runtime_resume_and_get(dev);
  275. if (ret < 0)
  276. return ret;
  277. for (i = 0; i < stm32_dmamux->dma_requests; i++)
  278. stm32_dmamux->ccr[i] = stm32_dmamux_read(stm32_dmamux->iomem,
  279. STM32_DMAMUX_CCR(i));
  280. pm_runtime_put_sync(dev);
  281. pm_runtime_force_suspend(dev);
  282. return 0;
  283. }
  284. static int stm32_dmamux_resume(struct device *dev)
  285. {
  286. struct platform_device *pdev = to_platform_device(dev);
  287. struct stm32_dmamux_data *stm32_dmamux = platform_get_drvdata(pdev);
  288. int i, ret;
  289. ret = pm_runtime_force_resume(dev);
  290. if (ret < 0)
  291. return ret;
  292. ret = pm_runtime_resume_and_get(dev);
  293. if (ret < 0)
  294. return ret;
  295. for (i = 0; i < stm32_dmamux->dma_requests; i++)
  296. stm32_dmamux_write(stm32_dmamux->iomem, STM32_DMAMUX_CCR(i),
  297. stm32_dmamux->ccr[i]);
  298. pm_runtime_put_sync(dev);
  299. return 0;
  300. }
  301. #endif
/* Sleep ops save/restore the routing; runtime ops gate the clock */
static const struct dev_pm_ops stm32_dmamux_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(stm32_dmamux_suspend, stm32_dmamux_resume)
	SET_RUNTIME_PM_OPS(stm32_dmamux_runtime_suspend,
			   stm32_dmamux_runtime_resume, NULL)
};
/* DT match table for the DMAMUX block itself */
static const struct of_device_id stm32_dmamux_match[] = {
	{ .compatible = "st,stm32h7-dmamux" },
	{},
};
/* No .remove: the router cannot be unbound once clients hold routes */
static struct platform_driver stm32_dmamux_driver = {
	.probe	= stm32_dmamux_probe,
	.driver = {
		.name = "stm32-dmamux",
		.of_match_table = stm32_dmamux_match,
		.pm = &stm32_dmamux_pm_ops,
	},
};
static int __init stm32_dmamux_init(void)
{
	return platform_driver_register(&stm32_dmamux_driver);
}
/* arch_initcall: register early so DMA client drivers find the router */
arch_initcall(stm32_dmamux_init);

MODULE_DESCRIPTION("DMA Router driver for STM32 DMA MUX");
MODULE_AUTHOR("M'boumba Cedric Madianga <[email protected]>");
MODULE_AUTHOR("Pierre-Yves Mordret <[email protected]>");
MODULE_LICENSE("GPL v2");