// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright (C) STMicroelectronics SA 2017
 * Author(s): M'boumba Cedric Madianga <[email protected]>
 *            Pierre-Yves Mordret <[email protected]>
 *
 * Driver for STM32 MDMA controller
 *
 * Inspired by stm32-dma.c and dma-jz4780.c
 */

#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/iopoll.h>
#include <linux/jiffies.h>
#include <linux/list.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/slab.h>

#include "virt-dma.h"

#define STM32_MDMA_GISR0		0x0000 /* MDMA Int Status Reg 1 */

/* MDMA Channel x interrupt/status register */
#define STM32_MDMA_CISR(x)		(0x40 + 0x40 * (x)) /* x = 0..62 */
#define STM32_MDMA_CISR_CRQA		BIT(16)
#define STM32_MDMA_CISR_TCIF		BIT(4)
#define STM32_MDMA_CISR_BTIF		BIT(3)
#define STM32_MDMA_CISR_BRTIF		BIT(2)
#define STM32_MDMA_CISR_CTCIF		BIT(1)
#define STM32_MDMA_CISR_TEIF		BIT(0)

/* MDMA Channel x interrupt flag clear register */
#define STM32_MDMA_CIFCR(x)		(0x44 + 0x40 * (x))
#define STM32_MDMA_CIFCR_CLTCIF		BIT(4)
#define STM32_MDMA_CIFCR_CBTIF		BIT(3)
#define STM32_MDMA_CIFCR_CBRTIF		BIT(2)
#define STM32_MDMA_CIFCR_CCTCIF		BIT(1)
#define STM32_MDMA_CIFCR_CTEIF		BIT(0)
#define STM32_MDMA_CIFCR_CLEAR_ALL	(STM32_MDMA_CIFCR_CLTCIF \
					 | STM32_MDMA_CIFCR_CBTIF \
					 | STM32_MDMA_CIFCR_CBRTIF \
					 | STM32_MDMA_CIFCR_CCTCIF \
					 | STM32_MDMA_CIFCR_CTEIF)

/* MDMA Channel x error status register */
#define STM32_MDMA_CESR(x)		(0x48 + 0x40 * (x))
#define STM32_MDMA_CESR_BSE		BIT(11)
#define STM32_MDMA_CESR_ASR		BIT(10)
#define STM32_MDMA_CESR_TEMD		BIT(9)
#define STM32_MDMA_CESR_TELD		BIT(8)
#define STM32_MDMA_CESR_TED		BIT(7)
#define STM32_MDMA_CESR_TEA_MASK	GENMASK(6, 0)

/* MDMA Channel x control register */
#define STM32_MDMA_CCR(x)		(0x4C + 0x40 * (x))
#define STM32_MDMA_CCR_SWRQ		BIT(16)
#define STM32_MDMA_CCR_WEX		BIT(14)
#define STM32_MDMA_CCR_HEX		BIT(13)
#define STM32_MDMA_CCR_BEX		BIT(12)
#define STM32_MDMA_CCR_SM		BIT(8)
#define STM32_MDMA_CCR_PL_MASK		GENMASK(7, 6)
#define STM32_MDMA_CCR_PL(n)		FIELD_PREP(STM32_MDMA_CCR_PL_MASK, (n))
#define STM32_MDMA_CCR_TCIE		BIT(5)
#define STM32_MDMA_CCR_BTIE		BIT(4)
#define STM32_MDMA_CCR_BRTIE		BIT(3)
#define STM32_MDMA_CCR_CTCIE		BIT(2)
#define STM32_MDMA_CCR_TEIE		BIT(1)
#define STM32_MDMA_CCR_EN		BIT(0)
#define STM32_MDMA_CCR_IRQ_MASK		(STM32_MDMA_CCR_TCIE \
					 | STM32_MDMA_CCR_BTIE \
					 | STM32_MDMA_CCR_BRTIE \
					 | STM32_MDMA_CCR_CTCIE \
					 | STM32_MDMA_CCR_TEIE)

/* MDMA Channel x transfer configuration register */
#define STM32_MDMA_CTCR(x)		(0x50 + 0x40 * (x))
#define STM32_MDMA_CTCR_BWM		BIT(31)
#define STM32_MDMA_CTCR_SWRM		BIT(30)
#define STM32_MDMA_CTCR_TRGM_MSK	GENMASK(29, 28)
#define STM32_MDMA_CTCR_TRGM(n)		FIELD_PREP(STM32_MDMA_CTCR_TRGM_MSK, (n))
#define STM32_MDMA_CTCR_TRGM_GET(n)	FIELD_GET(STM32_MDMA_CTCR_TRGM_MSK, (n))
#define STM32_MDMA_CTCR_PAM_MASK	GENMASK(27, 26)
#define STM32_MDMA_CTCR_PAM(n)		FIELD_PREP(STM32_MDMA_CTCR_PAM_MASK, (n))
#define STM32_MDMA_CTCR_PKE		BIT(25)
#define STM32_MDMA_CTCR_TLEN_MSK	GENMASK(24, 18)
#define STM32_MDMA_CTCR_TLEN(n)		FIELD_PREP(STM32_MDMA_CTCR_TLEN_MSK, (n))
#define STM32_MDMA_CTCR_TLEN_GET(n)	FIELD_GET(STM32_MDMA_CTCR_TLEN_MSK, (n))
#define STM32_MDMA_CTCR_LEN2_MSK	GENMASK(25, 18)
#define STM32_MDMA_CTCR_LEN2(n)		FIELD_PREP(STM32_MDMA_CTCR_LEN2_MSK, (n))
#define STM32_MDMA_CTCR_LEN2_GET(n)	FIELD_GET(STM32_MDMA_CTCR_LEN2_MSK, (n))
#define STM32_MDMA_CTCR_DBURST_MASK	GENMASK(17, 15)
#define STM32_MDMA_CTCR_DBURST(n)	FIELD_PREP(STM32_MDMA_CTCR_DBURST_MASK, (n))
#define STM32_MDMA_CTCR_SBURST_MASK	GENMASK(14, 12)
#define STM32_MDMA_CTCR_SBURST(n)	FIELD_PREP(STM32_MDMA_CTCR_SBURST_MASK, (n))
#define STM32_MDMA_CTCR_DINCOS_MASK	GENMASK(11, 10)
#define STM32_MDMA_CTCR_DINCOS(n)	FIELD_PREP(STM32_MDMA_CTCR_DINCOS_MASK, (n))
#define STM32_MDMA_CTCR_SINCOS_MASK	GENMASK(9, 8)
#define STM32_MDMA_CTCR_SINCOS(n)	FIELD_PREP(STM32_MDMA_CTCR_SINCOS_MASK, (n))
#define STM32_MDMA_CTCR_DSIZE_MASK	GENMASK(7, 6)
#define STM32_MDMA_CTCR_DSIZE(n)	FIELD_PREP(STM32_MDMA_CTCR_DSIZE_MASK, (n))
#define STM32_MDMA_CTCR_SSIZE_MASK	GENMASK(5, 4)
#define STM32_MDMA_CTCR_SSIZE(n)	FIELD_PREP(STM32_MDMA_CTCR_SSIZE_MASK, (n))
#define STM32_MDMA_CTCR_DINC_MASK	GENMASK(3, 2)
#define STM32_MDMA_CTCR_DINC(n)		FIELD_PREP(STM32_MDMA_CTCR_DINC_MASK, (n))
#define STM32_MDMA_CTCR_SINC_MASK	GENMASK(1, 0)
#define STM32_MDMA_CTCR_SINC(n)		FIELD_PREP(STM32_MDMA_CTCR_SINC_MASK, (n))
#define STM32_MDMA_CTCR_CFG_MASK	(STM32_MDMA_CTCR_SINC_MASK \
					 | STM32_MDMA_CTCR_DINC_MASK \
					 | STM32_MDMA_CTCR_SINCOS_MASK \
					 | STM32_MDMA_CTCR_DINCOS_MASK \
					 | STM32_MDMA_CTCR_LEN2_MSK \
					 | STM32_MDMA_CTCR_TRGM_MSK)

/* MDMA Channel x block number of data register */
#define STM32_MDMA_CBNDTR(x)		(0x54 + 0x40 * (x))
#define STM32_MDMA_CBNDTR_BRC_MK	GENMASK(31, 20)
#define STM32_MDMA_CBNDTR_BRC(n)	FIELD_PREP(STM32_MDMA_CBNDTR_BRC_MK, (n))
#define STM32_MDMA_CBNDTR_BRC_GET(n)	FIELD_GET(STM32_MDMA_CBNDTR_BRC_MK, (n))
#define STM32_MDMA_CBNDTR_BRDUM		BIT(19)
#define STM32_MDMA_CBNDTR_BRSUM		BIT(18)
#define STM32_MDMA_CBNDTR_BNDT_MASK	GENMASK(16, 0)
#define STM32_MDMA_CBNDTR_BNDT(n)	FIELD_PREP(STM32_MDMA_CBNDTR_BNDT_MASK, (n))

/* MDMA Channel x source address register */
#define STM32_MDMA_CSAR(x)		(0x58 + 0x40 * (x))

/* MDMA Channel x destination address register */
#define STM32_MDMA_CDAR(x)		(0x5C + 0x40 * (x))

/* MDMA Channel x block repeat address update register */
#define STM32_MDMA_CBRUR(x)		(0x60 + 0x40 * (x))
#define STM32_MDMA_CBRUR_DUV_MASK	GENMASK(31, 16)
#define STM32_MDMA_CBRUR_DUV(n)		FIELD_PREP(STM32_MDMA_CBRUR_DUV_MASK, (n))
#define STM32_MDMA_CBRUR_SUV_MASK	GENMASK(15, 0)
#define STM32_MDMA_CBRUR_SUV(n)		FIELD_PREP(STM32_MDMA_CBRUR_SUV_MASK, (n))

/* MDMA Channel x link address register */
#define STM32_MDMA_CLAR(x)		(0x64 + 0x40 * (x))

/* MDMA Channel x trigger and bus selection register */
#define STM32_MDMA_CTBR(x)		(0x68 + 0x40 * (x))
#define STM32_MDMA_CTBR_DBUS		BIT(17)
#define STM32_MDMA_CTBR_SBUS		BIT(16)
#define STM32_MDMA_CTBR_TSEL_MASK	GENMASK(5, 0)
#define STM32_MDMA_CTBR_TSEL(n)		FIELD_PREP(STM32_MDMA_CTBR_TSEL_MASK, (n))

/* MDMA Channel x mask address register */
#define STM32_MDMA_CMAR(x)		(0x70 + 0x40 * (x))

/* MDMA Channel x mask data register */
#define STM32_MDMA_CMDR(x)		(0x74 + 0x40 * (x))

#define STM32_MDMA_MAX_BUF_LEN		128
#define STM32_MDMA_MAX_BLOCK_LEN	65536
#define STM32_MDMA_MAX_CHANNELS		32
#define STM32_MDMA_MAX_REQUESTS		256
#define STM32_MDMA_MAX_BURST		128
#define STM32_MDMA_VERY_HIGH_PRIORITY	0x3

enum stm32_mdma_trigger_mode {
	STM32_MDMA_BUFFER,
	STM32_MDMA_BLOCK,
	STM32_MDMA_BLOCK_REP,
	STM32_MDMA_LINKED_LIST,
};

enum stm32_mdma_width {
	STM32_MDMA_BYTE,
	STM32_MDMA_HALF_WORD,
	STM32_MDMA_WORD,
	STM32_MDMA_DOUBLE_WORD,
};

enum stm32_mdma_inc_mode {
	STM32_MDMA_FIXED = 0,
	STM32_MDMA_INC = 2,
	STM32_MDMA_DEC = 3,
};

struct stm32_mdma_chan_config {
	u32 request;
	u32 priority_level;
	u32 transfer_config;
	u32 mask_addr;
	u32 mask_data;
	bool m2m_hw; /* True when MDMA is triggered by STM32 DMA */
};
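
/*
 * Hardware descriptor used for linked-list transfers. The field order mirrors
 * the per-channel register block from CTCR (0x50) up to CMDR (0x74), with a
 * dummy word for the reserved offset at 0x6C, so the controller can reload a
 * complete channel configuration from memory when it follows the link
 * address (CLAR).
 */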
struct stm32_mdma_hwdesc {
	u32 ctcr;
	u32 cbndtr;
	u32 csar;
	u32 cdar;
	u32 cbrur;
	u32 clar;
	u32 ctbr;
	u32 dummy;
	u32 cmar;
	u32 cmdr;
} __aligned(64);

struct stm32_mdma_desc_node {
	struct stm32_mdma_hwdesc *hwdesc;
	dma_addr_t hwdesc_phys;
};

struct stm32_mdma_desc {
	struct virt_dma_desc vdesc;
	u32 ccr;
	bool cyclic;
	u32 count;
	struct stm32_mdma_desc_node node[];
};

struct stm32_mdma_dma_config {
	u32 request;	/* STM32 DMA channel stream id, triggering MDMA */
	u32 cmar;	/* STM32 DMA interrupt flag clear register address */
	u32 cmdr;	/* STM32 DMA Transfer Complete flag */
};

struct stm32_mdma_chan {
	struct virt_dma_chan vchan;
	struct dma_pool *desc_pool;
	u32 id;
	struct stm32_mdma_desc *desc;
	u32 curr_hwdesc;
	struct dma_slave_config dma_config;
	struct stm32_mdma_chan_config chan_config;
	bool busy;
	u32 mem_burst;
	u32 mem_width;
};

struct stm32_mdma_device {
	struct dma_device ddev;
	void __iomem *base;
	struct clk *clk;
	int irq;
	u32 nr_channels;
	u32 nr_requests;
	u32 nr_ahb_addr_masks;
	u32 chan_reserved;
	struct stm32_mdma_chan chan[STM32_MDMA_MAX_CHANNELS];
	u32 ahb_addr_masks[];
};

static struct stm32_mdma_device *stm32_mdma_get_dev(
	struct stm32_mdma_chan *chan)
{
	return container_of(chan->vchan.chan.device, struct stm32_mdma_device,
			    ddev);
}

static struct stm32_mdma_chan *to_stm32_mdma_chan(struct dma_chan *c)
{
	return container_of(c, struct stm32_mdma_chan, vchan.chan);
}

static struct stm32_mdma_desc *to_stm32_mdma_desc(struct virt_dma_desc *vdesc)
{
	return container_of(vdesc, struct stm32_mdma_desc, vdesc);
}

static struct device *chan2dev(struct stm32_mdma_chan *chan)
{
	return &chan->vchan.chan.dev->device;
}

static struct device *mdma2dev(struct stm32_mdma_device *mdma_dev)
{
	return mdma_dev->ddev.dev;
}

static u32 stm32_mdma_read(struct stm32_mdma_device *dmadev, u32 reg)
{
	return readl_relaxed(dmadev->base + reg);
}

static void stm32_mdma_write(struct stm32_mdma_device *dmadev, u32 reg, u32 val)
{
	writel_relaxed(val, dmadev->base + reg);
}

static void stm32_mdma_set_bits(struct stm32_mdma_device *dmadev, u32 reg,
				u32 mask)
{
	void __iomem *addr = dmadev->base + reg;

	writel_relaxed(readl_relaxed(addr) | mask, addr);
}

static void stm32_mdma_clr_bits(struct stm32_mdma_device *dmadev, u32 reg,
				u32 mask)
{
	void __iomem *addr = dmadev->base + reg;

	writel_relaxed(readl_relaxed(addr) & ~mask, addr);
}

static struct stm32_mdma_desc *stm32_mdma_alloc_desc(
		struct stm32_mdma_chan *chan, u32 count)
{
	struct stm32_mdma_desc *desc;
	int i;

	desc = kzalloc(struct_size(desc, node, count), GFP_NOWAIT);
	if (!desc)
		return NULL;

	for (i = 0; i < count; i++) {
		desc->node[i].hwdesc =
			dma_pool_alloc(chan->desc_pool, GFP_NOWAIT,
				       &desc->node[i].hwdesc_phys);
		if (!desc->node[i].hwdesc)
			goto err;
	}

	desc->count = count;

	return desc;

err:
	dev_err(chan2dev(chan), "Failed to allocate descriptor\n");
	while (--i >= 0)
		dma_pool_free(chan->desc_pool, desc->node[i].hwdesc,
			      desc->node[i].hwdesc_phys);
	kfree(desc);
	return NULL;
}

static void stm32_mdma_desc_free(struct virt_dma_desc *vdesc)
{
	struct stm32_mdma_desc *desc = to_stm32_mdma_desc(vdesc);
	struct stm32_mdma_chan *chan = to_stm32_mdma_chan(vdesc->tx.chan);
	int i;

	for (i = 0; i < desc->count; i++)
		dma_pool_free(chan->desc_pool, desc->node[i].hwdesc,
			      desc->node[i].hwdesc_phys);
	kfree(desc);
}

static int stm32_mdma_get_width(struct stm32_mdma_chan *chan,
				enum dma_slave_buswidth width)
{
	switch (width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
	case DMA_SLAVE_BUSWIDTH_8_BYTES:
		return ffs(width) - 1;
	default:
		dev_err(chan2dev(chan), "Dma bus width %i not supported\n",
			width);
		return -EINVAL;
	}
}
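
/*
 * Pick the widest bus width (up to 8 bytes) such that both the address and
 * the buffer length are aligned on it and the buffer transfer length (tlen)
 * can hold at least one data beat of that width.
 */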
static enum dma_slave_buswidth stm32_mdma_get_max_width(dma_addr_t addr,
							u32 buf_len, u32 tlen)
{
	enum dma_slave_buswidth max_width = DMA_SLAVE_BUSWIDTH_8_BYTES;

	for (max_width = DMA_SLAVE_BUSWIDTH_8_BYTES;
	     max_width > DMA_SLAVE_BUSWIDTH_1_BYTE;
	     max_width >>= 1) {
		/*
		 * Address and buffer length both have to be aligned on
		 * bus width
		 */
		if ((((buf_len | addr) & (max_width - 1)) == 0) &&
		    tlen >= max_width)
			break;
	}

	return max_width;
}
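
/*
 * Compute the best burst as the largest power-of-two number of bytes that
 * divides both the buffer length and the buffer transfer length, capped by
 * the maximum burst size in bytes, and converted to beats of 'width' bytes.
 * At least one beat is always returned.
 */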
static u32 stm32_mdma_get_best_burst(u32 buf_len, u32 tlen, u32 max_burst,
				     enum dma_slave_buswidth width)
{
	u32 best_burst;

	best_burst = min((u32)1 << __ffs(tlen | buf_len),
			 max_burst * width) / width;

	return (best_burst > 0) ? best_burst : 1;
}

static int stm32_mdma_disable_chan(struct stm32_mdma_chan *chan)
{
	struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan);
	u32 ccr, cisr, id, reg;
	int ret;

	id = chan->id;
	reg = STM32_MDMA_CCR(id);

	/* Disable interrupts */
	stm32_mdma_clr_bits(dmadev, reg, STM32_MDMA_CCR_IRQ_MASK);

	ccr = stm32_mdma_read(dmadev, reg);
	if (ccr & STM32_MDMA_CCR_EN) {
		stm32_mdma_clr_bits(dmadev, reg, STM32_MDMA_CCR_EN);

		/* Ensure that any ongoing transfer has been completed */
		ret = readl_relaxed_poll_timeout_atomic(
				dmadev->base + STM32_MDMA_CISR(id), cisr,
				(cisr & STM32_MDMA_CISR_CTCIF), 10, 1000);
		if (ret) {
			dev_err(chan2dev(chan), "%s: timeout!\n", __func__);
			return -EBUSY;
		}
	}

	return 0;
}

static void stm32_mdma_stop(struct stm32_mdma_chan *chan)
{
	struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan);
	u32 status;
	int ret;

	/* Disable DMA */
	ret = stm32_mdma_disable_chan(chan);
	if (ret < 0)
		return;

	/* Clear interrupt status if it is there */
	status = stm32_mdma_read(dmadev, STM32_MDMA_CISR(chan->id));
	if (status) {
		dev_dbg(chan2dev(chan), "%s(): clearing interrupt: 0x%08x\n",
			__func__, status);
		stm32_mdma_set_bits(dmadev, STM32_MDMA_CIFCR(chan->id), status);
	}

	chan->busy = false;
}

static void stm32_mdma_set_bus(struct stm32_mdma_device *dmadev, u32 *ctbr,
			       u32 ctbr_mask, u32 src_addr)
{
	u32 mask;
	int i;

	/* Check if memory device is on AHB or AXI */
	*ctbr &= ~ctbr_mask;
	mask = src_addr & 0xF0000000;
	for (i = 0; i < dmadev->nr_ahb_addr_masks; i++) {
		if (mask == dmadev->ahb_addr_masks[i]) {
			*ctbr |= ctbr_mask;
			break;
		}
	}
}
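
/*
 * Compute the CCR/CTCR/CTBR values for a device transfer in the given
 * direction: the device side width/burst come from the slave config (or are
 * derived from the buffer when the transfer is triggered by the STM32 DMA),
 * the memory side width/burst are derived from the buffer address and length,
 * and packing (PKE) is enabled when the two sides use different bus widths.
 * The device side address is also written to CDAR/CSAR here.
 */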
static int stm32_mdma_set_xfer_param(struct stm32_mdma_chan *chan,
				     enum dma_transfer_direction direction,
				     u32 *mdma_ccr, u32 *mdma_ctcr,
				     u32 *mdma_ctbr, dma_addr_t addr,
				     u32 buf_len)
{
	struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan);
	struct stm32_mdma_chan_config *chan_config = &chan->chan_config;
	enum dma_slave_buswidth src_addr_width, dst_addr_width;
	phys_addr_t src_addr, dst_addr;
	int src_bus_width, dst_bus_width;
	u32 src_maxburst, dst_maxburst, src_best_burst, dst_best_burst;
	u32 ccr, ctcr, ctbr, tlen;

	src_addr_width = chan->dma_config.src_addr_width;
	dst_addr_width = chan->dma_config.dst_addr_width;
	src_maxburst = chan->dma_config.src_maxburst;
	dst_maxburst = chan->dma_config.dst_maxburst;

	ccr = stm32_mdma_read(dmadev, STM32_MDMA_CCR(chan->id)) & ~STM32_MDMA_CCR_EN;
	ctcr = stm32_mdma_read(dmadev, STM32_MDMA_CTCR(chan->id));
	ctbr = stm32_mdma_read(dmadev, STM32_MDMA_CTBR(chan->id));

	/* Enable HW request mode */
	ctcr &= ~STM32_MDMA_CTCR_SWRM;

	/* Set DINC, SINC, DINCOS, SINCOS, TRGM and TLEN as retrieved from DT */
	ctcr &= ~STM32_MDMA_CTCR_CFG_MASK;
	ctcr |= chan_config->transfer_config & STM32_MDMA_CTCR_CFG_MASK;

	/*
	 * For buffer transfer length (TLEN) we have to set
	 * the number of bytes - 1 in CTCR register
	 */
	tlen = STM32_MDMA_CTCR_LEN2_GET(ctcr);
	ctcr &= ~STM32_MDMA_CTCR_LEN2_MSK;
	ctcr |= STM32_MDMA_CTCR_TLEN((tlen - 1));

	/* Disable Pack Enable */
	ctcr &= ~STM32_MDMA_CTCR_PKE;

	/* Check burst size constraints */
	if (src_maxburst * src_addr_width > STM32_MDMA_MAX_BURST ||
	    dst_maxburst * dst_addr_width > STM32_MDMA_MAX_BURST) {
		dev_err(chan2dev(chan),
			"burst size * bus width higher than %d bytes\n",
			STM32_MDMA_MAX_BURST);
		return -EINVAL;
	}

	if ((!is_power_of_2(src_maxburst) && src_maxburst > 0) ||
	    (!is_power_of_2(dst_maxburst) && dst_maxburst > 0)) {
		dev_err(chan2dev(chan), "burst size must be a power of 2\n");
		return -EINVAL;
	}

	/*
	 * Configure channel control:
	 * - Clear SW request as in this case this is a HW one
	 * - Clear WEX, HEX and BEX bits
	 * - Set priority level
	 */
	ccr &= ~(STM32_MDMA_CCR_SWRQ | STM32_MDMA_CCR_WEX | STM32_MDMA_CCR_HEX |
		 STM32_MDMA_CCR_BEX | STM32_MDMA_CCR_PL_MASK);
	ccr |= STM32_MDMA_CCR_PL(chan_config->priority_level);

	/* Configure Trigger selection */
	ctbr &= ~STM32_MDMA_CTBR_TSEL_MASK;
	ctbr |= STM32_MDMA_CTBR_TSEL(chan_config->request);

	switch (direction) {
	case DMA_MEM_TO_DEV:
		dst_addr = chan->dma_config.dst_addr;

		/* Set device data size */
		if (chan_config->m2m_hw)
			dst_addr_width = stm32_mdma_get_max_width(dst_addr, buf_len,
								  STM32_MDMA_MAX_BUF_LEN);
		dst_bus_width = stm32_mdma_get_width(chan, dst_addr_width);
		if (dst_bus_width < 0)
			return dst_bus_width;
		ctcr &= ~STM32_MDMA_CTCR_DSIZE_MASK;
		ctcr |= STM32_MDMA_CTCR_DSIZE(dst_bus_width);
		if (chan_config->m2m_hw) {
			ctcr &= ~STM32_MDMA_CTCR_DINCOS_MASK;
			ctcr |= STM32_MDMA_CTCR_DINCOS(dst_bus_width);
		}

		/* Set device burst value */
		if (chan_config->m2m_hw)
			dst_maxburst = STM32_MDMA_MAX_BUF_LEN / dst_addr_width;

		dst_best_burst = stm32_mdma_get_best_burst(buf_len, tlen,
							   dst_maxburst,
							   dst_addr_width);
		chan->mem_burst = dst_best_burst;
		ctcr &= ~STM32_MDMA_CTCR_DBURST_MASK;
		ctcr |= STM32_MDMA_CTCR_DBURST((ilog2(dst_best_burst)));

		/* Set memory data size */
		src_addr_width = stm32_mdma_get_max_width(addr, buf_len, tlen);
		chan->mem_width = src_addr_width;
		src_bus_width = stm32_mdma_get_width(chan, src_addr_width);
		if (src_bus_width < 0)
			return src_bus_width;
		ctcr &= ~(STM32_MDMA_CTCR_SSIZE_MASK |
			  STM32_MDMA_CTCR_SINCOS_MASK);
		ctcr |= STM32_MDMA_CTCR_SSIZE(src_bus_width) |
			STM32_MDMA_CTCR_SINCOS(src_bus_width);

		/* Set memory burst value */
		src_maxburst = STM32_MDMA_MAX_BUF_LEN / src_addr_width;
		src_best_burst = stm32_mdma_get_best_burst(buf_len, tlen,
							   src_maxburst,
							   src_addr_width);
		chan->mem_burst = src_best_burst;
		ctcr &= ~STM32_MDMA_CTCR_SBURST_MASK;
		ctcr |= STM32_MDMA_CTCR_SBURST((ilog2(src_best_burst)));

		/* Select bus */
		stm32_mdma_set_bus(dmadev, &ctbr, STM32_MDMA_CTBR_DBUS,
				   dst_addr);

		if (dst_bus_width != src_bus_width)
			ctcr |= STM32_MDMA_CTCR_PKE;

		/* Set destination address */
		stm32_mdma_write(dmadev, STM32_MDMA_CDAR(chan->id), dst_addr);
		break;

	case DMA_DEV_TO_MEM:
		src_addr = chan->dma_config.src_addr;

		/* Set device data size */
		if (chan_config->m2m_hw)
			src_addr_width = stm32_mdma_get_max_width(src_addr, buf_len,
								  STM32_MDMA_MAX_BUF_LEN);
		src_bus_width = stm32_mdma_get_width(chan, src_addr_width);
		if (src_bus_width < 0)
			return src_bus_width;
		ctcr &= ~STM32_MDMA_CTCR_SSIZE_MASK;
		ctcr |= STM32_MDMA_CTCR_SSIZE(src_bus_width);
		if (chan_config->m2m_hw) {
			ctcr &= ~STM32_MDMA_CTCR_SINCOS_MASK;
			ctcr |= STM32_MDMA_CTCR_SINCOS(src_bus_width);
		}

		/* Set device burst value */
		if (chan_config->m2m_hw)
			src_maxburst = STM32_MDMA_MAX_BUF_LEN / src_addr_width;

		src_best_burst = stm32_mdma_get_best_burst(buf_len, tlen,
							   src_maxburst,
							   src_addr_width);
		ctcr &= ~STM32_MDMA_CTCR_SBURST_MASK;
		ctcr |= STM32_MDMA_CTCR_SBURST((ilog2(src_best_burst)));

		/* Set memory data size */
		dst_addr_width = stm32_mdma_get_max_width(addr, buf_len, tlen);
		chan->mem_width = dst_addr_width;
		dst_bus_width = stm32_mdma_get_width(chan, dst_addr_width);
		if (dst_bus_width < 0)
			return dst_bus_width;
		ctcr &= ~(STM32_MDMA_CTCR_DSIZE_MASK |
			  STM32_MDMA_CTCR_DINCOS_MASK);
		ctcr |= STM32_MDMA_CTCR_DSIZE(dst_bus_width) |
			STM32_MDMA_CTCR_DINCOS(dst_bus_width);

		/* Set memory burst value */
		dst_maxburst = STM32_MDMA_MAX_BUF_LEN / dst_addr_width;
		dst_best_burst = stm32_mdma_get_best_burst(buf_len, tlen,
							   dst_maxburst,
							   dst_addr_width);
		ctcr &= ~STM32_MDMA_CTCR_DBURST_MASK;
		ctcr |= STM32_MDMA_CTCR_DBURST((ilog2(dst_best_burst)));

		/* Select bus */
		stm32_mdma_set_bus(dmadev, &ctbr, STM32_MDMA_CTBR_SBUS,
				   src_addr);

		if (dst_bus_width != src_bus_width)
			ctcr |= STM32_MDMA_CTCR_PKE;

		/* Set source address */
		stm32_mdma_write(dmadev, STM32_MDMA_CSAR(chan->id), src_addr);
		break;

	default:
		dev_err(chan2dev(chan), "Dma direction is not supported\n");
		return -EINVAL;
	}

	*mdma_ccr = ccr;
	*mdma_ctcr = ctcr;
	*mdma_ctbr = ctbr;

	return 0;
}

static void stm32_mdma_dump_hwdesc(struct stm32_mdma_chan *chan,
				   struct stm32_mdma_desc_node *node)
{
	dev_dbg(chan2dev(chan), "hwdesc: %pad\n", &node->hwdesc_phys);
	dev_dbg(chan2dev(chan), "CTCR: 0x%08x\n", node->hwdesc->ctcr);
	dev_dbg(chan2dev(chan), "CBNDTR: 0x%08x\n", node->hwdesc->cbndtr);
	dev_dbg(chan2dev(chan), "CSAR: 0x%08x\n", node->hwdesc->csar);
	dev_dbg(chan2dev(chan), "CDAR: 0x%08x\n", node->hwdesc->cdar);
	dev_dbg(chan2dev(chan), "CBRUR: 0x%08x\n", node->hwdesc->cbrur);
	dev_dbg(chan2dev(chan), "CLAR: 0x%08x\n", node->hwdesc->clar);
	dev_dbg(chan2dev(chan), "CTBR: 0x%08x\n", node->hwdesc->ctbr);
	dev_dbg(chan2dev(chan), "CMAR: 0x%08x\n", node->hwdesc->cmar);
	dev_dbg(chan2dev(chan), "CMDR: 0x%08x\n\n", node->hwdesc->cmdr);
}
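
/*
 * Fill hardware descriptor 'count' of the software descriptor and chain it:
 * CLAR points to the next node, back to the first node for a cyclic
 * transfer, or is zero on the last node of a non-cyclic transfer.
 */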
static void stm32_mdma_setup_hwdesc(struct stm32_mdma_chan *chan,
				    struct stm32_mdma_desc *desc,
				    enum dma_transfer_direction dir, u32 count,
				    dma_addr_t src_addr, dma_addr_t dst_addr,
				    u32 len, u32 ctcr, u32 ctbr, bool is_last,
				    bool is_first, bool is_cyclic)
{
	struct stm32_mdma_chan_config *config = &chan->chan_config;
	struct stm32_mdma_hwdesc *hwdesc;
	u32 next = count + 1;

	hwdesc = desc->node[count].hwdesc;
	hwdesc->ctcr = ctcr;
	hwdesc->cbndtr &= ~(STM32_MDMA_CBNDTR_BRC_MK |
			    STM32_MDMA_CBNDTR_BRDUM |
			    STM32_MDMA_CBNDTR_BRSUM |
			    STM32_MDMA_CBNDTR_BNDT_MASK);
	hwdesc->cbndtr |= STM32_MDMA_CBNDTR_BNDT(len);
	hwdesc->csar = src_addr;
	hwdesc->cdar = dst_addr;
	hwdesc->cbrur = 0;
	hwdesc->ctbr = ctbr;
	hwdesc->cmar = config->mask_addr;
	hwdesc->cmdr = config->mask_data;

	if (is_last) {
		if (is_cyclic)
			hwdesc->clar = desc->node[0].hwdesc_phys;
		else
			hwdesc->clar = 0;
	} else {
		hwdesc->clar = desc->node[next].hwdesc_phys;
	}

	stm32_mdma_dump_hwdesc(chan, &desc->node[count]);
}

static int stm32_mdma_setup_xfer(struct stm32_mdma_chan *chan,
				 struct stm32_mdma_desc *desc,
				 struct scatterlist *sgl, u32 sg_len,
				 enum dma_transfer_direction direction)
{
	struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan);
	struct dma_slave_config *dma_config = &chan->dma_config;
	struct stm32_mdma_chan_config *chan_config = &chan->chan_config;
	struct scatterlist *sg;
	dma_addr_t src_addr, dst_addr;
	u32 m2m_hw_period, ccr, ctcr, ctbr;
	int i, ret = 0;

	if (chan_config->m2m_hw)
		m2m_hw_period = sg_dma_len(sgl);

	for_each_sg(sgl, sg, sg_len, i) {
		if (sg_dma_len(sg) > STM32_MDMA_MAX_BLOCK_LEN) {
			dev_err(chan2dev(chan), "Invalid block len\n");
			return -EINVAL;
		}

		if (direction == DMA_MEM_TO_DEV) {
			src_addr = sg_dma_address(sg);
			dst_addr = dma_config->dst_addr;
			if (chan_config->m2m_hw && (i & 1))
				dst_addr += m2m_hw_period;
			ret = stm32_mdma_set_xfer_param(chan, direction, &ccr,
							&ctcr, &ctbr, src_addr,
							sg_dma_len(sg));
			stm32_mdma_set_bus(dmadev, &ctbr, STM32_MDMA_CTBR_SBUS,
					   src_addr);
		} else {
			src_addr = dma_config->src_addr;
			if (chan_config->m2m_hw && (i & 1))
				src_addr += m2m_hw_period;
			dst_addr = sg_dma_address(sg);
			ret = stm32_mdma_set_xfer_param(chan, direction, &ccr,
							&ctcr, &ctbr, dst_addr,
							sg_dma_len(sg));
			stm32_mdma_set_bus(dmadev, &ctbr, STM32_MDMA_CTBR_DBUS,
					   dst_addr);
		}

		if (ret < 0)
			return ret;

		stm32_mdma_setup_hwdesc(chan, desc, direction, i, src_addr,
					dst_addr, sg_dma_len(sg), ctcr, ctbr,
					i == sg_len - 1, i == 0, false);
	}

	/* Enable interrupts */
	ccr &= ~STM32_MDMA_CCR_IRQ_MASK;
	ccr |= STM32_MDMA_CCR_TEIE | STM32_MDMA_CCR_CTCIE;
	desc->ccr = ccr;

	return 0;
}

static struct dma_async_tx_descriptor *
stm32_mdma_prep_slave_sg(struct dma_chan *c, struct scatterlist *sgl,
			 u32 sg_len, enum dma_transfer_direction direction,
			 unsigned long flags, void *context)
{
	struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c);
	struct stm32_mdma_chan_config *chan_config = &chan->chan_config;
	struct stm32_mdma_desc *desc;
	int i, ret;

	/*
	 * Once the channel has been set up for a cyclic transfer, it cannot be
	 * reused for another request. The DMA channel needs to be aborted or
	 * terminated before allowing another request.
	 */
	if (chan->desc && chan->desc->cyclic) {
		dev_err(chan2dev(chan),
			"Request not allowed when dma in cyclic mode\n");
		return NULL;
	}

	desc = stm32_mdma_alloc_desc(chan, sg_len);
	if (!desc)
		return NULL;

	ret = stm32_mdma_setup_xfer(chan, desc, sgl, sg_len, direction);
	if (ret < 0)
		goto xfer_setup_err;

	/*
	 * In case of M2M HW transfer triggered by STM32 DMA, we do not let the
	 * hardware clear the STM32 DMA transfer complete flag, so that the CPU
	 * can rearm the STM32 DMA with the next sg element and update the
	 * dmaengine framework state.
	 */
	if (chan_config->m2m_hw && direction == DMA_MEM_TO_DEV) {
		struct stm32_mdma_hwdesc *hwdesc;

		for (i = 0; i < sg_len; i++) {
			hwdesc = desc->node[i].hwdesc;
			hwdesc->cmar = 0;
			hwdesc->cmdr = 0;
		}
	}

	desc->cyclic = false;

	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);

xfer_setup_err:
	for (i = 0; i < desc->count; i++)
		dma_pool_free(chan->desc_pool, desc->node[i].hwdesc,
			      desc->node[i].hwdesc_phys);
	kfree(desc);
	return NULL;
}

static struct dma_async_tx_descriptor *
stm32_mdma_prep_dma_cyclic(struct dma_chan *c, dma_addr_t buf_addr,
			   size_t buf_len, size_t period_len,
			   enum dma_transfer_direction direction,
			   unsigned long flags)
{
	struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c);
	struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan);
	struct dma_slave_config *dma_config = &chan->dma_config;
	struct stm32_mdma_chan_config *chan_config = &chan->chan_config;
	struct stm32_mdma_desc *desc;
	dma_addr_t src_addr, dst_addr;
	u32 ccr, ctcr, ctbr, count;
	int i, ret;

	/*
	 * Once the channel has been set up for a cyclic transfer, it cannot be
	 * reused for another request. The DMA channel needs to be aborted or
	 * terminated before allowing another request.
	 */
	if (chan->desc && chan->desc->cyclic) {
		dev_err(chan2dev(chan),
			"Request not allowed when dma in cyclic mode\n");
		return NULL;
	}

	if (!buf_len || !period_len || period_len > STM32_MDMA_MAX_BLOCK_LEN) {
		dev_err(chan2dev(chan), "Invalid buffer/period len\n");
		return NULL;
	}

	if (buf_len % period_len) {
		dev_err(chan2dev(chan), "buf_len not multiple of period_len\n");
		return NULL;
	}

	count = buf_len / period_len;

	desc = stm32_mdma_alloc_desc(chan, count);
	if (!desc)
		return NULL;

	/* Select bus */
	if (direction == DMA_MEM_TO_DEV) {
		src_addr = buf_addr;
		ret = stm32_mdma_set_xfer_param(chan, direction, &ccr, &ctcr,
						&ctbr, src_addr, period_len);
		stm32_mdma_set_bus(dmadev, &ctbr, STM32_MDMA_CTBR_SBUS,
				   src_addr);
	} else {
		dst_addr = buf_addr;
		ret = stm32_mdma_set_xfer_param(chan, direction, &ccr, &ctcr,
						&ctbr, dst_addr, period_len);
		stm32_mdma_set_bus(dmadev, &ctbr, STM32_MDMA_CTBR_DBUS,
				   dst_addr);
	}

	if (ret < 0)
		goto xfer_setup_err;

	/* Enable interrupts */
	ccr &= ~STM32_MDMA_CCR_IRQ_MASK;
	ccr |= STM32_MDMA_CCR_TEIE | STM32_MDMA_CCR_CTCIE | STM32_MDMA_CCR_BTIE;
	desc->ccr = ccr;

	/* Configure hwdesc list */
	for (i = 0; i < count; i++) {
		if (direction == DMA_MEM_TO_DEV) {
			src_addr = buf_addr + i * period_len;
			dst_addr = dma_config->dst_addr;
			if (chan_config->m2m_hw && (i & 1))
				dst_addr += period_len;
		} else {
			src_addr = dma_config->src_addr;
			if (chan_config->m2m_hw && (i & 1))
				src_addr += period_len;
			dst_addr = buf_addr + i * period_len;
		}

		stm32_mdma_setup_hwdesc(chan, desc, direction, i, src_addr,
					dst_addr, period_len, ctcr, ctbr,
					i == count - 1, i == 0, true);
	}

	desc->cyclic = true;

	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);

xfer_setup_err:
	for (i = 0; i < desc->count; i++)
		dma_pool_free(chan->desc_pool, desc->node[i].hwdesc,
			      desc->node[i].hwdesc_phys);
	kfree(desc);
	return NULL;
}
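
/*
 * Memory-to-memory copy in software request mode: a single buffer or block
 * transfer when the length fits in one block (STM32_MDMA_MAX_BLOCK_LEN),
 * otherwise a linked list of blocks of up to STM32_MDMA_MAX_BLOCK_LEN bytes
 * each.
 */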
static struct dma_async_tx_descriptor *
stm32_mdma_prep_dma_memcpy(struct dma_chan *c, dma_addr_t dest, dma_addr_t src,
			   size_t len, unsigned long flags)
{
	struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c);
	struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan);
	enum dma_slave_buswidth max_width;
	struct stm32_mdma_desc *desc;
	struct stm32_mdma_hwdesc *hwdesc;
	u32 ccr, ctcr, ctbr, cbndtr, count, max_burst, mdma_burst;
	u32 best_burst, tlen;
	size_t xfer_count, offset;
	int src_bus_width, dst_bus_width;
	int i;

	/*
	 * Once the channel has been set up for a cyclic transfer, it cannot be
	 * reused for another request. The DMA channel needs to be aborted or
	 * terminated before allowing another request.
	 */
	if (chan->desc && chan->desc->cyclic) {
		dev_err(chan2dev(chan),
			"Request not allowed when dma in cyclic mode\n");
		return NULL;
	}

	count = DIV_ROUND_UP(len, STM32_MDMA_MAX_BLOCK_LEN);
	desc = stm32_mdma_alloc_desc(chan, count);
	if (!desc)
		return NULL;

	ccr = stm32_mdma_read(dmadev, STM32_MDMA_CCR(chan->id)) & ~STM32_MDMA_CCR_EN;
	ctcr = stm32_mdma_read(dmadev, STM32_MDMA_CTCR(chan->id));
	ctbr = stm32_mdma_read(dmadev, STM32_MDMA_CTBR(chan->id));
	cbndtr = stm32_mdma_read(dmadev, STM32_MDMA_CBNDTR(chan->id));

	/* Enable sw req, some interrupts and clear other bits */
	ccr &= ~(STM32_MDMA_CCR_WEX | STM32_MDMA_CCR_HEX |
		 STM32_MDMA_CCR_BEX | STM32_MDMA_CCR_PL_MASK |
		 STM32_MDMA_CCR_IRQ_MASK);
	ccr |= STM32_MDMA_CCR_TEIE;

	/* Enable SW request mode, dest/src inc and clear other bits */
	ctcr &= ~(STM32_MDMA_CTCR_BWM | STM32_MDMA_CTCR_TRGM_MSK |
		  STM32_MDMA_CTCR_PAM_MASK | STM32_MDMA_CTCR_PKE |
		  STM32_MDMA_CTCR_TLEN_MSK | STM32_MDMA_CTCR_DBURST_MASK |
		  STM32_MDMA_CTCR_SBURST_MASK | STM32_MDMA_CTCR_DINCOS_MASK |
		  STM32_MDMA_CTCR_SINCOS_MASK | STM32_MDMA_CTCR_DSIZE_MASK |
		  STM32_MDMA_CTCR_SSIZE_MASK | STM32_MDMA_CTCR_DINC_MASK |
		  STM32_MDMA_CTCR_SINC_MASK);
	ctcr |= STM32_MDMA_CTCR_SWRM | STM32_MDMA_CTCR_SINC(STM32_MDMA_INC) |
		STM32_MDMA_CTCR_DINC(STM32_MDMA_INC);

	/* Reset HW request */
	ctbr &= ~STM32_MDMA_CTBR_TSEL_MASK;

	/* Select bus */
	stm32_mdma_set_bus(dmadev, &ctbr, STM32_MDMA_CTBR_SBUS, src);
	stm32_mdma_set_bus(dmadev, &ctbr, STM32_MDMA_CTBR_DBUS, dest);

	/* Clear CBNDTR registers */
	cbndtr &= ~(STM32_MDMA_CBNDTR_BRC_MK | STM32_MDMA_CBNDTR_BRDUM |
		    STM32_MDMA_CBNDTR_BRSUM | STM32_MDMA_CBNDTR_BNDT_MASK);

	if (len <= STM32_MDMA_MAX_BLOCK_LEN) {
		cbndtr |= STM32_MDMA_CBNDTR_BNDT(len);
		if (len <= STM32_MDMA_MAX_BUF_LEN) {
			/* Setup a buffer transfer */
			ccr |= STM32_MDMA_CCR_TCIE | STM32_MDMA_CCR_CTCIE;
			ctcr |= STM32_MDMA_CTCR_TRGM(STM32_MDMA_BUFFER);
		} else {
			/* Setup a block transfer */
			ccr |= STM32_MDMA_CCR_BTIE | STM32_MDMA_CCR_CTCIE;
			ctcr |= STM32_MDMA_CTCR_TRGM(STM32_MDMA_BLOCK);
		}

		tlen = STM32_MDMA_MAX_BUF_LEN;
		ctcr |= STM32_MDMA_CTCR_TLEN((tlen - 1));

		/* Set source best burst size */
		max_width = stm32_mdma_get_max_width(src, len, tlen);
		src_bus_width = stm32_mdma_get_width(chan, max_width);

		max_burst = tlen / max_width;
		best_burst = stm32_mdma_get_best_burst(len, tlen, max_burst,
						       max_width);
		mdma_burst = ilog2(best_burst);

		ctcr |= STM32_MDMA_CTCR_SBURST(mdma_burst) |
			STM32_MDMA_CTCR_SSIZE(src_bus_width) |
			STM32_MDMA_CTCR_SINCOS(src_bus_width);

		/* Set destination best burst size */
		max_width = stm32_mdma_get_max_width(dest, len, tlen);
		dst_bus_width = stm32_mdma_get_width(chan, max_width);

		max_burst = tlen / max_width;
		best_burst = stm32_mdma_get_best_burst(len, tlen, max_burst,
						       max_width);
		mdma_burst = ilog2(best_burst);

		ctcr |= STM32_MDMA_CTCR_DBURST(mdma_burst) |
			STM32_MDMA_CTCR_DSIZE(dst_bus_width) |
			STM32_MDMA_CTCR_DINCOS(dst_bus_width);

		if (dst_bus_width != src_bus_width)
			ctcr |= STM32_MDMA_CTCR_PKE;

		/* Prepare hardware descriptor */
		hwdesc = desc->node[0].hwdesc;
		hwdesc->ctcr = ctcr;
		hwdesc->cbndtr = cbndtr;
		hwdesc->csar = src;
		hwdesc->cdar = dest;
		hwdesc->cbrur = 0;
		hwdesc->clar = 0;
		hwdesc->ctbr = ctbr;
		hwdesc->cmar = 0;
		hwdesc->cmdr = 0;

		stm32_mdma_dump_hwdesc(chan, &desc->node[0]);
	} else {
		/* Setup a LLI transfer */
		ctcr |= STM32_MDMA_CTCR_TRGM(STM32_MDMA_LINKED_LIST) |
			STM32_MDMA_CTCR_TLEN((STM32_MDMA_MAX_BUF_LEN - 1));
		ccr |= STM32_MDMA_CCR_BTIE | STM32_MDMA_CCR_CTCIE;
		tlen = STM32_MDMA_MAX_BUF_LEN;

		for (i = 0, offset = 0; offset < len;
		     i++, offset += xfer_count) {
			xfer_count = min_t(size_t, len - offset,
					   STM32_MDMA_MAX_BLOCK_LEN);

			/* Set source best burst size */
			max_width = stm32_mdma_get_max_width(src, len, tlen);
			src_bus_width = stm32_mdma_get_width(chan, max_width);

			max_burst = tlen / max_width;
			best_burst = stm32_mdma_get_best_burst(len, tlen,
							       max_burst,
							       max_width);
			mdma_burst = ilog2(best_burst);

			ctcr |= STM32_MDMA_CTCR_SBURST(mdma_burst) |
				STM32_MDMA_CTCR_SSIZE(src_bus_width) |
				STM32_MDMA_CTCR_SINCOS(src_bus_width);

			/* Set destination best burst size */
			max_width = stm32_mdma_get_max_width(dest, len, tlen);
			dst_bus_width = stm32_mdma_get_width(chan, max_width);

			max_burst = tlen / max_width;
			best_burst = stm32_mdma_get_best_burst(len, tlen,
							       max_burst,
							       max_width);
			mdma_burst = ilog2(best_burst);

			ctcr |= STM32_MDMA_CTCR_DBURST(mdma_burst) |
				STM32_MDMA_CTCR_DSIZE(dst_bus_width) |
				STM32_MDMA_CTCR_DINCOS(dst_bus_width);

			if (dst_bus_width != src_bus_width)
				ctcr |= STM32_MDMA_CTCR_PKE;

			/* Prepare hardware descriptor */
			stm32_mdma_setup_hwdesc(chan, desc, DMA_MEM_TO_MEM, i,
						src + offset, dest + offset,
						xfer_count, ctcr, ctbr,
						i == count - 1, i == 0, false);
		}
	}

	desc->ccr = ccr;

	desc->cyclic = false;

	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
}

static void stm32_mdma_dump_reg(struct stm32_mdma_chan *chan)
{
	struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan);

	dev_dbg(chan2dev(chan), "CCR: 0x%08x\n",
		stm32_mdma_read(dmadev, STM32_MDMA_CCR(chan->id)));
	dev_dbg(chan2dev(chan), "CTCR: 0x%08x\n",
		stm32_mdma_read(dmadev, STM32_MDMA_CTCR(chan->id)));
	dev_dbg(chan2dev(chan), "CBNDTR: 0x%08x\n",
		stm32_mdma_read(dmadev, STM32_MDMA_CBNDTR(chan->id)));
	dev_dbg(chan2dev(chan), "CSAR: 0x%08x\n",
		stm32_mdma_read(dmadev, STM32_MDMA_CSAR(chan->id)));
	dev_dbg(chan2dev(chan), "CDAR: 0x%08x\n",
		stm32_mdma_read(dmadev, STM32_MDMA_CDAR(chan->id)));
	dev_dbg(chan2dev(chan), "CBRUR: 0x%08x\n",
		stm32_mdma_read(dmadev, STM32_MDMA_CBRUR(chan->id)));
	dev_dbg(chan2dev(chan), "CLAR: 0x%08x\n",
		stm32_mdma_read(dmadev, STM32_MDMA_CLAR(chan->id)));
	dev_dbg(chan2dev(chan), "CTBR: 0x%08x\n",
		stm32_mdma_read(dmadev, STM32_MDMA_CTBR(chan->id)));
	dev_dbg(chan2dev(chan), "CMAR: 0x%08x\n",
		stm32_mdma_read(dmadev, STM32_MDMA_CMAR(chan->id)));
	dev_dbg(chan2dev(chan), "CMDR: 0x%08x\n",
		stm32_mdma_read(dmadev, STM32_MDMA_CMDR(chan->id)));
}
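
/*
 * Program the channel registers from the first hardware descriptor of the
 * next pending virtual descriptor and enable the channel. For transfers in
 * software request mode (memcpy), the request is also raised by software.
 */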
static void stm32_mdma_start_transfer(struct stm32_mdma_chan *chan)
{
	struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan);
	struct virt_dma_desc *vdesc;
	struct stm32_mdma_hwdesc *hwdesc;
	u32 id = chan->id;
	u32 status, reg;

	vdesc = vchan_next_desc(&chan->vchan);
	if (!vdesc) {
		chan->desc = NULL;
		return;
	}

	list_del(&vdesc->node);

	chan->desc = to_stm32_mdma_desc(vdesc);
	hwdesc = chan->desc->node[0].hwdesc;
	chan->curr_hwdesc = 0;

	stm32_mdma_write(dmadev, STM32_MDMA_CCR(id), chan->desc->ccr);
	stm32_mdma_write(dmadev, STM32_MDMA_CTCR(id), hwdesc->ctcr);
	stm32_mdma_write(dmadev, STM32_MDMA_CBNDTR(id), hwdesc->cbndtr);
	stm32_mdma_write(dmadev, STM32_MDMA_CSAR(id), hwdesc->csar);
	stm32_mdma_write(dmadev, STM32_MDMA_CDAR(id), hwdesc->cdar);
	stm32_mdma_write(dmadev, STM32_MDMA_CBRUR(id), hwdesc->cbrur);
	stm32_mdma_write(dmadev, STM32_MDMA_CLAR(id), hwdesc->clar);
	stm32_mdma_write(dmadev, STM32_MDMA_CTBR(id), hwdesc->ctbr);
	stm32_mdma_write(dmadev, STM32_MDMA_CMAR(id), hwdesc->cmar);
	stm32_mdma_write(dmadev, STM32_MDMA_CMDR(id), hwdesc->cmdr);

	/* Clear interrupt status if it is there */
	status = stm32_mdma_read(dmadev, STM32_MDMA_CISR(id));
	if (status)
		stm32_mdma_set_bits(dmadev, STM32_MDMA_CIFCR(id), status);

	stm32_mdma_dump_reg(chan);

	/* Start DMA */
	stm32_mdma_set_bits(dmadev, STM32_MDMA_CCR(id), STM32_MDMA_CCR_EN);

	/* Set SW request in case of MEM2MEM transfer */
	if (hwdesc->ctcr & STM32_MDMA_CTCR_SWRM) {
		reg = STM32_MDMA_CCR(id);
		stm32_mdma_set_bits(dmadev, reg, STM32_MDMA_CCR_SWRQ);
	}

	chan->busy = true;

	dev_dbg(chan2dev(chan), "vchan %pK: started\n", &chan->vchan);
}

static void stm32_mdma_issue_pending(struct dma_chan *c)
{
	struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c);
	unsigned long flags;

	spin_lock_irqsave(&chan->vchan.lock, flags);

	if (!vchan_issue_pending(&chan->vchan))
		goto end;

	dev_dbg(chan2dev(chan), "vchan %pK: issued\n", &chan->vchan);

	if (!chan->desc && !chan->busy)
		stm32_mdma_start_transfer(chan);

end:
	spin_unlock_irqrestore(&chan->vchan.lock, flags);
}

static int stm32_mdma_pause(struct dma_chan *c)
{
	struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&chan->vchan.lock, flags);
	ret = stm32_mdma_disable_chan(chan);
	spin_unlock_irqrestore(&chan->vchan.lock, flags);

	if (!ret)
		dev_dbg(chan2dev(chan), "vchan %pK: pause\n", &chan->vchan);

	return ret;
}

static int stm32_mdma_resume(struct dma_chan *c)
{
	struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c);
	struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan);
	struct stm32_mdma_hwdesc *hwdesc;
	unsigned long flags;
	u32 status, reg;

	/* Resume only if a transfer is set up and the channel is currently disabled (paused) */
	if (!chan->desc || (stm32_mdma_read(dmadev, STM32_MDMA_CCR(chan->id)) & STM32_MDMA_CCR_EN))
		return -EPERM;

	hwdesc = chan->desc->node[chan->curr_hwdesc].hwdesc;

	spin_lock_irqsave(&chan->vchan.lock, flags);

	/* Re-configure control register */
	stm32_mdma_write(dmadev, STM32_MDMA_CCR(chan->id), chan->desc->ccr);

	/* Clear interrupt status if it is there */
	status = stm32_mdma_read(dmadev, STM32_MDMA_CISR(chan->id));
	if (status)
		stm32_mdma_set_bits(dmadev, STM32_MDMA_CIFCR(chan->id), status);

	stm32_mdma_dump_reg(chan);

	/* Re-start DMA */
	reg = STM32_MDMA_CCR(chan->id);
	stm32_mdma_set_bits(dmadev, reg, STM32_MDMA_CCR_EN);

	/* Set SW request in case of MEM2MEM transfer */
	if (hwdesc->ctcr & STM32_MDMA_CTCR_SWRM)
		stm32_mdma_set_bits(dmadev, reg, STM32_MDMA_CCR_SWRQ);

	spin_unlock_irqrestore(&chan->vchan.lock, flags);

	dev_dbg(chan2dev(chan), "vchan %pK: resume\n", &chan->vchan);

	return 0;
}

static int stm32_mdma_terminate_all(struct dma_chan *c)
{
	struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&chan->vchan.lock, flags);
	if (chan->desc) {
		vchan_terminate_vdesc(&chan->desc->vdesc);
		if (chan->busy)
			stm32_mdma_stop(chan);
		chan->desc = NULL;
	}
	vchan_get_all_descriptors(&chan->vchan, &head);
	spin_unlock_irqrestore(&chan->vchan.lock, flags);

	vchan_dma_desc_free_list(&chan->vchan, &head);

	return 0;
}

static void stm32_mdma_synchronize(struct dma_chan *c)
{
	struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c);

	vchan_synchronize(&chan->vchan);
}

static int stm32_mdma_slave_config(struct dma_chan *c,
				   struct dma_slave_config *config)
{
	struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c);

	memcpy(&chan->dma_config, config, sizeof(*config));

	/* Check if user is requesting STM32 DMA to trigger MDMA */
	if (config->peripheral_size) {
		struct stm32_mdma_dma_config *mdma_config;

		mdma_config = (struct stm32_mdma_dma_config *)chan->dma_config.peripheral_config;
		chan->chan_config.request = mdma_config->request;
		chan->chan_config.mask_addr = mdma_config->cmar;
		chan->chan_config.mask_data = mdma_config->cmdr;
		chan->chan_config.m2m_hw = true;
	}

	return 0;
}
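
/*
 * Residue is the sum of the block lengths of the hardware descriptors that
 * have not been processed yet plus the remaining bytes of the current block
 * (CBNDTR.BNDT), rounded up to a full memory burst when one is configured.
 */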
static size_t stm32_mdma_desc_residue(struct stm32_mdma_chan *chan,
				      struct stm32_mdma_desc *desc,
				      u32 curr_hwdesc,
				      struct dma_tx_state *state)
{
	struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan);
	struct stm32_mdma_hwdesc *hwdesc;
	u32 cisr, clar, cbndtr, residue, modulo, burst_size;
	int i;

	cisr = stm32_mdma_read(dmadev, STM32_MDMA_CISR(chan->id));

	residue = 0;
	/* Get the next hw descriptor to process from current transfer */
	clar = stm32_mdma_read(dmadev, STM32_MDMA_CLAR(chan->id));
	for (i = desc->count - 1; i >= 0; i--) {
		hwdesc = desc->node[i].hwdesc;

		if (hwdesc->clar == clar)
			break; /* Current transfer found, stop cumulating */

		/* Cumulate residue of unprocessed hw descriptors */
		residue += STM32_MDMA_CBNDTR_BNDT(hwdesc->cbndtr);
	}
	cbndtr = stm32_mdma_read(dmadev, STM32_MDMA_CBNDTR(chan->id));
	residue += cbndtr & STM32_MDMA_CBNDTR_BNDT_MASK;

	state->in_flight_bytes = 0;
	if (chan->chan_config.m2m_hw && (cisr & STM32_MDMA_CISR_CRQA))
		state->in_flight_bytes = cbndtr & STM32_MDMA_CBNDTR_BNDT_MASK;

	if (!chan->mem_burst)
		return residue;

	burst_size = chan->mem_burst * chan->mem_width;
	modulo = residue % burst_size;
	if (modulo)
		residue = residue - modulo + burst_size;

	return residue;
}

static enum dma_status stm32_mdma_tx_status(struct dma_chan *c,
					    dma_cookie_t cookie,
					    struct dma_tx_state *state)
{
	struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c);
	struct virt_dma_desc *vdesc;
	enum dma_status status;
	unsigned long flags;
	u32 residue = 0;

	status = dma_cookie_status(c, cookie, state);
	if ((status == DMA_COMPLETE) || (!state))
		return status;

	spin_lock_irqsave(&chan->vchan.lock, flags);

	vdesc = vchan_find_desc(&chan->vchan, cookie);
	if (chan->desc && cookie == chan->desc->vdesc.tx.cookie)
		residue = stm32_mdma_desc_residue(chan, chan->desc, chan->curr_hwdesc, state);
	else if (vdesc)
		residue = stm32_mdma_desc_residue(chan, to_stm32_mdma_desc(vdesc), 0, state);

	dma_set_residue(state, residue);

	spin_unlock_irqrestore(&chan->vchan.lock, flags);

	return status;
}

static void stm32_mdma_xfer_end(struct stm32_mdma_chan *chan)
{
	vchan_cookie_complete(&chan->desc->vdesc);
	chan->desc = NULL;
	chan->busy = false;

	/* Start the next transfer if this driver has a next desc */
	stm32_mdma_start_transfer(chan);
}
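
/*
 * Interrupt handler: GISR0 gives the lowest-numbered pending channel, CISR
 * gives its flags. Flags are only honoured when the matching enable bit is
 * set in CCR; channel transfer complete (CTCIF) completes the current
 * descriptor and block transfer complete (BTIF) advances a cyclic transfer.
 */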
static irqreturn_t stm32_mdma_irq_handler(int irq, void *devid)
{
	struct stm32_mdma_device *dmadev = devid;
	struct stm32_mdma_chan *chan;
	u32 reg, id, ccr, ien, status;

	/* Find out which channel generates the interrupt */
	status = readl_relaxed(dmadev->base + STM32_MDMA_GISR0);
	if (!status) {
		dev_dbg(mdma2dev(dmadev), "spurious it\n");
		return IRQ_NONE;
	}
	id = __ffs(status);
	chan = &dmadev->chan[id];

	/* Handle interrupt for the channel */
	spin_lock(&chan->vchan.lock);
	status = stm32_mdma_read(dmadev, STM32_MDMA_CISR(id));
	/* Mask Channel ReQuest Active bit which can be set in case of MEM2MEM */
	status &= ~STM32_MDMA_CISR_CRQA;
	ccr = stm32_mdma_read(dmadev, STM32_MDMA_CCR(id));
	ien = (ccr & STM32_MDMA_CCR_IRQ_MASK) >> 1;

	if (!(status & ien)) {
		spin_unlock(&chan->vchan.lock);
		if (chan->busy)
			dev_warn(chan2dev(chan),
				 "spurious it (status=0x%04x, ien=0x%04x)\n",
				 status, ien);
		else
			dev_dbg(chan2dev(chan),
				"spurious it (status=0x%04x, ien=0x%04x)\n",
				status, ien);
		return IRQ_NONE;
	}

	reg = STM32_MDMA_CIFCR(id);

	if (status & STM32_MDMA_CISR_TEIF) {
		dev_err(chan2dev(chan), "Transfer Err: stat=0x%08x\n",
			readl_relaxed(dmadev->base + STM32_MDMA_CESR(id)));
		stm32_mdma_set_bits(dmadev, reg, STM32_MDMA_CIFCR_CTEIF);
		status &= ~STM32_MDMA_CISR_TEIF;
	}

	if (status & STM32_MDMA_CISR_CTCIF) {
		stm32_mdma_set_bits(dmadev, reg, STM32_MDMA_CIFCR_CCTCIF);
		status &= ~STM32_MDMA_CISR_CTCIF;
		stm32_mdma_xfer_end(chan);
	}

	if (status & STM32_MDMA_CISR_BRTIF) {
		stm32_mdma_set_bits(dmadev, reg, STM32_MDMA_CIFCR_CBRTIF);
		status &= ~STM32_MDMA_CISR_BRTIF;
	}

	if (status & STM32_MDMA_CISR_BTIF) {
		stm32_mdma_set_bits(dmadev, reg, STM32_MDMA_CIFCR_CBTIF);
		status &= ~STM32_MDMA_CISR_BTIF;
		chan->curr_hwdesc++;
		if (chan->desc && chan->desc->cyclic) {
			if (chan->curr_hwdesc == chan->desc->count)
				chan->curr_hwdesc = 0;
			vchan_cyclic_callback(&chan->desc->vdesc);
		}
	}

	if (status & STM32_MDMA_CISR_TCIF) {
		stm32_mdma_set_bits(dmadev, reg, STM32_MDMA_CIFCR_CLTCIF);
		status &= ~STM32_MDMA_CISR_TCIF;
	}

	if (status) {
		stm32_mdma_set_bits(dmadev, reg, status);
		dev_err(chan2dev(chan), "DMA error: status=0x%08x\n", status);
		if (!(ccr & STM32_MDMA_CCR_EN))
			dev_err(chan2dev(chan), "chan disabled by HW\n");
	}

	spin_unlock(&chan->vchan.lock);

	return IRQ_HANDLED;
}
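
/*
 * Channel setup: allocate the DMA pool backing the hardware link descriptors,
 * take a runtime PM reference on the controller and make sure the channel is
 * disabled before it is first used.
 */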
static int stm32_mdma_alloc_chan_resources(struct dma_chan *c)
{
	struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c);
	struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan);
	int ret;

	chan->desc_pool = dmam_pool_create(dev_name(&c->dev->device),
					   c->device->dev,
					   sizeof(struct stm32_mdma_hwdesc),
					   __alignof__(struct stm32_mdma_hwdesc),
					   0);
	if (!chan->desc_pool) {
		dev_err(chan2dev(chan), "failed to allocate descriptor pool\n");
		return -ENOMEM;
	}

	ret = pm_runtime_resume_and_get(dmadev->ddev.dev);
	if (ret < 0)
		return ret;

	ret = stm32_mdma_disable_chan(chan);
	if (ret < 0)
		pm_runtime_put(dmadev->ddev.dev);

	return ret;
}
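
/*
 * Channel teardown: stop a transfer still in flight, drop the runtime PM
 * reference and release the descriptor pool.
 */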
static void stm32_mdma_free_chan_resources(struct dma_chan *c)
{
	struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c);
	struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan);
	unsigned long flags;

	dev_dbg(chan2dev(chan), "Freeing channel %d\n", chan->id);

	if (chan->busy) {
		spin_lock_irqsave(&chan->vchan.lock, flags);
		stm32_mdma_stop(chan);
		chan->desc = NULL;
		spin_unlock_irqrestore(&chan->vchan.lock, flags);
	}

	pm_runtime_put(dmadev->ddev.dev);
	vchan_free_chan_resources(to_virt_chan(c));
	dmam_pool_destroy(chan->desc_pool);
	chan->desc_pool = NULL;
}
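
/*
 * Filter used when requesting a channel from the DT translation below: reject
 * channels reserved at probe time because their CCR.SM bit marks them as
 * secure-only.
 */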
static bool stm32_mdma_filter_fn(struct dma_chan *c, void *fn_param)
{
	struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c);
	struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan);

	/* Check if chan is marked Secure */
	if (dmadev->chan_reserved & BIT(chan->id))
		return false;

	return true;
}
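
/*
 * DT translation: the five phandle cells are, in order, the request line, the
 * priority level, the transfer configuration word, the mask address and the
 * mask data.
 */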
static struct dma_chan *stm32_mdma_of_xlate(struct of_phandle_args *dma_spec,
					    struct of_dma *ofdma)
{
	struct stm32_mdma_device *dmadev = ofdma->of_dma_data;
	dma_cap_mask_t mask = dmadev->ddev.cap_mask;
	struct stm32_mdma_chan *chan;
	struct dma_chan *c;
	struct stm32_mdma_chan_config config;

	if (dma_spec->args_count < 5) {
		dev_err(mdma2dev(dmadev), "Bad number of args\n");
		return NULL;
	}

	memset(&config, 0, sizeof(config));
	config.request = dma_spec->args[0];
	config.priority_level = dma_spec->args[1];
	config.transfer_config = dma_spec->args[2];
	config.mask_addr = dma_spec->args[3];
	config.mask_data = dma_spec->args[4];

	if (config.request >= dmadev->nr_requests) {
		dev_err(mdma2dev(dmadev), "Bad request line\n");
		return NULL;
	}

	if (config.priority_level > STM32_MDMA_VERY_HIGH_PRIORITY) {
		dev_err(mdma2dev(dmadev), "Priority level not supported\n");
		return NULL;
	}

	c = __dma_request_channel(&mask, stm32_mdma_filter_fn, &config,
				  ofdma->of_node);
	if (!c) {
		dev_err(mdma2dev(dmadev), "No more channels available\n");
		return NULL;
	}

	chan = to_stm32_mdma_chan(c);
	chan->chan_config = config;

	return c;
}

static const struct of_device_id stm32_mdma_of_match[] = {
	{ .compatible = "st,stm32h7-mdma", },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, stm32_mdma_of_match);
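
/*
 * Probe: read the optional dma-channels, dma-requests and st,ahb-addr-masks
 * properties, map the registers, enable clock and reset, register the
 * dmaengine device plus the OF DMA controller, then hand the device over to
 * runtime PM.
 */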
static int stm32_mdma_probe(struct platform_device *pdev)
{
	struct stm32_mdma_chan *chan;
	struct stm32_mdma_device *dmadev;
	struct dma_device *dd;
	struct device_node *of_node;
	struct resource *res;
	struct reset_control *rst;
	u32 nr_channels, nr_requests;
	int i, count, ret;

	of_node = pdev->dev.of_node;
	if (!of_node)
		return -ENODEV;

	ret = device_property_read_u32(&pdev->dev, "dma-channels",
				       &nr_channels);
	if (ret) {
		nr_channels = STM32_MDMA_MAX_CHANNELS;
		dev_warn(&pdev->dev, "MDMA defaulting on %i channels\n",
			 nr_channels);
	}

	ret = device_property_read_u32(&pdev->dev, "dma-requests",
				       &nr_requests);
	if (ret) {
		nr_requests = STM32_MDMA_MAX_REQUESTS;
		dev_warn(&pdev->dev, "MDMA defaulting on %i request lines\n",
			 nr_requests);
	}

	count = device_property_count_u32(&pdev->dev, "st,ahb-addr-masks");
	if (count < 0)
		count = 0;

	dmadev = devm_kzalloc(&pdev->dev,
			      struct_size(dmadev, ahb_addr_masks, count),
			      GFP_KERNEL);
	if (!dmadev)
		return -ENOMEM;

	dmadev->nr_channels = nr_channels;
	dmadev->nr_requests = nr_requests;
	device_property_read_u32_array(&pdev->dev, "st,ahb-addr-masks",
				       dmadev->ahb_addr_masks,
				       count);
	dmadev->nr_ahb_addr_masks = count;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	dmadev->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(dmadev->base))
		return PTR_ERR(dmadev->base);

	dmadev->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(dmadev->clk))
		return dev_err_probe(&pdev->dev, PTR_ERR(dmadev->clk),
				     "Missing clock controller\n");

	ret = clk_prepare_enable(dmadev->clk);
	if (ret < 0) {
		dev_err(&pdev->dev, "clk_prep_enable error: %d\n", ret);
		return ret;
	}

	rst = devm_reset_control_get(&pdev->dev, NULL);
	if (IS_ERR(rst)) {
		ret = PTR_ERR(rst);
		if (ret == -EPROBE_DEFER)
			goto err_clk;
	} else {
		reset_control_assert(rst);
		udelay(2);
		reset_control_deassert(rst);
	}

	dd = &dmadev->ddev;
	dma_cap_set(DMA_SLAVE, dd->cap_mask);
	dma_cap_set(DMA_PRIVATE, dd->cap_mask);
	dma_cap_set(DMA_CYCLIC, dd->cap_mask);
	dma_cap_set(DMA_MEMCPY, dd->cap_mask);
	dd->device_alloc_chan_resources = stm32_mdma_alloc_chan_resources;
	dd->device_free_chan_resources = stm32_mdma_free_chan_resources;
	dd->device_tx_status = stm32_mdma_tx_status;
	dd->device_issue_pending = stm32_mdma_issue_pending;
	dd->device_prep_slave_sg = stm32_mdma_prep_slave_sg;
	dd->device_prep_dma_cyclic = stm32_mdma_prep_dma_cyclic;
	dd->device_prep_dma_memcpy = stm32_mdma_prep_dma_memcpy;
	dd->device_config = stm32_mdma_slave_config;
	dd->device_pause = stm32_mdma_pause;
	dd->device_resume = stm32_mdma_resume;
	dd->device_terminate_all = stm32_mdma_terminate_all;
	dd->device_synchronize = stm32_mdma_synchronize;
	dd->descriptor_reuse = true;
	dd->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
		BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
		BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |
		BIT(DMA_SLAVE_BUSWIDTH_8_BYTES);
	dd->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
		BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
		BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |
		BIT(DMA_SLAVE_BUSWIDTH_8_BYTES);
	dd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV) |
		BIT(DMA_MEM_TO_MEM);
	dd->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
	dd->max_burst = STM32_MDMA_MAX_BURST;
	dd->dev = &pdev->dev;
	INIT_LIST_HEAD(&dd->channels);

	for (i = 0; i < dmadev->nr_channels; i++) {
		chan = &dmadev->chan[i];
		chan->id = i;

		if (stm32_mdma_read(dmadev, STM32_MDMA_CCR(i)) & STM32_MDMA_CCR_SM)
			dmadev->chan_reserved |= BIT(i);

		chan->vchan.desc_free = stm32_mdma_desc_free;
		vchan_init(&chan->vchan, dd);
	}

	dmadev->irq = platform_get_irq(pdev, 0);
	if (dmadev->irq < 0) {
		ret = dmadev->irq;
		goto err_clk;
	}

	ret = devm_request_irq(&pdev->dev, dmadev->irq, stm32_mdma_irq_handler,
			       0, dev_name(&pdev->dev), dmadev);
	if (ret) {
		dev_err(&pdev->dev, "failed to request IRQ\n");
		goto err_clk;
	}

	ret = dmaenginem_async_device_register(dd);
	if (ret)
		goto err_clk;

	ret = of_dma_controller_register(of_node, stm32_mdma_of_xlate, dmadev);
	if (ret < 0) {
		dev_err(&pdev->dev,
			"STM32 MDMA DMA OF registration failed %d\n", ret);
		goto err_clk;
	}

	platform_set_drvdata(pdev, dmadev);

	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);
	pm_runtime_get_noresume(&pdev->dev);
	pm_runtime_put(&pdev->dev);

	dev_info(&pdev->dev, "STM32 MDMA driver registered\n");

	return 0;

err_clk:
	clk_disable_unprepare(dmadev->clk);

	return ret;
}

#ifdef CONFIG_PM
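/* Runtime PM simply gates the MDMA clock. */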
static int stm32_mdma_runtime_suspend(struct device *dev)
{
	struct stm32_mdma_device *dmadev = dev_get_drvdata(dev);

	clk_disable_unprepare(dmadev->clk);

	return 0;
}

static int stm32_mdma_runtime_resume(struct device *dev)
{
	struct stm32_mdma_device *dmadev = dev_get_drvdata(dev);
	int ret;

	ret = clk_prepare_enable(dmadev->clk);
	if (ret) {
		dev_err(dev, "failed to prepare_enable clock\n");
		return ret;
	}

	return 0;
}
#endif

#ifdef CONFIG_PM_SLEEP
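/* System suspend is refused while any channel still has its CCR.EN bit set. */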
static int stm32_mdma_pm_suspend(struct device *dev)
{
	struct stm32_mdma_device *dmadev = dev_get_drvdata(dev);
	u32 ccr, id;
	int ret;

	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0)
		return ret;

	for (id = 0; id < dmadev->nr_channels; id++) {
		ccr = stm32_mdma_read(dmadev, STM32_MDMA_CCR(id));
		if (ccr & STM32_MDMA_CCR_EN) {
			dev_warn(dev, "Suspend is prevented by Chan %i\n", id);
			return -EBUSY;
		}
	}

	pm_runtime_put_sync(dev);

	pm_runtime_force_suspend(dev);

	return 0;
}

static int stm32_mdma_pm_resume(struct device *dev)
{
	return pm_runtime_force_resume(dev);
}
#endif

static const struct dev_pm_ops stm32_mdma_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(stm32_mdma_pm_suspend, stm32_mdma_pm_resume)
	SET_RUNTIME_PM_OPS(stm32_mdma_runtime_suspend,
			   stm32_mdma_runtime_resume, NULL)
};

static struct platform_driver stm32_mdma_driver = {
	.probe = stm32_mdma_probe,
	.driver = {
		.name = "stm32-mdma",
		.of_match_table = stm32_mdma_of_match,
		.pm = &stm32_mdma_pm_ops,
	},
};

static int __init stm32_mdma_init(void)
{
	return platform_driver_register(&stm32_mdma_driver);
}

subsys_initcall(stm32_mdma_init);

MODULE_DESCRIPTION("Driver for STM32 MDMA controller");
MODULE_AUTHOR("M'boumba Cedric Madianga <[email protected]>");
MODULE_AUTHOR("Pierre-Yves Mordret <[email protected]>");
MODULE_LICENSE("GPL v2");